diff --git a/CHANGELOG.md b/CHANGELOG.md
index 93577f215158..ade2d61b586a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,8 @@
FEATURES:
* **New provider: `vcd` - VMware vCloud Director** [GH-3785]
* **New provider: `postgresql` - Create PostgreSQL databases and roles** [GH-3653]
+ * **New provider: `chef` - Create Chef environments, roles, etc.** [GH-3084]
+ * **New resource: `aws_autoscaling_schedule`** [GH-4256]
* **New resource: `google_pubsub_topic`** [GH-3671]
* **New resource: `google_pubsub_subscription`** [GH-3671]
* **New resource: `tls_locally_signed_cert`** [GH-3930]
@@ -19,6 +21,7 @@ IMPROVEMENTS:
* provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` [GH-3853]
* provider/aws: Adding support for Tags to DB SecurityGroup [GH-4260]
* provider/aws: Adding Tag support for DB Param Groups [GH-4259]
+ * provider/aws: Validate IOPS for EBS Volumes [GH-4146]
* provider/aws: DB Subnet group arn output [GH-4261]
* provider/cloudstack: performance improvements [GH-4150]
* provider/docker: Add support for setting the entry point on `docker_container` resources [GH-3761]
@@ -32,6 +35,7 @@ IMPROVEMENTS:
* provider/vsphere: Add folder handling for folder-qualified vm names [GH-3939]
* provider/vsphere: Change ip_address parameter for ipv6 support [GH-4035]
* provider/openstack: Increase instance timeout from 10 to 30 minutes [GH-4223]
+ * provider/google: Add `restart_policy` attribute to `google_managed_instance_group` [GH-3892]
BUG FIXES:
@@ -48,6 +52,8 @@ BUG FIXES:
* provider/aws: Fix missing AMI issue with Launch Configurations [GH-4242]
* provider/aws: Opsworks stack SSH key is write-only [GH-4241]
* provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appear changed [GH-4245]
+ * provider/aws: Fix issue with nil parameter group value causing panic in `aws_db_parameter_group` [GH-4318]
+ * provider/azure: Update for [breaking change to upstream client library](https://github.com/Azure/azure-sdk-for-go/commit/68d50cb53a73edfeb7f17f5e86cdc8eb359a9528). [GH-4300]
* provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic [GH-4214]
* provider/openstack: Handle volumes in "deleting" state [GH-4204]
* provider/vsphere: Create and attach additional disks before bootup [GH-4196]
diff --git a/builtin/bins/provider-chef/main.go b/builtin/bins/provider-chef/main.go
new file mode 100644
index 000000000000..b1bd8b537ef7
--- /dev/null
+++ b/builtin/bins/provider-chef/main.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+ "github.com/hashicorp/terraform/builtin/providers/chef"
+ "github.com/hashicorp/terraform/plugin"
+)
+
+func main() {
+ plugin.Serve(&plugin.ServeOpts{
+ ProviderFunc: chef.Provider,
+ })
+}
diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go
index c123cc184c2a..313f74b18a73 100644
--- a/builtin/providers/aws/provider.go
+++ b/builtin/providers/aws/provider.go
@@ -174,6 +174,7 @@ func Provider() terraform.ResourceProvider {
"aws_autoscaling_group": resourceAwsAutoscalingGroup(),
"aws_autoscaling_notification": resourceAwsAutoscalingNotification(),
"aws_autoscaling_policy": resourceAwsAutoscalingPolicy(),
+ "aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(),
"aws_cloudformation_stack": resourceAwsCloudFormationStack(),
"aws_cloudtrail": resourceAwsCloudTrail(),
"aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(),
diff --git a/builtin/providers/aws/resource_aws_autoscaling_schedule.go b/builtin/providers/aws/resource_aws_autoscaling_schedule.go
new file mode 100644
index 000000000000..b8a1135deee0
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_autoscaling_schedule.go
@@ -0,0 +1,189 @@
+package aws
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/autoscaling"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+const awsAutoscalingScheduleTimeLayout = "2006-01-02T15:04:05Z"
+
+func resourceAwsAutoscalingSchedule() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceAwsAutoscalingScheduleCreate,
+ Read: resourceAwsAutoscalingScheduleRead,
+ Update: resourceAwsAutoscalingScheduleCreate,
+ Delete: resourceAwsAutoscalingScheduleDelete,
+
+ Schema: map[string]*schema.Schema{
+ "arn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "scheduled_action_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "autoscaling_group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ValidateFunc: validateASGScheduleTimestamp,
+ },
+ "end_time": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ValidateFunc: validateASGScheduleTimestamp,
+ },
+ "recurrence": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ "min_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Computed: true,
+ },
+ "max_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Computed: true,
+ },
+ "desired_capacity": &schema.Schema{
+ Type: schema.TypeInt,
+ Optional: true,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func resourceAwsAutoscalingScheduleCreate(d *schema.ResourceData, meta interface{}) error {
+ autoscalingconn := meta.(*AWSClient).autoscalingconn
+ params := &autoscaling.PutScheduledUpdateGroupActionInput{
+ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+ ScheduledActionName: aws.String(d.Get("scheduled_action_name").(string)),
+ }
+
+ if attr, ok := d.GetOk("start_time"); ok {
+ t, err := time.Parse(awsAutoscalingScheduleTimeLayout, attr.(string))
+ if err != nil {
+ return fmt.Errorf("Error Parsing AWS Autoscaling Group Schedule Start Time: %s", err.Error())
+ }
+ params.StartTime = aws.Time(t)
+ }
+
+ if attr, ok := d.GetOk("end_time"); ok {
+ t, err := time.Parse(awsAutoscalingScheduleTimeLayout, attr.(string))
+ if err != nil {
+ return fmt.Errorf("Error Parsing AWS Autoscaling Group Schedule End Time: %s", err.Error())
+ }
+ params.EndTime = aws.Time(t)
+ }
+
+ if attr, ok := d.GetOk("recurrence"); ok {
+ params.Recurrence = aws.String(attr.(string))
+ }
+
+ if attr, ok := d.GetOk("min_size"); ok {
+ params.MinSize = aws.Int64(int64(attr.(int)))
+ }
+
+ if attr, ok := d.GetOk("max_size"); ok {
+ params.MaxSize = aws.Int64(int64(attr.(int)))
+ }
+
+ if attr, ok := d.GetOk("desired_capacity"); ok {
+ params.DesiredCapacity = aws.Int64(int64(attr.(int)))
+ }
+
+ log.Printf("[INFO] Creating Autoscaling Scheduled Action: %s", d.Get("scheduled_action_name").(string))
+ _, err := autoscalingconn.PutScheduledUpdateGroupAction(params)
+ if err != nil {
+ return fmt.Errorf("Error Creating Autoscaling Scheduled Action: %s", err.Error())
+ }
+
+ d.SetId(d.Get("scheduled_action_name").(string))
+
+ return resourceAwsAutoscalingScheduleRead(d, meta)
+}
+
+func resourceAwsAutoscalingScheduleRead(d *schema.ResourceData, meta interface{}) error {
+ sa, err := resourceAwsASGScheduledActionRetrieve(d, meta)
+ if err != nil {
+ return err
+ }
+
+ d.Set("autoscaling_group_name", sa.AutoScalingGroupName)
+ d.Set("arn", sa.ScheduledActionARN)
+ d.Set("desired_capacity", sa.DesiredCapacity)
+ d.Set("min_size", sa.MinSize)
+ d.Set("max_size", sa.MaxSize)
+ d.Set("recurrence", sa.Recurrence)
+ if sa.StartTime != nil {
+   d.Set("start_time", sa.StartTime.Format(awsAutoscalingScheduleTimeLayout))
+ }
+ if sa.EndTime != nil {
+   d.Set("end_time", sa.EndTime.Format(awsAutoscalingScheduleTimeLayout))
+ }
+
+ return nil
+}
+
+func resourceAwsAutoscalingScheduleDelete(d *schema.ResourceData, meta interface{}) error {
+ autoscalingconn := meta.(*AWSClient).autoscalingconn
+
+ params := &autoscaling.DeleteScheduledActionInput{
+ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+ ScheduledActionName: aws.String(d.Id()),
+ }
+
+ log.Printf("[INFO] Deleting Autoscaling Scheduled Action: %s", d.Id())
+ _, err := autoscalingconn.DeleteScheduledAction(params)
+ if err != nil {
+ return fmt.Errorf("Error deleting Autoscaling Scheduled Action: %s", err.Error())
+ }
+
+ return nil
+}
+
+func resourceAwsASGScheduledActionRetrieve(d *schema.ResourceData, meta interface{}) (*autoscaling.ScheduledUpdateGroupAction, error) {
+ autoscalingconn := meta.(*AWSClient).autoscalingconn
+
+ params := &autoscaling.DescribeScheduledActionsInput{
+ AutoScalingGroupName: aws.String(d.Get("autoscaling_group_name").(string)),
+ ScheduledActionNames: []*string{aws.String(d.Id())},
+ }
+
+ log.Printf("[INFO] Describing Autoscaling Scheduled Action: %+v", params)
+ actions, err := autoscalingconn.DescribeScheduledActions(params)
+ if err != nil {
+ return nil, fmt.Errorf("Error retrieving Autoscaling Scheduled Actions: %s", err)
+ }
+
+ if len(actions.ScheduledUpdateGroupActions) != 1 ||
+ *actions.ScheduledUpdateGroupActions[0].ScheduledActionName != d.Id() {
+ return nil, fmt.Errorf("Unable to find Autoscaling Scheduled Action: %#v", actions.ScheduledUpdateGroupActions)
+ }
+
+ return actions.ScheduledUpdateGroupActions[0], nil
+}
+
+func validateASGScheduleTimestamp(v interface{}, k string) (ws []string, errors []error) {
+ value := v.(string)
+ _, err := time.Parse(awsAutoscalingScheduleTimeLayout, value)
+ if err != nil {
+ errors = append(errors, fmt.Errorf(
+ "%q cannot be parsed as iso8601 Timestamp Format", value))
+ }
+
+ return
+}
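For reference, the layout constant above doubles as both the parse format (in Create) and the render format (in Read), so stored values round-trip byte-for-byte. A standalone sketch (not part of the provider) showing how the literal `Z` in a Go reference-time layout behaves:

```go
package main

import (
	"fmt"
	"time"
)

// Same layout string the resource uses. In Go's reference-time notation the
// trailing 'Z' here is a literal character (not the "Z07:00" zone directive),
// so only UTC timestamps written with 'Z' will parse.
const layout = "2006-01-02T15:04:05Z"

func main() {
	// Accepted: parses and round-trips through Format unchanged, which is
	// why Read can safely re-render what Create parsed.
	t, err := time.Parse(layout, "2016-12-11T18:00:00Z")
	fmt.Println(t.Format(layout), err) // 2016-12-11T18:00:00Z <nil>

	// Rejected: an explicit numeric offset fails to match the literal 'Z';
	// this is the case validateASGScheduleTimestamp reports as an error.
	_, err = time.Parse(layout, "2016-12-11T18:00:00+01:00")
	fmt.Println(err != nil) // true
}
```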
diff --git a/builtin/providers/aws/resource_aws_autoscaling_schedule_test.go b/builtin/providers/aws/resource_aws_autoscaling_schedule_test.go
new file mode 100644
index 000000000000..3bd031526763
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_autoscaling_schedule_test.go
@@ -0,0 +1,117 @@
+package aws
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/autoscaling"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSAutoscalingSchedule_basic(t *testing.T) {
+ var schedule autoscaling.ScheduledUpdateGroupAction
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckAWSAutoscalingScheduleDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAWSAutoscalingScheduleConfig,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckScalingScheduleExists("aws_autoscaling_schedule.foobar", &schedule),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckScalingScheduleExists(n string, policy *autoscaling.ScheduledUpdateGroupAction) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ autoScalingGroup, _ := rs.Primary.Attributes["autoscaling_group_name"]
+ conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
+ params := &autoscaling.DescribeScheduledActionsInput{
+ AutoScalingGroupName: aws.String(autoScalingGroup),
+ ScheduledActionNames: []*string{aws.String(rs.Primary.ID)},
+ }
+
+ resp, err := conn.DescribeScheduledActions(params)
+ if err != nil {
+ return err
+ }
+ if len(resp.ScheduledUpdateGroupActions) == 0 {
+ return fmt.Errorf("Scaling Schedule not found")
+ }
+
+ return nil
+ }
+}
+
+func testAccCheckAWSAutoscalingScheduleDestroy(s *terraform.State) error {
+ conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aws_autoscaling_schedule" {
+ continue
+ }
+
+ autoScalingGroup, _ := rs.Primary.Attributes["autoscaling_group_name"]
+ params := &autoscaling.DescribeScheduledActionsInput{
+ AutoScalingGroupName: aws.String(autoScalingGroup),
+ ScheduledActionNames: []*string{aws.String(rs.Primary.ID)},
+ }
+
+ resp, err := conn.DescribeScheduledActions(params)
+
+ if err == nil {
+ if len(resp.ScheduledUpdateGroupActions) != 0 &&
+ *resp.ScheduledUpdateGroupActions[0].ScheduledActionName == rs.Primary.ID {
+ return fmt.Errorf("Scaling Schedule Still Exists: %s", rs.Primary.ID)
+ }
+ }
+ }
+
+ return nil
+}
+
+var testAccAWSAutoscalingScheduleConfig = fmt.Sprintf(`
+resource "aws_launch_configuration" "foobar" {
+ name = "terraform-test-foobar5"
+ image_id = "ami-21f78e11"
+ instance_type = "t1.micro"
+}
+
+resource "aws_autoscaling_group" "foobar" {
+ availability_zones = ["us-west-2a"]
+ name = "terraform-test-foobar5"
+ max_size = 1
+ min_size = 1
+ health_check_grace_period = 300
+ health_check_type = "ELB"
+ force_delete = true
+ termination_policies = ["OldestInstance"]
+ launch_configuration = "${aws_launch_configuration.foobar.name}"
+ tag {
+ key = "Foo"
+ value = "foo-bar"
+ propagate_at_launch = true
+ }
+}
+
+resource "aws_autoscaling_schedule" "foobar" {
+ scheduled_action_name = "foobar"
+ min_size = 0
+ max_size = 1
+ desired_capacity = 0
+ start_time = "2016-12-11T18:00:00Z"
+ end_time = "2016-12-12T06:00:00Z"
+ autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
+}
+`)
diff --git a/builtin/providers/aws/resource_aws_ebs_volume.go b/builtin/providers/aws/resource_aws_ebs_volume.go
index 1680b4f533e0..3046ac46c6d8 100644
--- a/builtin/providers/aws/resource_aws_ebs_volume.go
+++ b/builtin/providers/aws/resource_aws_ebs_volume.go
@@ -76,9 +76,6 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error
if value, ok := d.GetOk("encrypted"); ok {
request.Encrypted = aws.Bool(value.(bool))
}
- if value, ok := d.GetOk("iops"); ok {
- request.Iops = aws.Int64(int64(value.(int)))
- }
if value, ok := d.GetOk("kms_key_id"); ok {
request.KmsKeyId = aws.String(value.(string))
}
@@ -88,18 +85,35 @@ func resourceAwsEbsVolumeCreate(d *schema.ResourceData, meta interface{}) error
if value, ok := d.GetOk("snapshot_id"); ok {
request.SnapshotId = aws.String(value.(string))
}
+
+ // IOPS are only valid for, and required by, volume type io1. The current
+ // minimum is 100. Instead of a hard validation we only apply the IOPS to
+ // the request if the type is io1, and log a warning otherwise. This allows
+ // users to "disable" iops. See https://github.com/hashicorp/terraform/pull/4146
+ var t string
if value, ok := d.GetOk("type"); ok {
- request.VolumeType = aws.String(value.(string))
+ t = value.(string)
+ request.VolumeType = aws.String(t)
+ }
+
+ iops := d.Get("iops").(int)
+ if t != "io1" && iops > 0 {
+ log.Printf("[WARN] IOPS is only valid for storage type io1 for EBS Volumes")
+ } else if t == "io1" {
+ // We add the iops value without validating its size, to allow AWS to
+ // enforce a size requirement (currently 100)
+ request.Iops = aws.Int64(int64(iops))
}
+ log.Printf(
+ "[DEBUG] EBS Volume create opts: %s", request)
result, err := conn.CreateVolume(request)
if err != nil {
return fmt.Errorf("Error creating EC2 volume: %s", err)
}
- log.Printf(
- "[DEBUG] Waiting for Volume (%s) to become available",
- d.Id())
+ log.Println(
+ "[DEBUG] Waiting for Volume to become available")
stateConf := &resource.StateChangeConf{
Pending: []string{"creating"},
@@ -199,9 +213,6 @@ func readVolume(d *schema.ResourceData, volume *ec2.Volume) error {
if volume.Encrypted != nil {
d.Set("encrypted", *volume.Encrypted)
}
- if volume.Iops != nil {
- d.Set("iops", *volume.Iops)
- }
if volume.KmsKeyId != nil {
d.Set("kms_key_id", *volume.KmsKeyId)
}
@@ -214,6 +225,17 @@ func readVolume(d *schema.ResourceData, volume *ec2.Volume) error {
if volume.VolumeType != nil {
d.Set("type", *volume.VolumeType)
}
+
+ if volume.VolumeType != nil && *volume.VolumeType == "io1" {
+ // Only set the iops attribute if the volume type is io1. Setting otherwise
+ // can trigger a refresh/plan loop based on the computed value returned
+ // by AWS, and prevents us from specifying 0 as a valid iops.
+ // See https://github.com/hashicorp/terraform/pull/4146
+ if volume.Iops != nil {
+ d.Set("iops", *volume.Iops)
+ }
+ }
+
if volume.Tags != nil {
d.Set("tags", tagsToMap(volume.Tags))
}
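The create-path gating above reduces to a small decision: forward IOPS only for `io1` volumes, warn and drop the value otherwise. A minimal standalone sketch of that rule — `iopsForRequest` is a hypothetical helper for illustration, not code from the provider:

```go
package main

import "fmt"

// iopsForRequest mirrors the create-path logic: only io1 volumes carry a
// user-supplied IOPS value; for every other type a non-zero iops setting is
// ignored with a warning instead of an error.
func iopsForRequest(volumeType string, iops int) (int64, bool) {
	if volumeType == "io1" {
		// AWS itself enforces the minimum (currently 100), so the value is
		// passed through unvalidated.
		return int64(iops), true
	}
	if iops > 0 {
		fmt.Printf("[WARN] iops=%d ignored for volume type %q\n", iops, volumeType)
	}
	return 0, false
}

func main() {
	fmt.Println(iopsForRequest("io1", 300)) // 300 true
	fmt.Println(iopsForRequest("gp2", 100)) // warning, then 0 false
	fmt.Println(iopsForRequest("gp2", 0))   // 0 false, no warning
}
```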
diff --git a/builtin/providers/aws/resource_aws_ebs_volume_test.go b/builtin/providers/aws/resource_aws_ebs_volume_test.go
index aab92eb01122..940c8157cabf 100644
--- a/builtin/providers/aws/resource_aws_ebs_volume_test.go
+++ b/builtin/providers/aws/resource_aws_ebs_volume_test.go
@@ -26,6 +26,22 @@ func TestAccAWSEBSVolume_basic(t *testing.T) {
})
}
+func TestAccAWSEBSVolume_NoIops(t *testing.T) {
+ var v ec2.Volume
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccAwsEbsVolumeConfigWithNoIops,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckVolumeExists("aws_ebs_volume.iops_test", &v),
+ ),
+ },
+ },
+ })
+}
+
func TestAccAWSEBSVolume_withTags(t *testing.T) {
var v ec2.Volume
resource.Test(t, resource.TestCase{
@@ -86,3 +102,15 @@ resource "aws_ebs_volume" "tags_test" {
}
}
`
+
+const testAccAwsEbsVolumeConfigWithNoIops = `
+resource "aws_ebs_volume" "iops_test" {
+ availability_zone = "us-west-2a"
+ size = 10
+ type = "gp2"
+ iops = 0
+ tags {
+ Name = "TerraformTest"
+ }
+}
+`
diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go
index b5ca83a797d8..748ecc88be8d 100644
--- a/builtin/providers/aws/structure.go
+++ b/builtin/providers/aws/structure.go
@@ -399,10 +399,16 @@ func flattenEcsContainerDefinitions(definitions []*ecs.ContainerDefinition) (str
func flattenParameters(list []*rds.Parameter) []map[string]interface{} {
result := make([]map[string]interface{}, 0, len(list))
for _, i := range list {
- result = append(result, map[string]interface{}{
- "name": strings.ToLower(*i.ParameterName),
- "value": strings.ToLower(*i.ParameterValue),
- })
+ if i.ParameterName != nil {
+ r := make(map[string]interface{})
+ r["name"] = strings.ToLower(*i.ParameterName)
+ // Default empty string, guard against nil parameter values
+ r["value"] = ""
+ if i.ParameterValue != nil {
+ r["value"] = strings.ToLower(*i.ParameterValue)
+ }
+ result = append(result, r)
+ }
}
return result
}
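The change guards both pointer fields: an entry with a nil name is skipped entirely, and a nil value is normalized to the empty string instead of being dereferenced. A standalone sketch of the same pattern, using a hypothetical `Param` type in place of `rds.Parameter` (whose `ParameterName`/`ParameterValue` fields are nilable `*string`s):

```go
package main

import (
	"fmt"
	"strings"
)

// Param stands in for rds.Parameter; both fields may be nil.
type Param struct {
	Name  *string
	Value *string
}

func flatten(list []*Param) []map[string]interface{} {
	result := make([]map[string]interface{}, 0, len(list))
	for _, p := range list {
		if p.Name == nil {
			continue // a parameter without a name is not representable; skip it
		}
		r := map[string]interface{}{
			"name":  strings.ToLower(*p.Name),
			"value": "", // default for nil values, instead of panicking
		}
		if p.Value != nil {
			r["value"] = strings.ToLower(*p.Value)
		}
		result = append(result, r)
	}
	return result
}

func main() {
	name := "Character_Set"
	fmt.Println(flatten([]*Param{{Name: &name, Value: nil}}))
	// => [map[name:character_set value:]]
}
```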
diff --git a/builtin/providers/azure/resource_azure_instance.go b/builtin/providers/azure/resource_azure_instance.go
index 8a643931c3d3..c30b07ea41e5 100644
--- a/builtin/providers/azure/resource_azure_instance.go
+++ b/builtin/providers/azure/resource_azure_instance.go
@@ -682,7 +682,7 @@ func retrieveImageDetails(
func retrieveVMImageDetails(
vmImageClient virtualmachineimage.Client,
label string) (func(*virtualmachine.Role) error, string, []string, error) {
- imgs, err := vmImageClient.ListVirtualMachineImages()
+ imgs, err := vmImageClient.ListVirtualMachineImages(virtualmachineimage.ListParameters{})
if err != nil {
return nil, "", nil, fmt.Errorf("Error retrieving image details: %s", err)
}
diff --git a/builtin/providers/chef/provider.go b/builtin/providers/chef/provider.go
new file mode 100644
index 000000000000..7a04b977583e
--- /dev/null
+++ b/builtin/providers/chef/provider.go
@@ -0,0 +1,112 @@
+package chef
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+
+ chefc "github.com/go-chef/chef"
+)
+
+func Provider() terraform.ResourceProvider {
+ return &schema.Provider{
+ Schema: map[string]*schema.Schema{
+ "server_url": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("CHEF_SERVER_URL", nil),
+ Description: "URL of the root of the target Chef server or organization.",
+ },
+ "client_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: schema.EnvDefaultFunc("CHEF_CLIENT_NAME", nil),
+ Description: "Name of a registered client within the Chef server.",
+ },
+ "private_key_pem": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ DefaultFunc: providerPrivateKeyEnvDefault,
+ Description: "PEM-formatted private key for client authentication.",
+ },
+ "allow_unverified_ssl": &schema.Schema{
+ Type: schema.TypeBool,
+ Optional: true,
+ Description: "If set, the Chef client will permit unverifiable SSL certificates.",
+ },
+ },
+
+ ResourcesMap: map[string]*schema.Resource{
+ //"chef_acl": resourceChefAcl(),
+ //"chef_client": resourceChefClient(),
+ //"chef_cookbook": resourceChefCookbook(),
+ "chef_data_bag": resourceChefDataBag(),
+ "chef_data_bag_item": resourceChefDataBagItem(),
+ "chef_environment": resourceChefEnvironment(),
+ "chef_node": resourceChefNode(),
+ "chef_role": resourceChefRole(),
+ },
+
+ ConfigureFunc: providerConfigure,
+ }
+}
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+ config := &chefc.Config{
+ Name: d.Get("client_name").(string),
+ Key: d.Get("private_key_pem").(string),
+ BaseURL: d.Get("server_url").(string),
+ SkipSSL: d.Get("allow_unverified_ssl").(bool),
+ Timeout: 10 * time.Second,
+ }
+
+ return chefc.NewClient(config)
+}
+
+func providerPrivateKeyEnvDefault() (interface{}, error) {
+ if fn := os.Getenv("CHEF_PRIVATE_KEY_FILE"); fn != "" {
+ contents, err := ioutil.ReadFile(fn)
+ if err != nil {
+ return nil, err
+ }
+ return string(contents), nil
+ }
+
+ return nil, nil
+}
+
+func jsonStateFunc(value interface{}) string {
+ // Parse and re-stringify the JSON to make sure it's always kept
+ // in a normalized form.
+ in, ok := value.(string)
+ if !ok {
+ return "null"
+ }
+ var tmp map[string]interface{}
+
+ // Assuming the value must be valid JSON since it passed okay through
+ // our prepareDataBagItemContent function earlier.
+ json.Unmarshal([]byte(in), &tmp)
+
+ jsonValue, _ := json.Marshal(&tmp)
+ return string(jsonValue)
+}
+
+func runListEntryStateFunc(value interface{}) string {
+ // Recipes in run lists can either be naked, like "foo", or can
+ // be explicitly qualified as "recipe[foo]". Whichever form we use,
+ // the server will always normalize to the explicit form,
+ // so we'll normalize too and then we won't generate unnecessary
+ // diffs when we refresh.
+ in := value.(string)
+ if !strings.Contains(in, "[") {
+ return fmt.Sprintf("recipe[%s]", in)
+ }
+ return in
+}
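Both StateFuncs normalize values before they are stored in state so that refreshes don't produce spurious diffs: `json.Marshal` on a `map[string]interface{}` always emits keys in sorted order, so any two JSON documents with the same content normalize to the same string, and run-list entries are coerced to the explicit `recipe[...]` form the server echoes back. A quick standalone demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func normalizeJSON(in string) string {
	var tmp map[string]interface{}
	json.Unmarshal([]byte(in), &tmp) // input assumed valid, as in the provider
	out, _ := json.Marshal(&tmp)     // map keys are marshalled in sorted order
	return string(out)
}

func normalizeRunListEntry(in string) string {
	if !strings.Contains(in, "[") {
		return fmt.Sprintf("recipe[%s]", in)
	}
	return in
}

func main() {
	a := normalizeJSON(`{"b": 2, "a": 1}`)
	b := normalizeJSON(`{"a": 1, "b": 2}`)
	fmt.Println(a == b, a) // true {"a":1,"b":2}

	fmt.Println(normalizeRunListEntry("nginx"))           // recipe[nginx]
	fmt.Println(normalizeRunListEntry("role[webserver]")) // unchanged
}
```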
diff --git a/builtin/providers/chef/provider_test.go b/builtin/providers/chef/provider_test.go
new file mode 100644
index 000000000000..1d12945f4604
--- /dev/null
+++ b/builtin/providers/chef/provider_test.go
@@ -0,0 +1,62 @@
+package chef
+
+import (
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/schema"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+// To run these acceptance tests, you will need access to a Chef server.
+// An easy way to get one is to sign up for a hosted Chef server account
+// at https://manage.chef.io/signup , after which your base URL will
+// be something like https://api.opscode.com/organizations/example/ .
+// You will also need to create a "client" and write its private key to
+// a file somewhere.
+//
+// You can then set the following environment variables to make these
+// tests work:
+// CHEF_SERVER_URL to the base URL as described above.
+// CHEF_CLIENT_NAME to the name of the client object you created.
+// CHEF_PRIVATE_KEY_FILE to the path to the private key file you created.
+//
+// You will probably need to edit the global permissions on your Chef
+// Server account to allow this client (or all clients, if you're lazy)
+// to have both List and Create access on all types of object:
+// https://manage.chef.io/organizations/saymedia/global_permissions
+//
+// With all of that done, you can run like this:
+// make testacc TEST=./builtin/providers/chef
+
+var testAccProviders map[string]terraform.ResourceProvider
+var testAccProvider *schema.Provider
+
+func init() {
+ testAccProvider = Provider().(*schema.Provider)
+ testAccProviders = map[string]terraform.ResourceProvider{
+ "chef": testAccProvider,
+ }
+}
+
+func TestProvider(t *testing.T) {
+ if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+}
+
+func TestProvider_impl(t *testing.T) {
+ var _ terraform.ResourceProvider = Provider()
+}
+
+func testAccPreCheck(t *testing.T) {
+ if v := os.Getenv("CHEF_SERVER_URL"); v == "" {
+ t.Fatal("CHEF_SERVER_URL must be set for acceptance tests")
+ }
+ if v := os.Getenv("CHEF_CLIENT_NAME"); v == "" {
+ t.Fatal("CHEF_CLIENT_NAME must be set for acceptance tests")
+ }
+ if v := os.Getenv("CHEF_PRIVATE_KEY_FILE"); v == "" {
+ t.Fatal("CHEF_PRIVATE_KEY_FILE must be set for acceptance tests")
+ }
+}
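One detail worth noting about the environment variables listed above: `CHEF_PRIVATE_KEY_FILE` names a file on disk, and the provider's default-value function reads that file, so `private_key_pem` receives the PEM contents rather than the path. A standalone sketch of that behavior (the temp-file setup in `main` is illustrative only):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// privateKeyEnvDefault mirrors providerPrivateKeyEnvDefault in provider.go:
// the environment variable names a *file*, and the default handed to the
// schema is that file's contents, not the path itself.
func privateKeyEnvDefault() (interface{}, error) {
	if fn := os.Getenv("CHEF_PRIVATE_KEY_FILE"); fn != "" {
		contents, err := ioutil.ReadFile(fn)
		if err != nil {
			return nil, err
		}
		return string(contents), nil
	}
	return nil, nil // no env var set: the user must configure the key inline
}

func main() {
	f, _ := ioutil.TempFile("", "key*.pem")
	f.WriteString("-----BEGIN RSA PRIVATE KEY-----\n...")
	f.Close()
	defer os.Remove(f.Name())

	os.Setenv("CHEF_PRIVATE_KEY_FILE", f.Name())
	v, err := privateKeyEnvDefault()
	fmt.Println(err == nil, v != nil) // true true: PEM contents, not the path
}
```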
diff --git a/builtin/providers/chef/resource_data_bag.go b/builtin/providers/chef/resource_data_bag.go
new file mode 100644
index 000000000000..a9c08748cdc3
--- /dev/null
+++ b/builtin/providers/chef/resource_data_bag.go
@@ -0,0 +1,77 @@
+package chef
+
+import (
+ "github.com/hashicorp/terraform/helper/schema"
+
+ chefc "github.com/go-chef/chef"
+)
+
+func resourceChefDataBag() *schema.Resource {
+ return &schema.Resource{
+ Create: CreateDataBag,
+ Read: ReadDataBag,
+ Delete: DeleteDataBag,
+
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "api_uri": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func CreateDataBag(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ dataBag := &chefc.DataBag{
+ Name: d.Get("name").(string),
+ }
+
+ result, err := client.DataBags.Create(dataBag)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(dataBag.Name)
+ d.Set("api_uri", result.URI)
+ return nil
+}
+
+func ReadDataBag(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ // The Chef API provides no API to read a data bag's metadata,
+ // but we can try to read its items and use that as a proxy for
+ // whether it still exists.
+
+ name := d.Id()
+
+ _, err := client.DataBags.ListItems(name)
+ if err != nil {
+ if errRes, ok := err.(*chefc.ErrorResponse); ok {
+ if errRes.Response.StatusCode == 404 {
+ d.SetId("")
+ return nil
+ }
+ }
+ }
+ return err
+}
+
+func DeleteDataBag(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ name := d.Id()
+
+ _, err := client.DataBags.Delete(name)
+ if err == nil {
+ d.SetId("")
+ }
+ return err
+}
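`ReadDataBag` follows the usual Terraform convention for a vanished remote object: an HTTP 404 clears the resource ID (so the next plan schedules re-creation) instead of failing the refresh, while any other error propagates. The error-inspection pattern in isolation, with a stand-in `errorResponse` type mimicking `chefc.ErrorResponse`:

```go
package main

import (
	"fmt"
	"net/http"
)

// errorResponse mimics chefc.ErrorResponse: an error type carrying the
// underlying *http.Response, which is what the read functions inspect.
type errorResponse struct{ Response *http.Response }

func (e *errorResponse) Error() string {
	return fmt.Sprintf("HTTP %d", e.Response.StatusCode)
}

// treatAsDeleted reports whether a read error means "the object is gone",
// in which case the resource ID should be cleared instead of surfacing err.
func treatAsDeleted(err error) bool {
	if errRes, ok := err.(*errorResponse); ok {
		return errRes.Response.StatusCode == 404
	}
	return false
}

func main() {
	gone := &errorResponse{Response: &http.Response{StatusCode: 404}}
	denied := &errorResponse{Response: &http.Response{StatusCode: 403}}
	fmt.Println(treatAsDeleted(gone))   // true: clear the ID, no error
	fmt.Println(treatAsDeleted(denied)) // false: propagate the error
}
```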
diff --git a/builtin/providers/chef/resource_data_bag_item.go b/builtin/providers/chef/resource_data_bag_item.go
new file mode 100644
index 000000000000..ff6f7ac67327
--- /dev/null
+++ b/builtin/providers/chef/resource_data_bag_item.go
@@ -0,0 +1,120 @@
+package chef
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/hashicorp/terraform/helper/schema"
+
+ chefc "github.com/go-chef/chef"
+)
+
+func resourceChefDataBagItem() *schema.Resource {
+ return &schema.Resource{
+ Create: CreateDataBagItem,
+ Read: ReadDataBagItem,
+ Delete: DeleteDataBagItem,
+
+ Schema: map[string]*schema.Schema{
+ "data_bag_name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "content_json": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ StateFunc: jsonStateFunc,
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ }
+}
+
+func CreateDataBagItem(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ dataBagName := d.Get("data_bag_name").(string)
+ itemId, itemContent, err := prepareDataBagItemContent(d.Get("content_json").(string))
+ if err != nil {
+ return err
+ }
+
+ err = client.DataBags.CreateItem(dataBagName, itemContent)
+ if err != nil {
+ return err
+ }
+
+ d.SetId(itemId)
+ d.Set("id", itemId)
+ return nil
+}
+
+func ReadDataBagItem(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ // Here we fetch the item's content directly; a 404 response indicates
+ // that either the item or its enclosing data bag no longer exists.
+
+ itemId := d.Id()
+ dataBagName := d.Get("data_bag_name").(string)
+
+ value, err := client.DataBags.GetItem(dataBagName, itemId)
+ if err != nil {
+   if errRes, ok := err.(*chefc.ErrorResponse); ok {
+     if errRes.Response.StatusCode == 404 {
+       d.SetId("")
+       return nil
+     }
+   }
+   // Propagate any other error, including non-404 HTTP errors, rather
+   // than falling through and marshalling a nil value.
+   return err
+ }
+
+ jsonContent, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+
+ d.Set("content_json", string(jsonContent))
+
+ return nil
+}
+
+func DeleteDataBagItem(d *schema.ResourceData, meta interface{}) error {
+ client := meta.(*chefc.Client)
+
+ itemId := d.Id()
+ dataBagName := d.Get("data_bag_name").(string)
+
+ err := client.DataBags.DeleteItem(dataBagName, itemId)
+ if err == nil {
+ d.SetId("")
+ d.Set("id", "")
+ }
+ return err
+}
+
+func prepareDataBagItemContent(contentJson string) (string, interface{}, error) {
+ var value map[string]interface{}
+ err := json.Unmarshal([]byte(contentJson), &value)
+ if err != nil {
+ return "", nil, err
+ }
+
+ var itemId string
+ if itemIdI, ok := value["id"]; ok {
+ itemId, _ = itemIdI.(string)
+ }
+
+ if itemId == "" {
+ return "", nil, fmt.Errorf("content_json must have id attribute, set to a string")
+ }
+
+ return itemId, value, nil
+}
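`prepareDataBagItemContent` enforces Chef's addressing invariant up front: every data bag item must carry a string `id`, which becomes both the item name on the server and the Terraform resource ID. A standalone sketch of the same contract — `prepareContent` is a renamed copy for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// prepareContent has the same shape as prepareDataBagItemContent above:
// decode the JSON document, then require a non-empty string "id" attribute.
func prepareContent(contentJSON string) (string, map[string]interface{}, error) {
	var value map[string]interface{}
	if err := json.Unmarshal([]byte(contentJSON), &value); err != nil {
		return "", nil, err
	}
	id, _ := value["id"].(string)
	if id == "" {
		return "", nil, fmt.Errorf("content_json must have id attribute, set to a string")
	}
	return id, value, nil
}

func main() {
	id, _, err := prepareContent(`{"id": "terraform_acc_test", "something_else": true}`)
	fmt.Println(id, err) // terraform_acc_test <nil>

	_, _, err = prepareContent(`{"something_else": true}`)
	fmt.Println(err) // the id requirement is enforced before any API call
}
```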
diff --git a/builtin/providers/chef/resource_data_bag_item_test.go b/builtin/providers/chef/resource_data_bag_item_test.go
new file mode 100644
index 000000000000..9630d8b6c878
--- /dev/null
+++ b/builtin/providers/chef/resource_data_bag_item_test.go
@@ -0,0 +1,95 @@
+package chef
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ chefc "github.com/go-chef/chef"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccDataBagItem_basic(t *testing.T) {
+ var dataBagItemName string
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccDataBagItemCheckDestroy(dataBagItemName),
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccDataBagItemConfig_basic,
+ Check: testAccDataBagItemCheck(
+ "chef_data_bag_item.test", &dataBagItemName,
+ ),
+ },
+ },
+ })
+}
+
+func testAccDataBagItemCheck(rn string, name *string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[rn]
+ if !ok {
+ return fmt.Errorf("resource not found: %s", rn)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("data bag item id not set")
+ }
+
+ client := testAccProvider.Meta().(*chefc.Client)
+ content, err := client.DataBags.GetItem("terraform-acc-test-bag-item-basic", rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("error getting data bag item: %s", err)
+ }
+
+ expectedContent := map[string]interface{}{
+ "id": "terraform_acc_test",
+ "something_else": true,
+ }
+ if !reflect.DeepEqual(content, expectedContent) {
+ return fmt.Errorf("wrong content: expected %#v, got %#v", expectedContent, content)
+ }
+
+ if expected := "terraform_acc_test"; rs.Primary.Attributes["id"] != expected {
+ return fmt.Errorf("wrong id; expected %#v, got %#v", expected, rs.Primary.Attributes["id"])
+ }
+
+ *name = rs.Primary.ID
+
+ return nil
+ }
+}
+
+func testAccDataBagItemCheckDestroy(name string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ client := testAccProvider.Meta().(*chefc.Client)
+ _, err := client.DataBags.GetItem("terraform-acc-test-bag-item-basic", name)
+ if err == nil {
+ return fmt.Errorf("data bag item still exists")
+ }
+ if _, ok := err.(*chefc.ErrorResponse); err != nil && !ok {
+ return fmt.Errorf("got something other than an HTTP error (%v) when getting data bag item", err)
+ }
+
+ return nil
+ }
+}
+
+const testAccDataBagItemConfig_basic = `
+resource "chef_data_bag" "test" {
+ name = "terraform-acc-test-bag-item-basic"
+}
+resource "chef_data_bag_item" "test" {
+ data_bag_name = "terraform-acc-test-bag-item-basic"
+ depends_on = ["chef_data_bag.test"]
+  content_json = <<EOT
+{
+    "id": "terraform_acc_test",
+    "something_else": true
+}
+EOT
+}
+`
 count = 5
-ami = "ami-043a5034"
-instance_type = "m1.small"
+ami = "ami-d05e75b8"
+instance_type = "t2.micro"
 }
diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb
index 1da203dca2fd..c2df5bbf5c9b 100644
--- a/website/source/layouts/aws.erb
+++ b/website/source/layouts/aws.erb
@@ -128,6 +128,10 @@ aws_autoscaling_policy
+