From dfb5be241363f6ce21b1e0e71b465560cf1f22c5 Mon Sep 17 00:00:00 2001 From: Chris Marchesi Date: Sat, 29 Apr 2017 12:08:04 -0700 Subject: [PATCH 1/5] Rename NodeRefreshableResource to NodeRefreshableResourceInstance In prep for NodeRefreshableResource becoming an NodeAbstractCountResource and implementing GraphNodeDynamicExpandable. --- terraform/graph_builder_refresh.go | 2 +- terraform/node_resource_refresh.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/terraform/graph_builder_refresh.go b/terraform/graph_builder_refresh.go index 88ae3380c4b9..3acba002d0d3 100644 --- a/terraform/graph_builder_refresh.go +++ b/terraform/graph_builder_refresh.go @@ -57,7 +57,7 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { } concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeRefreshableResource{ + return &NodeRefreshableResourceInstance{ NodeAbstractResource: a, } } diff --git a/terraform/node_resource_refresh.go b/terraform/node_resource_refresh.go index 3a44926cef76..a6788eb12106 100644 --- a/terraform/node_resource_refresh.go +++ b/terraform/node_resource_refresh.go @@ -6,19 +6,19 @@ import ( "github.com/hashicorp/terraform/config" ) -// NodeRefreshableResource represents a resource that is "applyable": +// NodeRefreshableResourceInstance represents a resource that is "applyable": // it is ready to be applied and is represented by a diff. 
-type NodeRefreshableResource struct { +type NodeRefreshableResourceInstance struct { *NodeAbstractResource } // GraphNodeDestroyer -func (n *NodeRefreshableResource) DestroyAddr() *ResourceAddress { +func (n *NodeRefreshableResourceInstance) DestroyAddr() *ResourceAddress { return n.Addr } // GraphNodeEvalable -func (n *NodeRefreshableResource) EvalTree() EvalNode { +func (n *NodeRefreshableResourceInstance) EvalTree() EvalNode { // Eval info is different depending on what kind of resource this is switch mode := n.Addr.Mode; mode { case config.ManagedResourceMode: @@ -44,7 +44,7 @@ func (n *NodeRefreshableResource) EvalTree() EvalNode { } } -func (n *NodeRefreshableResource) evalTreeManagedResource() EvalNode { +func (n *NodeRefreshableResourceInstance) evalTreeManagedResource() EvalNode { addr := n.NodeAbstractResource.Addr // stateId is the ID to put into the state From b807505d55b513e6c68248e79658311c08038583 Mon Sep 17 00:00:00 2001 From: Chris Marchesi Date: Sat, 29 Apr 2017 23:07:01 -0700 Subject: [PATCH 2/5] core: New refresh graph building behaviour Currently, the refresh graph uses the resources from state as a base, with data sources then layered on. Config is not consulted for resources and hence new resources that are added with count (or any new resource from config, for that matter) do not get added to the graph during refresh. This is leading to issues with scale in and scale out when the same value for count is used in both resources, and data sources that may depend on that resource (and possibly vice versa). While the resources exist in config and can be used, the fact that ConfigTransformer for resources is missing means that they don't get added into the graph, leading to "index out of range" errors and what not. Further to that, if we add these new resources to the graph for scale out, considerations need to be taken for scale in as well, which are not being caught 100% by the current implementation of NodeRefreshableDataResource. 
Scale-in resources should be treated as orphans, which according to the instance-form NodeRefreshableResource node, should be NodeDestroyableDataResource nodes, but this logic is currently not rolled into NodeRefreshableDataResource. This causes issues on scale-in in the form of race-ish "index out of range" errors again. This commit updates the refresh graph so that StateTransformer is no longer used as the base of the graph. Instead, we add resources from the state and config in a hybrid fashion: * First off, resource nodes are added from config, but only if resources currently exist in state. NodeRefreshableManagedResource is a new expandable resource node that will expand count and add orphans from state. Any count-expanded node that has config but no state is also transformed into a plannable resource, via a new ResourceRefreshPlannableTransformer. * The NodeRefreshableDataResource node type will now add count orphans as NodeDestroyableDataResource nodes. This achieves the same effect as if the data sources were added by StateTransformer, but ensures there are no races in the dependency chain, with the added benefit of directing these nodes straight to the proper NodeDestroyableDataResource node. * Finally, config orphans (nodes that don't exist in config anymore, period) are then added, to complete the graph. This should ensure as much as possible that there is a refresh graph that best represents both the current state and config with updated variables and counts. 
--- terraform/graph_builder_refresh.go | 49 +++++++++-- terraform/node_data_refresh.go | 20 +++++ terraform/node_resource_refresh.go | 88 +++++++++++++++++-- .../transform_resource_refresh_plannable.go | 55 ++++++++++++ 4 files changed, 198 insertions(+), 14 deletions(-) create mode 100644 terraform/transform_resource_refresh_plannable.go diff --git a/terraform/graph_builder_refresh.go b/terraform/graph_builder_refresh.go index 3acba002d0d3..0634f9698d8f 100644 --- a/terraform/graph_builder_refresh.go +++ b/terraform/graph_builder_refresh.go @@ -1,6 +1,8 @@ package terraform import ( + "log" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" @@ -56,8 +58,16 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { } } - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeRefreshableResourceInstance{ + concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeRefreshableManagedResource{ + NodeAbstractCountResource: &NodeAbstractCountResource{ + NodeAbstractResource: a, + }, + } + } + + concreteManagedResourceInstance := func(a *NodeAbstractResource) dag.Vertex { + return &NodeRefreshableManagedResourceInstance{ NodeAbstractResource: a, } } @@ -71,13 +81,25 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { } steps := []GraphTransformer{ - // Creates all the resources represented in the state - &StateTransformer{ - Concrete: concreteResource, - State: b.State, - }, - - // Creates all the data resources that aren't in the state + // Creates all the managed resources that aren't in the state, but only if + // we have a state already. No resources in state means there's not + // anything to refresh. 
+ func() GraphTransformer { + if b.State.HasResources() { + return &ConfigTransformer{ + Concrete: concreteManagedResource, + Module: b.Module, + Unique: true, + ModeFilter: true, + Mode: config.ManagedResourceMode, + } + } + log.Println("[TRACE] No managed resources in state during refresh, skipping managed resource transformer") + return nil + }(), + + // Creates all the data resources that aren't in the state. This will also + // add any orphans from scaling in as destroy nodes. &ConfigTransformer{ Concrete: concreteDataResource, Module: b.Module, @@ -86,6 +108,15 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { Mode: config.DataResourceMode, }, + // Add any fully-orphaned resources from config (ones that have been + // removed completely, not ones that are just orphaned due to a scaled-in + // count. + &OrphanResourceTransformer{ + Concrete: concreteManagedResourceInstance, + State: b.State, + Module: b.Module, + }, + // Attach the state &AttachStateTransformer{State: b.State}, diff --git a/terraform/node_data_refresh.go b/terraform/node_data_refresh.go index d504c892c4e3..9f83d3edd97d 100644 --- a/terraform/node_data_refresh.go +++ b/terraform/node_data_refresh.go @@ -33,6 +33,17 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er } } + // We also need a destroyable resource for orphans that are a result of a + // scaled-in count. + concreteResourceDestroyable := func(a *NodeAbstractResource) dag.Vertex { + // Add the config since we don't do that via transforms + a.Config = n.Config + + return &NodeDestroyableDataResource{ + NodeAbstractResource: n.NodeAbstractResource, + } + } + // Start creating the steps steps := []GraphTransformer{ // Expand the count. @@ -42,6 +53,15 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er Addr: n.ResourceAddr(), }, + // Add the count orphans. As these are orphaned refresh nodes, we add them + // directly as NodeDestroyableDataResource. 
+ &OrphanResourceCountTransformer{ + Concrete: concreteResourceDestroyable, + Count: count, + Addr: n.ResourceAddr(), + State: state, + }, + // Attach the state &AttachStateTransformer{State: state}, diff --git a/terraform/node_resource_refresh.go b/terraform/node_resource_refresh.go index a6788eb12106..6ab9df7a26f8 100644 --- a/terraform/node_resource_refresh.go +++ b/terraform/node_resource_refresh.go @@ -4,21 +4,99 @@ import ( "fmt" "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/dag" ) -// NodeRefreshableResourceInstance represents a resource that is "applyable": +// NodeRefreshableManagedResource represents a resource that is expanabled into +// NodeRefreshableManagedResourceInstance. Resource count orphans are also added. +type NodeRefreshableManagedResource struct { + *NodeAbstractCountResource +} + +// GraphNodeDynamicExpandable +func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + // Grab the state which we read + state, lock := ctx.State() + lock.RLock() + defer lock.RUnlock() + + // Expand the resource count which must be available by now from EvalTree + count, err := n.Config.Count() + if err != nil { + return nil, err + } + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + + return &NodeRefreshableManagedResourceInstance{ + NodeAbstractResource: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count. + &ResourceCountTransformer{ + Concrete: concreteResource, + Count: count, + Addr: n.ResourceAddr(), + }, + + // Switch up any node missing state to a plannable resource. This helps + // catch cases where data sources depend on the counts from this resource + // during a scale out. 
+ &ResourceRefreshPlannableTransformer{ + State: state, + }, + + // Add the count orphans to make sure these resources are accounted for + // during a scale in. + &OrphanResourceCountTransformer{ + Concrete: concreteResource, + Count: count, + Addr: n.ResourceAddr(), + State: state, + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{ParsedTargets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodeRefreshableManagedResource", + } + + return b.Build(ctx.Path()) +} + +// NodeRefreshableManagedResourceInstance represents a resource that is "applyable": // it is ready to be applied and is represented by a diff. -type NodeRefreshableResourceInstance struct { +type NodeRefreshableManagedResourceInstance struct { *NodeAbstractResource } // GraphNodeDestroyer -func (n *NodeRefreshableResourceInstance) DestroyAddr() *ResourceAddress { +func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *ResourceAddress { return n.Addr } // GraphNodeEvalable -func (n *NodeRefreshableResourceInstance) EvalTree() EvalNode { +func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { // Eval info is different depending on what kind of resource this is switch mode := n.Addr.Mode; mode { case config.ManagedResourceMode: @@ -44,7 +122,7 @@ func (n *NodeRefreshableResourceInstance) EvalTree() EvalNode { } } -func (n *NodeRefreshableResourceInstance) evalTreeManagedResource() EvalNode { +func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { addr := n.NodeAbstractResource.Addr // stateId is the ID to put into the state diff --git a/terraform/transform_resource_refresh_plannable.go b/terraform/transform_resource_refresh_plannable.go new file mode 100644 index 000000000000..35358a3180eb --- 
/dev/null +++ b/terraform/transform_resource_refresh_plannable.go @@ -0,0 +1,55 @@ +package terraform + +import ( + "fmt" + "log" +) + +// ResourceRefreshPlannableTransformer is a GraphTransformer that replaces any +// nodes that don't have state yet exist in config with +// NodePlannableResourceInstance. +// +// This transformer is used when expanding count on managed resource nodes +// during the refresh phase to ensure that data sources that have +// interpolations that depend on resources existing in the graph can be walked +// properly. +type ResourceRefreshPlannableTransformer struct { + // The full global state. + State *State +} + +// Transform implements GraphTransformer for +// ResourceRefreshPlannableTransformer. +func (t *ResourceRefreshPlannableTransformer) Transform(g *Graph) error { +nextVertex: + for _, v := range g.Vertices() { + addr := v.(*NodeRefreshableManagedResourceInstance).Addr + + // Find the state for this address, if there is one + filter := &StateFilter{State: t.State} + results, err := filter.Filter(addr.String()) + if err != nil { + return err + } + + // Check to see if we have a state for this resource. If we do, skip this + // node. + for _, result := range results { + if _, ok := result.Value.(*ResourceState); ok { + continue nextVertex + } + } + // If we don't, convert this resource to a NodePlannableResourceInstance node + // with all of the data we need to make it happen. 
+ log.Printf("[TRACE] No state for %s, converting to NodePlannableResourceInstance", addr.String()) + new := &NodePlannableResourceInstance{ + NodeAbstractResource: v.(*NodeRefreshableManagedResourceInstance).NodeAbstractResource, + } + // Replace the node in the graph + if !g.Replace(v, new) { + return fmt.Errorf("ResourceRefreshPlannableTransformer: Could not replace node %#v with %#v", v, new) + } + } + + return nil +} From 7b1618efdef61cb1237cc4500b094ab70e80fd3c Mon Sep 17 00:00:00 2001 From: Chris Marchesi Date: Sun, 30 Apr 2017 08:54:32 -0700 Subject: [PATCH 3/5] core: Fix destroy factory in data source refresh expander --- terraform/node_data_refresh.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/terraform/node_data_refresh.go b/terraform/node_data_refresh.go index 9f83d3edd97d..45129b3cbf5e 100644 --- a/terraform/node_data_refresh.go +++ b/terraform/node_data_refresh.go @@ -40,7 +40,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er a.Config = n.Config return &NodeDestroyableDataResource{ - NodeAbstractResource: n.NodeAbstractResource, + NodeAbstractResource: a, } } From f63ad1dbd16814fcdd5dfb85e12e3fb91176335b Mon Sep 17 00:00:00 2001 From: Chris Marchesi Date: Sun, 30 Apr 2017 20:31:44 -0700 Subject: [PATCH 4/5] providers/test: Add count resource <-> data source dep count scale tests These tests cover the new refresh behaviour and would fail with "index out of range" if the refresh graph is not expanded to take new resources into account as well (scale out), or if it does not with expanded count orphans in a way that makes sure they don't get interpolated when walked (scale in). 
--- .../providers/test/resource_data_dep_test.go | 224 ++++++++++++++++++ 1 file changed, 224 insertions(+) create mode 100644 builtin/providers/test/resource_data_dep_test.go diff --git a/builtin/providers/test/resource_data_dep_test.go b/builtin/providers/test/resource_data_dep_test.go new file mode 100644 index 000000000000..109b4be661c3 --- /dev/null +++ b/builtin/providers/test/resource_data_dep_test.go @@ -0,0 +1,224 @@ +package test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +// TestResourceDataDep_alignedCountScaleOut tests to make sure interpolation +// works (namely without index errors) when a data source and a resource share +// the same count variable during scale-out with an existing state. +func TestResourceDataDep_alignedCountScaleOut(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: func(s *terraform.State) error { + return nil + }, + Steps: []resource.TestStep{ + { + Config: testResourceDataDepConfig(2), + }, + { + Config: testResourceDataDepConfig(4), + Check: resource.TestCheckOutput("out", "value_from_api,value_from_api,value_from_api,value_from_api"), + }, + }, + }) +} + +// TestResourceDataDep_alignedCountScaleIn tests to make sure interpolation +// works (namely without index errors) when a data source and a resource share +// the same count variable during scale-in with an existing state. 
+func TestResourceDataDep_alignedCountScaleIn(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: func(s *terraform.State) error { + return nil + }, + Steps: []resource.TestStep{ + { + Config: testResourceDataDepConfig(4), + }, + { + Config: testResourceDataDepConfig(2), + Check: resource.TestCheckOutput("out", "value_from_api,value_from_api"), + }, + }, + }) +} + +// TestDataResourceDep_alignedCountScaleOut functions like +// TestResourceDataDep_alignedCountScaleOut, but with the dependencies swapped +// (resource now depends on data source, a pretty regular use case, but +// included here to check for regressions). +func TestDataResourceDep_alignedCountScaleOut(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: func(s *terraform.State) error { + return nil + }, + Steps: []resource.TestStep{ + { + Config: testDataResourceDepConfig(2), + }, + { + Config: testDataResourceDepConfig(4), + Check: resource.TestCheckOutput("out", "test,test,test,test"), + }, + }, + }) +} + +// TestDataResourceDep_alignedCountScaleIn functions like +// TestResourceDataDep_alignedCountScaleIn, but with the dependencies swapped +// (resource now depends on data source, a pretty regular use case, but +// included here to check for regressions). +func TestDataResourceDep_alignedCountScaleIn(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: func(s *terraform.State) error { + return nil + }, + Steps: []resource.TestStep{ + { + Config: testDataResourceDepConfig(4), + }, + { + Config: testDataResourceDepConfig(2), + Check: resource.TestCheckOutput("out", "test,test"), + }, + }, + }) +} + +// TestResourceResourceDep_alignedCountScaleOut functions like +// TestResourceDataDep_alignedCountScaleOut, but with a resource-to-resource +// dependency instead, a pretty regular use case, but included here to check +// for regressions. 
+func TestResourceResourceDep_alignedCountScaleOut(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: func(s *terraform.State) error { + return nil + }, + Steps: []resource.TestStep{ + { + Config: testResourceResourceDepConfig(2), + }, + { + Config: testResourceResourceDepConfig(4), + Check: resource.TestCheckOutput("out", "test,test,test,test"), + }, + }, + }) +} + +// TestResourceResourceDep_alignedCountScaleIn functions like +// TestResourceDataDep_alignedCountScaleIn, but with a resource-to-resource +// dependency instead, a pretty regular use case, but included here to check +// for regressions. +func TestResourceResourceDep_alignedCountScaleIn(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: func(s *terraform.State) error { + return nil + }, + Steps: []resource.TestStep{ + { + Config: testResourceResourceDepConfig(4), + }, + { + Config: testResourceResourceDepConfig(2), + Check: resource.TestCheckOutput("out", "test,test"), + }, + }, + }) +} + +func testResourceDataDepConfig(count int) string { + return fmt.Sprintf(` +variable count { + default = "%d" +} + +resource "test_resource" "foo" { + count = "${var.count}" + required = "yes" + + required_map = { + "foo" = "bar" + } +} + +data "test_data_source" "bar" { + count = "${var.count}" + input = "${test_resource.foo.*.computed_read_only[count.index]}" +} + +output "out" { + value = "${join(",", data.test_data_source.bar.*.output)}" +} +`, count) +} + +func testDataResourceDepConfig(count int) string { + return fmt.Sprintf(` +variable count { + default = "%d" +} + +data "test_data_source" "foo" { + count = "${var.count}" + input = "test" +} + +resource "test_resource" "bar" { + count = "${var.count}" + required = "yes" + optional = "${data.test_data_source.foo.*.output[count.index]}" + + required_map = { + "foo" = "bar" + } +} + +output "out" { + value = "${join(",", test_resource.bar.*.optional)}" +} 
+`, count) +} + +func testResourceResourceDepConfig(count int) string { + return fmt.Sprintf(` +variable count { + default = "%d" +} + +resource "test_resource" "foo" { + count = "${var.count}" + required = "yes" + optional = "test" + + required_map = { + "foo" = "bar" + } +} + +resource "test_resource" "bar" { + count = "${var.count}" + required = "yes" + optional = "${test_resource.foo.*.optional[count.index]}" + + required_map = { + "foo" = "bar" + } +} + +output "out" { + value = "${join(",", test_resource.bar.*.optional)}" +} +`, count) +} From 11b4794612643b0f82a521471e71fce15acf9192 Mon Sep 17 00:00:00 2001 From: Chris Marchesi Date: Tue, 2 May 2017 20:36:38 -0700 Subject: [PATCH 5/5] core: Test for new refresh graph behaviour Tests on DynamicExpand for both resources and data sources, cover scale in/out scenarios, and also a verification for the behaviour of config orphans. --- terraform/graph_builder_refresh_test.go | 96 +++++++++++ terraform/node_data_refresh_test.go | 154 ++++++++++++++++++ terraform/node_resource_refresh_test.go | 154 ++++++++++++++++++ .../refresh-config-orphan/main.tf | 3 + .../refresh-data-scale-inout/main.tf | 3 + .../refresh-resource-scale-inout/main.tf | 3 + 6 files changed, 413 insertions(+) create mode 100644 terraform/graph_builder_refresh_test.go create mode 100644 terraform/node_data_refresh_test.go create mode 100644 terraform/node_resource_refresh_test.go create mode 100644 terraform/test-fixtures/refresh-config-orphan/main.tf create mode 100644 terraform/test-fixtures/refresh-data-scale-inout/main.tf create mode 100644 terraform/test-fixtures/refresh-resource-scale-inout/main.tf diff --git a/terraform/graph_builder_refresh_test.go b/terraform/graph_builder_refresh_test.go new file mode 100644 index 000000000000..e4383ab4ec5e --- /dev/null +++ b/terraform/graph_builder_refresh_test.go @@ -0,0 +1,96 @@ +package terraform + +import "testing" + +func TestRefreshGraphBuilder_configOrphans(t *testing.T) { + + m := testModule(t, 
"refresh-config-orphan") + + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_instance.foo.0": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "foo", + }, + }, + }, + "aws_instance.foo.1": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "bar", + }, + }, + }, + "aws_instance.foo.2": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "baz", + }, + }, + }, + "data.aws_instance.foo.0": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "foo", + }, + }, + }, + "data.aws_instance.foo.1": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "bar", + }, + }, + }, + "data.aws_instance.foo.2": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "baz", + }, + }, + }, + }, + }, + }, + } + + b := &RefreshGraphBuilder{ + Module: m, + State: state, + Providers: []string{"aws"}, + } + g, err := b.Build(rootModulePath) + if err != nil { + t.Fatalf("Error building graph: %s", err) + } + + actual := g.StringWithNodeTypes() + expected := `aws_instance.foo - *terraform.NodeRefreshableManagedResource + provider.aws - *terraform.NodeApplyableProvider +data.aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance + provider.aws - *terraform.NodeApplyableProvider +data.aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance + provider.aws - *terraform.NodeApplyableProvider +data.aws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance + provider.aws - *terraform.NodeApplyableProvider +provider.aws - *terraform.NodeApplyableProvider +provider.aws (close) - *terraform.graphNodeCloseProvider + aws_instance.foo - *terraform.NodeRefreshableManagedResource + data.aws_instance.foo[0] - 
*terraform.NodeRefreshableManagedResourceInstance + data.aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance + data.aws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance +` + if expected != actual { + t.Fatalf("Expected:\n%s\nGot:\n%s", expected, actual) + } +} diff --git a/terraform/node_data_refresh_test.go b/terraform/node_data_refresh_test.go new file mode 100644 index 000000000000..6aa3af37af53 --- /dev/null +++ b/terraform/node_data_refresh_test.go @@ -0,0 +1,154 @@ +package terraform + +import ( + "sync" + "testing" +) + +func TestNodeRefreshableDataResourceDynamicExpand_scaleOut(t *testing.T) { + var stateLock sync.RWMutex + + addr, err := ParseResourceAddress("data.aws_instance.foo") + if err != nil { + t.Fatalf("bad: %s", err) + } + + m := testModule(t, "refresh-data-scale-inout") + + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "data.aws_instance.foo.0": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "foo", + }, + }, + }, + "data.aws_instance.foo.1": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "bar", + }, + }, + }, + }, + }, + }, + } + + n := &NodeRefreshableDataResource{ + NodeAbstractCountResource: &NodeAbstractCountResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: addr, + Config: m.Config().Resources[0], + }, + }, + } + + g, err := n.DynamicExpand(&MockEvalContext{ + PathPath: []string{"root"}, + StateState: state, + StateLock: &stateLock, + }) + + actual := g.StringWithNodeTypes() + expected := `data.aws_instance.foo[0] - *terraform.NodeRefreshableDataResourceInstance +data.aws_instance.foo[1] - *terraform.NodeRefreshableDataResourceInstance +data.aws_instance.foo[2] - *terraform.NodeRefreshableDataResourceInstance +root - terraform.graphNodeRoot + data.aws_instance.foo[0] - *terraform.NodeRefreshableDataResourceInstance + 
data.aws_instance.foo[1] - *terraform.NodeRefreshableDataResourceInstance + data.aws_instance.foo[2] - *terraform.NodeRefreshableDataResourceInstance +` + if expected != actual { + t.Fatalf("Expected:\n%s\nGot:\n%s", expected, actual) + } +} + +func TestNodeRefreshableDataResourceDynamicExpand_scaleIn(t *testing.T) { + var stateLock sync.RWMutex + + addr, err := ParseResourceAddress("data.aws_instance.foo") + if err != nil { + t.Fatalf("bad: %s", err) + } + + m := testModule(t, "refresh-data-scale-inout") + + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "data.aws_instance.foo.0": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "foo", + }, + }, + }, + "data.aws_instance.foo.1": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "bar", + }, + }, + }, + "data.aws_instance.foo.2": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "baz", + }, + }, + }, + "data.aws_instance.foo.3": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "qux", + }, + }, + }, + }, + }, + }, + } + + n := &NodeRefreshableDataResource{ + NodeAbstractCountResource: &NodeAbstractCountResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: addr, + Config: m.Config().Resources[0], + }, + }, + } + + g, err := n.DynamicExpand(&MockEvalContext{ + PathPath: []string{"root"}, + StateState: state, + StateLock: &stateLock, + }) + + actual := g.StringWithNodeTypes() + expected := `data.aws_instance.foo[0] - *terraform.NodeRefreshableDataResourceInstance +data.aws_instance.foo[1] - *terraform.NodeRefreshableDataResourceInstance +data.aws_instance.foo[2] - *terraform.NodeRefreshableDataResourceInstance +data.aws_instance.foo[3] - *terraform.NodeDestroyableDataResource +root - terraform.graphNodeRoot + data.aws_instance.foo[0] - 
*terraform.NodeRefreshableDataResourceInstance + data.aws_instance.foo[1] - *terraform.NodeRefreshableDataResourceInstance + data.aws_instance.foo[2] - *terraform.NodeRefreshableDataResourceInstance + data.aws_instance.foo[3] - *terraform.NodeDestroyableDataResource +` + if expected != actual { + t.Fatalf("Expected:\n%s\nGot:\n%s", expected, actual) + } +} diff --git a/terraform/node_resource_refresh_test.go b/terraform/node_resource_refresh_test.go new file mode 100644 index 000000000000..b2ac4d346066 --- /dev/null +++ b/terraform/node_resource_refresh_test.go @@ -0,0 +1,154 @@ +package terraform + +import ( + "sync" + "testing" +) + +func TestNodeRefreshableManagedResourceDynamicExpand_scaleOut(t *testing.T) { + var stateLock sync.RWMutex + + addr, err := ParseResourceAddress("aws_instance.foo") + if err != nil { + t.Fatalf("bad: %s", err) + } + + m := testModule(t, "refresh-resource-scale-inout") + + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_instance.foo.0": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "foo", + }, + }, + }, + "aws_instance.foo.1": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "bar", + }, + }, + }, + }, + }, + }, + } + + n := &NodeRefreshableManagedResource{ + NodeAbstractCountResource: &NodeAbstractCountResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: addr, + Config: m.Config().Resources[0], + }, + }, + } + + g, err := n.DynamicExpand(&MockEvalContext{ + PathPath: []string{"root"}, + StateState: state, + StateLock: &stateLock, + }) + + actual := g.StringWithNodeTypes() + expected := `aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance +aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance +aws_instance.foo[2] - *terraform.NodePlannableResourceInstance +root - terraform.graphNodeRoot + aws_instance.foo[0] - 
*terraform.NodeRefreshableManagedResourceInstance + aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance + aws_instance.foo[2] - *terraform.NodePlannableResourceInstance +` + if expected != actual { + t.Fatalf("Expected:\n%s\nGot:\n%s", expected, actual) + } +} + +func TestNodeRefreshableManagedResourceDynamicExpand_scaleIn(t *testing.T) { + var stateLock sync.RWMutex + + addr, err := ParseResourceAddress("aws_instance.foo") + if err != nil { + t.Fatalf("bad: %s", err) + } + + m := testModule(t, "refresh-resource-scale-inout") + + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_instance.foo.0": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "foo", + }, + }, + }, + "aws_instance.foo.1": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "bar", + }, + }, + }, + "aws_instance.foo.2": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "baz", + }, + }, + }, + "aws_instance.foo.3": &ResourceState{ + Type: "aws_instance", + Deposed: []*InstanceState{ + &InstanceState{ + ID: "qux", + }, + }, + }, + }, + }, + }, + } + + n := &NodeRefreshableManagedResource{ + NodeAbstractCountResource: &NodeAbstractCountResource{ + NodeAbstractResource: &NodeAbstractResource{ + Addr: addr, + Config: m.Config().Resources[0], + }, + }, + } + + g, err := n.DynamicExpand(&MockEvalContext{ + PathPath: []string{"root"}, + StateState: state, + StateLock: &stateLock, + }) + + actual := g.StringWithNodeTypes() + expected := `aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance +aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance +aws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance +aws_instance.foo[3] - *terraform.NodeRefreshableManagedResourceInstance +root - terraform.graphNodeRoot + aws_instance.foo[0] - 
*terraform.NodeRefreshableManagedResourceInstance + aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance + aws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance + aws_instance.foo[3] - *terraform.NodeRefreshableManagedResourceInstance +` + if expected != actual { + t.Fatalf("Expected:\n%s\nGot:\n%s", expected, actual) + } +} diff --git a/terraform/test-fixtures/refresh-config-orphan/main.tf b/terraform/test-fixtures/refresh-config-orphan/main.tf new file mode 100644 index 000000000000..acef373b35de --- /dev/null +++ b/terraform/test-fixtures/refresh-config-orphan/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + count = 3 +} diff --git a/terraform/test-fixtures/refresh-data-scale-inout/main.tf b/terraform/test-fixtures/refresh-data-scale-inout/main.tf new file mode 100644 index 000000000000..480ba948352c --- /dev/null +++ b/terraform/test-fixtures/refresh-data-scale-inout/main.tf @@ -0,0 +1,3 @@ +data "aws_instance" "foo" { + count = 3 +} diff --git a/terraform/test-fixtures/refresh-resource-scale-inout/main.tf b/terraform/test-fixtures/refresh-resource-scale-inout/main.tf new file mode 100644 index 000000000000..acef373b35de --- /dev/null +++ b/terraform/test-fixtures/refresh-resource-scale-inout/main.tf @@ -0,0 +1,3 @@ +resource "aws_instance" "foo" { + count = 3 +}