core: Don't try to register separate checkable resources for each module instance #31860

Merged 3 commits on Sep 26, 2022.

22 changes: 22 additions & 0 deletions internal/dag/graph.go
@@ -230,6 +230,28 @@ func (g *Graph) Connect(edge Edge) {
s.Add(source)
}

// Subsume imports all of the nodes and edges from the given graph into the
// receiver, leaving the given graph unchanged.
//
// If any of the nodes in the given graph are already present in the receiver
// then the existing node will be retained and any new edges from the given
// graph will be connected with it.
//
// If the given graph has edges in common with the receiver then they will be
// ignored, because each pair of nodes can only be connected once.
func (g *Graph) Subsume(other *Graph) {
// We're using Set.Filter just as a "visit each element" here, so we're
// not doing anything with the result (which will always be empty).
other.vertices.Filter(func(i interface{}) bool {
g.Add(i)
return false
})
other.edges.Filter(func(i interface{}) bool {
g.Connect(i.(Edge))
return false
})
}

// String outputs some human-friendly output for the graph structure.
func (g *Graph) StringWithNodeTypes() string {
var buf bytes.Buffer
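As an illustration of the new helper, here is a minimal usage sketch of Subsume, with string vertices standing in for real graph nodes; the Add, Connect, and BasicEdge calls are the dag package's existing API, and since internal/dag is not importable from outside the Terraform module, a snippet like this would have to live inside the repository:

package main

import "github.com/hashicorp/terraform/internal/dag"

func main() {
	var g, other dag.Graph

	g.Add("a")
	g.Add("b")
	g.Connect(dag.BasicEdge("a", "b"))

	other.Add("b") // already present in g, so the existing vertex is retained
	other.Add("c")
	other.Connect(dag.BasicEdge("b", "c"))

	g.Subsume(&other)
	// g now holds vertices a, b, c and edges a->b and b->c, each connected
	// only once; other is left unchanged.
}
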
123 changes: 123 additions & 0 deletions internal/terraform/context_plan2_test.go
@@ -401,6 +401,129 @@ resource "test_resource" "b" {
}
}

func TestContext2Plan_resourceChecksInExpandedModule(t *testing.T) {
// When a resource is in a nested module we have two levels of expansion
// to do: first expand the module the resource is declared in, and then
// expand the resource itself.
//
// In earlier versions of Terraform we did that expansion as two levels
// of DynamicExpand, which led to a bug where we didn't have any central
// location from which to register all of the instances of a checkable
// resource.
//
// We now handle the full expansion all in one graph node and one dynamic
// subgraph, which avoids the problem. This is a regression test for the
// earlier bug. If this test is panicking with "duplicate checkable objects
// report" then that suggests the bug is reintroduced and we're now back
// to reporting each module instance separately again, which is incorrect.

p := testProvider("test")
p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
Provider: providers.Schema{
Block: &configschema.Block{},
},
ResourceTypes: map[string]providers.Schema{
"test": {
Block: &configschema.Block{},
},
},
}
p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
resp.NewState = req.PriorState
return resp
}
p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
resp.PlannedState = cty.EmptyObjectVal
return resp
}
p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
resp.NewState = req.PlannedState
return resp
}

m := testModuleInline(t, map[string]string{
"main.tf": `
module "child" {
source = "./child"
count = 2 # must be at least 2 for this test to be valid
}
`,
"child/child.tf": `
locals {
a = "a"
}

resource "test" "test1" {
lifecycle {
postcondition {
# It doesn't matter what this checks as long as it
# passes, because if we don't handle expansion properly
# then we'll crash before we even get to evaluating this.
condition = local.a == local.a
error_message = "Postcondition failed."
}
}
}

resource "test" "test2" {
count = 2

lifecycle {
postcondition {
# It doesn't matter what this checks as long as it
# passes, because if we don't handle expansion properly
# then we'll crash before we even get to evaluating this.
condition = local.a == local.a
error_message = "Postcondition failed."
}
}
}
`,
})

ctx := testContext2(t, &ContextOpts{
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
},
})

priorState := states.NewState()
plan, diags := ctx.Plan(m, priorState, DefaultPlanOpts)
assertNoErrors(t, diags)

resourceInsts := []addrs.AbsResourceInstance{
mustResourceInstanceAddr("module.child[0].test.test1"),
mustResourceInstanceAddr("module.child[0].test.test2[0]"),
mustResourceInstanceAddr("module.child[0].test.test2[1]"),
mustResourceInstanceAddr("module.child[1].test.test1"),
mustResourceInstanceAddr("module.child[1].test.test2[0]"),
mustResourceInstanceAddr("module.child[1].test.test2[1]"),
}

for _, instAddr := range resourceInsts {
t.Run(fmt.Sprintf("results for %s", instAddr), func(t *testing.T) {
if rc := plan.Changes.ResourceInstance(instAddr); rc != nil {
if got, want := rc.Action, plans.Create; got != want {
t.Errorf("wrong action for %s\ngot: %s\nwant: %s", instAddr, got, want)
}
if got, want := rc.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
t.Errorf("wrong action reason for %s\ngot: %s\nwant: %s", instAddr, got, want)
}
} else {
t.Errorf("no planned change for %s", instAddr)
}

if checkResult := plan.Checks.GetObjectResult(instAddr); checkResult != nil {
if got, want := checkResult.Status, checks.StatusPass; got != want {
t.Errorf("wrong check status for %s\ngot: %s\nwant: %s", instAddr, got, want)
}
} else {
t.Errorf("no check result for %s", instAddr)
}
})
}
}

func TestContext2Plan_dataResourceChecksManagedResourceChange(t *testing.T) {
// This tests the situation where the remote system contains data that
// isn't valid per a data resource postcondition, but that the
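The failure mode this test guards against can be pictured with a hypothetical registry (not Terraform's actual checks API; the names below are invented for illustration) that rejects a second report for the same configuration address:

package main

import "fmt"

// checkableRegistry mimics the "duplicate checkable objects report" panic the
// test comment refers to: each checkable config address may be reported once,
// together with all of its instances.
type checkableRegistry struct {
	seen map[string]bool
}

func (r *checkableRegistry) reportCheckableObjects(configAddr string, instances []string) {
	if r.seen[configAddr] {
		panic(fmt.Sprintf("duplicate checkable objects report for %s", configAddr))
	}
	r.seen[configAddr] = true
	fmt.Printf("registered %s: %v\n", configAddr, instances)
}

func main() {
	r := &checkableRegistry{seen: map[string]bool{}}

	// A single graph node that performs both levels of expansion can report
	// test.test1 once, covering every module instance: this is what the
	// fixed design does.
	r.reportCheckableObjects("test.test1", []string{
		"module.child[0].test.test1",
		"module.child[1].test.test1",
	})

	// The earlier two-level design effectively reported once per module
	// instance; a second call for the same config address would panic here.
	// r.reportCheckableObjects("test.test1", []string{"module.child[1].test.test1"})
}
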
5 changes: 3 additions & 2 deletions internal/terraform/graph.go
@@ -82,8 +82,9 @@ func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics {
log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v))

g, err := ev.DynamicExpand(vertexCtx)
- if err != nil {
- diags = diags.Append(err)
+ diags = diags.Append(err)
+ if diags.HasErrors() {
+ log.Printf("[TRACE] vertex %q: failed expanding dynamic subgraph: %s", dag.VertexName(v), err)
return
}
if g != nil {
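The revised walk appends whatever DynamicExpand returned and only returns early when the combined diagnostics actually contain errors, rather than bailing out on any non-nil value. A minimal sketch of that distinction, assuming the internal tfdiags package behaves as it is used elsewhere in this repository (so, like the dag example above, it only compiles from within the Terraform module):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/internal/tfdiags"
)

func main() {
	var diags tfdiags.Diagnostics

	// A warning-only diagnostic: present in the set, but not an error.
	diags = diags.Append(tfdiags.Sourceless(
		tfdiags.Warning,
		"Something worth mentioning",
		"Warning-level diagnostics alone do not trip HasErrors.",
	))
	fmt.Println(diags.HasErrors()) // false: the walk would continue

	// An actual error flips HasErrors, which is when the walk logs the
	// failure and returns early.
	diags = diags.Append(tfdiags.Sourceless(
		tfdiags.Error,
		"Something fatal",
		"Now the walk stops expanding this vertex.",
	))
	fmt.Println(diags.HasErrors()) // true
}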