Skip to content

Commit

Permalink
prevent cycles when connecting destroy nodes
Browse files Browse the repository at this point in the history
When adding destroy edges between resources from different providers,
and a provider itself depends on the other provider's resources, we can
get cycles in the final dependency graph.

The problem is a little deeper than simply not connecting these nodes,
since the edges are still needed when doing a full destroy operation.
For now we can get by assuming the edges are required, and reverting
them only if they result in a cycle. This works because destroy edges
are the last edges added to managed resources during graph building.

This was rarely a problem before v1.3, because noop nodes were not added
to the apply graph, and unused values were aggressively pruned. In v1.3
however all nodes are kept in the graph so that postcondition blocks are
always evaluated during apply, which increases the chance that these
cycles appear.
  • Loading branch information
jbardin committed Sep 23, 2022
1 parent 4153685 commit abbfa0c
Show file tree
Hide file tree
Showing 2 changed files with 73 additions and 2 deletions.
14 changes: 14 additions & 0 deletions internal/terraform/context_apply2_test.go
Expand Up @@ -1201,6 +1201,20 @@ output "out" {
state, diags := ctx.Apply(plan, m)
assertNoErrors(t, diags)

// Add some changes to make the planning more complex.
// Taint the object to make sure a replacement works in the plan.
otherObjAddr := mustResourceInstanceAddr("other_object.other")
otherObj := state.ResourceInstance(otherObjAddr)
otherObj.Current.Status = states.ObjectTainted
// Force a change which needs to be reverted.
testObjAddr := mustResourceInstanceAddr(`module.mod["a"].test_object.a`)
testObjA := state.ResourceInstance(testObjAddr)
testObjA.Current.AttrsJSON = []byte(`{"test_bool":null,"test_list":null,"test_map":null,"test_number":null,"test_string":"changed"}`)

_, diags = ctx.Plan(m, state, opts)
assertNoErrors(t, diags)
return

otherProvider.ConfigureProviderCalled = false
otherProvider.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
// check that our config is complete, even during a destroy plan
Expand Down
61 changes: 59 additions & 2 deletions internal/terraform/transform_destroy_edge.go
Expand Up @@ -39,6 +39,63 @@ type GraphNodeCreator interface {
// still subnets.
type DestroyEdgeTransformer struct{}

// tryInterProviderDestroyEdge connects a destroy edge between from and to,
// then backs the edge out again if it crosses a provider boundary and
// introduced a cycle into the graph.
//
// FIXME: The cycles can arise in valid configurations when a provider depends
// on resources from another provider. In the future we may want to inspect
// the dependencies of the providers themselves, to avoid needing to use the
// blunt hammer of checking for cycles.
//
// A reduced example of this dependency problem looks something like:
/*
createA <- createB
 |    \        /   |
 |    providerB <- |
 v        \        v
destroyA ------------->  destroyB
*/
//
// The edge from destroyA to destroyB would be skipped in this case, but there
// are still other combinations of changes which could connect the A and B
// groups around providerB in various ways.
//
// The most difficult problem here happens during a full destroy operation.
// That creates a special case where resources on which a provider depends must
// exist for evaluation before they are destroyed. This means that any provider
// dependencies must wait until all that provider's resources have first been
// destroyed. This is where these cross-provider edges are still required to
// ensure the correct order.
func (t *DestroyEdgeTransformer) tryInterProviderDestroyEdge(g *Graph, from, to dag.Vertex) {
	// Always connect first; cycle detection below needs the edge in place,
	// and same-provider edges are kept unconditionally.
	edge := dag.BasicEdge(from, to)
	g.Connect(edge)

	// If either endpoint doesn't consume a provider there is no provider
	// boundary to cross, so the edge is kept as-is.
	fromConsumer, ok := from.(GraphNodeProviderConsumer)
	if !ok {
		return
	}

	toConsumer, ok := to.(GraphNodeProviderConsumer)
	if !ok {
		return
	}

	// The cycles we are looking for only appear between providers, so don't
	// waste time checking for cycles when both nodes use the same provider.
	if fromConsumer.Provider().Equals(toConsumer.Provider()) {
		return
	}

	// This edge crosses providers; if it created a cycle, revert it.
	if len(g.Cycles()) > 0 {
		log.Printf("[DEBUG] DestroyEdgeTransformer: skipping inter-provider edge %s->%s which creates a cycle",
			dag.VertexName(from), dag.VertexName(to))
		g.RemoveEdge(edge)
	}
}

func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
// Build a map of what is being destroyed (by address string) to
// the list of destroyers.
Expand Down Expand Up @@ -93,7 +150,7 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
for _, desDep := range destroyersByResource[resAddr.String()] {
if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(desDep, des) {
log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(desDep), dag.VertexName(des))
g.Connect(dag.BasicEdge(desDep, des))
t.tryInterProviderDestroyEdge(g, desDep, des)
} else {
log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s => %s inter-module-instance dependency\n", dag.VertexName(desDep), dag.VertexName(des))
}
Expand All @@ -105,7 +162,7 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
for _, createDep := range creators[resAddr.String()] {
if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(createDep, des) {
log.Printf("[DEBUG] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(createDep), dag.VertexName(des))
g.Connect(dag.BasicEdge(createDep, des))
t.tryInterProviderDestroyEdge(g, createDep, des)
} else {
log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s => %s inter-module-instance dependency\n", dag.VertexName(createDep), dag.VertexName(des))
}
Expand Down

0 comments on commit abbfa0c

Please sign in to comment.