Make the pre-destroy refresh a full plan #32208

Merged
merged 1 commit on Nov 17, 2022
23 changes: 21 additions & 2 deletions internal/terraform/context_plan.go
@@ -33,6 +33,16 @@ type PlanOpts struct {
// instance using its corresponding provider.
SkipRefresh bool

// PreDestroyRefresh indicates that this plan is being used to refresh
// the state immediately before a destroy plan.
// FIXME: This is a temporary fix to allow the pre-destroy refresh to
// succeed. The refresh during destroy needs to be a special case which
// allows for missing instances in the state and avoids blocking on
// failing condition checks. The destroy plan itself should be
// responsible for this special case of refreshing, and the separate
// pre-destroy plan removed entirely.
PreDestroyRefresh bool

// SetVariables are the raw values for root module variables as provided
// by the user who is requesting the run, prior to any normalization or
// substitution of defaults. See the documentation for the InputValue
@@ -337,8 +347,16 @@ func (c *Context) destroyPlan(config *configs.Config, prevRunState *states.State
if !opts.SkipRefresh && !prevRunState.Empty() {
log.Printf("[TRACE] Context.destroyPlan: calling Context.plan to get the effect of refreshing the prior state")
refreshOpts := *opts
refreshOpts.Mode = plans.RefreshOnlyMode
refreshPlan, refreshDiags := c.refreshOnlyPlan(config, prevRunState, &refreshOpts)
refreshOpts.Mode = plans.NormalMode
refreshOpts.PreDestroyRefresh = true

// FIXME: A normal plan is required here to refresh the state, because
// the state and configuration may not match during a destroy, and a
// refresh-only plan can fail with evaluation errors. In the future the
// destroy plan should take care of refreshing instances itself, where
// the special cases of evaluation and skipping condition checks can be
// done.
refreshPlan, refreshDiags := c.plan(config, prevRunState, &refreshOpts)
if refreshDiags.HasErrors() {
// NOTE: Normally we'd append diagnostics regardless of whether
// there are errors, just in case there are warnings we'd want to
@@ -558,6 +576,7 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State,
Targets: opts.Targets,
ForceReplace: opts.ForceReplace,
skipRefresh: opts.SkipRefresh,
preDestroyRefresh: opts.PreDestroyRefresh,
Operation: walkPlan,
}).Build(addrs.RootModuleInstance)
return graph, walkPlan, diags
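The hunk above is the heart of the change: the pre-destroy refresh is no longer a refresh-only plan but a normal-mode plan carrying the new PreDestroyRefresh flag. A condensed sketch of the resulting flow in destroyPlan (error handling abbreviated; PriorState is an existing field on Terraform's plans.Plan, though its use here is inferred rather than shown in the hunk):

// Run the pre-destroy refresh as a NORMAL plan with PreDestroyRefresh
// set: evaluation can then tolerate missing instances, and failing
// condition checks are downgraded to warnings instead of blocking.
refreshOpts := *opts
refreshOpts.Mode = plans.NormalMode
refreshOpts.PreDestroyRefresh = true

refreshPlan, refreshDiags := c.plan(config, prevRunState, &refreshOpts)
if refreshDiags.HasErrors() {
	return nil, refreshDiags
}

// The destroy plan then starts from the refreshed state rather than
// from prevRunState directly (assumed continuation, not in the hunk).
priorState := refreshPlan.PriorState.DeepCopy()
_ = priorState // input state for the subsequent destroy walk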
100 changes: 100 additions & 0 deletions internal/terraform/context_plan2_test.go
@@ -3827,3 +3827,103 @@ resource "test_object" "b" {
_, diags = ctx.Plan(m, state, opts)
assertNoErrors(t, diags)
}

func TestContext2Plan_destroyPartialState(t *testing.T) {
m := testModuleInline(t, map[string]string{
"main.tf": `
resource "test_object" "a" {
}

output "out" {
value = module.mod.out
}

module "mod" {
source = "./mod"
}
`,

"./mod/main.tf": `
resource "test_object" "a" {
count = 2

lifecycle {
precondition {
# test_object.b has already been destroyed, so referencing the first
# instance must not fail during a destroy plan.
condition = test_object.b[0].test_string == "invalid"
error_message = "should not block destroy"
}
precondition {
# this failing condition should not block a destroy plan
condition = !local.continue
error_message = "should not block destroy"
}
}
}

resource "test_object" "b" {
count = 2
}

locals {
continue = true
}

output "out" {
# the reference to test_object.b[0] may not be valid during a destroy plan,
# but should not fail.
value = local.continue ? test_object.a[1].test_string != "invalid" && test_object.b[0].test_string != "invalid" : false

precondition {
# test_object.b has already been destroyed, so referencing the first
# instance must not fail during a destroy plan.
condition = test_object.b[0].test_string == "invalid"
error_message = "should not block destroy"
}
precondition {
# this failing condition should not block a destroy plan
condition = test_object.a[0].test_string == "invalid"
error_message = "should not block destroy"
}
}
`})

p := simpleMockProvider()

// This state could be the result of a failed destroy, leaving only 2
// remaining instances. We want to be able to continue the destroy to
// remove everything without blocking on invalid references or failing
// conditions.
state := states.NewState()
mod := state.EnsureModule(addrs.RootModuleInstance.Child("mod", addrs.NoKey))
mod.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.a[0]").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectTainted,
AttrsJSON: []byte(`{"test_string":"current"}`),
Dependencies: []addrs.ConfigResource{},
},
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`),
)
mod.SetResourceInstanceCurrent(
mustResourceInstanceAddr("test_object.a[1]").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectTainted,
AttrsJSON: []byte(`{"test_string":"current"}`),
Dependencies: []addrs.ConfigResource{},
},
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`),
)

ctx := testContext2(t, &ContextOpts{
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
},
})

_, diags := ctx.Plan(m, state, &PlanOpts{
Mode: plans.DestroyMode,
})
assertNoErrors(t, diags)
}
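A hedged extension of the test above (not part of this PR): capture the returned plan and assert that every proposed change is a delete. Changes.Resources, the Action and Addr fields, and plans.Delete are existing plan types; the exact assertion style is illustrative.

plan, diags := ctx.Plan(m, state, &PlanOpts{
	Mode: plans.DestroyMode,
})
assertNoErrors(t, diags)

// With only the two tainted instances left in state, the destroy plan
// should propose nothing but deletions.
for _, c := range plan.Changes.Resources {
	if c.Action != plans.Delete {
		t.Errorf("expected delete for %s, got %s", c.Addr, c.Action)
	}
}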
8 changes: 7 additions & 1 deletion internal/terraform/graph_builder_plan.go
@@ -49,6 +49,11 @@ type PlanGraphBuilder struct {
// skipRefresh indicates that we should skip refreshing managed resources
skipRefresh bool

// preDestroyRefresh indicates that we are executing the refresh that
// happens immediately before a destroy plan. This uses the normal
// planning mode, so skipPlanChanges cannot be set.
preDestroyRefresh bool

// skipPlanChanges indicates that we should skip the step of comparing
// prior state with configuration and generating planned changes to
// resource instances. (This is for the "refresh only" planning mode,
@@ -111,7 +116,7 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
&LocalTransformer{Config: b.Config},
&OutputTransformer{
Config: b.Config,
RefreshOnly: b.skipPlanChanges,
RefreshOnly: b.skipPlanChanges || b.preDestroyRefresh,
PlanDestroy: b.Operation == walkPlanDestroy,

// NOTE: We currently treat anything built with the plan graph
@@ -214,6 +219,7 @@ func (b *PlanGraphBuilder) initPlan() {
NodeAbstractResource: a,
skipRefresh: b.skipRefresh,
skipPlanChanges: b.skipPlanChanges,
preDestroyRefresh: b.preDestroyRefresh,
forceReplace: b.ForceReplace,
}
}
9 changes: 8 additions & 1 deletion internal/terraform/node_resource_abstract_instance.go
@@ -37,6 +37,8 @@ type NodeAbstractResourceInstance struct {
storedProviderConfig addrs.AbsProviderConfig

Dependencies []addrs.ConfigResource

preDestroyRefresh bool
}

// NewNodeAbstractResourceInstance creates an abstract resource instance graph
@@ -682,6 +684,11 @@ func (n *NodeAbstractResourceInstance) plan(
return plan, state, keyData, diags.Append(err)
}

checkRuleSeverity := tfdiags.Error
if n.preDestroyRefresh {
checkRuleSeverity = tfdiags.Warning
}

if plannedChange != nil {
// If we already planned the action, we stick to that plan
createBeforeDestroy = plannedChange.Action == plans.CreateThenDelete
@@ -708,7 +715,7 @@
addrs.ResourcePrecondition,
n.Config.Preconditions,
ctx, n.Addr, keyData,
tfdiags.Error,
checkRuleSeverity,
)
diags = diags.Append(checkDiags)
if diags.HasErrors() {
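The severity-downgrade pattern above recurs in the plan-instance nodes below: compute the check-rule severity once, then pass it wherever tfdiags.Error or tfdiags.Warning was previously hard-coded. A minimal consolidated sketch, using the names visible in the surrounding hunks (evalCheckRules is the helper whose call sites these hunks modify; its name is not itself visible in the excerpt):

// During a pre-destroy refresh, failing condition checks are reported
// as warnings so they cannot block the refresh that has to run before
// a destroy.
checkRuleSeverity := tfdiags.Error
if n.preDestroyRefresh {
	checkRuleSeverity = tfdiags.Warning
}

checkDiags := evalCheckRules(
	addrs.ResourcePrecondition,
	n.Config.Preconditions,
	ctx, n.Addr, keyData,
	checkRuleSeverity, // previously a hard-coded severity
)
diags = diags.Append(checkDiags)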
3 changes: 3 additions & 0 deletions internal/terraform/node_resource_plan.go
@@ -25,6 +25,8 @@ type nodeExpandPlannableResource struct {
// skipRefresh indicates that we should skip refreshing individual instances
skipRefresh bool

preDestroyRefresh bool

// skipPlanChanges indicates we should skip trying to plan change actions
// for any instances.
skipPlanChanges bool
@@ -328,6 +330,7 @@ func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext,
a.ProviderMetas = n.ProviderMetas
a.dependsOn = n.dependsOn
a.Dependencies = n.dependencies
a.preDestroyRefresh = n.preDestroyRefresh

return &NodePlannableResourceInstance{
NodeAbstractResourceInstance: a,
13 changes: 9 additions & 4 deletions internal/terraform/node_resource_plan_instance.go
@@ -83,7 +83,7 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di
}

checkRuleSeverity := tfdiags.Error
if n.skipPlanChanges {
if n.skipPlanChanges || n.preDestroyRefresh {
checkRuleSeverity = tfdiags.Warning
}

@@ -128,6 +128,11 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
var change *plans.ResourceInstanceChange
var instanceRefreshState *states.ResourceInstanceObject

checkRuleSeverity := tfdiags.Error
if n.skipPlanChanges || n.preDestroyRefresh {
checkRuleSeverity = tfdiags.Warning
}

_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
diags = diags.Append(err)
if diags.HasErrors() {
@@ -280,7 +285,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
addrs.ResourcePostcondition,
n.Config.Postconditions,
ctx, n.ResourceInstanceAddr(), repeatData,
tfdiags.Error,
checkRuleSeverity,
)
diags = diags.Append(checkDiags)
} else {
@@ -298,7 +303,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
addrs.ResourcePrecondition,
n.Config.Preconditions,
ctx, addr, repeatData,
tfdiags.Warning,
checkRuleSeverity,
)
diags = diags.Append(checkDiags)

@@ -321,7 +326,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext)
addrs.ResourcePostcondition,
n.Config.Postconditions,
ctx, addr, repeatData,
tfdiags.Warning,
checkRuleSeverity,
)
diags = diags.Append(checkDiags)
}