avoid re-writing state for noop applies
We need to avoid re-writing the state for every NoOp apply. We may
still be evaluating the instance to account for any side-effects in the
condition checks, however the state of the instance has not changed.
Re-writing the state is a non-trivial operation, which may require
encoding a fairly large instance state and re-serializing the entire
state blob, so it is best avoided if possible.
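A rough sketch of the idea follows (a toy model only — Action, InstanceState, applyInstance, and the other names below are invented for illustration and are not Terraform's actual types or API): conditions are still evaluated for a NoOp instance, but the expensive state write is skipped.

package main

import "fmt"

type Action int

const (
	NoOp Action = iota
	Update
)

type InstanceState struct {
	Attrs map[string]string
}

// evaluateConditions stands in for the condition checks that must still
// run for a NoOp instance, since they may have apply-time side effects.
func evaluateConditions(name string) {
	fmt.Printf("evaluating conditions for %s\n", name)
}

// writeState stands in for the expensive path: encoding the instance
// state and re-serializing the entire state blob.
func writeState(name string, s *InstanceState) {
	fmt.Printf("re-serializing state for %s: %v\n", name, s.Attrs)
}

// applyInstance still evaluates the instance, but skips the state write
// entirely when the planned action is a NoOp.
func applyInstance(name string, action Action, prior *InstanceState) {
	evaluateConditions(name)
	if action == NoOp {
		// The state has not changed, so the prior state remains
		// correct; avoid the costly re-write.
		return
	}
	writeState(name, prior)
}

func main() {
	s := &InstanceState{Attrs: map[string]string{"test_string": "y"}}
	applyInstance("test_object.y", NoOp, s)   // conditions run, no state write
	applyInstance("test_object.x", Update, s) // state write happens
}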
jbardin committed Nov 1, 2022
1 parent eb88ccb commit ffe2e39
Showing 5 changed files with 118 additions and 28 deletions.
71 changes: 71 additions & 0 deletions internal/terraform/context_apply2_test.go
@@ -1713,3 +1713,74 @@ output "from_resource" {
t.Fatalf("unexpected check %s: %s\n", outCheck.Status, outCheck.FailureMessages)
}
}

// NoOp changes may have conditions to evaluate, but should not cause the
// entire resource to be re-planned and re-applied.
func TestContext2Apply_noRePlanNoOp(t *testing.T) {
m := testModuleInline(t, map[string]string{
"main.tf": `
resource "test_object" "x" {
}
resource "test_object" "y" {
# test_object.x is being created, so this precondition must be evaluated
# during apply; however, this resource should otherwise be a NoOp.
lifecycle {
precondition {
condition = test_object.x.test_string == null
error_message = "test_object.x.test_string should be null"
}
}
}
`})

p := simpleMockProvider()
// make test_string a computed attribute, so its value is unknown during plan
testString := p.GetProviderSchemaResponse.ResourceTypes["test_object"].Block.Attributes["test_string"]
testString.Computed = true
testString.Optional = false

yAddr := mustResourceInstanceAddr("test_object.y")

state := states.NewState()
mod := state.RootModule()
mod.SetResourceInstanceCurrent(
yAddr.Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{"test_string":"y"}`),
},
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`),
)

ctx := testContext2(t, &ContextOpts{
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
},
})

opts := SimplePlanOpts(plans.NormalMode, nil)
plan, diags := ctx.Plan(m, state, opts)
assertNoErrors(t, diags)

for _, c := range plan.Changes.Resources {
if c.Addr.Equal(yAddr) && c.Action != plans.NoOp {
t.Fatalf("unexpected %s change for test_object.y", c.Action)
}
}

// test_object.y is a NoOp change from the plan, but is included in the
// graph due to the conditions which must be evaluated. This however should
// not cause the resource to be re-planned.
p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
testString := req.ProposedNewState.GetAttr("test_string")
if !testString.IsNull() && testString.AsString() == "y" {
resp.Diagnostics = resp.Diagnostics.Append(errors.New("Unexpected apply-time plan for test_object.y. Original plan was a NoOp"))
}
resp.PlannedState = req.ProposedNewState
return resp
}

_, diags = ctx.Apply(plan, m)
assertNoErrors(t, diags)
}
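To exercise just this test from the repository root (assuming a checked-out repository and a standard Go toolchain), something like the following should run it in isolation:

go test ./internal/terraform -run TestContext2Apply_noRePlanNoOp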
45 changes: 23 additions & 22 deletions internal/terraform/node_resource_abstract_instance.go
@@ -715,6 +715,13 @@ func (n *NodeAbstractResourceInstance) plan(
return plan, state, keyData, diags // failed preconditions prevent further evaluation
}

// If we have a previous plan and the action was a noop, then the only
// reason we're in this method is to evaluate the preconditions. There's
// no need to re-plan this resource.
if plannedChange != nil && plannedChange.Action == plans.NoOp {
return plannedChange, currentState.DeepCopy(), keyData, diags
}

origConfigVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
@@ -2059,44 +2066,38 @@ func (n *NodeAbstractResourceInstance) evalDestroyProvisionerConfig(ctx EvalCont
}

// apply accepts an applyConfig, instead of using n.Config, so destroy plans can
// send a nil config. Most of the errors generated in apply are returned as
// diagnostics, but if provider.ApplyResourceChange itself fails, that error is
// returned as an error and nil diags are returned.
// send a nil config. The keyData information can be empty if the config is
// nil, since it is only used to evaluate the configuration.
func (n *NodeAbstractResourceInstance) apply(
ctx EvalContext,
state *states.ResourceInstanceObject,
change *plans.ResourceInstanceChange,
applyConfig *configs.Resource,
createBeforeDestroy bool) (*states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) {
keyData instances.RepetitionData,
createBeforeDestroy bool) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {

var diags tfdiags.Diagnostics
var keyData instances.RepetitionData
if state == nil {
state = &states.ResourceInstanceObject{}
}

if applyConfig != nil {
forEach, _ := evaluateForEachExpression(applyConfig.ForEach, ctx)
keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach)
}

if change.Action == plans.NoOp {
// If this is a no-op change then we don't want to actually change
// anything, so we'll just echo back the state we were given and
// let our internal checks and updates proceed.
log.Printf("[TRACE] NodeAbstractResourceInstance.apply: skipping %s because it has no planned action", n.Addr)
return state, keyData, diags
return state, diags
}

provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
if err != nil {
return nil, keyData, diags.Append(err)
return nil, diags.Append(err)
}
schema, _ := providerSchema.SchemaForResourceType(n.Addr.Resource.Resource.Mode, n.Addr.Resource.Resource.Type)
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type))
return nil, keyData, diags
return nil, diags
}

log.Printf("[INFO] Starting apply for %s", n.Addr)
@@ -2107,7 +2108,7 @@ func (n *NodeAbstractResourceInstance) apply(
configVal, _, configDiags = ctx.EvaluateBlock(applyConfig.Config, schema, nil, keyData)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, keyData, diags
return nil, diags
}
}

@@ -2132,13 +2133,13 @@ func (n *NodeAbstractResourceInstance) apply(
strings.Join(unknownPaths, "\n"),
),
))
return nil, keyData, diags
return nil, diags
}

metaConfigVal, metaDiags := n.providerMetas(ctx)
diags = diags.Append(metaDiags)
if diags.HasErrors() {
return nil, keyData, diags
return nil, diags
}

log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr, change.Action)
@@ -2166,7 +2167,7 @@ func (n *NodeAbstractResourceInstance) apply(
Status: state.Status,
Value: change.After,
}
return newState, keyData, diags
return newState, diags
}

resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{
@@ -2237,7 +2238,7 @@ func (n *NodeAbstractResourceInstance) apply(
// Bail early in this particular case, because an object that doesn't
// conform to the schema can't be saved in the state anyway -- the
// serializer will reject it.
return nil, keyData, diags
return nil, diags
}

// After this point we have a type-conforming result object and so we
@@ -2363,7 +2364,7 @@ func (n *NodeAbstractResourceInstance) apply(
// prior state as the new value, making this effectively a no-op. If
// the item really _has_ been deleted then our next refresh will detect
// that and fix it up.
return state.DeepCopy(), keyData, diags
return state.DeepCopy(), diags

case diags.HasErrors() && !newVal.IsNull():
// if we have an error, make sure we restore the object status in the new state
@@ -2380,7 +2381,7 @@ func (n *NodeAbstractResourceInstance) apply(
newState.Dependencies = state.Dependencies
}

return newState, keyData, diags
return newState, diags

case !newVal.IsNull():
// Non-error case with a new state
@@ -2390,11 +2391,11 @@ func (n *NodeAbstractResourceInstance) apply(
Private: resp.Private,
CreateBeforeDestroy: createBeforeDestroy,
}
return newState, keyData, diags
return newState, diags

default:
// Non-error case, where the object was deleted
return nil, keyData, diags
return nil, diags
}
}

24 changes: 20 additions & 4 deletions internal/terraform/node_resource_apply_instance.go
@@ -6,6 +6,7 @@ import (

"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/instances"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/plans/objchange"
"github.com/hashicorp/terraform/internal/states"
@@ -295,7 +296,19 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
return diags
}

state, repeatData, applyDiags := n.apply(ctx, state, diffApply, n.Config, n.CreateBeforeDestroy())
var repeatData instances.RepetitionData
if n.Config != nil {
forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx)
repeatData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach)
}

// If there is no change, there was nothing to apply, and we don't need to
// re-write the state, but we do need to re-evaluate postconditions.
if diffApply.Action == plans.NoOp {
return diags.Append(n.managedResourcePostconditions(ctx, repeatData))
}

state, applyDiags := n.apply(ctx, state, diffApply, n.Config, repeatData, n.CreateBeforeDestroy())
diags = diags.Append(applyDiags)

// We clear the change out here so that future nodes don't see a change
@@ -370,15 +383,18 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
// _after_ writing the state because we want to check against
// the result of the operation, and to fail on future operations
// until the user makes the condition succeed.
return diags.Append(n.managedResourcePostconditions(ctx, repeatData))
}

func (n *NodeApplyableResourceInstance) managedResourcePostconditions(ctx EvalContext, repeatData instances.RepetitionData) (diags tfdiags.Diagnostics) {

checkDiags := evalCheckRules(
addrs.ResourcePostcondition,
n.Config.Postconditions,
ctx, n.ResourceInstanceAddr(), repeatData,
tfdiags.Error,
)
diags = diags.Append(checkDiags)

return diags
return diags.Append(checkDiags)
}

// checkPlannedChange produces errors if the _actual_ expected value is not
3 changes: 2 additions & 1 deletion internal/terraform/node_resource_destroy.go
@@ -4,6 +4,7 @@ import (
"fmt"
"log"

"github.com/hashicorp/terraform/internal/instances"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/tfdiags"

@@ -210,7 +211,7 @@ func (n *NodeDestroyResourceInstance) managedResourceExecute(ctx EvalContext) (d
// Managed resources need to be destroyed, while data sources
// are only removed from state.
// we pass a nil configuration to apply because we are destroying
s, _, d := n.apply(ctx, state, changeApply, nil, false)
s, d := n.apply(ctx, state, changeApply, nil, instances.RepetitionData{}, false)
state, diags = s, diags.Append(d)
// we don't return immediately here on error, so that the state can be
// finalized
3 changes: 2 additions & 1 deletion internal/terraform/node_resource_destroy_deposed.go
@@ -6,6 +6,7 @@ import (

"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/dag"
"github.com/hashicorp/terraform/internal/instances"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/tfdiags"
@@ -249,7 +250,7 @@ func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op w
}

// we pass a nil configuration to apply because we are destroying
state, _, applyDiags := n.apply(ctx, state, change, nil, false)
state, applyDiags := n.apply(ctx, state, change, nil, instances.RepetitionData{}, false)
diags = diags.Append(applyDiags)
// don't return immediately on errors, we need to handle the state

