-
Notifications
You must be signed in to change notification settings - Fork 9.4k
/
component_instance.go
1490 lines (1344 loc) · 56.4 KB
/
component_instance.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package stackeval
import (
"context"
"fmt"
"github.com/hashicorp/hcl/v2"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/collections"
"github.com/hashicorp/terraform/internal/configs/configschema"
"github.com/hashicorp/terraform/internal/instances"
"github.com/hashicorp/terraform/internal/lang/marks"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/promising"
"github.com/hashicorp/terraform/internal/providers"
"github.com/hashicorp/terraform/internal/stacks/stackaddrs"
"github.com/hashicorp/terraform/internal/stacks/stackconfig/stackconfigtypes"
"github.com/hashicorp/terraform/internal/stacks/stackplan"
"github.com/hashicorp/terraform/internal/stacks/stackruntime/hooks"
"github.com/hashicorp/terraform/internal/stacks/stackstate"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// ComponentInstance represents one dynamic instance of a component call:
// the pairing of a *Component with a specific instance key (presumably
// from for_each/count expansion, or addrs.NoKey — confirm with callers of
// newComponentInstance) and the repetition data selected for that key.
type ComponentInstance struct {
	call *Component
	key  addrs.InstanceKey
	main *Main

	// repetition carries the per-instance repetition symbols (each.key /
	// each.value or count.index) used when evaluating expressions in this
	// instance's scope; see RepetitionData.
	repetition instances.RepetitionData

	// moduleTreePlan memoizes the result of CheckModuleTreePlan so the
	// underlying modules-runtime plan is computed at most once per instance.
	moduleTreePlan promising.Once[withDiagnostics[*plans.Plan]]
}

// Compile-time assertions that ComponentInstance implements the interfaces
// the stack evaluator relies on.
var _ Plannable = (*ComponentInstance)(nil)
var _ ExpressionScope = (*ComponentInstance)(nil)
// newComponentInstance constructs the ComponentInstance representing the
// given instance key of the given component call, carrying the provided
// repetition data. The main evaluator reference is taken from the call.
func newComponentInstance(call *Component, key addrs.InstanceKey, repetition instances.RepetitionData) *ComponentInstance {
	inst := &ComponentInstance{
		call:       call,
		key:        key,
		main:       call.main,
		repetition: repetition,
	}
	return inst
}
// Addr returns the fully-qualified address of this component instance,
// combining the enclosing call's stack address and component name with
// this particular instance key.
func (c *ComponentInstance) Addr() stackaddrs.AbsComponentInstance {
	call := c.call.Addr()
	return stackaddrs.AbsComponentInstance{
		Stack: call.Stack,
		Item: stackaddrs.ComponentInstance{
			Component: call.Item,
			Key:       c.key,
		},
	}
}
// RepetitionData returns the repetition symbols (each.key/each.value or
// count.index values) associated with this particular instance, as
// captured at construction time by newComponentInstance.
func (c *ComponentInstance) RepetitionData() instances.RepetitionData {
	return c.repetition
}
// InputVariableValues returns the object of input variable values to pass
// to this component's root module, discarding any diagnostics. Use
// CheckInputVariableValues if the diagnostics are needed too.
func (c *ComponentInstance) InputVariableValues(ctx context.Context, phase EvalPhase) cty.Value {
	v, _ := c.CheckInputVariableValues(ctx, phase)
	return v
}
// CheckInputVariableValues evaluates the input variable values for this
// component instance, returning the resulting object value along with any
// diagnostics produced during evaluation.
func (c *ComponentInstance) CheckInputVariableValues(ctx context.Context, phase EvalPhase) (cty.Value, tfdiags.Diagnostics) {
	wantTy, defs := c.call.Config(ctx).InputsType(ctx)
	if wantTy == cty.NilType {
		// A nil type suggests the target module is invalid in some way, so
		// we report that the input variable values are unknown and trust
		// that the module's own problems get reported via another return
		// path.
		return cty.DynamicVal, nil
	}

	// The expression errors were already checked during static analysis,
	// so here we only care about producing the value.
	decl := c.call.Declaration(ctx)
	return EvalComponentInputVariables(ctx, wantTy, defs, decl, phase, c)
}
// inputValuesForModulesRuntime adapts the result of
// [ComponentInstance.InputVariableValues] to the representation that the
// main Terraform modules runtime expects.
//
// A nil result indicates that the input variable definitions were too
// invalid to produce anything useful.
func (c *ComponentInstance) inputValuesForModulesRuntime(ctx context.Context, phase EvalPhase) terraform.InputValues {
	obj := c.InputVariableValues(ctx, phase)
	if obj == cty.NilVal {
		return nil
	}

	// During planning obj may be wholly unknown, in which case we must
	// still return an InputValues populated with unknown values of the
	// declared type constraints. We therefore drive the loop from the
	// configuration's object type constraint rather than from obj itself.
	wantTy, _ := c.call.Config(ctx).InputsType(ctx)
	if wantTy == cty.NilType {
		// The configuration is too invalid for us to know what type we're
		// expecting, so we'll just bail.
		return nil
	}

	attrTypes := wantTy.AttributeTypes()
	vals := make(terraform.InputValues, len(attrTypes))
	for name, aty := range attrTypes {
		val := obj.GetAttr(name)
		if !val.IsKnown() {
			// Force the declared type even when InputVariableValues
			// couldn't determine one.
			val = cty.UnknownVal(aty)
		}
		vals[name] = &terraform.InputValue{
			Value:      val,
			SourceType: terraform.ValueFromCaller,
		}
	}
	return vals
}
// Providers evaluates the "providers" argument from the component
// configuration and returns a mapping from the provider configuration
// addresses that the component's root module expect to have populated
// to the address of the [ProviderInstance] from the stack configuration
// to pass into that slot.
//
// If the second return value "valid" is true then the providers argument
// is valid and so the returned map should be complete. If "valid" is false
// then there are some problems with the providers argument and so the
// map might be incomplete, and so callers should use it only with a great
// deal of care.
func (c *ComponentInstance) Providers(ctx context.Context, phase EvalPhase) (map[addrs.RootProviderConfig]stackaddrs.AbsProviderConfigInstance, bool) {
	mapping, diags := c.CheckProviders(ctx, phase)
	valid := !diags.HasErrors()
	return mapping, valid
}
// CheckProviders evaluates the "providers" argument from the component
// configuration and returns a mapping from the provider configuration
// addresses that the component's root module expect to have populated
// to the address of the [ProviderInstance] from the stack configuration
// to pass into that slot.
//
// If the "providers" argument is invalid then this will return error
// diagnostics along with a partial result.
func (c *ComponentInstance) CheckProviders(ctx context.Context, phase EvalPhase) (map[addrs.RootProviderConfig]stackaddrs.AbsProviderConfigInstance, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	ret := make(map[addrs.RootProviderConfig]stackaddrs.AbsProviderConfigInstance)

	declConfigs := c.call.Declaration(ctx).ProviderConfigs
	configProviders := c.call.Config(ctx).RequiredProviderInstances(ctx)

	// First, we'll iterate through the configProviders and check that we have
	// a definition for each of them. We'll also resolve the reference that we
	// have and make sure it points to an actual provider instance.
	for _, elem := range configProviders.Elems {
		// sourceAddr is the addrs.RootProviderConfig that should be used to
		// set this provider in the component later.
		sourceAddr := elem.Key

		// componentAddr is the addrs.LocalProviderConfig that specifies the
		// local name and (optional) alias of the provider in the component.
		componentAddr := elem.Value

		// We validated the config providers during the static analysis, so we
		// know this expression exists and resolves to the correct type.
		expr := declConfigs[componentAddr]

		inst, instDiags, ok := c.checkProvider(ctx, sourceAddr, componentAddr, expr, phase)
		diags = diags.Append(instDiags)
		if ok {
			ret[sourceAddr] = inst
		}
	}

	// Second, we want to iterate through the providers that are required by
	// the state and not required by the configuration. Unfortunately, we don't
	// currently store enough information to be able to retrieve the original
	// provider directly from the state. We only store the provider type and
	// alias of the original provider. Stacks can have multiple instances of the
	// same provider type, local name, and alias. This means we need the user to
	// still provide an entry for this provider in the declConfigs.
	// TODO: There's another TODO in the state package that suggests we should
	// store the additional information we need. Once this is fixed we can
	// come and tidy this up as well.
	stack := c.call.Stack(ctx)
	stackConfig := stack.StackConfig(ctx)
	moduleTree := c.call.Config(ctx).ModuleTree(ctx)

	// We'll search through the declConfigs to find any keys that match the
	// type and alias of a any provider needed by the state. This is backwards
	// when compared to how we resolved the configProviders. But we don't have
	// the information we need to do it the other way around.
	//
	// NOTE(review): declConfigs is a map, so this iteration order is
	// nondeterministic; the order in which diagnostics are appended here may
	// vary between runs.
	previousProviders := c.main.PreviousProviderInstances(c.Addr(), phase)
	for localProviderAddr, expr := range declConfigs {
		provider := moduleTree.ProviderForConfigAddr(localProviderAddr)
		sourceAddr := addrs.RootProviderConfig{
			Provider: provider,
			Alias:    localProviderAddr.Alias,
		}

		if _, exists := ret[sourceAddr]; exists || !previousProviders.Has(sourceAddr) {
			// Then this declConfig either matches a configProvider and we've
			// already processed it, or it matches a provider that isn't
			// required by the config or the state. In the first case, this is
			// fine we have matched the right provider already. In the second
			// case, we could raise a warning or something but it's not a big
			// deal so we can ignore it.
			continue
		}

		// Otherwise, this is a declConfig for a provider that is not in the
		// configProviders and is in the previousProviders. So, we should
		// process it.
		inst, instDiags, ok := c.checkProvider(ctx, sourceAddr, localProviderAddr, expr, phase)
		diags = diags.Append(instDiags)
		if ok {
			ret[sourceAddr] = inst
		}

		if _, ok := stackConfig.ProviderLocalName(ctx, provider); !ok {
			// Even though we have an entry for this provider in the declConfigs
			// doesn't mean we have an entry for this in our required providers.
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Component requires undeclared provider",
				Detail: fmt.Sprintf(
					"The root module for %s has resources in state that require a configuration for provider %q, which isn't declared as a dependency of this stack configuration.\n\nDeclare this provider in the stack's required_providers block, and then assign a configuration for that provider in this component's \"providers\" argument.",
					c.Addr(), provider.ForDisplay(),
				),
				Subject: c.call.Declaration(ctx).DeclRange.ToHCL().Ptr(),
			})
			continue
		}
	}

	// Finally, let's check that we have a provider configuration for every
	// provider needed by the state. Anything resolved by the two loops above
	// is already present in ret; anything still missing is an error.
	for _, previousProvider := range previousProviders {
		if _, ok := ret[previousProvider]; ok {
			// Then we have a provider for this, so great!
			continue
		}

		// If we get here, then we didn't find an entry for this provider in
		// the declConfigs. This is an error because we need to have an entry
		// for every provider that we have in the state.

		// localAddr helps with the error message.
		localAddr := addrs.LocalProviderConfig{
			LocalName: moduleTree.Module.LocalNameForProvider(previousProvider.Provider),
			Alias:     previousProvider.Alias,
		}

		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Missing required provider configuration",
			Detail: fmt.Sprintf(
				"The root module for %s has resources in state that require a provider configuration named %q for provider %q, which is not assigned in the component's \"providers\" argument.",
				c.Addr(), localAddr.StringCompact(), previousProvider.Provider.ForDisplay(),
			),
			Subject: c.call.Declaration(ctx).DeclRange.ToHCL().Ptr(),
		})
	}

	return ret, diags
}
// checkProvider evaluates one entry of the component's "providers"
// argument: it checks that expr produces a known, non-null provider
// configuration value whose provider type matches sourceAddr, and that the
// referenced provider instance actually exists in the stack configuration.
//
// Note that the final fallthrough path (a reference to an undefined
// provider instance) appends an error diagnostic but still returns
// ok=true, so callers must consult the diagnostics and not rely on the
// boolean alone.
func (c *ComponentInstance) checkProvider(ctx context.Context, sourceAddr addrs.RootProviderConfig, componentAddr addrs.LocalProviderConfig, expr hcl.Expression, phase EvalPhase) (stackaddrs.AbsProviderConfigInstance, tfdiags.Diagnostics, bool) {
	var diags tfdiags.Diagnostics
	var ret stackaddrs.AbsProviderConfigInstance

	result, hclDiags := EvalExprAndEvalContext(ctx, expr, phase, c)
	diags = diags.Append(hclDiags)
	if hclDiags.HasErrors() {
		return ret, diags, false
	}
	v := result.Value

	// The first set of checks can perform a redundant check in some cases. For
	// providers required by the configuration the type validation should have
	// been performed by the static analysis. However, we'll repeat the checks
	// here to also catch the case where providers are required by the existing
	// state but are not defined in the configuration. This isn't checked by
	// the static analysis.
	const errSummary = "Invalid provider configuration"
	if actualTy := result.Value.Type(); stackconfigtypes.IsProviderConfigType(actualTy) {
		// Then we at least got a provider reference of some kind.
		actualTypeAddr := stackconfigtypes.ProviderForProviderConfigType(actualTy)
		if actualTypeAddr != sourceAddr.Provider {
			// But, unfortunately, the underlying types of the providers
			// do not match up.
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  errSummary,
				Detail: fmt.Sprintf(
					"The provider configuration slot %s requires a configuration for provider %q, not for provider %q.",
					componentAddr.StringCompact(), sourceAddr.Provider, actualTypeAddr,
				),
				Subject: result.Expression.Range().Ptr(),
			})
			return ret, diags, false
		}
	} else {
		// We got something that isn't a provider reference at all.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  errSummary,
			Detail: fmt.Sprintf(
				"The provider configuration slot %s requires a configuration for provider %q.",
				componentAddr.StringCompact(), sourceAddr.Provider,
			),
			Subject: result.Expression.Range().Ptr(),
		})
		return ret, diags, false
	}

	// Now, we differ from the static analysis in that we should have
	// returned a concrete value while we may have got unknown during the
	// static analysis.
	if v.IsNull() {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  errSummary,
			Detail: fmt.Sprintf(
				"The provider configuration slot %s is required, but this definition returned null.",
				componentAddr.StringCompact(),
			),
			Subject: result.Expression.Range().Ptr(),
		})
		return ret, diags, false
	}
	if !v.IsKnown() {
		// TODO: Once we support deferred changes we should return
		// something that lets the caller know the configuration is
		// incomplete so it can defer planning the entire component.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  errSummary,
			Detail: fmt.Sprintf(
				"This expression depends on values that won't be known until the apply phase, so Terraform cannot determine which provider configuration to use while planning changes for %s.",
				c.Addr().String(),
			),
			Subject: result.Expression.Range().Ptr(),
		})
		return ret, diags, false
	}

	// If it's of the correct type, known, and not null then we should
	// be able to retrieve a specific provider instance address that
	// this value refers to.
	ret = stackconfigtypes.ProviderInstanceForValue(v)

	// The reference must be to a provider instance that's actually
	// configured. Each nil check below means "that path element doesn't
	// exist", in which case we fall through to the error at the bottom.
	providerInstStack := c.main.Stack(ctx, ret.Stack, phase)
	if providerInstStack != nil {
		provider := providerInstStack.Provider(ctx, ret.Item.ProviderConfig)
		if provider != nil {
			insts := provider.Instances(ctx, phase)
			if insts == nil {
				// If we get here then we don't yet know which instances
				// this provider has, so we'll be optimistic that it'll
				// show up in a later phase.
				return ret, diags, true
			}
			if _, exists := insts[ret.Item.Key]; exists {
				return ret, diags, true
			}
		}
	}

	// If we fall here then something on the path to the provider instance
	// doesn't exist, and so effectively the provider instance doesn't exist.
	diags = diags.Append(&hcl.Diagnostic{
		Severity: hcl.DiagError,
		Summary:  errSummary,
		Detail: fmt.Sprintf(
			"Expression result refers to undefined provider instance %s.",
			ret,
		),
		Subject: result.Expression.Range().Ptr(),
	})
	return ret, diags, true
}
// neededProviderSchemas returns the schema for every provider type used by
// this component's module tree, keyed by provider address. Provider types
// that cannot be found are silently skipped (another codepath reports
// those), while schema fetch failures produce error diagnostics here.
func (c *ComponentInstance) neededProviderSchemas(ctx context.Context) (map[addrs.Provider]providers.ProviderSchema, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	moduleTree := c.call.Config(ctx).ModuleTree(ctx)
	if moduleTree == nil {
		// The configuration is presumably invalid; reporting that is a
		// different codepath's responsibility, so return nothing here.
		return nil, diags
	}

	decl := c.call.Declaration(ctx)
	schemas := make(map[addrs.Provider]providers.ProviderSchema)
	for _, providerAddr := range moduleTree.ProviderTypes() {
		pTy := c.main.ProviderType(ctx, providerAddr)
		if pTy == nil {
			// Not our job to report a missing provider type.
			continue
		}

		schema, err := pTy.Schema(ctx)
		if err != nil {
			// FIXME: Reporting a schema fetch failure isn't technically this
			// function's job either, but no single other place definitely
			// does it, so we report it here at the risk of some redundant
			// errors if the same provider is used multiple times.
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Provider initialization error",
				Detail:   fmt.Sprintf("Failed to fetch the provider schema for %s: %s.", providerAddr, err),
				Subject:  decl.DeclRange.ToHCL().Ptr(),
			})
			continue
		}
		schemas[providerAddr] = schema
	}
	return schemas, diags
}
// neededProviderClients returns a client for each provider configuration
// this component instance needs, keyed by the root provider configuration
// address the modules runtime expects. valid is false (with a nil map) if
// the provider configuration assignments themselves are invalid.
//
// Entries whose provider instance cannot currently be resolved are
// skipped, so the map may be incomplete even when valid is true.
func (c *ComponentInstance) neededProviderClients(ctx context.Context, phase EvalPhase) (clients map[addrs.RootProviderConfig]providers.Interface, valid bool) {
	addrsByCallee, ok := c.Providers(ctx, phase)
	if !ok {
		return nil, false
	}

	clients = make(map[addrs.RootProviderConfig]providers.Interface)
	for calleeAddr, callerAddr := range addrsByCallee {
		instStack := c.main.Stack(ctx, callerAddr.Stack, phase)
		if instStack == nil {
			continue
		}
		provider := instStack.Provider(ctx, callerAddr.Item.ProviderConfig)
		if provider == nil {
			continue
		}
		insts := provider.Instances(ctx, phase)
		if insts == nil {
			// We don't yet know which instances this provider has, so
			// we'll be optimistic that it'll show up in a later phase.
			continue
		}
		if inst, exists := insts[callerAddr.Item.Key]; exists {
			clients[calleeAddr] = inst.Client(ctx, phase)
		}
	}
	return clients, true
}
// ModuleTreePlan returns the modules-runtime plan for this component
// instance, discarding diagnostics. Use CheckModuleTreePlan to obtain the
// diagnostics as well.
func (c *ComponentInstance) ModuleTreePlan(ctx context.Context) *plans.Plan {
	plan, _ := c.CheckModuleTreePlan(ctx)
	return plan
}
// CheckModuleTreePlan asks the main Terraform modules runtime to plan the
// module tree associated with this component instance, returning the plan
// along with any diagnostics. The result is memoized in c.moduleTreePlan,
// so the underlying plan operation runs at most once per instance.
//
// This may be called only when the evaluator was instantiated for
// planning; it panics otherwise.
func (c *ComponentInstance) CheckModuleTreePlan(ctx context.Context) (*plans.Plan, tfdiags.Diagnostics) {
	if !c.main.Planning() {
		panic("called CheckModuleTreePlan with an evaluator not instantiated for planning")
	}
	return doOnceWithDiags(
		ctx, &c.moduleTreePlan, c.main,
		func(ctx context.Context) (*plans.Plan, tfdiags.Diagnostics) {
			var diags tfdiags.Diagnostics

			addr := c.Addr()
			h := hooksFromContext(ctx)
			hookSingle(ctx, hooksFromContext(ctx).PendingComponentInstancePlan, c.Addr())
			// hookBegin returns a derived context; every hook call below
			// must use this new ctx so the sequence is attached.
			seq, ctx := hookBegin(ctx, h.BeginComponentInstancePlan, h.ContextAttach, addr)

			decl := c.call.Declaration(ctx)

			// This is our main bridge from the stacks language into the main Terraform
			// module language during the planning phase. We need to ask the main
			// language runtime to plan the module tree associated with this
			// component and return the result.

			moduleTree := c.call.Config(ctx).ModuleTree(ctx)
			if moduleTree == nil {
				// Presumably the configuration is invalid in some way, so
				// we can't create a plan and the relevant diagnostics will
				// get reported when the plan driver visits the ComponentConfig
				// object.
				return nil, diags
			}
			prevState := c.PlanPrevState(ctx)

			providerSchemas, moreDiags := c.neededProviderSchemas(ctx)
			diags = diags.Append(moreDiags)
			if moreDiags.HasErrors() {
				return nil, diags
			}

			tfCtx, err := terraform.NewContext(&terraform.ContextOpts{
				Hooks: []terraform.Hook{
					&componentInstanceTerraformHook{
						ctx:   ctx,
						seq:   seq,
						hooks: hooksFromContext(ctx),
						addr:  c.Addr(),
					},
				},
				PreloadedProviderSchemas: providerSchemas,
				Provisioners:             c.main.availableProvisioners(),
			})
			if err != nil {
				// Should not get here because we should always pass a valid
				// ContextOpts above.
				diags = diags.Append(tfdiags.Sourceless(
					tfdiags.Error,
					"Failed to instantiate Terraform modules runtime",
					fmt.Sprintf("Could not load the main Terraform language runtime: %s.\n\nThis is a bug in Terraform; please report it!", err),
				))
				return nil, diags
			}

			stackPlanOpts := c.main.PlanningOpts()
			inputValues := c.inputValuesForModulesRuntime(ctx, PlanPhase)
			if inputValues == nil {
				// inputValuesForModulesRuntime uses nil (as opposed to a
				// non-nil zerolen map) to represent that the definition of
				// the input variables was so invalid that we cannot do
				// anything with it, in which case we'll just return early
				// and assume the plan walk driver will find the diagnostics
				// via another return path.
				return nil, diags
			}

			providerClients, valid := c.neededProviderClients(ctx, PlanPhase)
			if !valid {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Cannot plan component",
					Detail:   fmt.Sprintf("Cannot generate a plan for %s because its provider configuration assignments are invalid.", c.Addr()),
					Subject:  decl.DeclRange.ToHCL().Ptr(),
				})
				return nil, diags
			}

			// If any of our upstream components have incomplete plans then
			// we need to force treating everything in this component as
			// deferred so we can preserve the correct dependency ordering.
			upstreamDeferred := false
			for _, depAddr := range c.call.RequiredComponents(ctx).Elems() {
				depStack := c.main.Stack(ctx, depAddr.Stack, PlanPhase)
				if depStack == nil {
					upstreamDeferred = true // to be conservative
					break
				}
				depComponent := depStack.Component(ctx, depAddr.Item)
				if depComponent == nil {
					upstreamDeferred = true // to be conservative
					break
				}
				if !depComponent.PlanIsComplete(ctx) {
					upstreamDeferred = true
					break
				}
			}

			// The instance is also upstream deferred if the for_each value for this instance is unknown.
			if c.key == addrs.WildcardKey {
				upstreamDeferred = true
			}

			// NOTE: This ComponentInstance type only deals with component
			// instances currently declared in the configuration. See
			// [ComponentInstanceRemoved] for the model of a component instance
			// that existed in the prior state but is not currently declared
			// in the configuration.
			plan, moreDiags := tfCtx.Plan(moduleTree, prevState, &terraform.PlanOpts{
				Mode:                       stackPlanOpts.PlanningMode,
				SetVariables:               inputValues,
				ExternalProviders:          providerClients,
				DeferralAllowed:            stackPlanOpts.DeferralAllowed,
				ExternalDependencyDeferred: upstreamDeferred,

				// This is set by some tests but should not be used in main code.
				// (nil means to use the real time when tfCtx.Plan was called.)
				ForcePlanTimestamp: stackPlanOpts.ForcePlanTimestamp,
			})
			diags = diags.Append(moreDiags)

			// Even a failed plan may be non-nil, in which case we still
			// report the drift and planned changes through the hooks.
			if plan != nil {
				cic := &hooks.ComponentInstanceChange{
					Addr: addr,
				}

				for _, rsrcChange := range plan.DriftedResources {
					hookMore(ctx, seq, h.ReportResourceInstanceDrift, &hooks.ResourceInstanceChange{
						Addr: stackaddrs.AbsResourceInstanceObject{
							Component: addr,
							Item:      rsrcChange.ObjectAddr(),
						},
						Change: rsrcChange,
					})
				}
				for _, rsrcChange := range plan.Changes.Resources {
					if rsrcChange.Importing != nil {
						cic.Import++
					}
					cic.CountNewAction(rsrcChange.Action)

					hookMore(ctx, seq, h.ReportResourceInstancePlanned, &hooks.ResourceInstanceChange{
						Addr: stackaddrs.AbsResourceInstanceObject{
							Component: addr,
							Item:      rsrcChange.ObjectAddr(),
						},
						Change: rsrcChange,
					})
				}
				hookMore(ctx, seq, h.ReportComponentInstancePlanned, cic)
			}

			if diags.HasErrors() {
				hookMore(ctx, seq, h.ErrorComponentInstancePlan, addr)
			} else {
				hookMore(ctx, seq, h.EndComponentInstancePlan, addr)
			}
			return plan, diags
		},
	)
}
// ApplyModuleTreePlan applies a plan returned by a previous call to
// [ComponentInstance.CheckModuleTreePlan].
//
// Applying a plan often has significant externally-visible side-effects, and
// so this method should be called only once for a given plan. In practice
// we currently ensure that is true by calling it only from the package-level
// [ApplyPlan] function, which arranges for this function to be called
// concurrently with the same method on other component instances and with
// a whole-tree walk to gather up results and diagnostics.
func (c *ComponentInstance) ApplyModuleTreePlan(ctx context.Context, plan *plans.Plan) (*ComponentInstanceApplyResult, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
if !c.main.Applying() {
panic("called ApplyModuleTreePlan with an evaluator not instantiated for applying")
}
// NOTE WELL: This function MUST either successfully apply the component
// instance's plan or return at least one error diagnostic explaining why
// it cannot.
//
// All return paths must include a non-nil ComponentInstanceApplyResult.
// If an error occurs before we even begin applying the plan then the
// result should report that the changes are incomplete and that the
// new state is exactly the previous run state.
//
// If the underlying modules runtime raises errors when asked to apply the
// plan, then this function should pass all of those errors through to its
// own diagnostics while still returning the presumably-partially-updated
// result state.
addr := c.Addr()
decl := c.call.Declaration(ctx)
// This is the result to return along with any errors that prevent us from
// even starting the modules runtime apply phase. It reports that nothing
// changed at all.
noOpResult := c.PlaceholderApplyResultForSkippedApply(ctx, plan)
// We'll gather up our set of potentially-affected objects before we do
// anything else, because the modules runtime tends to mutate the objects
// accessible through the given plan pointer while it does its work and
// so we're likely to get a different/incomplete answer if we ask after
// work has already been done.
affectedResourceInstanceObjects := resourceInstanceObjectsAffectedByPlan(plan)
h := hooksFromContext(ctx)
hookSingle(ctx, hooksFromContext(ctx).PendingComponentInstanceApply, c.Addr())
seq, ctx := hookBegin(ctx, h.BeginComponentInstanceApply, h.ContextAttach, addr)
moduleTree := c.call.Config(ctx).ModuleTree(ctx)
if moduleTree == nil {
// We should not get here because if the configuration was statically
// invalid then we should've detected that during the plan phase.
// We'll emit a diagnostic about it just to make sure we're explicit
// that the plan didn't get applied, but if anyone sees this error
// it suggests a bug in whatever calling system sent us the plan
// and configuration -- it's sent us the wrong configuration, perhaps --
// and so we cannot know exactly what to blame with only the information
// we have here.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Component configuration is invalid during apply",
fmt.Sprintf(
"Despite apparently successfully creating a plan earlier, %s seems to have an invalid configuration during the apply phase. This should not be possible, and suggests a bug in whatever subsystem is managing the plan and apply workflow.",
addr.String(),
),
))
return noOpResult, diags
}
providerSchemas, moreDiags := c.neededProviderSchemas(ctx)
diags = diags.Append(moreDiags)
if moreDiags.HasErrors() {
return noOpResult, diags
}
tfHook := &componentInstanceTerraformHook{
ctx: ctx,
seq: seq,
hooks: hooksFromContext(ctx),
addr: c.Addr(),
}
tfCtx, err := terraform.NewContext(&terraform.ContextOpts{
Hooks: []terraform.Hook{
tfHook,
},
PreloadedProviderSchemas: providerSchemas,
Provisioners: c.main.availableProvisioners(),
})
if err != nil {
// Should not get here because we should always pass a valid
// ContextOpts above.
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to instantiate Terraform modules runtime",
fmt.Sprintf("Could not load the main Terraform language runtime: %s.\n\nThis is a bug in Terraform; please report it!", err),
))
return noOpResult, diags
}
// We'll need to make some light modifications to the plan to include
// information we've learned in other parts of the apply walk that
// should've filled in some unknown value placeholders. It would be rude
// to modify the plan that our caller is holding though, so we'll
// shallow-copy it. This is NOT a deep copy, so don't modify anything
// that's reachable through any pointers without copying those first too.
modifiedPlan := *plan
inputValues := c.inputValuesForModulesRuntime(ctx, ApplyPhase)
if inputValues == nil {
// inputValuesForModulesRuntime uses nil (as opposed to a
// non-nil zerolen map) to represent that the definition of
// the input variables was so invalid that we cannot do
// anything with it, in which case we'll just return early
// and assume the plan walk driver will find the diagnostics
// via another return path.
return noOpResult, diags
}
// TODO: Check that the final input values are consistent with what
// we had during planning. If not, that suggests a bug elsewhere.
//
// UGH: the "modules runtime"'s model of planning was designed around
// the goal of producing a traditional Terraform CLI-style saved plan
// file and so it has the input variable values already encoded as
// plans.DynamicValue opaque byte arrays, and so we need to convert
// our resolved input values into that format. It would be better
// if plans.Plan used the typical in-memory format for input values
// and let the plan file serializer worry about encoding, but we'll
// defer that API change for now to avoid disrupting other codepaths.
modifiedPlan.VariableValues = make(map[string]plans.DynamicValue, len(inputValues))
modifiedPlan.VariableMarks = make(map[string][]cty.PathValueMarks, len(inputValues))
for name, iv := range inputValues {
val, pvm := iv.Value.UnmarkDeepWithPaths()
dv, err := plans.NewDynamicValue(val, cty.DynamicPseudoType)
if err != nil {
diags = diags.Append(tfdiags.Sourceless(
tfdiags.Error,
"Failed to encode input variable value",
fmt.Sprintf(
"Could not encode the value of input variable %q of %s: %s.\n\nThis is a bug in Terraform; please report it!",
name, c.Addr(), err,
),
))
continue
}
modifiedPlan.VariableValues[name] = dv
modifiedPlan.VariableMarks[name] = pvm
}
if diags.HasErrors() {
return noOpResult, diags
}
providerClients, valid := c.neededProviderClients(ctx, ApplyPhase)
if !valid {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Cannot apply component plan",
Detail: fmt.Sprintf("Cannot apply the plan for %s because the configured provider configuration assignments are invalid.", c.Addr()),
Subject: decl.DeclRange.ToHCL().Ptr(),
})
return noOpResult, diags
}
var newState *states.State
if modifiedPlan.Applyable {
// NOTE: tfCtx.Apply tends to make changes to the given plan while it
// works, and so code after this point should not make any further use
// of either "modifiedPlan" or "plan" (since they share lots of the same
// pointers to mutable objects and so both can get modified together.)
newState, moreDiags = tfCtx.Apply(&modifiedPlan, moduleTree, &terraform.ApplyOpts{
ExternalProviders: providerClients,
})
diags = diags.Append(moreDiags)
} else {
// For a non-applyable plan, we just skip trying to apply it altogether
// and just propagate the prior state (including any refreshing we
// did during the plan phase) forward.
newState = modifiedPlan.PriorState
}
if newState != nil {
cic := &hooks.ComponentInstanceChange{
Addr: addr,
// We'll increment these gradually as we visit each change below.
Add: 0,
Change: 0,
Remove: 0,
}
// We need to report what changes were applied, which is mostly just
// re-announcing what was planned but we'll check to see if our
// terraform.Hook implementation saw a "successfully applied" event
// for each resource instance object before counting it.
applied := tfHook.ResourceInstanceObjectsSuccessfullyApplied()
for _, rioAddr := range applied {
action := tfHook.ResourceInstanceObjectAppliedAction(rioAddr)
// FIXME: We can't count imports here because they aren't "actions"
// in the sense that our hook gets informed about, and so the
// import number will always be zero in the apply phase.
cic.CountNewAction(action)
}
hookMore(ctx, seq, h.ReportComponentInstanceApplied, cic)
}
if diags.HasErrors() {
hookMore(ctx, seq, h.ErrorComponentInstanceApply, addr)
} else {
hookMore(ctx, seq, h.EndComponentInstanceApply, addr)
}
if newState == nil {
// The modules runtime returns a nil state only if an error occurs
// so early that it couldn't take any actions at all, and so we
// must assume that the state is totally unchanged in that case.
newState = plan.PrevRunState
affectedResourceInstanceObjects = nil
}
return &ComponentInstanceApplyResult{
FinalState: newState,
AffectedResourceInstanceObjects: affectedResourceInstanceObjects,
// Currently our definition of "complete" is that the apply phase
// didn't return any errors, since we expect the modules runtime
// to either perform all of the actions that were planned or
// return errors explaining why it cannot.
Complete: !diags.HasErrors(),
}, diags
}
// PlanPrevState returns the previous state for this component instance during
// the planning phase, or panics if called in any other phase.
func (c *ComponentInstance) PlanPrevState(ctx context.Context) *states.State {
	// c.main.PlanPrevState panics unless we're currently in the plan phase.
	prior := c.main.PlanPrevState().ComponentInstanceStateForModulesRuntime(c.Addr())
	if prior != nil {
		return prior
	}
	// Substitute an empty state so callers never need to handle nil.
	return states.NewState()
}
// ApplyResult returns the result from applying a plan for this object using
// [ApplyModuleTreePlan].
//
// Use the Complete field of the returned object to determine whether the
// apply ran to completion successfully enough for dependent work to proceed.
// If Complete is false then dependent work should not start, and instead
// dependents should unwind their stacks in a way that describes a no-op result.
func (c *ComponentInstance) ApplyResult(ctx context.Context) *ComponentInstanceApplyResult {
	// Same as CheckApplyResult but with the diagnostics discarded.
	result, _ := c.CheckApplyResult(ctx)
	return result
}
// CheckApplyResult returns the results from applying a plan for this object
// using [ApplyModuleTreePlan], and diagnostics describing any problems
// encountered when applying it.
func (c *ComponentInstance) CheckApplyResult(ctx context.Context) (*ComponentInstanceApplyResult, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	applyResult, moreDiags, err := c.main.ApplyChangeResults().ComponentInstanceResult(ctx, c.Addr())
	diags = diags.Append(moreDiags)
	if err != nil {
		// A missing scheduled result indicates a bug in the apply-phase
		// orchestration rather than a problem with the configuration.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Component instance apply not scheduled",
			fmt.Sprintf("Terraform needs the result from applying changes to %s, but that apply was apparently not scheduled to run. This is a bug in Terraform.", c.Addr()),
		))
	}
	return applyResult, diags
}
// PlaceholderApplyResultForSkippedApply returns a [ComponentInstanceApplyResult]
// which describes the hypothetical result of skipping the apply phase for
// this component instance altogether.
//
// It doesn't have any logic to check whether the apply _was_ actually skipped;
// the caller that's orchestrating the changes during the apply phase must
// decided that for itself and then choose between either calling
// [ComponentInstance.ApplyModuleTreePlan] to apply as normal, or returning
// the result of this function instead to explain that the apply was skipped.
func (c *ComponentInstance) PlaceholderApplyResultForSkippedApply(ctx context.Context, plan *plans.Plan) *ComponentInstanceApplyResult {
	// (We have this in here as a method just because it helps keep all of
	// the logic for constructing [ComponentInstanceApplyResult] objects
	// together in the same file, rather than having the caller synthesize
	// a result itself only in this one special situation.)
	ret := &ComponentInstanceApplyResult{
		// With no apply performed, the final state is whatever the
		// previous run left behind.
		FinalState: plan.PrevRunState,
		// A skipped apply can never count as having run to completion.
		Complete: false,
	}
	return ret
}
// ApplyResultState returns the new state resulting from applying a plan for
// this object using [ApplyModuleTreePlan], or nil if the apply failed and
// so there is no new state to return.
func (c *ComponentInstance) ApplyResultState(ctx context.Context) *states.State {
	// Same as CheckApplyResultState but with the diagnostics discarded.
	state, _ := c.CheckApplyResultState(ctx)
	return state
}
// CheckApplyResultState returns the new state resulting from applying a plan for
// this object using [ApplyModuleTreePlan] and diagnostics describing any
// problems encountered when applying it.
func (c *ComponentInstance) CheckApplyResultState(ctx context.Context) (*states.State, tfdiags.Diagnostics) {
	result, diags := c.CheckApplyResult(ctx)
	if result == nil {
		// No result at all, so no final state to report either.
		return nil, diags
	}
	return result.FinalState, diags
}
// InspectingState returns the state as captured in the snapshot provided when
// instantiating [Main] for [InspectPhase] evaluation.
func (c *ComponentInstance) InspectingState(ctx context.Context) *states.State {
	// Narrow the snapshot's whole-stack state down to just this component
	// instance, in the representation the modules runtime expects.
	return c.main.InspectingState().ComponentInstanceStateForModulesRuntime(c.Addr())
}
func (c *ComponentInstance) ResultValue(ctx context.Context, phase EvalPhase) cty.Value {
switch phase {
case PlanPhase:
plan := c.ModuleTreePlan(ctx)
if plan == nil {
// Planning seems to have failed so we cannot decide a result value yet.
// We can't do any better than DynamicVal here because in the
// modules language output values don't have statically-declared
// result types.
return cty.DynamicVal
}
// We need to vary our behavior here slightly depending on what action
// we're planning to take with this overall component: normally we want
// to use the "planned new state"'s output values, but if we're actually
// planning to destroy all of the infrastructure managed by this
// component then the planned new state has no output values at all,
// so we'll use the prior state's output values instead just in case
// we also need to plan destroying another component instance
// downstream of this one which will make use of this instance's
// output values _before_ we destroy it.
//
// FIXME: We're using UIMode for this decision, despite its doc comment
// saying we shouldn't, because this behavior is an offshoot of the
// already-documented annoying exception to that rule where various
// parts of Terraform use UIMode == DestroyMode in particular to deal
// with necessary variations during a "full destroy". Hopefully we'll
// eventually find a more satisfying solution for that, in which case
// we should update the following to use that solution too.
attrs := make(map[string]cty.Value)
if plan.UIMode != plans.DestroyMode {
outputChanges := plan.Changes.Outputs
for _, changeSrc := range outputChanges {
name := changeSrc.Addr.OutputValue.Name
change, err := changeSrc.Decode()
if err != nil {