-
Notifications
You must be signed in to change notification settings - Fork 1.7k
/
turbo_json.go
758 lines (644 loc) · 26.2 KB
/
turbo_json.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
package fs
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strings"
"github.com/muhammadmuzzammil1998/jsonc"
"github.com/pkg/errors"
"github.com/vercel/turbo/cli/internal/turbopath"
"github.com/vercel/turbo/cli/internal/util"
)
const (
	// configFile is the canonical file name turbo reads its configuration from.
	configFile = "turbo.json"
	// envPipelineDelimiter prefixes environment-variable entries in the
	// deprecated "dependsOn"/"globalDependencies" syntax (e.g. "$FOO").
	envPipelineDelimiter = "$"
	// topologicalPipelineDelimiter prefixes a dependency on the same task in
	// upstream package dependencies (e.g. "^build").
	topologicalPipelineDelimiter = "^"
)
// SpaceConfig is used to marshal and unmarshal the
// `experimentalSpaceId` field in a turbo.json
type SpaceConfig struct {
	// ID is the identifier of the configured space.
	ID string `json:"id"`
}
// rawTurboJSON is the direct unmarshaling target for a turbo.json file.
// Its fields mirror the on-disk JSON; TurboJSON holds the processed form.
type rawTurboJSON struct {
	// Global root filesystem dependencies
	GlobalDependencies []string `json:"globalDependencies,omitempty"`
	// Global env
	GlobalEnv []string `json:"globalEnv,omitempty"`
	// Global passthrough env
	GlobalPassthroughEnv []string `json:"experimentalGlobalPassThroughEnv,omitempty"`
	// Pipeline is a map of Turbo pipeline entries which define the task graph
	// and cache behavior on a per task or per package-task basis.
	Pipeline Pipeline `json:"pipeline"`
	// Configuration options when interfacing with the remote cache
	RemoteCacheOptions RemoteCacheOptions `json:"remoteCache,omitempty"`
	// Extends can be the name of another workspace
	Extends []string `json:"extends,omitempty"`
	// Configuration for the space
	Space *SpaceConfig `json:"experimentalSpaces,omitempty"`
}
// pristineTurboJSON is used when marshaling a TurboJSON object into a json string
// Notably, it includes a PristinePipeline instead of the regular Pipeline. (i.e. TaskDefinition
// instead of BookkeepingTaskDefinition.)
type pristineTurboJSON struct {
	GlobalDependencies   []string           `json:"globalDependencies,omitempty"`
	GlobalEnv            []string           `json:"globalEnv,omitempty"`
	GlobalPassthroughEnv []string           `json:"experimentalGlobalPassThroughEnv,omitempty"`
	Pipeline             PristinePipeline   `json:"pipeline"`
	RemoteCacheOptions   RemoteCacheOptions `json:"remoteCache,omitempty"`
	Extends              []string           `json:"extends,omitempty"`
	Space                *SpaceConfig       `json:"experimentalSpaces,omitempty"`
}
// TurboJSON represents a turbo.json configuration file
type TurboJSON struct {
	// GlobalDeps are file globs whose contents feed the global hash.
	GlobalDeps []string
	// GlobalEnv are environment variable names whose values feed the global hash.
	GlobalEnv []string
	// GlobalPassthroughEnv are env var names passed through to tasks (experimental).
	GlobalPassthroughEnv []string
	// Pipeline is the parsed task graph configuration.
	Pipeline Pipeline
	// RemoteCacheOptions configures interaction with the remote cache.
	RemoteCacheOptions RemoteCacheOptions
	Extends            []string // A list of Workspace names
	// SpaceID is the flattened ID from the experimentalSpaces config.
	SpaceID string
}
// RemoteCacheOptions is a struct for deserializing .remoteCache of configFile
type RemoteCacheOptions struct {
	// TeamID identifies the team whose remote cache should be used.
	TeamID string `json:"teamId,omitempty"`
	// Signature enables signing/verification of remote cache artifacts.
	Signature bool `json:"signature,omitempty"`
}
// rawTaskWithDefaults exists to Marshal (i.e. turn a TaskDefinition into json).
// We use this for printing ResolvedTaskConfiguration, because we _want_ to show
// the user the default values for key they have not configured.
type rawTaskWithDefaults struct {
	Outputs   []string `json:"outputs"`
	Cache     *bool    `json:"cache"`
	DependsOn []string `json:"dependsOn"`
	Inputs    []string `json:"inputs"`
	OutputMode util.TaskOutputMode `json:"outputMode"`
	// NOTE(review): this tag ("experimentalPassThroughEnv") differs in casing
	// from rawTask's unmarshal tag ("experimentalPassthroughEnv") — confirm
	// which spelling is the documented key.
	PassthroughEnv []string `json:"experimentalPassThroughEnv,omitempty"`
	Env            []string `json:"env"`
	Persistent     bool     `json:"persistent"`
}
// rawTask exists to Unmarshal from json. When fields are omitted, we _want_
// them to be missing, so that we can distinguish missing from empty value.
type rawTask struct {
	Outputs    []string             `json:"outputs,omitempty"`
	Cache      *bool                `json:"cache,omitempty"`
	DependsOn  []string             `json:"dependsOn,omitempty"`
	Inputs     []string             `json:"inputs,omitempty"`
	OutputMode *util.TaskOutputMode `json:"outputMode,omitempty"`
	Env        []string             `json:"env,omitempty"`
	// NOTE(review): tag casing ("experimentalPassthroughEnv") differs from
	// rawTaskWithDefaults' marshal tag ("experimentalPassThroughEnv") — verify.
	PassthroughEnv []string `json:"experimentalPassthroughEnv,omitempty"`
	Persistent     *bool    `json:"persistent,omitempty"`
}
// taskDefinitionHashable exists as a definition for PristinePipeline, which is used down
// stream for calculating the global hash. We want to exclude experimental fields here
// because we don't want experimental fields to be part of the global hash.
type taskDefinitionHashable struct {
	// Outputs are the include/exclude glob patterns for cached task outputs.
	Outputs TaskOutputs
	// ShouldCache reflects the "cache" key (defaults to true when unspecified).
	ShouldCache bool
	// EnvVarDependencies are env var names that affect this task's hash.
	EnvVarDependencies []string
	// TopologicalDependencies are "^task" dependencies on upstream packages.
	TopologicalDependencies []string
	// TaskDependencies are plain (non-topological) dependsOn entries.
	TaskDependencies []string
	// Inputs are the file globs this task's hash depends on.
	Inputs []string
	// OutputMode controls how task output is logged.
	OutputMode util.TaskOutputMode
	// Persistent marks tasks that are not expected to exit (e.g. dev servers).
	Persistent bool
}
// taskDefinitionExperiments is a list of config fields in a task definition that are considered
// experimental. We keep these separated so we can compute a global hash without these.
type taskDefinitionExperiments struct {
	// PassthroughEnv are env var names passed through to the task environment.
	PassthroughEnv []string
}
// PristinePipeline is a map of task names to TaskDefinition or taskDefinitionHashable.
// Depending on whether any experimental fields are defined, we will use either struct.
// The purpose is to omit experimental fields when making a pristine version, so that
// it doesn't show up in --dry/--summarize output or affect the global hash.
type PristinePipeline map[string]interface{}

// Pipeline is a struct for deserializing .pipeline in configFile
type Pipeline map[string]BookkeepingTaskDefinition
// BookkeepingTaskDefinition holds the underlying TaskDefinition and some bookkeeping data
// about the TaskDefinition. This wrapper struct allows us to leave TaskDefinition untouched.
type BookkeepingTaskDefinition struct {
	// definedFields records which non-experimental keys were actually present
	// in the JSON, so merging can distinguish "unset" from zero values.
	definedFields util.Set
	// experimentalFields records which experimental keys were present.
	experimentalFields util.Set
	// experimental holds the parsed experimental-only fields.
	experimental taskDefinitionExperiments
	// TaskDefinition holds the parsed non-experimental fields.
	TaskDefinition taskDefinitionHashable
}
// TaskDefinition is a representation of the configFile pipeline for further computation.
type TaskDefinition struct {
	Outputs     TaskOutputs
	ShouldCache bool
	// This field is custom-marshalled from rawTask.Env and rawTask.DependsOn
	EnvVarDependencies []string
	// rawTask.PassthroughEnv
	PassthroughEnv []string
	// TopologicalDependencies are tasks from package dependencies.
	// E.g. "build" is a topological dependency in:
	// dependsOn: ['^build'].
	// This field is custom-marshalled from rawTask.DependsOn
	TopologicalDependencies []string
	// TaskDependencies are anything that is not a topological dependency
	// E.g. both something and //whatever are TaskDependencies in:
	// dependsOn: ['something', '//whatever']
	// This field is custom-marshalled from rawTask.DependsOn
	TaskDependencies []string
	// Inputs indicate the list of files this Task depends on. If any of those files change
	// we can conclude that any cached outputs or logs for this Task should be invalidated.
	Inputs []string
	// OutputMode determines how we should log the output.
	OutputMode util.TaskOutputMode
	// Persistent indicates whether the Task is expected to exit or not
	// Tasks marked Persistent do not exit (e.g. --watch mode or dev servers)
	Persistent bool
}
// GetTask returns a TaskDefinition based on the ID (package#task format) or name (e.g. "build")
func (pc Pipeline) GetTask(taskID string, taskName string) (*BookkeepingTaskDefinition, error) {
	// A package-scoped entry (pkg#task) wins over the bare task name.
	if packageTask, found := pc[taskID]; found {
		return &packageTask, nil
	}
	// Fall back to the plain task name.
	if plainTask, found := pc[taskName]; found {
		return &plainTask, nil
	}
	// Neither form is configured in the pipeline.
	return nil, fmt.Errorf("Could not find task \"%s\" in pipeline", taskID)
}
// LoadTurboConfig loads, or optionally, synthesizes a TurboJSON instance
func LoadTurboConfig(dir turbopath.AbsoluteSystemPath, rootPackageJSON *PackageJSON, includeSynthesizedFromRootPackageJSON bool) (*TurboJSON, error) {
	// If the root package.json still has a `turbo` key, log a warning and remove it.
	if rootPackageJSON.LegacyTurboConfig != nil {
		log.Printf("[WARNING] \"turbo\" in package.json is no longer supported. Migrate to %s by running \"npx @turbo/codemod create-turbo-config\"\n", configFile)
		rootPackageJSON.LegacyTurboConfig = nil
	}
	var turboJSON *TurboJSON
	turboFromFiles, err := readTurboConfig(dir.UntypedJoin(configFile))
	// Branch order matters: when not synthesizing, any error is fatal.
	if !includeSynthesizedFromRootPackageJSON && err != nil {
		// If the file didn't exist, throw a custom error here instead of propagating
		if errors.Is(err, os.ErrNotExist) {
			return nil, errors.Wrap(err, fmt.Sprintf("Could not find %s. Follow directions at https://turbo.build/repo/docs to create one", configFile))
		}
		// There was an error, and we don't have any chance of recovering
		// because we aren't synthesizing anything
		return nil, err
	} else if !includeSynthesizedFromRootPackageJSON {
		// We're not synthesizing anything and there was no error, we're done
		return turboFromFiles, nil
	} else if errors.Is(err, os.ErrNotExist) {
		// turbo.json doesn't exist, but we're going try to synthesize something
		turboJSON = &TurboJSON{
			Pipeline: make(Pipeline),
		}
	} else if err != nil {
		// some other error happened, we can't recover
		return nil, err
	} else {
		// we're synthesizing, but we have a starting point
		// Note: this will have to change to support task inference in a monorepo
		// for now, we're going to error on any "root" tasks and turn non-root tasks into root tasks
		pipeline := make(Pipeline)
		for taskID, taskDefinition := range turboFromFiles.Pipeline {
			if util.IsPackageTask(taskID) {
				return nil, fmt.Errorf("Package tasks (<package>#<task>) are not allowed in single-package repositories: found %v", taskID)
			}
			pipeline[util.RootTaskID(taskID)] = taskDefinition
		}
		turboJSON = turboFromFiles
		turboJSON.Pipeline = pipeline
	}
	// Ensure every root package.json script has a (non-cached) pipeline entry.
	for scriptName := range rootPackageJSON.Scripts {
		if !turboJSON.Pipeline.HasTask(scriptName) {
			taskName := util.RootTaskID(scriptName)
			// Explicitly set ShouldCache to false in this definition and add the bookkeeping fields
			// so downstream we can pretend that it was set on purpose (as if read from a config file)
			// rather than defaulting to the 0-value of a boolean field.
			turboJSON.Pipeline[taskName] = BookkeepingTaskDefinition{
				definedFields: util.SetFromStrings([]string{"ShouldCache"}),
				TaskDefinition: taskDefinitionHashable{
					ShouldCache: false,
				},
			}
		}
	}
	return turboJSON, nil
}
// TurboJSONValidation is the signature for a validation function passed to Validate().
// Each function inspects the TurboJSON and returns any problems it finds.
type TurboJSONValidation func(*TurboJSON) []error
// Validate calls an array of validation functions on the TurboJSON struct.
// The validations can be customized by the caller. It always returns a
// non-nil (possibly empty) slice of the collected errors.
func (tj *TurboJSON) Validate(validations []TurboJSONValidation) []error {
	collected := []error{}
	for _, validate := range validations {
		collected = append(collected, validate(tj)...)
	}
	return collected
}
// TaskOutputs represents the patterns for including and excluding files from outputs
type TaskOutputs struct {
	// Inclusions are glob patterns selecting files to capture as outputs.
	Inclusions []string
	// Exclusions are glob patterns removing files from the captured set.
	Exclusions []string
}

// Sort returns a copy of the task outputs with both pattern lists sorted.
// The receiver's slices are not modified.
func (to TaskOutputs) Sort() TaskOutputs {
	// Bug fix: the previous implementation copied into nil slices; copy()
	// writes at most len(dst) elements, so it copied nothing and Sort always
	// returned empty outputs. Allocate destinations of the right length first.
	inclusions := make([]string, len(to.Inclusions))
	exclusions := make([]string, len(to.Exclusions))
	copy(inclusions, to.Inclusions)
	copy(exclusions, to.Exclusions)
	sort.Strings(inclusions)
	sort.Strings(exclusions)
	return TaskOutputs{Inclusions: inclusions, Exclusions: exclusions}
}
// readTurboConfig reads turbo.json from a provided path
func readTurboConfig(turboJSONPath turbopath.AbsoluteSystemPath) (*TurboJSON, error) {
	// A missing file is reported as os.ErrNotExist so callers can decide
	// whether to synthesize a config instead of failing outright.
	if !turboJSONPath.FileExists() {
		return nil, os.ErrNotExist
	}
	turboJSON, err := readTurboJSON(turboJSONPath)
	if err != nil {
		return nil, fmt.Errorf("%s: %w", configFile, err)
	}
	return turboJSON, nil
}
// readTurboJSON reads the configFile in to a struct
func readTurboJSON(path turbopath.AbsoluteSystemPath) (*TurboJSON, error) {
	file, err := path.Open()
	if err != nil {
		return nil, err
	}
	// Bug fix: the handle was previously never closed, leaking a file
	// descriptor on every read.
	defer file.Close()
	var turboJSON *TurboJSON
	data, err := ioutil.ReadAll(file)
	if err != nil {
		return nil, err
	}
	// jsonc allows comments in turbo.json.
	if err := jsonc.Unmarshal(data, &turboJSON); err != nil {
		return nil, err
	}
	return turboJSON, nil
}
// GetTaskDefinition returns a TaskDefinition from a serialized definition in configFile
func (pc Pipeline) GetTaskDefinition(taskID string) (TaskDefinition, bool) {
	// A package-scoped entry (pkg#task) takes precedence over the bare name.
	if entry, present := pc[taskID]; present {
		return entry.GetTaskDefinition(), true
	}
	// Otherwise look up the task name parsed out of the ID.
	_, taskName := util.GetPackageTaskFromId(taskID)
	entry, present := pc[taskName]
	return entry.GetTaskDefinition(), present
}
// HasTask returns true if the given task is defined in the pipeline, either directly or
// via a package task (`pkg#task`)
func (pc Pipeline) HasTask(task string) bool {
	for key := range pc {
		if key == task {
			return true
		}
		if !util.IsPackageTask(key) {
			continue
		}
		// Compare against the task portion of a pkg#task key.
		if _, name := util.GetPackageTaskFromId(key); name == task {
			return true
		}
	}
	return false
}
// Pristine returns a PristinePipeline, this is used for printing to console and pruning
func (pc Pipeline) Pristine() PristinePipeline {
	pristine := make(PristinePipeline, len(pc))
	for taskName, taskDef := range pc {
		// Tasks with experimental fields include them (with 0-values where
		// unset); tasks without omit them entirely.
		if taskDef.hasExperimentalFields() {
			pristine[taskName] = taskDef.GetTaskDefinition()
			continue
		}
		pristine[taskName] = taskDef.TaskDefinition
	}
	return pristine
}
// hasField checks the internal bookkeeping definedFields field to
// see whether a field was actually in the underlying turbo.json
// or whether it was initialized with its 0-value.
func (btd BookkeepingTaskDefinition) hasField(fieldName string) bool {
	if btd.definedFields.Includes(fieldName) {
		return true
	}
	return btd.experimentalFields.Includes(fieldName)
}
// hasExperimentalFields reports whether any experimental fields were present
// in the underlying turbo.json for this task.
func (btd BookkeepingTaskDefinition) hasExperimentalFields() bool {
	return len(btd.experimentalFields) != 0
}
// GetTaskDefinition gets a TaskDefinition by merging the experimental and non-experimental fields
// into a single representation to use downstream.
func (btd BookkeepingTaskDefinition) GetTaskDefinition() TaskDefinition {
	base := btd.TaskDefinition
	merged := TaskDefinition{
		Outputs:                 base.Outputs,
		ShouldCache:             base.ShouldCache,
		EnvVarDependencies:      base.EnvVarDependencies,
		TopologicalDependencies: base.TopologicalDependencies,
		TaskDependencies:        base.TaskDependencies,
		Inputs:                  base.Inputs,
		OutputMode:              base.OutputMode,
		Persistent:              base.Persistent,
	}
	// Fold in the experimental-only fields.
	merged.PassthroughEnv = btd.experimental.PassthroughEnv
	return merged
}
// MergeTaskDefinitions accepts an array of BookkeepingTaskDefinitions and merges them into
// a single TaskDefinition. It uses the bookkeeping definedFields to determine which fields should
// be overwritten and when 0-values should be respected. Later definitions win.
func MergeTaskDefinitions(taskDefinitions []BookkeepingTaskDefinition) (*TaskDefinition, error) {
	// Start with an empty definition
	mergedTaskDefinition := &TaskDefinition{}
	// Set the default, because the 0-value will be false, and if no turbo.jsons had
	// this field set for this task, we want it to be true.
	mergedTaskDefinition.ShouldCache = true
	// For each of the TaskDefinitions we know of, merge them in
	for _, bookkeepingTaskDef := range taskDefinitions {
		taskDef := bookkeepingTaskDef.GetTaskDefinition()
		if bookkeepingTaskDef.hasField("Outputs") {
			mergedTaskDefinition.Outputs = taskDef.Outputs
		}
		if bookkeepingTaskDef.hasField("ShouldCache") {
			mergedTaskDefinition.ShouldCache = taskDef.ShouldCache
		}
		if bookkeepingTaskDef.hasField("EnvVarDependencies") {
			mergedTaskDefinition.EnvVarDependencies = taskDef.EnvVarDependencies
		}
		if bookkeepingTaskDef.hasField("PassthroughEnv") {
			mergedTaskDefinition.PassthroughEnv = taskDef.PassthroughEnv
		}
		// A single "dependsOn" key feeds both topological and plain task
		// dependencies, so one defined-field check covers both assignments
		// (previously this was two identical checks).
		if bookkeepingTaskDef.hasField("DependsOn") {
			mergedTaskDefinition.TopologicalDependencies = taskDef.TopologicalDependencies
			mergedTaskDefinition.TaskDependencies = taskDef.TaskDependencies
		}
		if bookkeepingTaskDef.hasField("Inputs") {
			mergedTaskDefinition.Inputs = taskDef.Inputs
		}
		if bookkeepingTaskDef.hasField("OutputMode") {
			mergedTaskDefinition.OutputMode = taskDef.OutputMode
		}
		if bookkeepingTaskDef.hasField("Persistent") {
			mergedTaskDefinition.Persistent = taskDef.Persistent
		}
	}
	return mergedTaskDefinition, nil
}
// UnmarshalJSON deserializes a single task definition from
// turbo.json into a TaskDefinition struct, recording which keys were
// actually present so merging can distinguish "unset" from zero values.
func (btd *BookkeepingTaskDefinition) UnmarshalJSON(data []byte) error {
	task := rawTask{}
	if err := json.Unmarshal(data, &task); err != nil {
		return err
	}
	btd.definedFields = util.Set{}
	btd.experimentalFields = util.Set{}
	if task.Outputs != nil {
		var inclusions []string
		var exclusions []string
		// Assign a bookkeeping field so we know that there really were
		// outputs configured in the underlying config file.
		btd.definedFields.Add("Outputs")
		for _, glob := range task.Outputs {
			// "!"-prefixed globs are exclusions; the prefix is stripped.
			if strings.HasPrefix(glob, "!") {
				if filepath.IsAbs(glob[1:]) {
					log.Printf("[WARNING] Using an absolute path in \"outputs\" (%v) will not work and will be an error in a future version", glob)
				}
				exclusions = append(exclusions, glob[1:])
			} else {
				if filepath.IsAbs(glob) {
					log.Printf("[WARNING] Using an absolute path in \"outputs\" (%v) will not work and will be an error in a future version", glob)
				}
				inclusions = append(inclusions, glob)
			}
		}
		btd.TaskDefinition.Outputs = TaskOutputs{
			Inclusions: inclusions,
			Exclusions: exclusions,
		}
		sort.Strings(btd.TaskDefinition.Outputs.Inclusions)
		sort.Strings(btd.TaskDefinition.Outputs.Exclusions)
	}
	// Caching defaults to true when "cache" is absent.
	if task.Cache == nil {
		btd.TaskDefinition.ShouldCache = true
	} else {
		btd.definedFields.Add("ShouldCache")
		btd.TaskDefinition.ShouldCache = *task.Cache
	}
	envVarDependencies := make(util.Set)
	envVarPassthroughs := make(util.Set)
	btd.TaskDefinition.TopologicalDependencies = []string{} // TODO @mehulkar: this should be a set
	btd.TaskDefinition.TaskDependencies = []string{}        // TODO @mehulkar: this should be a set
	// If there was a dependsOn field, add the bookkeeping
	// we don't care what's in the field, just that it was there
	// We'll use this marker to overwrite while merging TaskDefinitions.
	if task.DependsOn != nil {
		btd.definedFields.Add("DependsOn")
	}
	for _, dependency := range task.DependsOn {
		if strings.HasPrefix(dependency, envPipelineDelimiter) {
			// Deprecated "$VAR" syntax: route into env var dependencies.
			log.Printf("[DEPRECATED] Declaring an environment variable in \"dependsOn\" is deprecated, found %s. Use the \"env\" key or use `npx @turbo/codemod migrate-env-var-dependencies`.\n", dependency)
			envVarDependencies.Add(strings.TrimPrefix(dependency, envPipelineDelimiter))
		} else if strings.HasPrefix(dependency, topologicalPipelineDelimiter) {
			// "^task" depends on the task in upstream package dependencies.
			btd.TaskDefinition.TopologicalDependencies = append(btd.TaskDefinition.TopologicalDependencies, strings.TrimPrefix(dependency, topologicalPipelineDelimiter))
		} else {
			btd.TaskDefinition.TaskDependencies = append(btd.TaskDefinition.TaskDependencies, dependency)
		}
	}
	sort.Strings(btd.TaskDefinition.TaskDependencies)
	sort.Strings(btd.TaskDefinition.TopologicalDependencies)
	// Append env key into EnvVarDependencies
	if task.Env != nil {
		btd.definedFields.Add("EnvVarDependencies")
		if err := gatherEnvVars(task.Env, "env", &envVarDependencies); err != nil {
			return err
		}
	}
	btd.TaskDefinition.EnvVarDependencies = envVarDependencies.UnsafeListOfStrings()
	sort.Strings(btd.TaskDefinition.EnvVarDependencies)
	if task.PassthroughEnv != nil {
		btd.experimentalFields.Add("PassthroughEnv")
		// Bug fix: the key name reported in gatherEnvVars' error was
		// misspelled ("passthrougEnv"); use the actual JSON key, matching
		// how "env" and "experimentalGlobalPassThroughEnv" are reported.
		if err := gatherEnvVars(task.PassthroughEnv, "experimentalPassthroughEnv", &envVarPassthroughs); err != nil {
			return err
		}
	}
	btd.experimental.PassthroughEnv = envVarPassthroughs.UnsafeListOfStrings()
	sort.Strings(btd.experimental.PassthroughEnv)
	if task.Inputs != nil {
		// Note that we don't require Inputs to be sorted, we're going to
		// hash the resulting files and sort that instead
		btd.definedFields.Add("Inputs")
		// TODO: during rust port, this should be moved to a post-parse validation step
		for _, input := range task.Inputs {
			if filepath.IsAbs(input) {
				log.Printf("[WARNING] Using an absolute path in \"inputs\" (%v) will not work and will be an error in a future version", input)
			}
		}
		btd.TaskDefinition.Inputs = task.Inputs
	}
	if task.OutputMode != nil {
		btd.definedFields.Add("OutputMode")
		btd.TaskDefinition.OutputMode = *task.OutputMode
	}
	if task.Persistent != nil {
		btd.definedFields.Add("Persistent")
		btd.TaskDefinition.Persistent = *task.Persistent
	} else {
		btd.TaskDefinition.Persistent = false
	}
	return nil
}
// MarshalJSON serializes taskDefinitionHashable struct into json
func (c taskDefinitionHashable) MarshalJSON() ([]byte, error) {
	// Convert to the defaults-carrying raw form so unset keys serialize
	// with their default values.
	raw := makeRawTask(c.Persistent, c.ShouldCache, c.OutputMode, c.Inputs, c.Outputs, c.EnvVarDependencies, c.TaskDependencies, c.TopologicalDependencies)
	return json.Marshal(raw)
}
// MarshalJSON serializes TaskDefinition struct into json
func (c TaskDefinition) MarshalJSON() ([]byte, error) {
	raw := makeRawTask(c.Persistent, c.ShouldCache, c.OutputMode, c.Inputs, c.Outputs, c.EnvVarDependencies, c.TaskDependencies, c.TopologicalDependencies)
	// Experimental passthrough vars are only serialized when present.
	if len(c.PassthroughEnv) != 0 {
		raw.PassthroughEnv = append(raw.PassthroughEnv, c.PassthroughEnv...)
	}
	sort.Strings(raw.PassthroughEnv)
	return json.Marshal(raw)
}
// UnmarshalJSON deserializes the contents of turbo.json into a TurboJSON struct
func (tj *TurboJSON) UnmarshalJSON(data []byte) error {
	raw := &rawTurboJSON{}
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	envVarDependencies := make(util.Set)
	envVarPassthroughs := make(util.Set)
	globalFileDependencies := make(util.Set)
	// Reject "$"-prefixed entries in the env keys (gatherEnvVars hard-errors).
	if err := gatherEnvVars(raw.GlobalEnv, "globalEnv", &envVarDependencies); err != nil {
		return err
	}
	if err := gatherEnvVars(raw.GlobalPassthroughEnv, "experimentalGlobalPassThroughEnv", &envVarPassthroughs); err != nil {
		return err
	}
	// TODO: In the rust port, warnings should be refactored to a post-parse validation step
	// Split globalDependencies into env vars (deprecated "$VAR" syntax) and file globs.
	for _, value := range raw.GlobalDependencies {
		if strings.HasPrefix(value, envPipelineDelimiter) {
			log.Printf("[DEPRECATED] Declaring an environment variable in \"globalDependencies\" is deprecated, found %s. Use the \"globalEnv\" key or use `npx @turbo/codemod migrate-env-var-dependencies`.\n", value)
			envVarDependencies.Add(strings.TrimPrefix(value, envPipelineDelimiter))
		} else {
			if filepath.IsAbs(value) {
				log.Printf("[WARNING] Using an absolute path in \"globalDependencies\" (%v) will not work and will be an error in a future version", value)
			}
			globalFileDependencies.Add(value)
		}
	}
	// turn the set into an array and assign to the TurboJSON struct fields.
	tj.GlobalEnv = envVarDependencies.UnsafeListOfStrings()
	sort.Strings(tj.GlobalEnv)
	// Only populate the passthrough list if the key was present, preserving
	// the nil-vs-empty distinction.
	if raw.GlobalPassthroughEnv != nil {
		tj.GlobalPassthroughEnv = envVarPassthroughs.UnsafeListOfStrings()
		sort.Strings(tj.GlobalPassthroughEnv)
	}
	tj.GlobalDeps = globalFileDependencies.UnsafeListOfStrings()
	sort.Strings(tj.GlobalDeps)
	// copy these over, we don't need any changes here.
	tj.Pipeline = raw.Pipeline
	tj.RemoteCacheOptions = raw.RemoteCacheOptions
	tj.Extends = raw.Extends
	// Directly to SpaceID, we don't need to keep the struct
	if raw.Space != nil {
		tj.SpaceID = raw.Space.ID
	}
	return nil
}
// MarshalJSON converts a TurboJSON into the equivalent json object in bytes
// note: we go via rawTurboJSON so that the output format is correct.
// This is used by `turbo prune` to generate a pruned turbo.json
// and also by --summarize & --dry=json to serialize the known config
// into something we can print to screen
func (tj *TurboJSON) MarshalJSON() ([]byte, error) {
	raw := pristineTurboJSON{}
	raw.GlobalDependencies = tj.GlobalDeps
	raw.GlobalEnv = tj.GlobalEnv
	raw.GlobalPassthroughEnv = tj.GlobalPassthroughEnv
	raw.Pipeline = tj.Pipeline.Pristine()
	raw.RemoteCacheOptions = tj.RemoteCacheOptions
	// NOTE(review): tj.Extends is never copied into raw, so "extends" is
	// always dropped from serialized output — confirm this is intentional
	// (e.g. for pruned configs) rather than an omission.
	if tj.SpaceID != "" {
		raw.Space = &SpaceConfig{ID: tj.SpaceID}
	}
	return json.Marshal(&raw)
}
// makeRawTask builds a rawTaskWithDefaults from the individual task fields so
// that unset keys serialize as empty arrays (not null) with defaults visible.
func makeRawTask(persistent bool, shouldCache bool, outputMode util.TaskOutputMode, inputs []string, outputs TaskOutputs, envVarDependencies []string, taskDependencies []string, topologicalDependencies []string) *rawTaskWithDefaults {
	// Initialize with empty arrays, so we get empty arrays serialized into JSON
	raw := &rawTaskWithDefaults{
		Outputs:        []string{},
		Inputs:         []string{},
		Env:            []string{},
		PassthroughEnv: []string{},
		DependsOn:      []string{},
	}
	raw.Persistent = persistent
	raw.Cache = &shouldCache
	raw.OutputMode = outputMode
	// Keep the empty-array default when inputs is nil/empty.
	if len(inputs) != 0 {
		raw.Inputs = inputs
	}
	raw.Env = append(raw.Env, envVarDependencies...)
	raw.Outputs = append(raw.Outputs, outputs.Inclusions...)
	// Exclusions are re-prefixed with "!" for the serialized form.
	for _, exclusion := range outputs.Exclusions {
		raw.Outputs = append(raw.Outputs, "!"+exclusion)
	}
	raw.DependsOn = append(raw.DependsOn, taskDependencies...)
	// Topological dependencies are re-prefixed with "^".
	for _, topo := range topologicalDependencies {
		raw.DependsOn = append(raw.DependsOn, "^"+topo)
	}
	// These _should_ already be sorted when the TaskDefinition struct was unmarshaled,
	// but we want to ensure they're sorted on the way out also, just in case something
	// in the middle mutates the items.
	sort.Strings(raw.DependsOn)
	sort.Strings(raw.Outputs)
	sort.Strings(raw.Env)
	sort.Strings(raw.Inputs)
	return raw
}
// gatherEnvVars puts env vars into the provided set as long as they don't have an invalid value.
func gatherEnvVars(vars []string, key string, into *util.Set) error {
	for _, envVar := range vars {
		if !strings.HasPrefix(envVar, envPipelineDelimiter) {
			into.Add(envVar)
			continue
		}
		// Hard error to help people specify this correctly during migration.
		// TODO: Remove this error after we have run summary.
		return fmt.Errorf("You specified \"%s\" in the \"%s\" key. You should not prefix your environment variables with \"%s\"", envVar, key, envPipelineDelimiter)
	}
	return nil
}