-
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
/
child.ts
232 lines (189 loc) · 8 KB
/
child.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
import v8 from 'node:v8'
import * as nodeos from 'node:os'
import EventEmitter from 'node:events'
import { Tinypool } from 'tinypool'
import type { TinypoolChannel, Options as TinypoolOptions } from 'tinypool'
import { createBirpc } from 'birpc'
import type { ContextTestEnvironment, ResolvedConfig, RunnerRPC, RuntimeRPC, Vitest } from '../../types'
import type { ChildContext } from '../../types/child'
import type { PoolProcessOptions, ProcessPool, RunWithFiles } from '../pool'
import type { WorkspaceProject } from '../workspace'
import { envsOrder, groupFilesByEnv } from '../../utils/test-helpers'
import { groupBy } from '../../utils'
import { createMethodsRPC } from './rpc'
/**
 * Builds the bidirectional message channel between the main process and a
 * forked Tinypool worker for the given workspace project.
 *
 * A single local EventEmitter carries both directions: 'message' flows
 * main -> worker, 'response' flows worker -> main. A birpc endpoint is
 * attached on top of it (v8-serialized, since structured data crosses the
 * process boundary) and wired to the project's RPC methods. Cancellation
 * from the Vitest context is forwarded to the worker over RPC.
 *
 * @returns the Tinypool channel plus a cleanup callback that detaches all
 *          listeners once the task finishes.
 */
function createChildProcessChannel(project: WorkspaceProject) {
  const MESSAGE = 'message'
  const RESPONSE = 'response'

  const emitter = new EventEmitter()

  const channel: TinypoolChannel = {
    onMessage: callback => emitter.on(MESSAGE, callback),
    postMessage: message => emitter.emit(RESPONSE, message),
  }

  const rpc = createBirpc<RunnerRPC, RuntimeRPC>(createMethodsRPC(project), {
    eventNames: ['onCancel'],
    // Payloads cross a process boundary; v8 serialization round-trips
    // structured data the way postMessage would.
    serialize: v8.serialize,
    deserialize: v => v8.deserialize(Buffer.from(v)),
    post: (v) => {
      emitter.emit(MESSAGE, v)
    },
    on: (fn) => {
      emitter.on(RESPONSE, fn)
    },
  })

  // Propagate cancellation requests from the main process into the worker.
  project.ctx.onCancel(reason => rpc.onCancel(reason))

  const cleanup = () => emitter.removeAllListeners()

  return { channel, cleanup }
}
/**
 * Encodes a test-name pattern for transfer to the worker.
 *
 * v8 serialization cannot carry RegExp objects, so a regex is flattened to
 * a string tagged with the `$$vitest:` prefix (the worker re-parses it);
 * plain strings pass through untouched.
 */
function stringifyRegex(input: RegExp | string): string {
  return typeof input === 'string' ? input : `$$vitest:${input.toString()}`
}
/**
 * Creates a ProcessPool backed by Tinypool `child_process` workers ("forks"
 * pool). Pool sizing comes from available CPUs unless overridden by
 * `poolOptions.forks.{minForks,maxForks}`; isolation (on by default)
 * recycles a worker after each task, and `singleFork` collapses the pool to
 * a single worker.
 *
 * @param ctx the Vitest context providing config, state, and cancellation
 * @param execArgv/env/forksPath process options: node args, env vars, and
 *        the worker entry script path
 * @returns `{ runTests, close }` — run a set of specs / destroy the pool
 */
export function createChildProcessPool(ctx: Vitest, { execArgv, env, forksPath }: PoolProcessOptions): ProcessPool {
  // Prefer availableParallelism (newer Node API) over cpus().length.
  const numCpus
    = typeof nodeos.availableParallelism === 'function'
      ? nodeos.availableParallelism()
      : nodeos.cpus().length

  // Watch mode keeps half the cores free for the rest of the system;
  // one-shot runs leave just one core free. Always at least 1.
  const threadsCount = ctx.config.watch
    ? Math.max(Math.floor(numCpus / 2), 1)
    : Math.max(numCpus - 1, 1)

  const maxThreads = ctx.config.poolOptions?.forks?.maxForks ?? threadsCount
  const minThreads = ctx.config.poolOptions?.forks?.minForks ?? threadsCount

  const options: TinypoolOptions = {
    runtime: 'child_process',
    filename: forksPath,
    maxThreads,
    minThreads,
    env,
    execArgv,
    terminateTimeout: ctx.config.teardownTimeout,
  }

  // Isolation (default on): one task per worker, recycled afterwards, so
  // module/global state cannot leak between test files.
  if (ctx.config.poolOptions?.forks?.isolate ?? true) {
    options.isolateWorkers = true
    options.concurrentTasksPerWorker = 1
  }

  // singleFork forces everything through one sequential worker; applied
  // after the isolate block so its min/max override wins.
  if (ctx.config.poolOptions?.forks?.singleFork) {
    options.concurrentTasksPerWorker = 1
    options.maxThreads = 1
    options.minThreads = 1
  }

  const pool = new Tinypool(options)

  const runWithFiles = (name: string): RunWithFiles => {
    // Monotonic id so each worker invocation is distinguishable.
    let id = 0

    // Runs one batch of files for a project inside a pool worker, wiring up
    // the RPC channel for the duration of the task.
    async function runFiles(project: WorkspaceProject, config: ResolvedConfig, files: string[], environment: ContextTestEnvironment, invalidates: string[] = []) {
      ctx.state.clearFiles(project, files)
      const { channel, cleanup } = createChildProcessChannel(project)
      const workerId = ++id
      const data: ChildContext = {
        config,
        files,
        invalidates,
        environment,
        workerId,
        projectName: project.getName(),
      }
      try {
        await pool.run(data, { name, channel })
      }
      catch (error) {
        // Worker got stuck and won't terminate - this may cause process to hang
        if (error instanceof Error && /Failed to terminate worker/.test(error.message))
          ctx.state.addProcessTimeoutCause(`Failed to terminate worker while running ${files.join(', ')}.`)
        // Intentionally cancelled
        else if (ctx.isCancelling && error instanceof Error && /The task has been cancelled/.test(error.message))
          ctx.state.cancelFiles(files, ctx.config.root)
        else
          throw error
      }
      finally {
        // Always detach channel listeners, even on failure/cancellation.
        cleanup()
      }
    }

    const Sequencer = ctx.config.sequence.sequencer
    const sequencer = new Sequencer(ctx)

    return async (specs, invalidates) => {
      // Cancel pending tasks from pool when possible
      ctx.onCancel(() => pool.cancelPendingTasks())

      // Serialized per-project configs are cached; regexes are stringified
      // because v8 serialization cannot carry them.
      const configs = new Map<WorkspaceProject, ResolvedConfig>()
      const getConfig = (project: WorkspaceProject): ResolvedConfig => {
        if (configs.has(project))
          return configs.get(project)!
        const _config = project.getSerializableConfig()
        const config = {
          ..._config,
          // v8 serialize does not support regex
          testNamePattern: _config.testNamePattern
            ? stringifyRegex(_config.testNamePattern)
            : undefined,
        } as ResolvedConfig
        configs.set(project, config)
        return config
      }

      // NOTE(review): workspaceMap is built (file -> projects) but is not
      // read anywhere in this function — possibly dead code; verify before
      // removing.
      const workspaceMap = new Map<string, WorkspaceProject[]>()
      for (const [project, file] of specs) {
        const workspaceFiles = workspaceMap.get(file) ?? []
        workspaceFiles.push(project)
        workspaceMap.set(file, workspaceFiles)
      }

      // it's possible that project defines a file that is also defined by another project
      const { shard } = ctx.config
      if (shard)
        specs = await sequencer.shard(specs)
      specs = await sequencer.sort(specs)

      // Split specs by singleFork so sequential projects don't block the
      // parallel pool and vice versa.
      const singleFork = specs.filter(([project]) => project.config.poolOptions?.forks?.singleFork)
      const multipleForks = specs.filter(([project]) => !project.config.poolOptions?.forks?.singleFork)

      if (multipleForks.length) {
        const filesByEnv = await groupFilesByEnv(multipleForks)
        const files = Object.values(filesByEnv).flat()
        const results: PromiseSettledResult<void>[] = []

        if (ctx.config.poolOptions?.forks?.isolate ?? true) {
          // Isolated: every file is its own task; allSettled collects all
          // failures instead of aborting on the first one.
          results.push(...await Promise.allSettled(files.map(({ file, environment, project }) =>
            runFiles(project, getConfig(project), [file], environment, invalidates))))
        }
        else {
          // When isolation is disabled, we still need to isolate environments and workspace projects from each other.
          // Tasks are still running parallel but environments are isolated between tasks.
          const grouped = groupBy(files, ({ project, environment }) => project.getName() + environment.name + JSON.stringify(environment.options))
          for (const group of Object.values(grouped)) {
            // Push all files to pool's queue
            results.push(...await Promise.allSettled(group.map(({ file, environment, project }) =>
              runFiles(project, getConfig(project), [file], environment, invalidates))))
            // Once all tasks are running or finished, recycle worker for isolation.
            // On-going workers will run in the previous environment.
            await new Promise<void>(resolve => pool.queueSize === 0 ? resolve() : pool.once('drain', resolve))
            await pool.recycleWorkers()
          }
        }

        const errors = results.filter((r): r is PromiseRejectedResult => r.status === 'rejected').map(r => r.reason)
        if (errors.length > 0)
          throw new AggregateError(errors, 'Errors occurred while running tests. For more information, see serialized error.')
      }

      if (singleFork.length) {
        const filesByEnv = await groupFilesByEnv(singleFork)
        // Known environments run first in a fixed order; any custom ones
        // follow in discovery order.
        const envs = envsOrder.concat(
          Object.keys(filesByEnv).filter(env => !envsOrder.includes(env)),
        )
        for (const env of envs) {
          const files = filesByEnv[env]
          if (!files?.length)
            continue
          const filesByOptions = groupBy(files, ({ project, environment }) => project.getName() + JSON.stringify(environment.options))
          for (const files of Object.values(filesByOptions)) {
            // Always run environments isolated between each other
            await pool.recycleWorkers()
            const filenames = files.map(f => f.file)
            await runFiles(files[0].project, getConfig(files[0].project), filenames, files[0].environment, invalidates)
          }
        }
      }
    }
  }

  return {
    runTests: runWithFiles('run'),
    close: async () => {
      await pool.destroy()
    },
  }
}