forked from vitest-dev/vitest
/
threads.ts
194 lines (158 loc) · 6.46 KB
/
threads.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
import { MessageChannel } from 'node:worker_threads'
import { cpus } from 'node:os'
import { pathToFileURL } from 'node:url'
import { createBirpc } from 'birpc'
import { resolve } from 'pathe'
import type { Options as TinypoolOptions } from 'tinypool'
import Tinypool from 'tinypool'
import { distDir } from '../../paths'
import type { ContextTestEnvironment, ResolvedConfig, RunnerRPC, RuntimeRPC, Vitest, WorkerContext } from '../../types'
import type { PoolProcessOptions, ProcessPool, RunWithFiles } from '../pool'
import { envsOrder, groupFilesByEnv } from '../../utils/test-helpers'
import { AggregateError, groupBy } from '../../utils/base'
import type { WorkspaceProject } from '../workspace'
import { createMethodsRPC } from './rpc'
// file:// URL of the built worker entry point that every Tinypool thread executes.
const workerPath = pathToFileURL(resolve(distDir, './worker.js')).href
/**
 * Sets up a MessageChannel pair for communicating with a single worker run.
 *
 * The host side keeps `port` (port2) and serves RPC requests over it via
 * birpc, while `workerPort` (port1) is handed to the worker through the
 * Tinypool transfer list. Cancellation signals from the Vitest context are
 * forwarded into the worker through the `onCancel` RPC event.
 *
 * @param project - workspace project whose RPC methods back this channel
 * @returns both ports; the caller is responsible for closing them when done
 */
function createWorkerChannel(project: WorkspaceProject) {
  const { port1: workerPort, port2: port } = new MessageChannel()

  const rpc = createBirpc<RunnerRPC, RuntimeRPC>(createMethodsRPC(project), {
    eventNames: ['onCancel'],
    post: v => port.postMessage(v),
    on: fn => port.on('message', fn),
  })

  // Relay cancellation from the main process into the running worker.
  project.ctx.onCancel(reason => rpc.onCancel(reason))

  return { workerPort, port }
}
/**
 * Creates a worker-thread based process pool for running test files.
 *
 * Thread count defaults to half the cores in watch mode (leaving headroom
 * for the dev server / editor) and cores-minus-one otherwise, overridable
 * via `maxThreads` / `minThreads` config.
 *
 * @param ctx - the Vitest context owning state, config and cancellation
 * @param options - environment variables and execArgv forwarded to workers
 * @returns a `ProcessPool` with `runTests` and `close` entry points
 */
export function createThreadsPool(ctx: Vitest, { execArgv, env }: PoolProcessOptions): ProcessPool {
  const threadsCount = ctx.config.watch
    ? Math.max(Math.floor(cpus().length / 2), 1)
    : Math.max(cpus().length - 1, 1)

  const maxThreads = ctx.config.maxThreads ?? threadsCount
  const minThreads = ctx.config.minThreads ?? threadsCount

  const options: TinypoolOptions = {
    filename: workerPath,
    // TODO: investigate further
    // It seems atomics introduced V8 Fatal Error https://github.com/vitest-dev/vitest/issues/1191
    useAtomics: ctx.config.useAtomics ?? false,
    maxThreads,
    minThreads,
    env,
    execArgv,
    terminateTimeout: ctx.config.teardownTimeout,
  }

  // Isolation recycles a fresh worker per task so module state never leaks
  // between test files.
  if (ctx.config.isolate) {
    options.isolateWorkers = true
    options.concurrentTasksPerWorker = 1
  }

  // singleThread forces everything through one worker, one task at a time.
  if (ctx.config.singleThread) {
    options.concurrentTasksPerWorker = 1
    options.maxThreads = 1
    options.minThreads = 1
  }

  const pool = new Tinypool(options)

  const runWithFiles = (name: string): RunWithFiles => {
    let id = 0

    /**
     * Runs a batch of files in one worker task, wiring up an RPC channel
     * and translating known Tinypool failure modes into pool state.
     */
    async function runFiles(project: WorkspaceProject, config: ResolvedConfig, files: string[], environment: ContextTestEnvironment, invalidates: string[] = []) {
      ctx.state.clearFiles(project, files)
      const { workerPort, port } = createWorkerChannel(project)
      const workerId = ++id
      const data: WorkerContext = {
        port: workerPort,
        config,
        files,
        invalidates,
        environment,
        workerId,
      }
      try {
        await pool.run(data, { transferList: [workerPort], name })
      }
      catch (error) {
        // Worker got stuck and won't terminate - this may cause process to hang
        if (error instanceof Error && /Failed to terminate worker/.test(error.message))
          ctx.state.addProcessTimeoutCause(`Failed to terminate worker while running ${files.join(', ')}.`)
        // Intentionally cancelled
        else if (ctx.isCancelling && error instanceof Error && /The task has been cancelled/.test(error.message))
          ctx.state.cancelFiles(files, ctx.config.root)
        else
          throw error
      }
      finally {
        port.close()
        workerPort.close()
      }
    }

    const Sequencer = ctx.config.sequence.sequencer
    const sequencer = new Sequencer(ctx)

    return async (specs, invalidates) => {
      // Cancel pending tasks from pool when possible
      ctx.onCancel(() => pool.cancelPendingTasks())

      // Serialize each project's config at most once per run.
      const configs = new Map<WorkspaceProject, ResolvedConfig>()
      const getConfig = (project: WorkspaceProject): ResolvedConfig => {
        if (configs.has(project))
          return configs.get(project)!

        const config = project.getSerializableConfig()
        configs.set(project, config)
        return config
      }

      const { shard } = ctx.config

      if (shard)
        specs = await sequencer.shard(specs)

      specs = await sequencer.sort(specs)

      const singleThreads = specs.filter(([project]) => project.config.singleThread)
      const multipleThreads = specs.filter(([project]) => !project.config.singleThread)

      if (multipleThreads.length) {
        const filesByEnv = await groupFilesByEnv(multipleThreads)
        const promises = Object.values(filesByEnv).flat()
        // allSettled so one failing file does not abort the rest; errors are
        // collected and rethrown together afterwards.
        const results = await Promise.allSettled(promises
          .map(({ file, environment, project }) => runFiles(project, getConfig(project), [file], environment, invalidates)))

        const errors = results.filter((r): r is PromiseRejectedResult => r.status === 'rejected').map(r => r.reason)
        if (errors.length > 0)
          throw new AggregateError(errors, 'Errors occurred while running tests. For more information, see serialized error.')
      }

      if (singleThreads.length) {
        const filesByEnv = await groupFilesByEnv(singleThreads)
        const envs = envsOrder.concat(
          Object.keys(filesByEnv).filter(env => !envsOrder.includes(env)),
        )

        // always run environments isolated between each other
        for (const env of envs) {
          const files = filesByEnv[env]

          if (!files?.length)
            continue

          // Group by project + environment options so each worker task gets a
          // homogeneous batch.
          const filesByOptions = groupBy(files, ({ project, environment }) => project.getName() + JSON.stringify(environment.options))

          const promises = Object.values(filesByOptions).map(async (files) => {
            const filenames = files.map(f => f.file)
            await runFiles(files[0].project, getConfig(files[0].project), filenames, files[0].environment, invalidates)
          })

          await Promise.all(promises)
        }
      }
    }
  }

  return {
    runTests: runWithFiles('run'),
    close: async () => {
      // node before 16.17 has a bug that causes FATAL ERROR because of the race condition
      // Compare major/minor as integers: the previous float comparison
      // (Number('16.9') === 16.9 > 16.17) treated v16.9 as newer than v16.17.
      const [major = 0, minor = 0] = (process.version.match(/^v(\d+)\.(\d+)/)?.slice(1) ?? []).map(Number)
      if (major > 16 || (major === 16 && minor >= 17))
        await pool.destroy()
    },
  }
}