/
cli.ts
140 lines (120 loc) · 5.26 KB
/
cli.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
import { normalize } from 'pathe'
import cac from 'cac'
import c from 'picocolors'
import { version } from '../../package.json'
import type { Vitest, VitestRunMode } from '../types'
import type { CliOptions } from './cli-api'
import { startVitest } from './cli-api'
import { divider } from './reporters/renderers/utils'
const cli = cac('vitest')

// Global options, shared by every sub-command registered below.
cli
  .version(version)
  .option('-r, --root <path>', 'root path')
  .option('-c, --config <path>', 'path to config file')
  .option('-u, --update', 'update snapshot')
  .option('-w, --watch', 'watch mode')
  .option('-t, --testNamePattern <pattern>', 'run tests with full names matching the specified pattern')
  .option('--dir <path>', 'base directory to scan for the test files')
  .option('--ui', 'enable UI')
  // fix: help text had an unbalanced trailing ')'
  .option('--open', 'open UI automatically (default: !process.env.CI)')
  .option('--api [api]', 'serve API, available options: --api.port <port>, --api.host [host] and --api.strictPort')
  .option('--threads', 'enabled threads (default: true)')
  .option('--silent', 'silent console output from tests')
  .option('--isolate', 'isolate environment for each test file (default: true)')
  .option('--reporter <name>', 'reporter')
  .option('--outputDiffMaxSize <length>', 'object diff output max size (default: 10000)')
  .option('--outputDiffMaxLines <length>', 'max lines in diff output window (default: 50)')
  .option('--outputTruncateLength <length>', 'diff output line length (default: 80)')
  .option('--outputDiffLines <lines>', 'number of lines in single diff (default: 15)')
  .option('--outputFile <filename/-s>', 'write test results to a file when the --reporter=json or --reporter=junit option is also specified, use cac\'s dot notation for individual outputs of multiple reporters')
  .option('--coverage', 'enable coverage report')
  .option('--run', 'do not watch')
  .option('--mode <name>', 'override Vite mode (default: test)')
  .option('--globals', 'inject apis globally')
  .option('--dom', 'mock browser api with happy-dom')
  .option('--browser', 'run tests in browser')
  .option('--environment <env>', 'runner environment (default: node)')
  .option('--passWithNoTests', 'pass when no tests found')
  .option('--logHeapUsage', 'show the size of heap for each test')
  .option('--allowOnly', 'Allow tests and suites that are marked as only (default: !process.env.CI)')
  .option('--dangerouslyIgnoreUnhandledErrors', 'Ignore any unhandled errors that occur')
  .option('--shard <shard>', 'Test suite shard to execute in a format of <index>/<count>')
  .option('--changed [since]', 'Run tests that are affected by the changed files (default: false)')
  .option('--sequence <options>', 'Define in what order to run tests (use --sequence.shuffle to run tests in random order)')
  .option('--no-color', 'Removes colors from the console output')
  // fix: 'Return' -> 'Retry'; the flag re-runs tests after a segfault
  .option('--segfault-retry <times>', 'Retry tests on segment fault (default: 0)', { default: 0 })
  .option('--inspect', 'Enable Node.js inspector')
  .option('--inspect-brk', 'Enable Node.js inspector with break')
  .help()

// Named sub-commands; every one accepts optional file filters.
cli
  .command('run [...filters]')
  .action(run)

cli
  .command('related [...filters]')
  .action(runRelated)

cli
  .command('watch [...filters]')
  .action(watch)

cli
  .command('dev [...filters]')
  .action(watch)

cli
  .command('bench [...filters]')
  .action(benchmark)

cli
  .command('typecheck [...filters]')
  .action(typecheck)

// Default command: bare `vitest [filters]` starts in test mode.
cli
  .command('[...filters]')
  .action((filters, options) => start('test', filters, options))

cli.parse()
/**
 * Handler for `vitest related [...filters]` — runs only the tests that
 * import any of the given files.
 */
async function runRelated(relatedFiles: string[] | string, argv: CliOptions): Promise<void> {
  // Related runs should not fail when nothing matches, unless the user said otherwise.
  argv.passWithNoTests ??= true
  argv.related = relatedFiles
  // File selection happens through `related`, so no CLI filters are passed.
  return start('test', [], argv)
}
/**
 * Handler for `vitest watch`/`vitest dev` — forces watch mode on and
 * delegates to the shared startup path.
 */
async function watch(cliFilters: string[], options: CliOptions): Promise<void> {
  options.watch = true
  return start('test', cliFilters, options)
}
/**
 * Handler for `vitest run` — single-pass mode: execute once, never watch.
 */
async function run(cliFilters: string[], options: CliOptions): Promise<void> {
  options.run = true
  return start('test', cliFilters, options)
}
/**
 * Handler for `vitest bench` — warns that benchmarking is experimental,
 * then starts Vitest in benchmark mode.
 */
async function benchmark(cliFilters: string[], options: CliOptions): Promise<void> {
  const experimentalWarning = 'Benchmarking is an experimental feature.\nBreaking changes might not follow semver, please pin Vitest\'s version when using it.'
  console.warn(c.yellow(experimentalWarning))
  return start('benchmark', cliFilters, options)
}
/**
 * Handler for `vitest typecheck` — warns that type testing is experimental,
 * then starts Vitest in typecheck mode.
 *
 * Unlike the other handlers this one is also callable with no arguments,
 * hence the defaults.
 */
async function typecheck(cliFilters: string[] = [], options: CliOptions = {}): Promise<void> {
  console.warn(c.yellow('Testing types with tsc and vue-tsc is an experimental feature.\nBreaking changes might not follow semver, please pin Vitest\'s version when using it.'))
  await start('typecheck', cliFilters, options)
}
/**
 * Normalizes the path-valued CLI options in place: non-empty paths are
 * slash-normalized, falsy ones are removed entirely so they don't override
 * config-file values. Returns the same (mutated) options object.
 */
function normalizeCliOptions(argv: CliOptions): CliOptions {
  const pathFields = ['root', 'config', 'dir'] as const
  for (const field of pathFields) {
    const value = argv[field]
    if (value)
      argv[field] = normalize(value)
    else
      delete argv[field]
  }
  return argv
}
/**
 * Shared startup path for every command: normalizes filters and options,
 * boots Vitest in the requested mode, and — outside of watch mode — tears
 * the context down once the run completes.
 *
 * Any error escaping startup is printed under an "Unhandled Error" banner
 * and terminates the process with exit code 1.
 */
async function start(mode: VitestRunMode, cliFilters: string[], options: CliOptions): Promise<Vitest | undefined> {
  try {
    const filters = cliFilters.map(normalize)
    const ctx = await startVitest(mode, filters, normalizeCliOptions(options))
    // Watch mode keeps the context alive; otherwise shut it down cleanly.
    if (!ctx?.config.watch)
      await ctx?.exit()
    return ctx
  }
  catch (e) {
    const banner = divider(c.bold(c.inverse(' Unhandled Error ')))
    console.error(`\n${c.red(banner)}`)
    console.error(e)
    console.error('\n\n')
    process.exit(1)
  }
}