feat(benchmark): support comparing benchmark result #5398

Merged
merged 39 commits into main from feat-bench-compare on May 3, 2024

Commits (39)
d6dc0c1
wip: prototype bench compare
hi-ogawa Mar 17, 2024
6dca36a
wip
hi-ogawa Mar 17, 2024
01ad3d7
wip: benchmark comparison
hi-ogawa Mar 18, 2024
6d7b0a0
chore: cleanup example
hi-ogawa Mar 19, 2024
5194160
wip: mockup
hi-ogawa Mar 19, 2024
5219d0d
wip: support --compare
hi-ogawa Mar 19, 2024
22c3b4d
chore: remove unused
hi-ogawa Mar 19, 2024
0a23d81
chore: cleanup
hi-ogawa Mar 19, 2024
1658a2d
chore: unused
hi-ogawa Mar 19, 2024
130ac47
chore: tweak
hi-ogawa Mar 19, 2024
4e4e006
chore: lint
hi-ogawa Mar 19, 2024
c02a0c1
test: tweak example
hi-ogawa Mar 19, 2024
2943411
chore: tweak style
hi-ogawa Mar 19, 2024
25702bc
test: add test
hi-ogawa Mar 20, 2024
5c7e9ca
test: skip ci
hi-ogawa Mar 20, 2024
cb1dc73
Merge branch 'main' into feat-bench-compare
hi-ogawa Apr 4, 2024
77d9736
chore: rename
hi-ogawa Apr 4, 2024
525a2a8
chore: lockfile
hi-ogawa Apr 4, 2024
846690e
Merge branch 'main' into feat-bench-compare
hi-ogawa Apr 9, 2024
7377105
feat: compare on non-tty
hi-ogawa Apr 9, 2024
e9f52f9
fix: bench only --compare option
hi-ogawa Apr 9, 2024
4f7f707
refactor: minor
hi-ogawa Apr 9, 2024
4541a10
Merge branch 'main' into feat-bench-compare
hi-ogawa Apr 11, 2024
86088a8
refactor: add FormattedBenchamrkReport
hi-ogawa Apr 12, 2024
00dedcd
Merge branch 'main' into feat-bench-compare
hi-ogawa Apr 12, 2024
81bfab3
Merge branch 'main' into feat-bench-compare
hi-ogawa Apr 12, 2024
8296ed7
feat: add filepath
hi-ogawa Apr 12, 2024
3b329d4
chore: tweak format
hi-ogawa Apr 12, 2024
0ef0bc8
refactor: tweak
hi-ogawa Apr 13, 2024
147baf9
chore: lockfile
hi-ogawa Apr 13, 2024
2186c0c
feat: add --outputJson
hi-ogawa Apr 13, 2024
e3a0607
chore: log outputFile
hi-ogawa Apr 13, 2024
c3ce691
test: update
hi-ogawa Apr 13, 2024
35931ed
chore: remove json reporter
hi-ogawa Apr 13, 2024
b03080a
docs: options
hi-ogawa Apr 13, 2024
5fbfb6c
docs: benchmark report screenshots
hi-ogawa Apr 13, 2024
34e90f3
docs: examples
hi-ogawa Apr 13, 2024
5d4a806
Apply suggestions from code review
sheremet-va May 2, 2024
f1cc815
Merge branch 'main' into feat-bench-compare
hi-ogawa May 3, 2024
26 changes: 26 additions & 0 deletions docs/config/index.md
@@ -312,6 +312,32 @@ By providing an object instead of a string you can define individual outputs whe

To provide an object via the CLI, use the following syntax: `--outputFile.json=./path --outputFile.junit=./other-path`.

#### benchmark.outputJson <Version>1.6.0</Version> {#benchmark-outputJson}

- **Type:** `string | undefined`
- **Default:** `undefined`

A file path to store the benchmark result, which can be used with the `--compare` option later.

For example:

```sh
# save main branch's result
git checkout main
vitest bench --outputJson main.json

# switch to the feature branch and compare against main
git checkout feature
vitest bench --compare main.json
```

#### benchmark.compare <Version>1.6.0</Version> {#benchmark-compare}

- **Type:** `string | undefined`
- **Default:** `undefined`

A file path to a previous benchmark result to compare against current runs.

### alias

- **Type:** `Record<string, string> | Array<{ find: string | RegExp, replacement: string, customResolver?: ResolverFunction | ResolverObject }>`
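Both options documented above should also be settable in the config file rather than only via CLI flags — a minimal sketch, assuming the usual `defineConfig` shape with the new `benchmark` fields from this PR:

```ts
import { defineConfig } from 'vitest/config'

export default defineConfig({
  test: {
    benchmark: {
      // where this run's results are written (CLI: --outputJson)
      outputJson: 'bench-results.json',
      // a previous result file to compare against (CLI: --compare)
      compare: 'main.json',
    },
  },
})
```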
3 changes: 3 additions & 0 deletions docs/guide/features.md
@@ -210,6 +210,9 @@ describe('sort', () => {
})
```

<img alt="Benchmark report" img-dark src="https://github.com/vitest-dev/vitest/assets/4232207/6f0383ea-38ba-4f14-8a05-ab243afea01d">
<img alt="Benchmark report" img-light src="https://github.com/vitest-dev/vitest/assets/4232207/efbcb427-ecf1-4882-88de-210cd73415f6">

## Type Testing <Badge type="warning">Experimental</Badge> {#type-testing}

Since Vitest 0.25.0 you can [write tests](/guide/testing-types) to catch type regressions. Vitest comes with the [`expect-type`](https://github.com/mmkal/expect-type) package to provide a similar and easy-to-understand API.
2 changes: 1 addition & 1 deletion packages/vitest/src/defaults.ts
@@ -4,7 +4,7 @@ import { isCI } from './utils/env'

export const defaultInclude = ['**/*.{test,spec}.?(c|m)[jt]s?(x)']
export const defaultExclude = ['**/node_modules/**', '**/dist/**', '**/cypress/**', '**/.{idea,git,cache,output,temp}/**', '**/{karma,rollup,webpack,vite,vitest,jest,ava,babel,nyc,cypress,tsup,build,eslint,prettier}.config.*']
export const benchmarkConfigDefaults: Required<Omit<BenchmarkUserOptions, 'outputFile'>> = {
export const benchmarkConfigDefaults: Required<Omit<BenchmarkUserOptions, 'outputFile' | 'compare' | 'outputJson'>> = {
include: ['**/*.{bench,benchmark}.?(c|m)[jt]s?(x)'],
exclude: defaultExclude,
includeSource: [],
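Widening the `Omit` keeps `benchmarkConfigDefaults` total over the remaining keys while leaving the two new options without defaults. A self-contained illustration of the type-level effect, with a deliberately simplified options interface (not the real one):

```ts
interface BenchmarkUserOptions {
  include?: string[]
  exclude?: string[]
  outputFile?: string
  compare?: string
  outputJson?: string
}

// Required<Omit<...>> demands a default for every key except those omitted,
// so 'compare' and 'outputJson' stay undefined unless the user sets them.
const benchmarkConfigDefaults: Required<Omit<BenchmarkUserOptions, 'outputFile' | 'compare' | 'outputJson'>> = {
  include: ['**/*.{bench,benchmark}.?(c|m)[jt]s?(x)'],
  exclude: ['**/node_modules/**'],
}
```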
30 changes: 18 additions & 12 deletions packages/vitest/src/node/cli/cac.ts
@@ -1,14 +1,14 @@
import { normalize } from 'pathe'
import cac, { type CAC } from 'cac'
import cac, { type CAC, type Command } from 'cac'
import c from 'picocolors'
import { version } from '../../../package.json'
import { toArray } from '../../utils/base'
import type { Vitest, VitestRunMode } from '../../types'
import type { CliOptions } from './cli-api'
import type { CLIOption } from './cli-config'
import { cliOptionsConfig } from './cli-config'
import type { CLIOption, CLIOptions as CLIOptionsConfig } from './cli-config'
import { benchCliOptionsConfig, cliOptionsConfig } from './cli-config'

function addCommand(cli: CAC, name: string, option: CLIOption<any>) {
function addCommand(cli: CAC | Command, name: string, option: CLIOption<any>) {
const commandName = option.alias || name
let command = option.shorthand ? `-${option.shorthand}, --${commandName}` : `--${commandName}`
if ('argument' in option)
@@ -56,17 +56,20 @@ interface CLIOptions {
allowUnknownOptions?: boolean
}

function addCliOptions(cli: CAC | Command, options: CLIOptionsConfig<any>) {
for (const [optionName, option] of Object.entries(options)) {
if (option)
addCommand(cli, optionName, option)
}
}

export function createCLI(options: CLIOptions = {}) {
const cli = cac('vitest')

cli
.version(version)

for (const optionName in cliOptionsConfig) {
const option = (cliOptionsConfig as any)[optionName] as CLIOption<any> | null
if (option)
addCommand(cli, optionName, option)
}
addCliOptions(cli, cliOptionsConfig)

cli.help((info) => {
const helpSection = info.find(current => current.title?.startsWith('For more info, run any command'))
@@ -158,9 +161,12 @@ export function createCLI(options: CLIOptions = {}) {
.command('dev [...filters]', undefined, options)
.action(watch)

cli
.command('bench [...filters]', undefined, options)
.action(benchmark)
addCliOptions(
cli
.command('bench [...filters]', undefined, options)
.action(benchmark),
benchCliOptionsConfig,
)

// TODO: remove in Vitest 2.0
cli
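The `addCliOptions` helper lets the same registration loop serve both the root CLI and the `bench` subcommand, which is why its first parameter widens from `CAC` to `CAC | Command`. A standalone sketch of the pattern with cac (the option shape is simplified from the real `CLIOption`):

```ts
import cac, { type CAC, type Command } from 'cac'

interface CLIOption {
  description: string
  argument?: string
}

// Register every non-null option on either the root CLI or a single command.
function addCliOptions(cli: CAC | Command, options: Record<string, CLIOption | null>) {
  for (const [name, option] of Object.entries(options)) {
    if (option)
      cli.option(`--${name}${option.argument ? ` ${option.argument}` : ''}`, option.description)
  }
}

const cli = cac('vitest')
addCliOptions(
  // .action() returns the Command, so bench-only flags attach to it
  cli.command('bench [...filters]').action((filters, options) => console.log(filters, options)),
  {
    compare: { description: 'benchmark output file to compare against', argument: '<filename>' },
    outputJson: { description: 'benchmark output file', argument: '<filename>' },
  },
)
cli.parse()
```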
13 changes: 13 additions & 0 deletions packages/vitest/src/node/cli/cli-config.ts
@@ -638,4 +638,17 @@ export const cliOptionsConfig: VitestCLIOptions = {
name: null,
includeTaskLocation: null,
snapshotEnvironment: null,
compare: null,
outputJson: null,
}

export const benchCliOptionsConfig: Pick<VitestCLIOptions, 'compare' | 'outputJson'> = {
compare: {
description: 'benchmark output file to compare against',
argument: '<filename>',
},
outputJson: {
description: 'benchmark output file',
argument: '<filename>',
},
}
6 changes: 6 additions & 0 deletions packages/vitest/src/node/config.ts
@@ -381,6 +381,12 @@ export function resolveConfig(

if (options.outputFile)
resolved.benchmark.outputFile = options.outputFile

// --compare from cli
if (options.compare)
resolved.benchmark.compare = options.compare
if (options.outputJson)
resolved.benchmark.outputJson = options.outputJson
}

resolved.setupFiles = toArray(resolved.setupFiles || []).map(file =>
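Since these assignments run after the user config has been merged, flags passed on the command line take precedence over values from the config file. A hedged, condensed view of that precedence (hypothetical helper, not the real `resolveConfig`):

```ts
interface BenchmarkOptions { compare?: string; outputJson?: string }

// Precedence: config file < CLI flags.
function resolveBenchmark(fileConfig: BenchmarkOptions, cliOptions: BenchmarkOptions): BenchmarkOptions {
  const resolved: BenchmarkOptions = { ...fileConfig }
  if (cliOptions.compare)
    resolved.compare = cliOptions.compare // CLI wins over the file
  if (cliOptions.outputJson)
    resolved.outputJson = cliOptions.outputJson
  return resolved
}

console.log(resolveBenchmark({ compare: 'old.json' }, { compare: 'main.json' }))
// -> { compare: 'main.json' }
```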
2 changes: 0 additions & 2 deletions packages/vitest/src/node/reporters/benchmark/index.ts
@@ -1,10 +1,8 @@
import { VerboseReporter } from '../verbose'
import { JsonReporter } from './json'
import { TableReporter } from './table'

export const BenchmarkReportsMap = {
default: TableReporter,
verbose: VerboseReporter,
json: JsonReporter,
}
export type BenchmarkBuiltinReporters = keyof typeof BenchmarkReportsMap
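With `JsonReporter` dropped from `BenchmarkReportsMap`, `json` is no longer a valid `--reporter` value for `vitest bench`; machine-readable output presumably moves to the new flag instead. A hedged before/after sketch:

```sh
# before this PR (json benchmark reporter, now removed)
vitest bench --reporter=json --outputFile=bench.json

# after this PR (assumed equivalent)
vitest bench --outputJson bench.json
```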
82 changes: 0 additions & 82 deletions packages/vitest/src/node/reporters/benchmark/json.ts

This file was deleted.

105 changes: 100 additions & 5 deletions packages/vitest/src/node/reporters/benchmark/table/index.ts
@@ -1,8 +1,11 @@
import fs from 'node:fs'
import c from 'picocolors'
import * as pathe from 'pathe'
import type { TaskResultPack } from '@vitest/runner'
import type { UserConsoleLog } from '../../../../types/general'
import { BaseReporter } from '../../base'
import { getFullName } from '../../../../utils'
import type { BenchmarkResult, File } from '../../../../types'
import { getFullName, getTasks } from '../../../../utils'
import { getStateSymbol } from '../../renderers/utils'
import { type TableRendererOptions, createTableRenderer, renderTree } from './tableRender'

@@ -20,11 +23,24 @@ export class TableReporter extends BaseReporter {
super.onWatcherStart()
}

onCollected() {
async onCollected() {
this.rendererOptions.logger = this.ctx.logger
this.rendererOptions.showHeap = this.ctx.config.logHeapUsage
this.rendererOptions.slowTestThreshold = this.ctx.config.slowTestThreshold
if (this.ctx.config.benchmark?.compare) {
const compareFile = pathe.resolve(this.ctx.config.root, this.ctx.config.benchmark?.compare)
try {
this.rendererOptions.compare = flattenFormattedBenchamrkReport(
JSON.parse(
await fs.promises.readFile(compareFile, 'utf-8'),
),
)
}
catch (e) {
this.ctx.logger.error(`Failed to read '${compareFile}'`, e)
}
}
if (this.isTTY) {
this.rendererOptions.logger = this.ctx.logger
this.rendererOptions.showHeap = this.ctx.config.logHeapUsage
this.rendererOptions.slowTestThreshold = this.ctx.config.slowTestThreshold
const files = this.ctx.state.getFiles(this.watchFilters)
if (!this.renderer)
this.renderer = createTableRenderer(files, this.rendererOptions).start()
@@ -56,6 +72,18 @@ export class TableReporter extends BaseReporter {
await this.stopListRender()
this.ctx.logger.log()
await super.onFinished(files, errors)

// write output for future comparison
let outputFile = this.ctx.config.benchmark?.outputJson
if (outputFile) {
outputFile = pathe.resolve(this.ctx.config.root, outputFile)
const outputDirectory = pathe.dirname(outputFile)
if (!fs.existsSync(outputDirectory))
await fs.promises.mkdir(outputDirectory, { recursive: true })
const output = createFormattedBenchamrkReport(files)
await fs.promises.writeFile(outputFile, JSON.stringify(output, null, 2))
this.ctx.logger.log(`Benchmark report written to ${outputFile}`)
}
}

async onWatcherStart() {
@@ -80,3 +108,70 @@ export class TableReporter extends BaseReporter {
super.onUserConsoleLog(log)
}
}

export interface FormattedBenchamrkReport {
files: {
filepath: string
groups: FormattedBenchmarkGroup[]
}[]
}

// flat results with TaskId as a key
export interface FlatBenchmarkReport {
[id: string]: FormattedBenchmarkResult
}

interface FormattedBenchmarkGroup {
fullName: string
benchmarks: FormattedBenchmarkResult[]
}

export type FormattedBenchmarkResult = Omit<BenchmarkResult, 'samples'> & {
id: string
sampleCount: number
}

function createFormattedBenchamrkReport(files: File[]) {
const report: FormattedBenchamrkReport = { files: [] }
for (const file of files) {
const groups: FormattedBenchmarkGroup[] = []
for (const task of getTasks(file)) {
if (task && task.type === 'suite') {
const benchmarks: FormattedBenchmarkResult[] = []
for (const t of task.tasks) {
const benchmark = t.meta.benchmark && t.result?.benchmark
if (benchmark) {
const { samples, ...rest } = benchmark
benchmarks.push({
id: t.id,
sampleCount: samples.length,
...rest,
})
}
}
if (benchmarks.length) {
groups.push({
fullName: getFullName(task, ' > '),
benchmarks,
})
}
}
}
report.files.push({
filepath: file.filepath,
groups,
})
}
return report
}

function flattenFormattedBenchamrkReport(report: FormattedBenchamrkReport): FlatBenchmarkReport {
const flat: FlatBenchmarkReport = {}
for (const file of report.files) {
for (const group of file.groups) {
for (const t of group.benchmarks)
flat[t.id] = t
}
}
return flat
}
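For context on how the flattened shape is meant to be consumed: the renderer can look up a baseline entry by task id and compare throughput against the current run. A minimal standalone sketch under that assumption — the simplified field names mirror the tinybench-style `BenchmarkResult` (`hz` is ops/sec), and `main.json` / `feature.json` are illustrative paths:

```ts
import fs from 'node:fs'

// Simplified stand-ins for FormattedBenchamrkReport / FlatBenchmarkReport above.
interface BenchEntry { id: string; name: string; hz: number; sampleCount: number }
interface Report { files: { filepath: string; groups: { fullName: string; benchmarks: BenchEntry[] }[] }[] }

// Same flattening idea as flattenFormattedBenchamrkReport: key by task id.
function flatten(report: Report): Record<string, BenchEntry> {
  const flat: Record<string, BenchEntry> = {}
  for (const file of report.files) {
    for (const group of file.groups) {
      for (const b of group.benchmarks)
        flat[b.id] = b
    }
  }
  return flat
}

const baseline = flatten(JSON.parse(fs.readFileSync('main.json', 'utf-8')))
const current = flatten(JSON.parse(fs.readFileSync('feature.json', 'utf-8')))

for (const bench of Object.values(current)) {
  const base = baseline[bench.id]
  if (base)
    // >1x means the current branch runs more ops/sec than the baseline
    console.log(`${bench.name}: ${(bench.hz / base.hz).toFixed(2)}x vs baseline`)
}
```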