misc: add benchmarking script for edge rendering (vercel#40716)
This PR adds the benchmarking script I've been using for vercel#40251 to
measure the performance improvements that we make to the Edge SSR
runtime.

This tool:
- uploads two versions of the benchmarking project to Vercel: one with
the latest canary and one with your current local changes in `dist`
(don't forget to build!)
- runs some tests against the published URLs to measure TTFB
- displays a nice chart and table

What this doesn't do (yet):

- allow you to choose which URL to compare
- allow you to change the measured metric
- run a battery of different tests


## Bug

- [ ] Related issues linked using `fixes #number`
- [ ] Integration tests added
- [ ] Errors have a helpful link attached, see `contributing.md`

## Feature

- [ ] Implements an existing feature request or RFC. Make sure the
feature request has been accepted for implementation before opening a
PR.
- [ ] Related issues linked using `fixes #number`
- [ ] Integration tests added
- [ ] Documentation added
- [ ] Telemetry added. In case of a feature, to determine whether it's used or not.
- [ ] Errors have a helpful link attached, see `contributing.md`


https://user-images.githubusercontent.com/11064311/191270204-04447e20-5a40-43a9-bcda-b7eaeb3d270a.mov


## Documentation / Examples

- [ ] Make sure the linting passes by running `pnpm lint`
- [ ] The "examples guidelines" are followed from [our contributing
doc](https://github.com/vercel/next.js/blob/canary/contributing/examples/adding-examples.md)

Co-authored-by: kodiakhq[bot] <49736102+kodiakhq[bot]@users.noreply.github.com>
2 people authored and BowlingX committed Oct 5, 2022
1 parent 241253c commit c6ea6d1
Showing 18 changed files with 864 additions and 15 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -45,4 +45,4 @@ test-timings.json

# Cache
*.tsbuildinfo
.swc/
.swc/
8 changes: 8 additions & 0 deletions bench/vercel/.env.dev
@@ -0,0 +1,8 @@
# The Vercel team you want to deploy the project to
VERCEL_TEST_TEAM=

# The corresponding Vercel token
VERCEL_TEST_TOKEN=

# The Vercel project you want to deploy the test project to
VERCEL_TEST_PROJECT_NAME=
5 changes: 5 additions & 0 deletions bench/vercel/.gitignore
@@ -0,0 +1,5 @@
.vercel
.next
*.tgz
yarn.lock
.env
22 changes: 22 additions & 0 deletions bench/vercel/README.md
@@ -0,0 +1,22 @@
# Benchmarking Next.js on production

This script lets you measure performance metrics of your local Next.js build in production by uploading your current build to Vercel alongside an example app and running some basic benchmarks against it.

## Requirements

- the Vercel CLI

## Setup

Rename the provided `.env.dev` file to `.env` and fill in the required `VERCEL_TEST_TOKEN` and `VERCEL_TEST_TEAM` values. You can generate both on vercel.com.

Run `pnpm install`, `pnpm bench` and profit.

Note: if you made changes to Next.js, make sure you compile them by running either `pnpm dev` or `pnpm build --force` at the root of the monorepo.

## How it works

- with the Vercel CLI, we set up a project
- we `npm pack` the local Next.js build and add it to the repo
- we upload the repo to Vercel and let it build
- once it builds, we get the deployment URL and run some tests against it
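
For illustration, a rough sketch of the "`npm pack` the local Next.js build" step. This is not the actual `project-utils.js` from this PR (that file is not reproduced here); the paths, tarball name, and file layout are assumptions:

```js
// Hypothetical sketch only — the real logic lives in project-utils.js.
import { execSync } from 'child_process'
import { readFileSync, writeFileSync } from 'fs'

// 1. Pack the locally built `next` package into a tarball (written to the cwd).
execSync('npm pack ../../packages/next', { stdio: 'inherit' })

// 2. Point the benchmark app at the tarball instead of the published canary.
const pkgPath = './benchmark-app/package.json'
const pkg = JSON.parse(readFileSync(pkgPath, 'utf8'))
pkg.dependencies.next = 'file:../next-local.tgz' // hypothetical tarball name
writeFileSync(pkgPath, JSON.stringify(pkg, null, 2))
```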
70 changes: 70 additions & 0 deletions bench/vercel/bench.js
@@ -0,0 +1,70 @@
import { Command } from 'commander'
import console from 'console'

import chalk from 'chalk'

import PQueue from 'p-queue'
import { generateProjects, cleanupProjectFolders } from './project-utils.js'
import { printBenchmarkResults } from './chart.js'
import { genRetryableRequest } from './gen-request.js'

const program = new Command()

const queue = new PQueue({ concurrency: 25 })
const TTFB_OUTLIERS_THRESHOLD = 250

program.option('-p, --path <path>')

program.parse(process.argv)

const options = program.opts()

if (options.path) {
  console.log('Running benchmark for path: ', options.path)
}

try {
  const [originDeploymentURL, headDeploymentURL] = await generateProjects()

  const originBenchmarkURL = `${originDeploymentURL}${options.path || ''}`
  const headBenchmarkURL = `${headDeploymentURL}${options.path || ''}`

  console.log(`Origin deployment URL: ${originBenchmarkURL}`)
  console.log(`Head deployment URL: ${headBenchmarkURL}`)
  console.log(`Running benchmark...`)

  const benchResults = await runBenchmark(originBenchmarkURL)

  const headBenchResults = await runBenchmark(headBenchmarkURL)

  console.log(chalk.bold('Benchmark results for cold:'))
  printBenchmarkResults(
    {
      origin: benchResults,
      head: headBenchResults,
    },
    (r) => r.cold && r.firstByte <= TTFB_OUTLIERS_THRESHOLD && r.firstByte
  )
  console.log(chalk.bold('Benchmark results for hot:'))
  printBenchmarkResults(
    {
      origin: benchResults,
      head: headBenchResults,
    },
    (r) => !r.cold && r.firstByte <= TTFB_OUTLIERS_THRESHOLD && r.firstByte
  )
} catch (err) {
  console.log(chalk.red('Benchmark failed: ', err))
} finally {
  await cleanupProjectFolders()
}

async function runBenchmark(url) {
  return (
    await Promise.all(
      Array.from({ length: 500 }).map(() =>
        queue.add(() => genRetryableRequest(url))
      )
    )
  ).filter(Boolean)
}
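
For reference, a sketch of what one entry in `benchResults` looks like and how the selectors above consume it. The phase names assume `@szmarczak/http-timer`'s `timings.phases`, and every value is invented:

```js
// Hypothetical sample entry (values made up):
const sample = {
  wait: 1.2,
  dns: 0.9,
  tcp: 12.4,
  tls: 18.7,
  request: 0.3,
  firstByte: 87.6, // TTFB in ms — the metric compared above
  download: 2.1,
  total: 123.2,
  cold: false, // true when the response body did not contain 'HOT'
}

// A selector keeps hot (or cold) hits under the outlier threshold and maps them to their TTFB:
const hotTTFBs = [sample]
  .map((r) => !r.cold && r.firstByte <= 250 /* TTFB_OUTLIERS_THRESHOLD */ && r.firstByte)
  .filter(Boolean) // => [87.6]
```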
2 changes: 2 additions & 0 deletions bench/vercel/benchmark-app/.gitignore
@@ -0,0 +1,2 @@
.vercel
webpack-stats-client.json
14 changes: 14 additions & 0 deletions bench/vercel/benchmark-app/app/layout.js
@@ -0,0 +1,14 @@
import * as React from 'react'

export default function Root({ children }) {
  return (
    <html>
      <head></head>
      <body>{children}</body>
    </html>
  )
}

export const config = {
  runtime: 'experimental-edge',
}
14 changes: 14 additions & 0 deletions bench/vercel/benchmark-app/app/rsc/page.js
@@ -0,0 +1,14 @@
import * as React from 'react'

// if (!('hot' in Math)) Math.hot = false

export default function page() {
  // const previous = Math.hot
  // Math.hot = true
  // return <div>{previous ? 'HOT' : 'COLD'}</div>
  return <div>hello</div>
}

export const config = {
  runtime: 'experimental-edge',
}
34 changes: 34 additions & 0 deletions bench/vercel/benchmark-app/next.config.js
@@ -0,0 +1,34 @@
const { StatsWriterPlugin } = require('webpack-stats-plugin')
const { BundleAnalyzerPlugin } = require('webpack-bundle-analyzer')

module.exports = {
  experimental: {
    appDir: true,
  },
  webpack: (config, options) => {
    const { nextRuntime = 'client' } = options
    if (process.env.ANALYZE) {
      if (nextRuntime === 'edge')
        config.plugins.push(
          new BundleAnalyzerPlugin({
            analyzerMode: 'static',
            openAnalyzer: true,
            reportFilename: options.isServer
              ? '../analyze/server.html'
              : './analyze/client.html',
          })
        )
      config.plugins.push(
        new StatsWriterPlugin({
          filename: `../webpack-stats-${nextRuntime}.json`,
          stats: {
            assets: true,
            chunks: true,
            modules: true,
          },
        })
      )
    }
    return config
  },
}
14 changes: 14 additions & 0 deletions bench/vercel/benchmark-app/package.json
@@ -0,0 +1,14 @@
{
  "name": "stats-app",
  "private": true,
  "license": "MIT",
  "dependencies": {
    "webpack-bundle-analyzer": "^4.6.1",
    "webpack-stats-plugin": "^1.1.0"
  },
  "scripts": {
    "dev": "next dev",
    "build": "next build",
    "start": "next start"
  }
}
20 changes: 20 additions & 0 deletions bench/vercel/benchmark-app/pages/index.js
@@ -0,0 +1,20 @@
// Module-scope flag: it persists for the lifetime of a single isolate,
// so the first request served after a cold start sees `hot === false`.
if (!('hot' in Math)) Math.hot = false

export default function page({ hot }) {
  return `${hot ? 'HOT' : 'COLD'}`
}

export async function getServerSideProps() {
  const wasHot = Math.hot
  Math.hot = true

  return {
    props: {
      hot: wasHot,
    },
  }
}

export const config = {
  runtime: 'experimental-edge',
}
99 changes: 99 additions & 0 deletions bench/vercel/chart.js
@@ -0,0 +1,99 @@
import downsampler from 'downsample-lttb'
import asciichart from 'asciichart'
import terminalSize from 'term-size'

const CHART_WIDTH = terminalSize().columns - 15 // space for the labels

function getMetrics(data) {
  const sorted = [...data].sort((a, b) => a - b)
  const getPercentile = (percentile) => {
    const index = Math.floor((sorted.length - 1) * percentile)
    return sorted[index]
  }
  return {
    hits: sorted.length,
    confidenceInterval: round(getConfidenceInterval(sorted)),
    median: getPercentile(0.5),
    avg: sorted.reduce((a, b) => a + b, 0) / sorted.length,
    p75: getPercentile(0.75),
    p95: getPercentile(0.95),
    p99: getPercentile(0.99),
    p25: getPercentile(0.25),
    min: sorted[0],
    max: sorted[sorted.length - 1],
  }
}

function round(num) {
  return Math.round(num * 100) / 100
}

// thanks Copilot
function getConfidenceInterval(data) {
  const n = data.length
  const m = data.reduce((a, b) => a + b) / n
  const s = Math.sqrt(
    data.map((x) => Math.pow(x - m, 2)).reduce((a, b) => a + b) / n
  )
  const z = 1.96 // 95% confidence
  const e = z * (s / Math.sqrt(n))
  return e
}
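
A quick worked example of the margin-of-error formula above (population standard deviation, as in the code):

```js
// data = [10, 20, 30]: m = 20, s = sqrt((100 + 0 + 100) / 3) ≈ 8.165
// e = 1.96 * s / sqrt(3) ≈ 9.24
console.log(1.96 * (Math.sqrt(200 / 3) / Math.sqrt(3))) // ≈ 9.24
```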

export function downsample(data, maxPoints) {
  const sortedData = [...data].sort((a, b) => a - b)
  return downsampler
    .processData(
      // the downsampler expects a 2d array of [x, y] values, so we need to add an index
      sortedData.map((p, i) => [p, i]),
      maxPoints
    )
    .map((p) => p[0])
}

export function printBenchmarkResults({ origin, head }, metricSelector) {
  const [processedOriginData, processedHeadData] = [origin, head].map(
    (results) => results.map(metricSelector).filter(Boolean)
  )

  const [originMetrics, headMetrics] = [
    processedOriginData,
    processedHeadData,
  ].map(getMetrics)

  const deltaMetrics = {
    min: headMetrics.min - originMetrics.min,
    max: headMetrics.max - originMetrics.max,
    avg: headMetrics.avg - originMetrics.avg,
    median: headMetrics.median - originMetrics.median,
    p95: headMetrics.p95 - originMetrics.p95,
    p99: headMetrics.p99 - originMetrics.p99,
    p75: headMetrics.p75 - originMetrics.p75,
    p25: headMetrics.p25 - originMetrics.p25,
  }

  console.table({
    origin: originMetrics,
    head: headMetrics,
    delta: deltaMetrics,
  })

  const [originData, headData] = [processedOriginData, processedHeadData].map(
    (data) =>
      downsample(
        data,
        Math.min(
          CHART_WIDTH,
          processedOriginData.length,
          processedHeadData.length
        )
      )
  )

  console.log(
    asciichart.plot([originData, headData], {
      height: 15,
      colors: [asciichart.blue, asciichart.red],
    })
  )
}
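
A minimal standalone usage sketch of `printBenchmarkResults` with synthetic data (not part of this PR), to show the expected input shape:

```js
import { printBenchmarkResults } from './chart.js'

// Two fake result sets where `head` is roughly 10ms faster than `origin`.
const fakeResults = (base) =>
  Array.from({ length: 200 }, () => ({
    cold: false,
    firstByte: base + Math.random() * 30,
  }))

printBenchmarkResults(
  { origin: fakeResults(90), head: fakeResults(80) },
  (r) => !r.cold && r.firstByte
)
```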
41 changes: 41 additions & 0 deletions bench/vercel/gen-request.js
@@ -0,0 +1,41 @@
import https from 'https'
import timer from '@szmarczak/http-timer'

// a wrapper around genAsyncRequest that will retry the request 5 times if it fails
export async function genRetryableRequest(url) {
  let retries = 0
  while (retries < 5) {
    try {
      return await genAsyncRequest(url)
    } catch (err) {}
    retries++
    await new Promise((r) => setTimeout(r, 1000))
  }
  throw new Error(`Failed to fetch ${url}, too many retries`)
}

// a wrapper around http.request that is enhanced with timing information
async function genAsyncRequest(url) {
  return new Promise((resolve, reject) => {
    const request = https.get(url)
    timer(request)
    request.on('response', (response) => {
      let body = ''
      response.on('data', (data) => {
        body += data
      })
      response.on('end', () => {
        resolve({
          ...response.timings.phases,
          cold: !body.includes('HOT'),
        })
      })
      response.on('error', (err) => {
        reject(err)
      })
    })
    request.on('error', (err) => {
      reject(err)
    })
  })
}
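
A minimal usage sketch (assumes an ESM context like the rest of this folder; the URL is a placeholder):

```js
import { genRetryableRequest } from './gen-request.js'

const timings = await genRetryableRequest('https://my-deployment.vercel.app/')
console.log(timings.firstByte, timings.cold)
```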
