misc: add benchmarking script for edge rendering #40716

Merged · 6 commits · Sep 27, 2022
2 changes: 1 addition & 1 deletion .gitignore
Expand Up @@ -45,4 +45,4 @@ test-timings.json

# Cache
*.tsbuildinfo
.swc/
.swc/
8 changes: 8 additions & 0 deletions bench/vercel/.env.dev
@@ -0,0 +1,8 @@
# The Vercel team you want to deploy the project to
VERCEL_TEST_TEAM=

# The corresponding Vercel token
VERCEL_TEST_TOKEN=

# The Vercel project you want to deploy the test project to
VERCEL_TEST_PROJECT_NAME=
5 changes: 5 additions & 0 deletions bench/vercel/.gitignore
@@ -0,0 +1,5 @@
.vercel
.next
*.tgz
yarn.lock
.env
22 changes: 22 additions & 0 deletions bench/vercel/README.md
@@ -0,0 +1,22 @@
# Benchmarking Next.js on production

This script lets you measure performance metrics of your local Next.js build in production: it uploads your current build to Vercel with an example app and runs some basic benchmarks against it.

## Requirements

- the Vercel CLI

## Setup

Rename the provided `.env.dev` file to `.env` and fill in the required `VERCEL_TEST_TOKEN` and `VERCEL_TEST_TEAM` values. You can find and generate these on vercel.com.

Run `pnpm install`, then `pnpm bench`, and profit.

Note: if you made changes to Next.js, make sure to compile them first by running either `pnpm dev` or `pnpm build --force` at the root of the monorepo.

## How it works

- with the Vercel CLI, we set up a project
- we `npm pack` the local Next.js build and add it to the app
- we upload the app to Vercel and let it build
- once it builds, we get the deployment URL and run some tests against it (see the sketch below)
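
For illustration only, here is a minimal sketch of what the pack-and-deploy step could look like. This is not the actual `project-utils.js` from this PR; the paths, helper names, and CLI flags below are assumptions.

```js
// Hypothetical sketch, not the real project-utils.js.
// Paths, helper names, and CLI flags below are assumptions.
import { execSync } from 'child_process'

// Pack the locally built `next` package into a tarball the benchmark app can install.
function packLocalNext() {
  // assumes this runs from bench/vercel inside the Next.js monorepo
  return execSync('npm pack ../../packages/next', { encoding: 'utf8' }).trim()
}

// Deploy the benchmark app with the Vercel CLI and return the deployment URL it prints.
function deployApp(projectDir) {
  return execSync(
    `vercel deploy ${projectDir} --yes --token ${process.env.VERCEL_TEST_TOKEN} --scope ${process.env.VERCEL_TEST_TEAM}`,
    { encoding: 'utf8' }
  ).trim()
}
```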
70 changes: 70 additions & 0 deletions bench/vercel/bench.js
@@ -0,0 +1,70 @@
import { Command } from 'commander'
import console from 'console'

import chalk from 'chalk'

import PQueue from 'p-queue'
import { generateProjects, cleanupProjectFolders } from './project-utils.js'
import { printBenchmarkResults } from './chart.js'
import { genRetryableRequest } from './gen-request.js'

const program = new Command()

// run at most 25 benchmark requests in parallel
const queue = new PQueue({ concurrency: 25 })
// TTFB values above this threshold (in ms) are treated as outliers and excluded
const TTFB_OUTLIERS_THRESHOLD = 250

program.option('-p, --path <path>')

program.parse(process.argv)

const options = program.opts()

if (options.path) {
console.log('Running benchmark for path: ', options.path)
}

try {
const [originDeploymentURL, headDeploymentURL] = await generateProjects()

const originBenchmarkURL = `${originDeploymentURL}${options.path || ''}`
const headBenchmarkURL = `${headDeploymentURL}${options.path || ''}`

console.log(`Origin deployment URL: ${originBenchmarkURL}`)
console.log(`Head deployment URL: ${headBenchmarkURL}`)
console.log(`Running benchmark...`)

const originBenchResults = await runBenchmark(originBenchmarkURL)

const headBenchResults = await runBenchmark(headBenchmarkURL)

console.log(chalk.bold('Benchmark results for cold:'))
printBenchmarkResults(
{
origin: originBenchResults,
head: headBenchResults,
},
(r) => r.cold && r.firstByte <= TTFB_OUTLIERS_THRESHOLD && r.firstByte
)
console.log(chalk.bold('Benchmark results for hot:'))
printBenchmarkResults(
{
origin: originBenchResults,
head: headBenchResults,
},
(r) => !r.cold && r.firstByte <= TTFB_OUTLIERS_THRESHOLD && r.firstByte
)
} catch (err) {
console.log(chalk.red('Benchmark failed: ', err))
} finally {
await cleanupProjectFolders()
}

async function runBenchmark(url) {
return (
await Promise.all(
Array.from({ length: 500 }).map(() =>
queue.add(() => genRetryableRequest(url))
)
)
).filter(Boolean)
}
2 changes: 2 additions & 0 deletions bench/vercel/benchmark-app/.gitignore
@@ -0,0 +1,2 @@
.vercel
webpack-stats-client.json
14 changes: 14 additions & 0 deletions bench/vercel/benchmark-app/app/layout.js
@@ -0,0 +1,14 @@
import * as React from 'react'

export default function Root({ children }) {
return (
<html>
<head></head>
<body>{children}</body>
</html>
)
}

export const config = {
runtime: 'experimental-edge',
}
14 changes: 14 additions & 0 deletions bench/vercel/benchmark-app/app/rsc/page.js
@@ -0,0 +1,14 @@
import * as React from 'react'

// if (!('hot' in Math)) Math.hot = false

export default function Page() {
// const previous = Math.hot
// Math.hot = true
// return <div>{previous ? 'HOT' : 'COLD'}</div>
return <div>hello</div>
}

export const config = {
runtime: 'experimental-edge',
}
34 changes: 34 additions & 0 deletions bench/vercel/benchmark-app/next.config.js
@@ -0,0 +1,34 @@
const { StatsWriterPlugin } = require('webpack-stats-plugin')
const { BundleAnalyzerPlugin } = require('webpack-bundle-analyzer')

module.exports = {
experimental: {
appDir: true,
},
webpack: (config, options) => {
const { nextRuntime = 'client' } = options
if (process.env.ANALYZE) {
if (nextRuntime === 'edge')
config.plugins.push(
new BundleAnalyzerPlugin({
analyzerMode: 'static',
openAnalyzer: true,
reportFilename: options.isServer
? '../analyze/server.html'
: './analyze/client.html',
})
)
config.plugins.push(
new StatsWriterPlugin({
filename: `../webpack-stats-${nextRuntime}.json`,
stats: {
assets: true,
chunks: true,
modules: true,
},
})
)
}
return config
},
}
14 changes: 14 additions & 0 deletions bench/vercel/benchmark-app/package.json
@@ -0,0 +1,14 @@
{
"name": "stats-app",
"private": true,
"license": "MIT",
"dependencies": {
"webpack-bundle-analyzer": "^4.6.1",
"webpack-stats-plugin": "^1.1.0"
},
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start"
}
}
20 changes: 20 additions & 0 deletions bench/vercel/benchmark-app/pages/index.js
@@ -0,0 +1,20 @@
// Flag stored on the global Math object: it persists for the lifetime of a warm
// instance, so the first request after a cold start renders 'COLD' and later ones 'HOT'.
if (!('hot' in Math)) Math.hot = false

export default function Page({ hot }) {
return `${hot ? 'HOT' : 'COLD'}`
}

export async function getServerSideProps() {
const wasHot = Math.hot
Math.hot = true

return {
props: {
hot: wasHot,
},
}
}

export const config = {
runtime: 'experimental-edge',
}
99 changes: 99 additions & 0 deletions bench/vercel/chart.js
@@ -0,0 +1,99 @@
import downsampler from 'downsample-lttb'
import asciichart from 'asciichart'
import terminalSize from 'term-size'

const CHART_WIDTH = terminalSize().columns - 15 // space for the labels

function getMetrics(data) {
const sorted = [...data].sort((a, b) => a - b)
const getPercentile = (percentile) => {
const index = Math.floor((sorted.length - 1) * percentile)
return sorted[index]
}
return {
hits: sorted.length,
confidenceInterval: round(getConfidenceInterval(sorted)),
median: getPercentile(0.5),
avg: sorted.reduce((a, b) => a + b, 0) / sorted.length,
p75: getPercentile(0.75),
p95: getPercentile(0.95),
p99: getPercentile(0.99),
p25: getPercentile(0.25),
min: sorted[0],
max: sorted[sorted.length - 1],
}
}

function round(num) {
return Math.round(num * 100) / 100
}

// margin of error for a 95% confidence interval, assuming a normal distribution (thanks Copilot)
function getConfidenceInterval(data) {
const n = data.length
const m = data.reduce((a, b) => a + b) / n
const s = Math.sqrt(
data.map((x) => Math.pow(x - m, 2)).reduce((a, b) => a + b) / n
)
const z = 1.96 // 95% confidence
const e = z * (s / Math.sqrt(n))
return e
}

export function downsample(data, maxPoints) {
const sortedData = [...data].sort((a, b) => a - b)
return downsampler
.processData(
// the downsampler expects a 2d array of [x, y] values, so we need to add an index
sortedData.map((p, i) => [p, i]),
maxPoints
)
.map((p) => p[0])
}

export function printBenchmarkResults({ origin, head }, metricSelector) {
const [processedOriginData, processedHeadData] = [origin, head].map(
(results) => results.map(metricSelector).filter(Boolean)
)

const [originMetrics, headMetrics] = [
processedOriginData,
processedHeadData,
].map(getMetrics)

const deltaMetrics = {
min: headMetrics.min - originMetrics.min,
max: headMetrics.max - originMetrics.max,
avg: headMetrics.avg - originMetrics.avg,
median: headMetrics.median - originMetrics.median,
p95: headMetrics.p95 - originMetrics.p95,
p99: headMetrics.p99 - originMetrics.p99,
p75: headMetrics.p75 - originMetrics.p75,
p25: headMetrics.p25 - originMetrics.p25,
}

console.table({
origin: originMetrics,
head: headMetrics,
delta: deltaMetrics,
})

const [originData, headData] = [processedOriginData, processedHeadData].map(
(data) =>
downsample(
data,
Math.min(
CHART_WIDTH,
processedOriginData.length,
processedHeadData.length
)
)
)

console.log(
asciichart.plot([originData, headData], {
height: 15,
colors: [asciichart.blue, asciichart.red],
})
)
}
41 changes: 41 additions & 0 deletions bench/vercel/gen-request.js
@@ -0,0 +1,41 @@
import https from 'https'
import timer from '@szmarczak/http-timer'

// a wrapper around genAsyncRequest that will retry the request 5 times if it fails
export async function genRetryableRequest(url) {
let retries = 0
while (retries < 5) {
try {
return await genAsyncRequest(url)
} catch (err) {} // ignore the failure and retry after a short delay
retries++
await new Promise((r) => setTimeout(r, 1000))
}
throw new Error(`Failed to fetch ${url}, too many retries`)
}

// a wrapper around http.request that is enhanced with timing information
async function genAsyncRequest(url) {
return new Promise((resolve, reject) => {
const request = https.get(url)
timer(request)
request.on('response', (response) => {
let body = ''
response.on('data', (data) => {
body += data
})
response.on('end', () => {
resolve({
...response.timings.phases,
// the benchmark app renders 'HOT' only on warm invocations (see the Math.hot flag)
cold: !body.includes('HOT'),
})
})
response.on('error', (err) => {
reject(err)
})
})
request.on('error', (err) => {
reject(err)
})
})
}