Skip to content

Commit

Permalink
Merge branch 'main' into fix/vercel-request
Browse files Browse the repository at this point in the history
  • Loading branch information
natemoo-re authored Mar 1, 2023
2 parents 6b8c05d + 2678264 commit a7d8310
Show file tree
Hide file tree
Showing 35 changed files with 760 additions and 70 deletions.
5 changes: 5 additions & 0 deletions .changeset/cold-mirrors-joke.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
'astro': patch
---

Run `astro sync` in build mode
6 changes: 6 additions & 0 deletions .eslintrc.cjs
Original file line number Diff line number Diff line change
Expand Up @@ -38,5 +38,11 @@ module.exports = {
'no-console': ['error', { allow: ['warn', 'error', 'info', 'debug'] }],
},
},
{
files: ['benchmark/**/*.js'],
rules: {
'no-console': 'off',
},
},
],
};
51 changes: 26 additions & 25 deletions .github/workflows/benchmark.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ name: Benchmark
on:
issue_comment:
types: [created]
workflow_dispatch:

env:
TURBO_TOKEN: ${{ secrets.TURBO_TOKEN }}
Expand All @@ -16,19 +17,14 @@ jobs:
permissions:
contents: read
outputs:
PR-BENCH-16: ${{ steps.benchmark-pr.outputs.BENCH_RESULT16 }}
PR-BENCH-18: ${{ steps.benchmark-pr.outputs.BENCH_RESULT18 }}
MAIN-BENCH-16: ${{ steps.benchmark-main.outputs.BENCH_RESULT16 }}
MAIN-BENCH-18: ${{ steps.benchmark-main.outputs.BENCH_RESULT18 }}
strategy:
matrix:
node-version: [16, 18]
PR-BENCH: ${{ steps.benchmark-pr.outputs.BENCH_RESULT }}
MAIN-BENCH: ${{ steps.benchmark-main.outputs.BENCH_RESULT }}
steps:
# https://github.com/actions/checkout/issues/331#issuecomment-1438220926
- uses: actions/checkout@v3
with:
persist-credentials: false
ref: ${{github.event.pull_request.head.sha}}
repository: ${{github.event.pull_request.head.repo.full_name}}
ref: refs/pull/${{ github.event.issue.number }}/head

- name: Setup PNPM
uses: pnpm/action-setup@v2
Expand All @@ -45,13 +41,22 @@ jobs:
- name: Build Packages
run: pnpm run build

- name: Get bench command
id: bench-command
run: |
benchcmd=$(echo "${{ github.event.comment.body }}" | grep '!bench' | awk -F ' ' '{print $2}')
echo "bench=$benchcmd" >> $GITHUB_OUTPUT
shell: bash

- name: Run benchmark
id: benchmark-pr
run: |
pnpm run --silent benchmark 2> ./bench-result.md
result=$(awk '/requests in/' ./bench-result.md)
echo "::set-output name=BENCH_RESULT${{matrix.node-version}}::$result"
echo "$result"
result=$(pnpm run --silent benchmark ${{ steps.bench-command.outputs.bench }})
processed=$(node ./benchmark/ci-helper.js "$result")
echo "BENCH_RESULT<<BENCHEOF" >> $GITHUB_OUTPUT
echo "### PR Benchmark" >> $GITHUB_OUTPUT
echo "$processed" >> $GITHUB_OUTPUT
echo "BENCHEOF" >> $GITHUB_OUTPUT
shell: bash

# main benchmark
Expand All @@ -70,10 +75,12 @@ jobs:
- name: Run benchmark
id: benchmark-main
run: |
pnpm run --silent benchmark 2> ./bench-result.md
result=$(awk '/requests in/' ./bench-result.md)
echo "::set-output name=BENCH_RESULT${{matrix.node-version}}::$result"
echo "$result"
result=$(pnpm run --silent benchmark ${{ steps.bench-command.outputs.bench }})
processed=$(node ./benchmark/ci-helper.js "$result")
echo "BENCH_RESULT<<BENCHEOF" >> $GITHUB_OUTPUT
echo "### Main Benchmark" >> $GITHUB_OUTPUT
echo "$processed" >> $GITHUB_OUTPUT
echo "BENCHEOF" >> $GITHUB_OUTPUT
shell: bash

output-benchmark:
Expand All @@ -89,12 +96,6 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
pr_number: ${{ github.event.issue.number }}
message: |
**Node**: 16
**PR**: ${{ needs.benchmark.outputs.PR-BENCH-16 }}
**MAIN**: ${{ needs.benchmark.outputs.MAIN-BENCH-16 }}
---
${{ needs.benchmark.outputs.PR-BENCH }}
**Node**: 18
**PR**: ${{ needs.benchmark.outputs.PR-BENCH-18 }}
**MAIN**: ${{ needs.benchmark.outputs.MAIN-BENCH-18 }}
${{ needs.benchmark.outputs.MAIN-BENCH }}
14 changes: 7 additions & 7 deletions .github/workflows/check-merge.yml
Original file line number Diff line number Diff line change
Expand Up @@ -62,12 +62,12 @@ jobs:
--header 'content-type: application/json' \
-d '["semver minor"]'
- name: Send PR review
if: steps.find-blockers.outputs.found == 'true'
run: | # approve the pull request
curl --request POST \
--url https://api.github.com/repos/${{github.repository}}/pulls/${{github.event.number}}/reviews \
--header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
--header 'content-type: application/json' \
-d '{"event":"REQUEST_CHANGES","body":"This PR is blocked because it contains a `minor` changeset. A reviewer will merge this at the next release if approved."}'
uses: peter-evans/create-or-update-comment@v2
continue-on-error: true
with:
issue-number: ${{ github.event.issue.number }}
body: |
This PR is blocked because it contains a `minor` changeset. A reviewer will merge this at the next release if approved.
edit-mode: replace
1 change: 1 addition & 0 deletions .github/workflows/check.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
name: Examples astro check

on:
workflow_dispatch:
push:
branches:
- main
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
name: CI

on:
workflow_dispatch:
push:
branches:
- main
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/format.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
name: "Format Code"

on:
workflow_dispatch:
push:
branches:
- main
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
name: Main Checks

on:
workflow_dispatch:
push:
branches:
- main
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/scripts.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
name: Scripts

on:
workflow_dispatch:
pull_request:
branches:
- "main"
Expand Down
1 change: 1 addition & 0 deletions .github/workflows/snapshot-release.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
name: Create a Snapshot Release

on:
workflow_dispatch:
issue_comment:
types: [created]

Expand Down
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ dist/
_site/
scripts/smoke/*-main/
scripts/memory/project/src/pages/
benchmark/projects/
benchmark/results/
*.log
package-lock.json
.turbo/
Expand Down
5 changes: 5 additions & 0 deletions benchmark/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# benchmark

Astro's main benchmark suite. It exposes the `astro-benchmark` CLI command. Run `astro-benchmark --help` to see all available commands!

If you'd like to understand how the benchmark works, check out the other READMEs in the subfolders.
7 changes: 7 additions & 0 deletions benchmark/bench/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# bench

This `bench` folder contains different benchmarking files that you can run via `astro-benchmark <bench-file-name>`, e.g. `astro-benchmark memory`. Files that start with an underscore are not benchmarking files.

Benchmarking files will run against a project to measure its performance, and write the results down as JSON in the `results` folder. The `results` folder is gitignored and its result files can be safely deleted if you're not using them.

You can duplicate `_template.js` to start a new benchmark test. All shared utilities are kept in `_util.js`.
12 changes: 12 additions & 0 deletions benchmark/bench/_template.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
/** Default project to run for this benchmark if not specified on the CLI */
export const defaultProject = 'project-name';

/**
 * Run benchmark on `projectDir` and write results to `outputFile`.
 * This file is a template: duplicate it in `bench/` to start a new benchmark
 * (see `bench/README.md`); shared utilities live in `./_util.js`.
 * Use `console.log` to report the results too. Logs that start with 10 `=`
 * and end with 10 `=` will be extracted by CI to display in the PR comment
 * (the extraction lives in `benchmark/ci-helper.js`).
 * Usually after the first 10 `=` you'll want to add a title like `#### Test`.
 * @param {URL} projectDir - Root URL of the project to benchmark.
 * @param {URL} outputFile - URL of the file to write raw results to.
 */
export async function run(projectDir, outputFile) {}
3 changes: 3 additions & 0 deletions benchmark/bench/_util.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
import { createRequire } from 'module';

// CJS-style resolver anchored at this file's location, used to locate the
// `astro` package entry point installed in the workspace.
const cjsRequire = createRequire(import.meta.url);

/** Resolved path of the `astro` package entry point. */
export const astroBin = cjsRequire.resolve('astro');
58 changes: 58 additions & 0 deletions benchmark/bench/memory.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
import fs from 'fs/promises';
import { fileURLToPath } from 'url';
import { execaCommand } from 'execa';
import { markdownTable } from 'markdown-table';
import { astroBin } from './_util.js';

/** @typedef {Record<string, import('../../packages/astro/src/core/config/timer').Stat>} AstroTimerStat */

/** Default project to run for this benchmark if not specified */
export const defaultProject = 'memory-default';

/**
 * Build the project with a constrained heap and report Astro's timer stats.
 * Raw JSON stats land in `outputFile`; a Markdown preview is logged for CI.
 * @param {URL} projectDir - Root of the project to build.
 * @param {URL} outputFile - Destination for the raw JSON timer stats.
 */
export async function run(projectDir, outputFile) {
  const projectRoot = fileURLToPath(projectDir);
  const resultPath = fileURLToPath(outputFile);

  console.log('Building and benchmarking...');
  // Cap the old-space heap at 256 MB and expose `gc` so the build runs under
  // memory pressure; Astro writes its per-stage stats to ASTRO_TIMER_PATH.
  await execaCommand(`node --expose-gc --max_old_space_size=256 ${astroBin} build`, {
    cwd: projectRoot,
    stdio: 'inherit',
    env: { ASTRO_TIMER_PATH: resultPath },
  });

  console.log('Raw results written to', resultPath);

  console.log('Result preview:');
  console.log('='.repeat(10));
  console.log(`#### Memory\n\n`);
  const rawStats = await fs.readFile(resultPath, 'utf-8');
  console.log(printResult(JSON.parse(rawStats)));
  console.log('='.repeat(10));

  console.log('Done!');
}

/**
 * Render the collected timer stats as a Markdown table: one row per build
 * stage, times in seconds, memory in MB, numeric columns right-aligned.
 * @param {AstroTimerStat} output
 */
function printResult(output) {
  const header = ['', 'Elapsed time (s)', 'Memory used (MB)', 'Final memory (MB)'];
  const toMB = (bytes) => (bytes / 1024 / 1024).toFixed(2);
  const rows = Object.entries(output).map(([name, stat]) => [
    name,
    (stat.elapsedTime / 1000).toFixed(2),
    toMB(stat.heapUsedChange),
    toMB(stat.heapUsedTotal),
  ]);
  return markdownTable([header, ...rows], { align: ['l', 'r', 'r', 'r'] });
}
85 changes: 85 additions & 0 deletions benchmark/bench/server-stress.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
import fs from 'fs/promises';
import { fileURLToPath } from 'url';
import autocannon from 'autocannon';
import { execaCommand } from 'execa';
import { waitUntilBusy } from 'port-authority';
import { astroBin } from './_util.js';

const port = 4321;

export const defaultProject = 'server-stress-default';

/**
* @param {URL} projectDir
* @param {URL} outputFile
*/
export async function run(projectDir, outputFile) {
const root = fileURLToPath(projectDir);

console.log('Building...');
await execaCommand(`${astroBin} build`, {
cwd: root,
stdio: 'inherit',
});

console.log('Previewing...');
const previewProcess = execaCommand(`${astroBin} preview --port ${port}`, {
cwd: root,
stdio: 'inherit',
});

console.log('Waiting for server ready...');
await waitUntilBusy(port, { timeout: 5000 });

console.log('Running benchmark...');
const result = await benchmarkCannon();

console.log('Killing server...');
if (!previewProcess.kill('SIGTERM')) {
console.warn('Failed to kill server process id:', previewProcess.pid);
}

console.log('Writing results to', fileURLToPath(outputFile));
await fs.writeFile(outputFile, JSON.stringify(result, null, 2));

console.log('Result preview:');
console.log('='.repeat(10));
console.log(`#### Server stress\n\n`);
let text = autocannon.printResult(result);
// Truncate the logs in CI so that the generated comment from the `!bench` command
// is shortened. Also we only need this information when comparing runs.
// Full log example: https://github.com/mcollina/autocannon#command-line
if (process.env.CI) {
text = text.match(/^.*?requests in.*?read$/m)?.[0];
}
console.log(text);
console.log('='.repeat(10));

console.log('Done!');
}

/**
 * Run autocannon against the local preview server and resolve with its
 * aggregated result once the run completes.
 * @returns {Promise<import('autocannon').Result>}
 */
async function benchmarkCannon() {
  return new Promise((resolve, reject) => {
    const instance = autocannon(
      {
        // `astro preview` serves plain HTTP on localhost, so target http://
        // (an https:// URL would fail the TLS handshake against this server).
        url: `http://localhost:${port}`,
        connections: 100,
        duration: 30,
        pipelining: 10,
      },
      (err, result) => {
        if (err) {
          reject(err);
        } else {
          // @ts-expect-error untyped but documented
          instance.stop();
          resolve(result);
        }
      }
    );
    // Track progress without rendering the big results table; the summary is
    // printed by the caller.
    autocannon.track(instance, { renderResultsTable: false });
  });
}
13 changes: 13 additions & 0 deletions benchmark/ci-helper.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
// This script helps extract the benchmark logs that are between the `==========` lines.
// They are a convention defined in the `./bench/_template.js` file, which are used to log
// out with the `!bench` command. See `/.github/workflows/benchmark.yml` to see how it's used.

// Guard against a missing CLI argument: without this, `exec` would coerce
// `undefined` to the string "undefined" instead of scanning empty input.
const benchLogs = process.argv[2] ?? '';
// `s` lets `.` span newlines; `g` with the non-greedy capture walks each
// `==========`-delimited section in order.
const resultRegex = /==========(.*?)==========/gs;

let processedLog = '';
let m;
while ((m = resultRegex.exec(benchLogs))) {
  processedLog += m[1] + '\n';
}

console.log(processedLog);
Loading

0 comments on commit a7d8310

Please sign in to comment.