# Workflow file captured from the GitHub Actions run page for PR #255:
# "Refactor more hash cache logic from Lurk Alpha".
# (Web-page chrome — "Skip to content", duplicated titles, "Workflow file for
# this run" — converted to this comment header so the file parses as YAML.)

# Run final tests only when attempting to merge, shown as skipped status checks beforehand
name: Merge group tests

# Trigger on PR activity (where the jobs self-skip via their `if:` guards, so they
# appear as pending/skipped required checks) and on merge-queue entry, where they run.
on:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    branches: [main]
  merge_group:

# Cancel superseded runs for the same ref; fall back to run_id so merge_group
# runs (which have no head_ref) never share a concurrency group.
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
jobs:
  # Expensive (`#[ignore]`d) tests on x86_64 Linux. The `if:` guard is false for
  # all the pull_request activity types this workflow subscribes to, so on PRs the
  # job shows as skipped and only actually runs for merge_group events.
  linux-ignored:
    if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
    runs-on: buildjet-16vcpu-ubuntu-2204
    env:
      RUSTFLAGS: -D warnings
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      # NOTE(review): actions-rs/toolchain is archived/unmaintained; consider
      # migrating to dtolnay/rust-toolchain across all jobs in one change.
      - uses: actions-rs/toolchain@v1
      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2
      - name: Linux Tests
        run: |
          cargo nextest run --profile ci --workspace --cargo-profile dev-ci --run-ignored ignored-only -E 'all() - test(test_demo)'
linux-arm:
if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
runs-on: buildjet-16vcpu-ubuntu-2204-arm
env:
RUSTFLAGS: -D warnings
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions-rs/toolchain@v1
- uses: taiki-e/install-action@nextest
- uses: Swatinem/rust-cache@v2
- name: Linux Tests
run: |
cargo nextest run --profile ci --workspace --cargo-profile dev-ci
- name: Linux Gadget Tests w/o debug assertions
run: |
cargo nextest run --profile ci --workspace --cargo-profile dev-no-assertions -E 'test(circuit::gadgets)'
mac-m1:
if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
runs-on: macos-latest-xlarge
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions-rs/toolchain@v1
- uses: taiki-e/install-action@nextest
- uses: Swatinem/rust-cache@v2
- name: Linux Tests
run: |
cargo nextest run --profile ci --workspace --cargo-profile dev-ci
- name: Linux Gadget Tests w/o debug assertions
run: |
cargo nextest run --profile ci --workspace --cargo-profile dev-no-assertions -E 'test(circuit::gadgets)'
# TODO: Make this a required status check
# Run comparative benchmark against main, reject on regression
gpu-benchmark:
if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
name: Run fibonacci bench on GPU
runs-on: [self-hosted, gpu-bench]
steps:
# TODO: Factor out GPU setup into an action or into justfile, it's used in 4 places
# Set up GPU
# Check we have access to the machine's Nvidia drivers
- run: nvidia-smi
# Check that CUDA is installed with a driver-compatible version
# This must also be compatible with the GPU architecture, see above link
- run: nvcc --version
- uses: actions/checkout@v4
# Install dependencies
- uses: actions-rs/toolchain@v1
- uses: Swatinem/rust-cache@v2
- uses: taiki-e/install-action@v2
with:
tool: just@1.15
- name: Install criterion
run: |
cargo install cargo-criterion
cargo install criterion-table
# Checkout base branch for comparative bench
- uses: actions/checkout@v4
with:
ref: main
path: main
# Copy the script so the base can bench with the same parameters
- name: Copy source script to base branch
run: cp justfile bench.env ../main/benches
working-directory: ${{ github.workspace }}/benches
- name: Set base ref variable
run: echo "BASE_REF=$(git rev-parse HEAD)" | tee -a $GITHUB_ENV
working-directory: ${{ github.workspace }}/main
- name: Set bench output format type
run: echo "LURK_BENCH_OUTPUT=commit-comment" | tee -a $GITHUB_ENV
- name: Run GPU bench on base branch
run: just --dotenv-filename bench.env gpu-bench fibonacci
working-directory: ${{ github.workspace }}/main/benches
- name: Copy bench output to PR branch
run: |
mkdir -p target
cp -r main/target/criterion target
cp main/${{ env.BASE_REF }}.json .
- name: Run GPU bench on PR branch
run: just --dotenv-filename bench.env gpu-bench fibonacci
working-directory: ${{ github.workspace }}/benches
- name: copy the benchmark template and prepare it with data
run: |
cp .github/tables.toml .
# Get GPU name
GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader,nounits | tail -n1)
# Get CPU model
CPU_MODEL=$(grep '^model name' /proc/cpuinfo | head -1 | awk -F ': ' '{ print $2 }')
# Get total RAM in GB
TOTAL_RAM=$(grep MemTotal /proc/meminfo | awk '{$2=$2/(1024^2); print $2, "GB RAM";}')
# Use conditionals to ensure that only non-empty variables are inserted
[[ ! -z "$GPU_NAME" ]] && sed -i "/^\"\"\"$/i $GPU_NAME" tables.toml
[[ ! -z "$CPU_MODEL" ]] && sed -i "/^\"\"\"$/i $CPU_MODEL" tables.toml
[[ ! -z "$TOTAL_RAM" ]] && sed -i "/^\"\"\"$/i $TOTAL_RAM" tables.toml
working-directory: ${{ github.workspace }}
# Create a `criterion-table` and write in commit comment
- name: Run `criterion-table`
run: cat ${{ env.BASE_REF }}.json ${{ github.sha }}.json | criterion-table > BENCHMARKS.md
- name: Write bench on commit comment
uses: peter-evans/commit-comment@v3
with:
body-path: BENCHMARKS.md
# TODO: Use jq for JSON parsing if needed
# Check for benchmark regression based on Criterion's configured noise threshold
- name: Performance regression check
id: check-regression
run: echo "regress_count=$(grep -c 'Regressed' ${{ github.sha }}.json)" | tee -a $GITHUB_OUTPUT
# Fail job if regression found
- uses: actions/github-script@v7
if: ${{ steps.check-regression.outputs.regress_count > 0 }}
with:
script: |
core.setFailed('Fibonacci bench regression detected')