chore: Rename to argument #381
Workflow file for this run

name: Rust
on:
  merge_group:
  pull_request:
    types: [opened, synchronize, reopened, ready_for_review]
    branches: [main, dev]
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
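# Note: on pull requests `github.head_ref` groups runs per branch, so a new push
# cancels the stale run; merge_group events have no head_ref and fall back to the
# unique run ID, so those runs are never cancelled by a later one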
jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
          - ubuntu-latest
      fail-fast: false
    env:
      RUSTFLAGS: -D warnings
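      # `-D warnings` promotes every compiler warning to a hard error in CI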
    steps:
      - uses: actions/checkout@v4
        with:
          repository: argumentcomputer/ci-workflows
      - uses: ./.github/actions/ci-env
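      # The shared ci-workflows repo above is only checked out for its `ci-env`
      # action; the second checkout below restores this project's repository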
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2
      # make sure benches don't bit-rot
      - name: build benches
        # TODO: --all-features
        run: cargo build --benches --release
      - name: cargo test
        # TODO: --all-features
        run: |
          cargo nextest run --release
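      # nextest does not run doctests, so they need a separate `cargo test --doc` pass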
      - name: Doctests
        run: |
          cargo test --doc
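      # Smoke test that the `abomonation` feature still builds and runs; the
      # example presumably round-trips Poseidon constants through serialization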
      - name: Abomonation feature
        run: cargo run --features "abomonation" --example poseidon_constants_serde
  clippy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt, clippy
      - uses: Swatinem/rust-cache@v2
      # See '.cargo/config' for list of enabled/disabled clippy lints
      - name: rustfmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all --check
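      # Note: actions-rs/cargo is archived; a plain `run: cargo fmt --all --check`
      # would be an equivalent replacement.
      # `xclippy` below is assumed to be a cargo alias from `.cargo/config`,
      # roughly `clippy --workspace --all-targets` plus the lint flags referenced
      # above (illustrative, not verified)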
      - name: cargo clippy
        run: cargo xclippy -D warnings
  msrv:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - name: Install cargo-msrv
        run: cargo install cargo-msrv
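      # `cargo msrv verify` builds the crate against the `rust-version` declared
      # in Cargo.toml to confirm the stated MSRV still compiles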
      - name: Check Rust MSRV
        run: cargo msrv verify
  # Check documentation links aren't broken
  link-checker:
    uses: argumentcomputer/ci-workflows/.github/workflows/links-check.yml@main
    with:
      fail-fast: true
  # Lint dependencies for licensing and auditing issues as per https://github.com/argumentcomputer/neptune/blob/main/deny.toml
  licenses-audits:
    uses: argumentcomputer/ci-workflows/.github/workflows/licenses-audits.yml@main
  # Runs the test suite on a self-hosted GPU machine with CUDA enabled
  test-cuda:
    name: Rust tests on CUDA
    runs-on: self-hosted
    env:
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: compute,utility
      EC_GPU_FRAMEWORK: cuda
    steps:
      - uses: actions/checkout@v4
        with:
          repository: argumentcomputer/ci-workflows
      - uses: ./.github/actions/ci-env
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2
      # Check we have access to the machine's Nvidia drivers
      - run: nvidia-smi
      # The `compute`/`sm` number corresponds to the Nvidia GPU architecture
      # In this case, the self-hosted machine uses the Ampere architecture, but we want this to be configurable
      # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/
      - name: Set env for CUDA compute
        run: echo "CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | sed 's/\.//g')" >> $GITHUB_ENV
      - name: Set env for EC_GPU
        run: echo 'EC_GPU_CUDA_NVCC_ARGS=--fatbin --gpu-architecture=sm_${{ env.CUDA_ARCH }} --generate-code=arch=compute_${{ env.CUDA_ARCH }},code=sm_${{ env.CUDA_ARCH }}' >> $GITHUB_ENV
      - run: echo "${{ env.EC_GPU_CUDA_NVCC_ARGS }}"
      # Check that CUDA is installed with a driver-compatible version
      # This must also be compatible with the GPU architecture, see above link
      - run: nvcc --version
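      # Default features are disabled in favor of an explicit list: the `cuda`
      # backend, the `pasta`/`bls` field implementations, and the arity features
      # (assumption: these are the GPU-relevant features from Cargo.toml)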
      - name: CUDA tests
        run: |
          cargo nextest run --release --no-default-features --features cuda,pasta,bls,arity2,arity4,arity8,arity11,arity16,arity24,arity36
  # Runs the test suite on a self-hosted GPU machine with CUDA and OpenCL enabled (i.e. using the OpenCL backend for Nvidia GPUs)
  test-opencl:
    name: Rust tests on OpenCL
    runs-on: self-hosted
    env:
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: compute,utility
      EC_GPU_FRAMEWORK: opencl
    steps:
      - uses: actions/checkout@v4
        with:
          repository: argumentcomputer/ci-workflows
      - uses: ./.github/actions/ci-env
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - uses: taiki-e/install-action@nextest
      - uses: Swatinem/rust-cache@v2
      - name: Install GPU deps
        run: |
          apt-get update
          apt-get -y install ocl-icd-opencl-dev
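      # Note: apt-get runs without sudo, which assumes the self-hosted runner
      # executes as root (e.g. inside a container)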
      # Check we have access to the machine's Nvidia drivers
      - run: nvidia-smi
      # The `compute`/`sm` number corresponds to the Nvidia GPU architecture
      # In this case, the self-hosted machine uses the Ampere architecture, but we want this to be configurable
      # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/
      - name: Set env for CUDA compute
        run: echo "CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | sed 's/\.//g')" >> $GITHUB_ENV
      - name: Set env for EC_GPU
        run: echo 'EC_GPU_CUDA_NVCC_ARGS=--fatbin --gpu-architecture=sm_${{ env.CUDA_ARCH }} --generate-code=arch=compute_${{ env.CUDA_ARCH }},code=sm_${{ env.CUDA_ARCH }}' >> $GITHUB_ENV
      - run: echo "${{ env.EC_GPU_CUDA_NVCC_ARGS }}"
      # Check that CUDA is installed with a driver-compatible version
      # This must also be compatible with the GPU architecture, see above link
      - run: nvcc --version
      # Check that an OpenCL platform and device are visible to the runner
      - run: clinfo
      - name: OpenCL tests
        run: |
          cargo nextest run --release --all-features