Skip to content

Commit

Permalink
Merge branch 'lurk-lab:master' into master
Browse files Browse the repository at this point in the history
  • Loading branch information
samuelburnham authored Oct 25, 2023
2 parents 79f9c5c + aa67bdb commit e7d1b1a
Show file tree
Hide file tree
Showing 35 changed files with 1,279 additions and 976 deletions.
26 changes: 10 additions & 16 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ bellpepper-core = { workspace = true }
bincode = { workspace = true }
blstrs = { workspace = true }
bytecount = "=0.6.4"
camino = { workspace = true }
camino = { workspace = true, features = ["serde1"] }
clap = { workspace = true, features = ["derive"] }
config = "0.13.3"
dashmap = "5.5.0"
Expand All @@ -34,7 +34,7 @@ itertools = "0.9"
lurk-macros = { path = "lurk-macros" }
lurk-metrics = { path = "lurk-metrics" }
metrics = { workspace = true }
neptune = { workspace = true, features = ["arity2","arity4","arity8","arity16","pasta","bls"] }
neptune = { workspace = true, features = ["arity2", "arity4", "arity8", "arity16", "pasta", "bls"] }
nom = "7.1.3"
nom_locate = "4.1.0"
nova = { workspace = true }
Expand All @@ -58,7 +58,7 @@ serde_repr = "0.1.14"
tap = "1.0.1"
stable_deref_trait = "1.2.0"
thiserror = { workspace = true }
abomonation = { workspace = true}
abomonation = { workspace = true }
abomonation_derive = { git = "https://github.com/lurk-lab/abomonation_derive.git" }
crossbeam = "0.8.2"
byteorder = "1.4.3"
Expand All @@ -69,7 +69,7 @@ ansi_term = "0.12.1"
tracing = { workspace = true }
tracing-texray = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
elsa = { version = "1.9.0", git="https://github.com/lurk-lab/elsa", branch = "sync_index_map", features = ["indexmap"] }
elsa = { version = "1.9.0", git = "https://github.com/lurk-lab/elsa", branch = "sync_index_map", features = ["indexmap"] }
arc-swap = "1.6.0"

[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
Expand All @@ -78,7 +78,10 @@ pasta-msm = { workspace = true }
proptest = { workspace = true }
proptest-derive = { workspace = true }
rand = "0.8.5"
rustyline = { version = "11.0", features = ["derive", "with-file-history"], default-features = false }
rustyline = { version = "11.0", features = [
"derive",
"with-file-history",
], default-features = false }
home = "0.5.5"

[target.'cfg(target_arch = "wasm32")'.dependencies]
Expand Down Expand Up @@ -110,12 +113,7 @@ vergen = { version = "8", features = ["build", "git", "gitcl"] }

[workspace]
resolver = "2"
members = [
"clutch",
"fcomm",
"lurk-macros",
"lurk-metrics"
]
members = ["clutch", "fcomm", "lurk-macros", "lurk-metrics"]

# Dependencies that should be kept in sync through the whole workspace
[workspace.dependencies]
Expand Down Expand Up @@ -177,10 +175,6 @@ harness = false
name = "fibonacci"
harness = false

[[bench]]
name = "fibonacci_lem"
harness = false

[[bench]]
name = "synthesis"
harness = false
Expand All @@ -203,4 +197,4 @@ harness = false

[patch.crates-io]
# This is needed to ensure halo2curves, which imports pasta-curves, uses the *same* traits in bn256_grumpkin
pasta_curves = { git="https://github.com/lurk-lab/pasta_curves", branch="dev" }
pasta_curves = { git = "https://github.com/lurk-lab/pasta_curves", branch = "dev" }
14 changes: 14 additions & 0 deletions benches/common/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
use camino::Utf8PathBuf;
use lurk::cli::paths::lurk_default_dir;
use lurk::config::lurk_config;
use once_cell::sync::Lazy;

/// Location of the config file the benchmarks read their settings from.
/// Edit this path to use a config file specific to benchmarking,
/// e.g. `Utf8PathBuf::from("/home/<user>/lurk-rs/lurk-bench.toml");`
pub static BENCH_CONFIG_PATH: Lazy<Utf8PathBuf> = Lazy::new(|| {
    let base = lurk_default_dir();
    base.join("lurk.toml")
});

/// Initializes the global Lurk configuration from [`BENCH_CONFIG_PATH`].
/// Call once at the start of each benchmark before touching config-dependent
/// state (e.g. public-parameter caching).
pub fn set_bench_config() {
    lurk_config(Some(&*BENCH_CONFIG_PATH), None);
}
118 changes: 37 additions & 81 deletions benches/end2end.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
use camino::Utf8Path;
use criterion::{
black_box, criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, SamplingMode,
};
use pasta_curves::pallas::Scalar as Fr;
use pasta_curves::pallas::Scalar as Fq;

use lurk::{
circuit::circuit_frame::MultiFrame,
Expand All @@ -22,11 +21,12 @@ use lurk::{
state::State,
store::Store,
};
use pasta_curves::pallas;
use std::time::Duration;
use std::{cell::RefCell, rc::Rc, sync::Arc};

const PUBLIC_PARAMS_PATH: &str = "/var/tmp/lurk_benches/public_params";
mod common;
use common::set_bench_config;

const DEFAULT_REDUCTION_COUNT: usize = 10;

fn go_base<F: LurkField>(store: &Store<F>, state: Rc<RefCell<State>>, a: u64, b: u64) -> Ptr<F> {
Expand Down Expand Up @@ -58,8 +58,9 @@ fn end2end_benchmark(c: &mut Criterion) {
.measurement_time(Duration::from_secs(120))
.sample_size(10);

set_bench_config();
let limit = 1_000_000_000;
let lang_pallas = Lang::<pallas::Scalar, Coproc<pallas::Scalar>>::new();
let lang_pallas = Lang::<Fq, Coproc<Fq>>::new();
let lang_pallas_rc = Arc::new(lang_pallas.clone());
let reduction_count = DEFAULT_REDUCTION_COUNT;

Expand All @@ -75,11 +76,7 @@ fn end2end_benchmark(c: &mut Criterion) {
true,
Kind::NovaPublicParams,
);
let pp = public_parameters::public_params::<_, _, MultiFrame<'_, _, _>>(
&instance,
Utf8Path::new(PUBLIC_PARAMS_PATH),
)
.unwrap();
let pp = public_parameters::public_params::<_, _, MultiFrame<'_, _, _>>(&instance).unwrap();

let size = (10, 0);
let benchmark_id = BenchmarkId::new("end2end_go_base_nova", format!("_{}_{}", size.0, size.1));
Expand All @@ -88,7 +85,7 @@ fn end2end_benchmark(c: &mut Criterion) {

group.bench_with_input(benchmark_id, &size, |b, &s| {
b.iter(|| {
let ptr = go_base::<pallas::Scalar>(&store, state.clone(), s.0, s.1);
let ptr = go_base::<Fq>(&store, state.clone(), s.0, s.1);
let _result = prover
.evaluate_and_prove(&pp, ptr, env, &store, limit, &lang_pallas_rc)
.unwrap();
Expand All @@ -107,8 +104,7 @@ fn store_benchmark(c: &mut Criterion) {
.measurement_time(Duration::from_secs(5))
.sample_size(60);

let bls12_store = Store::<Fr>::default();
let pallas_store = Store::<pallas::Scalar>::default();
let pallas_store = Store::<Fq>::default();

let state = State::init_lurk_state().rccell();

Expand All @@ -117,18 +113,10 @@ fn store_benchmark(c: &mut Criterion) {
for size in sizes {
let parameter_string = format!("_{}_{}", size.0, size.1);

let bls12_id = BenchmarkId::new("store_go_base_bls12", &parameter_string);
group.bench_with_input(bls12_id, &size, |b, &s| {
b.iter(|| {
let result = go_base::<Fr>(&bls12_store, state.clone(), s.0, s.1);
black_box(result)
})
});

let pasta_id = BenchmarkId::new("store_go_base_pallas", &parameter_string);
group.bench_with_input(pasta_id, &size, |b, &s| {
b.iter(|| {
let result = go_base::<pallas::Scalar>(&pallas_store, state.clone(), s.0, s.1);
let result = go_base::<Fq>(&pallas_store, state.clone(), s.0, s.1);
black_box(result)
})
});
Expand All @@ -146,8 +134,7 @@ fn hydration_benchmark(c: &mut Criterion) {
.measurement_time(Duration::from_secs(5))
.sample_size(60);

let bls12_store = Store::<Fr>::default();
let pallas_store = Store::<pallas::Scalar>::default();
let pallas_store = Store::<Fq>::default();

let state = State::init_lurk_state().rccell();

Expand All @@ -156,18 +143,10 @@ fn hydration_benchmark(c: &mut Criterion) {
for size in sizes {
let parameter_string = format!("_{}_{}", size.0, size.1);

{
let benchmark_id = BenchmarkId::new("hydration_go_base_bls12", &parameter_string);
group.bench_with_input(benchmark_id, &size, |b, &s| {
let _ptr = go_base::<Fr>(&bls12_store, state.clone(), s.0, s.1);
b.iter(|| bls12_store.hydrate_scalar_cache())
});
}

{
let benchmark_id = BenchmarkId::new("hydration_go_base_pallas", &parameter_string);
group.bench_with_input(benchmark_id, &size, |b, &s| {
let _ptr = go_base::<pallas::Scalar>(&pallas_store, state.clone(), s.0, s.1);
let _ptr = go_base::<Fq>(&pallas_store, state.clone(), s.0, s.1);
b.iter(|| pallas_store.hydrate_scalar_cache())
});
}
Expand All @@ -186,10 +165,8 @@ fn eval_benchmark(c: &mut Criterion) {
.sample_size(60);

let limit = 1_000_000_000;
let lang_bls12 = Lang::<Fr, Coproc<Fr>>::new();
let lang_pallas = Lang::<pallas::Scalar, Coproc<pallas::Scalar>>::new();
let bls12_store = Store::<Fr>::default();
let pallas_store = Store::<pallas::Scalar>::default();
let lang_pallas = Lang::<Fq, Coproc<Fq>>::new();
let pallas_store = Store::<Fq>::default();

let state = State::init_lurk_state().rccell();

Expand All @@ -198,27 +175,10 @@ fn eval_benchmark(c: &mut Criterion) {
for size in sizes {
let parameter_string = format!("_{}_{}", size.0, size.1);

{
let benchmark_id = BenchmarkId::new("eval_go_base_bls12", &parameter_string);
group.bench_with_input(benchmark_id, &size, |b, &s| {
let ptr = go_base::<Fr>(&bls12_store, state.clone(), s.0, s.1);
b.iter(|| {
Evaluator::new(
ptr,
empty_sym_env(&bls12_store),
&bls12_store,
limit,
&lang_bls12,
)
.eval()
})
});
}

{
let benchmark_id = BenchmarkId::new("eval_go_base_pallas", &parameter_string);
group.bench_with_input(benchmark_id, &size, |b, &s| {
let ptr = go_base::<pallas::Scalar>(&pallas_store, state.clone(), s.0, s.1);
let ptr = go_base::<Fq>(&pallas_store, state.clone(), s.0, s.1);
b.iter(|| {
Evaluator::new(
ptr,
Expand Down Expand Up @@ -279,8 +239,9 @@ fn prove_benchmark(c: &mut Criterion) {
.measurement_time(Duration::from_secs(120))
.sample_size(10);

set_bench_config();
let limit = 1_000_000_000;
let lang_pallas = Lang::<pallas::Scalar, Coproc<pallas::Scalar>>::new();
let lang_pallas = Lang::<Fq, Coproc<Fq>>::new();
let lang_pallas_rc = Arc::new(lang_pallas.clone());
let store = Store::default();
let reduction_count = DEFAULT_REDUCTION_COUNT;
Expand All @@ -289,20 +250,18 @@ fn prove_benchmark(c: &mut Criterion) {
let benchmark_id = BenchmarkId::new("prove_go_base_nova", format!("_{}_{}", size.0, size.1));

let state = State::init_lurk_state().rccell();

// use cached public params
let instance = Instance::new(
reduction_count,
lang_pallas_rc.clone(),
true,
Kind::NovaPublicParams,
);
let pp = public_parameters::public_params::<_, _, MultiFrame<'_, _, _>>(
&instance,
Utf8Path::new(PUBLIC_PARAMS_PATH),
)
.unwrap();
let pp = public_parameters::public_params::<_, _, MultiFrame<'_, _, _>>(&instance).unwrap();

group.bench_with_input(benchmark_id, &size, |b, &s| {
let ptr = go_base::<pallas::Scalar>(&store, state.clone(), s.0, s.1);
let ptr = go_base::<Fq>(&store, state.clone(), s.0, s.1);
let prover = NovaProver::new(reduction_count, lang_pallas.clone());
let frames = MultiFrame::get_evaluation_frames(
|count| prover.needs_frame_padding(count),
Expand Down Expand Up @@ -331,8 +290,9 @@ fn prove_compressed_benchmark(c: &mut Criterion) {
.measurement_time(Duration::from_secs(120))
.sample_size(10);

set_bench_config();
let limit = 1_000_000_000;
let lang_pallas = Lang::<pallas::Scalar, Coproc<pallas::Scalar>>::new();
let lang_pallas = Lang::<Fq, Coproc<Fq>>::new();
let lang_pallas_rc = Arc::new(lang_pallas.clone());
let store = Store::default();
let reduction_count = DEFAULT_REDUCTION_COUNT;
Expand All @@ -344,20 +304,18 @@ fn prove_compressed_benchmark(c: &mut Criterion) {
);

let state = State::init_lurk_state().rccell();

// use cached public params
let instance = Instance::new(
reduction_count,
lang_pallas_rc.clone(),
true,
Kind::NovaPublicParams,
);
let pp = public_parameters::public_params::<_, _, MultiFrame<'_, _, _>>(
&instance,
Utf8Path::new(PUBLIC_PARAMS_PATH),
)
.unwrap();
let pp = public_parameters::public_params::<_, _, MultiFrame<'_, _, _>>(&instance).unwrap();

group.bench_with_input(benchmark_id, &size, |b, &s| {
let ptr = go_base::<pallas::Scalar>(&store, state.clone(), s.0, s.1);
let ptr = go_base::<Fq>(&store, state.clone(), s.0, s.1);
let prover = NovaProver::new(reduction_count, lang_pallas.clone());
let frames = prover
.get_evaluation_frames(
Expand Down Expand Up @@ -387,24 +345,23 @@ fn verify_benchmark(c: &mut Criterion) {
.measurement_time(Duration::from_secs(10))
.sample_size(10);

set_bench_config();
let limit = 1_000_000_000;
let lang_pallas = Lang::<pallas::Scalar, Coproc<pallas::Scalar>>::new();
let lang_pallas = Lang::<Fq, Coproc<Fq>>::new();
let lang_pallas_rc = Arc::new(lang_pallas.clone());
let store = Store::default();
let reduction_count = DEFAULT_REDUCTION_COUNT;

let state = State::init_lurk_state().rccell();

// use cached public params
let instance = Instance::new(
reduction_count,
lang_pallas_rc.clone(),
true,
Kind::NovaPublicParams,
);
let pp = public_parameters::public_params::<_, _, MultiFrame<'_, _, _>>(
&instance,
Utf8Path::new(PUBLIC_PARAMS_PATH),
)
.unwrap();
let pp = public_parameters::public_params::<_, _, MultiFrame<'_, _, _>>(&instance).unwrap();

let sizes = vec![(10, 0)];
for size in sizes {
Expand Down Expand Up @@ -449,24 +406,23 @@ fn verify_compressed_benchmark(c: &mut Criterion) {
.measurement_time(Duration::from_secs(10))
.sample_size(10);

set_bench_config();
let limit = 1_000_000_000;
let lang_pallas = Lang::<pallas::Scalar, Coproc<pallas::Scalar>>::new();
let lang_pallas = Lang::<Fq, Coproc<Fq>>::new();
let lang_pallas_rc = Arc::new(lang_pallas.clone());
let store = Store::default();
let reduction_count = DEFAULT_REDUCTION_COUNT;

let state = State::init_lurk_state().rccell();

// use cached public params
let instance = Instance::new(
reduction_count,
lang_pallas_rc.clone(),
true,
Kind::NovaPublicParams,
);
let pp = public_parameters::public_params::<_, _, MultiFrame<'_, _, _>>(
&instance,
Utf8Path::new(PUBLIC_PARAMS_PATH),
)
.unwrap();
let pp = public_parameters::public_params::<_, _, MultiFrame<'_, _, _>>(&instance).unwrap();

let sizes = vec![(10, 0)];
for size in sizes {
Expand Down
Loading

0 comments on commit e7d1b1a

Please sign in to comment.