Commit

fix target arch
Hanting Zhang committed Mar 21, 2024
1 parent e6521bf commit 086832c
Showing 2 changed files with 15 additions and 8 deletions.
Cargo.toml (4 changes: 2 additions & 2 deletions)
@@ -51,7 +51,7 @@ rayon-scan = "0.1.0"
# grumpkin-msm has been patched to support MSMs for the pasta curve cycle
# see: https://github.com/lurk-lab/grumpkin-msm/pull/3
grumpkin-msm = { git = "https://github.com/lurk-lab/grumpkin-msm", branch = "dev" }
-ingonyama-grumpkin-msm = { git = "https://github.com/lurk-lab/ingonyama-grumpkin-msm" }
+ingonyama-grumpkin-msm = { git = "https://github.com/lurk-lab/ingonyama-grumpkin-msm", optional = true }

[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2.0", default-features = false, features = ["js"] }
@@ -117,7 +117,7 @@ abomonate = []
asm = ["halo2curves/asm"]
# Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems.
portable = ["grumpkin-msm/portable"]
-cuda = ["grumpkin-msm/cuda"]
+cuda = ["grumpkin-msm/cuda", "ingonyama-grumpkin-msm"]
flamegraph = ["pprof/flamegraph", "pprof/criterion"]

[profile.dev-ci]
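Aside (not part of the commit): with `optional = true`, Cargo only compiles `ingonyama-grumpkin-msm` when it is enabled, and because the dependency is listed by name inside the `cuda` feature, building with `cargo build --features cuda` pulls it in while a default build leaves it out. A minimal sketch of how code can guard on such a feature; the function names are illustrative, not from this repository:

// Illustrative only: code gated on the `cuda` feature (enable with `cargo build --features cuda`).
#[cfg(feature = "cuda")]
fn msm_backend() -> &'static str {
  // This is where a GPU-backed MSM (such as the optional ingonyama dependency) would be used.
  "cuda"
}

#[cfg(not(feature = "cuda"))]
fn msm_backend() -> &'static str {
  // Default path when the optional dependency is not compiled in.
  "cpu"
}

fn main() {
  println!("selected MSM backend: {}", msm_backend());
}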
src/provider/bn256_grumpkin.rs (19 changes: 13 additions & 6 deletions)
@@ -8,7 +8,7 @@ use digest::{ExtendableOutput, Update};
use ff::{FromUniformBytes, PrimeField};
use group::{cofactor::CofactorCurveAffine, Curve, Group as AnotherGroup};
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
-use grumpkin_msm::{grumpkin as grumpkin_msm};
+use grumpkin_msm::grumpkin as grumpkin_msm;
// Remove this when https://github.com/zcash/pasta_curves/issues/41 resolves
use halo2curves::{bn256::G2Affine, CurveAffine, CurveExt};
use num_bigint::BigInt;
@@ -35,18 +35,25 @@ pub mod grumpkin {
};
}

-fn ingonyama_bn256_msm(points: &[bn256::Affine], scalars: &[bn256::Scalar]) -> bn256::Point {
-  let stream = ingonyama_grumpkin_msm::Config::new();
-  let cfg = ingonyama_grumpkin_msm::default_config(&stream.stream);
-  ingonyama_grumpkin_msm::bn256_msm(&points, &scalars, &cfg)
+#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
+fn bn256_msm(points: &[bn256::Affine], scalars: &[bn256::Scalar]) -> bn256::Point {
+  cfg_if::cfg_if! {
+    if #[cfg(feature = "cuda")] {
+      let stream = ingonyama_grumpkin_msm::Config::new();
+      let cfg = ingonyama_grumpkin_msm::default_config(&stream.stream);
+      ingonyama_grumpkin_msm::bn256_msm(&points, &scalars, &cfg)
+    } else {
+      grumpkin_msm::bn256(points, scalars)
+    }
+  }
}
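Aside (not part of the commit): `cfg_if::cfg_if!` from the cfg-if crate makes this a compile-time choice, so only one of the two MSM paths is ever built into the binary. A self-contained sketch of the same pattern at item level, with placeholder backends rather than the real MSM crates:

// Illustrative only: compile-time selection of a backend with the cfg-if crate.
// Assumes `cfg-if = "1.0"` in Cargo.toml and a (possibly optional) `cuda` feature.
cfg_if::cfg_if! {
  if #[cfg(feature = "cuda")] {
    // Stand-in for a GPU-accelerated path.
    fn backend(n: u64) -> u64 { n * 2 }
  } else {
    // Stand-in for the portable CPU path.
    fn backend(n: u64) -> u64 { n * 2 }
  }
}

fn main() {
  // Only one `backend` definition is compiled, chosen by the `cuda` feature at build time.
  println!("{}", backend(21));
}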

#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
impl_traits!(
bn256,
"30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001",
"30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47",
-ingonyama_bn256_msm
+bn256_msm
);
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
impl_traits!(

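Aside (not part of the commit): the `target_arch` gates are what the commit title refers to. The new `bn256_msm` helper and the `grumpkin_msm` import only exist on `x86_64` and `aarch64`, while other targets keep the second, ungated `impl_traits!` invocation truncated above. A self-contained sketch of that shape, with placeholder bodies:

// Illustrative only: arch-gated fast path with a portable fallback, mirroring the
// #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] split in the diff above.
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
fn msm(n: u64) -> u64 {
  // Stand-in for the arch-specific backend available on these targets.
  n + 1
}

#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
fn msm(n: u64) -> u64 {
  // Stand-in for the portable implementation used everywhere else (e.g. wasm32).
  n + 1
}

fn main() {
  println!("msm(41) = {}", msm(41));
}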