Add API for generic data directory #335

Open
wants to merge 6 commits into base: dev
2 changes: 2 additions & 0 deletions Cargo.toml
@@ -46,11 +46,13 @@ ref-cast = "1.0.20" # allocation-less conversion in multilinear polys
derive_more = "0.99.17" # lightens impl macros for pasta
static_assertions = "1.1.0"
rayon-scan = "0.1.0"
camino = "1.1.6"

[target.'cfg(any(target_arch = "x86_64", target_arch = "aarch64"))'.dependencies]
# grumpkin-msm has been patched to support MSMs for the pasta curve cycle
# see: https://github.com/lurk-lab/grumpkin-msm/pull/3
grumpkin-msm = { git = "https://github.com/lurk-lab/grumpkin-msm", branch = "dev" }
home = "0.5.9"

[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2.0", default-features = false, features = ["js"] }
2 changes: 2 additions & 0 deletions examples/minroot.rs
@@ -195,6 +195,8 @@ fn main() {
.with(EnvFilter::from_default_env())
.with(TeXRayLayer::new());
tracing::subscriber::set_global_default(subscriber).unwrap();
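// Enable writing of Arecibo data (R1CS matrices and witnesses) to the data directory.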
arecibo::data::set_write_data(true);

type C1 = MinRootCircuit<<Bn256EngineKZG as Engine>::GE>;

println!("Nova-based VDF with MinRoot delay function");
168 changes: 168 additions & 0 deletions src/data.rs
@@ -0,0 +1,168 @@
//! Minimal utilities for reading and writing general Arecibo data on disk.
use camino::Utf8Path;
#[cfg(not(target_arch = "wasm32"))]
use camino::Utf8PathBuf;
use once_cell::sync::OnceCell;
use serde::{de::DeserializeOwned, Serialize};
use std::sync::Mutex;
#[cfg(not(target_arch = "wasm32"))]
use std::{
collections::HashMap,
fs::{self, File, OpenOptions},
io::{BufReader, BufWriter},
};

/// Default value for the write-data flag in the global configuration.
pub static WRITE: bool = false;

/// Name of the directory (under the user's home directory) where Arecibo data will be stored.
pub static ARECIBO_DATA: &str = ".arecibo_data";

/// Global configuration for Arecibo data storage, including root directory and counters.
/// This configuration is initialized on first use.
pub static ARECIBO_CONFIG: OnceCell<Mutex<DataConfig>> = OnceCell::new();

/// Configuration for managing Arecibo data files: the root directory, the per-(section, label)
/// write counters, and the expected witness size.
#[derive(Debug, Clone, Default)]
pub struct DataConfig {
#[cfg(not(target_arch = "wasm32"))]
root_dir: Utf8PathBuf,
#[cfg(not(target_arch = "wasm32"))]
section_label_counters: HashMap<String, usize>,
write_data: bool,
witness_size: usize,
}

#[cfg(not(target_arch = "wasm32"))]
/// Initializes the global configuration for Arecibo data storage, setting up the root directory
/// and initializing counters. We create the root directory if it does not already exist.
pub fn init_config() -> Mutex<DataConfig> {
let root_dir = home::home_dir().unwrap().join(ARECIBO_DATA);
let root_dir = Utf8PathBuf::from_path_buf(root_dir).unwrap();
if !root_dir.exists() {
fs::create_dir_all(&root_dir).expect("Failed to create arecibo data directory");
}

let config = DataConfig {
root_dir,
section_label_counters: HashMap::new(),
write_data: WRITE,
witness_size: 0,
};

Mutex::new(config)
}

#[cfg(target_arch = "wasm32")]
/// Initializes the global configuration for Arecibo data storage. On wasm32 targets this is a
/// no-op that returns the default configuration, since no data is written to disk.
pub fn init_config() -> Mutex<DataConfig> {
Mutex::new(DataConfig::default())
}

#[cfg(not(target_arch = "wasm32"))]
/// Writes Arecibo data to disk, organizing it into sections and labeling it with a unique identifier.
/// This function serializes the given payload and writes it into the appropriate section and file.
/// For now, we just increment the relevant counter to ensure uniqueness.
pub fn write_arecibo_data<T: Serialize>(
section: impl AsRef<Utf8Path>,
label: impl AsRef<Utf8Path>,
payload: &T,
) {
let mutex = ARECIBO_CONFIG.get_or_init(init_config);
let mut config = mutex.lock().unwrap();

let section_path = config.root_dir.join(section.as_ref());
if !section_path.exists() {
fs::create_dir_all(&section_path).expect("Failed to create section directory");
}

let section_label = format!("{}/{}", section.as_ref(), label.as_ref());
let counter = config.section_label_counters.entry(section_label).or_insert(0);

let file_path = section_path.join(format!("{}_{:?}", label.as_ref().as_str(), counter));
*counter += 1;

let file = OpenOptions::new()
.read(true)
.write(true)
.truncate(true)
.create(true)
.open(file_path)
.expect("Failed to create data file");

let writer = BufWriter::new(&file);
bincode::serialize_into(writer, payload).expect("Failed to write data");
}

#[cfg(target_arch = "wasm32")]
/// No-op stub of `write_arecibo_data` for wasm32 targets, where writing data to disk is not
/// supported.
pub fn write_arecibo_data<T: Serialize>(
_section: impl AsRef<Utf8Path>,
_label: impl AsRef<Utf8Path>,
_payload: &T,
) {
// Do nothing
}

#[cfg(not(target_arch = "wasm32"))]
/// Reads and deserializes data from a specified section and label.
pub fn read_arecibo_data<T: DeserializeOwned>(
section: impl AsRef<Utf8Path>,
label: impl AsRef<Utf8Path>,
) -> T {
let mutex = ARECIBO_CONFIG.get_or_init(init_config);
let config = mutex.lock().unwrap();

let section_path = config.root_dir.join(section.as_ref());
assert!(section_path.exists(), "Section directory does not exist");

// Assuming the label uniquely identifies the file, and ignoring the counter for simplicity
let file_path = section_path.join(label.as_ref());
assert!(file_path.exists(), "Data file does not exist");

let file = File::open(file_path).expect("Failed to open data file");
let reader = BufReader::new(file);

bincode::deserialize_from(reader).expect("Failed to read data")
}

#[cfg(target_arch = "wasm32")]
/// Stub of `read_arecibo_data` for wasm32 targets; reading data is not yet supported there.
pub fn read_arecibo_data<T: DeserializeOwned>(
_section: impl AsRef<Utf8Path>,
_label: impl AsRef<Utf8Path>,
) -> T {
unimplemented!("not supported on wasm yet")
}

/// Are we configured to write data?
pub fn write_data() -> bool {
let mutex = ARECIBO_CONFIG.get_or_init(init_config);
let config = mutex.lock().unwrap();
config.write_data
}

/// Set the configuration for writing data.
pub fn set_write_data(write_data: bool) {
let mutex = ARECIBO_CONFIG.get_or_init(init_config);
let mut config = mutex.lock().unwrap();
config.write_data = write_data;
}

/// Returns the configured witness size.
pub fn witness_size() -> usize {
let mutex = ARECIBO_CONFIG.get_or_init(init_config);
let config = mutex.lock().unwrap();
config.witness_size
}

/// Set the witness size used to identify the primary circuit when writing data.
pub fn set_witness_size(witness_size: usize) {
let mutex = ARECIBO_CONFIG.get_or_init(init_config);
let mut config = mutex.lock().unwrap();
config.witness_size = witness_size;
}
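
A minimal usage sketch of the new API (the example_section and values names and the Vec<u64> payload are illustrative, not part of this PR): enable writing, serialize a payload under ~/.arecibo_data/, then read it back by its on-disk file name (label plus counter suffix).

use arecibo::data::{read_arecibo_data, set_write_data, write_arecibo_data, write_data};

fn main() {
  // Enable data writing for this process (off by default).
  set_write_data(true);
  assert!(write_data());

  // Serializes the payload to ~/.arecibo_data/example_section/values_0;
  // the per-(section, label) counter starts at 0 and increments on each write.
  let payload: Vec<u64> = vec![1, 2, 3];
  write_arecibo_data("example_section", "values", &payload);

  // Reads take the on-disk file name directly (label plus counter suffix),
  // since read_arecibo_data ignores the counter.
  let roundtrip: Vec<u64> = read_arecibo_data("example_section", "values_0");
  assert_eq!(roundtrip, payload);
}
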
33 changes: 27 additions & 6 deletions src/lib.rs
@@ -18,19 +18,17 @@ mod nifs;

// public modules
pub mod constants;
pub mod cyclefold;
pub mod data;
pub mod errors;
pub mod gadgets;
pub mod provider;
pub mod r1cs;
pub mod spartan;
pub mod traits;

pub mod cyclefold;
pub mod supernova;
pub mod traits;

use once_cell::sync::OnceCell;
use traits::{CurveCycleEquipped, Dual};

use crate::data::{set_witness_size, write_arecibo_data, write_data};
use crate::digest::{DigestComputer, SimpleDigestible};
use crate::{
bellpepper::{
@@ -49,6 +47,7 @@ use errors::NovaError;
use ff::{Field, PrimeField};
use gadgets::scalar_as_base;
use nifs::NIFS;
use once_cell::sync::OnceCell;
use r1cs::{
CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness,
};
@@ -60,6 +59,7 @@ use traits::{
snark::RelaxedR1CSSNARKTrait,
AbsorbInROTrait, Engine, ROConstants, ROConstantsCircuit, ROTrait,
};
use traits::{CurveCycleEquipped, Dual};

/// A type that holds parameters for the primary and secondary circuits of Nova and SuperNova
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Abomonation)]
@@ -490,6 +490,26 @@ where
T: r1cs::default_T::<Dual<E1>>(r1cs_secondary.num_cons),
};

if write_data() {
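// Dump the primary circuit's sparse matrices to the sparse_matrices_<pp digest> section
// (files A_<counter>, B_<counter>, C_<counter>), and record the witness size so later
// writes can identify the primary circuit.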
write_arecibo_data(
format!("sparse_matrices_{:?}", pp.digest()),
"A",
&r1cs_primary.A,
);
write_arecibo_data(
format!("sparse_matrices_{:?}", pp.digest()),
"B",
&r1cs_primary.B,
);
write_arecibo_data(
format!("sparse_matrices_{:?}", pp.digest()),
"C",
&r1cs_primary.C,
);

set_witness_size(r1cs_primary.A.num_cols());
}

Ok(Self {
z0_primary: z0_primary.to_vec(),
z0_secondary: z0_secondary.to_vec(),
@@ -502,6 +522,7 @@

buffer_primary,
buffer_secondary,

i: 0,
zi_primary,
zi_secondary,
2 changes: 1 addition & 1 deletion src/nifs.rs
@@ -126,7 +126,7 @@ impl<E: Engine> NIFS<E> {
U2.absorb_in_ro(&mut ro);

// compute a commitment to the cross-term
let comm_T = S.commit_T_into(ck, U1, W1, U2, W2, T, ABC_Z_1, ABC_Z_2)?;
let comm_T = S.commit_T_into(ck, pp_digest, U1, W1, U2, W2, T, ABC_Z_1, ABC_Z_2)?;

// append `comm_T` to the transcript and obtain a challenge
comm_T.absorb_in_ro(&mut ro);
15 changes: 14 additions & 1 deletion src/r1cs/mod.rs
Expand Up @@ -4,6 +4,7 @@ pub(crate) mod util;

use crate::{
constants::{BN_LIMB_WIDTH, BN_N_LIMBS},
data::{witness_size, write_arecibo_data, write_data},
digest::{DigestComputer, SimpleDigestible},
errors::NovaError,
gadgets::{f_to_nat, nat_to_limbs, scalar_as_base},
@@ -52,7 +53,7 @@ pub struct R1CSResult<E: Engine> {
/// A type that holds a witness for a given R1CS instance
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct R1CSWitness<E: Engine> {
W: Vec<E::Scalar>,
pub(crate) W: Vec<E::Scalar>,
}

/// A type that holds an R1CS instance
@@ -465,6 +466,7 @@ impl<E: Engine> R1CSShape<E> {
pub fn commit_T_into(
&self,
ck: &CommitmentKey<E>,
pp_digest: &E::Scalar,
U1: &RelaxedR1CSInstance<E>,
W1: &RelaxedR1CSWitness<E>,
U2: &R1CSInstance<E>,
@@ -473,6 +475,11 @@
ABC_Z_1: &mut R1CSResult<E>,
ABC_Z_2: &mut R1CSResult<E>,
) -> Result<Commitment<E>, NovaError> {
if write_data() && self.A.num_cols() == witness_size() {
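// This shape matches the recorded primary circuit (same column count as its A matrix),
// so dump the running instance's full assignment Z = (W, u, X).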
let witness = [&W1.W[..], &[U1.u], &U1.X[..]].concat();
write_arecibo_data(format!("witness_{:?}", pp_digest), "", &witness);
}

tracing::info_span!("AZ_1, BZ_1, CZ_1")
.in_scope(|| self.multiply_witness_into(&W1.W, &U1.u, &U1.X, ABC_Z_1))?;

@@ -482,6 +489,12 @@
CZ: CZ_1,
} = ABC_Z_1;

if write_data() && self.A.num_cols() == witness_size() {
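// Likewise dump the matrix-vector products AZ, BZ, CZ for the running instance.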
write_arecibo_data(format!("result_{:?}", pp_digest), "AZ", &AZ_1);
write_arecibo_data(format!("result_{:?}", pp_digest), "BZ", &BZ_1);
write_arecibo_data(format!("result_{:?}", pp_digest), "CZ", &CZ_1);
}

tracing::info_span!("AZ_2, BZ_2, CZ_2")
.in_scope(|| self.multiply_witness_into(&W2.W, &E::Scalar::ONE, &U2.X, ABC_Z_2))?;
