From 532a4ea55c32ec0bebfb3bb3118c20ad355563f0 Mon Sep 17 00:00:00 2001 From: Adrian Hamelink Date: Tue, 30 Jan 2024 12:25:44 +0100 Subject: [PATCH] revamp --- src/parafold/circuit.rs | 69 ++ src/parafold/cycle_fold/circuit.rs | 129 +++- src/parafold/cycle_fold/circuit_alloc.rs | 96 --- src/parafold/cycle_fold/mod.rs | 180 ++--- src/parafold/cycle_fold/prover.rs | 137 +++- src/parafold/ecc.rs | 123 +++- src/parafold/mod.rs | 5 +- .../{nifs_primary => nifs}/circuit.rs | 140 ++-- .../circuit.rs => nifs/circuit_secondary.rs} | 126 +++- src/parafold/nifs/mod.rs | 28 + src/parafold/nifs/prover.rs | 412 +++++++++++ src/parafold/nifs_primary/circuit_alloc.rs | 118 --- src/parafold/nifs_primary/mod.rs | 64 -- src/parafold/nifs_primary/prover.rs | 234 ------ src/parafold/nifs_secondary/circuit_alloc.rs | 89 --- src/parafold/nifs_secondary/mod.rs | 41 -- src/parafold/nifs_secondary/prover.rs | 17 - src/parafold/nivc/circuit.rs | 671 ++++++++++++------ src/parafold/nivc/circuit_alloc.rs | 169 ----- src/parafold/nivc/hash.rs | 26 - src/parafold/nivc/mod.rs | 80 +-- src/parafold/nivc/prover.rs | 254 +++++-- src/parafold/prover.rs | 162 ++--- src/parafold/transcript/circuit.rs | 143 ++-- src/parafold/transcript/mod.rs | 11 + src/parafold/transcript/prover.rs | 124 ++-- 26 files changed, 2012 insertions(+), 1636 deletions(-) create mode 100644 src/parafold/circuit.rs delete mode 100644 src/parafold/cycle_fold/circuit_alloc.rs rename src/parafold/{nifs_primary => nifs}/circuit.rs (54%) rename src/parafold/{nifs_secondary/circuit.rs => nifs/circuit_secondary.rs} (53%) create mode 100644 src/parafold/nifs/mod.rs create mode 100644 src/parafold/nifs/prover.rs delete mode 100644 src/parafold/nifs_primary/circuit_alloc.rs delete mode 100644 src/parafold/nifs_primary/mod.rs delete mode 100644 src/parafold/nifs_primary/prover.rs delete mode 100644 src/parafold/nifs_secondary/circuit_alloc.rs delete mode 100644 src/parafold/nifs_secondary/mod.rs delete mode 100644 src/parafold/nifs_secondary/prover.rs delete mode 100644 src/parafold/nivc/circuit_alloc.rs delete mode 100644 src/parafold/nivc/hash.rs diff --git a/src/parafold/circuit.rs b/src/parafold/circuit.rs new file mode 100644 index 000000000..46df8970a --- /dev/null +++ b/src/parafold/circuit.rs @@ -0,0 +1,69 @@ +use bellpepper_core::{ConstraintSystem, SynthesisError}; + +use crate::parafold::nivc::circuit::AllocatedNIVCState; +use crate::parafold::nivc::{NIVCMergeProof, NIVCUpdateProof, NIVCIO}; +use crate::parafold::transcript::circuit::AllocatedTranscript; +use crate::parafold::transcript::TranscriptConstants; +use crate::traits::circuit_supernova::EnforcingStepCircuit; +use crate::traits::Engine; + +pub fn synthesize_step( + mut cs: CS, + ro_consts: &TranscriptConstants, + proof: NIVCUpdateProof, + step_circuit: &SF, +) -> Result, SynthesisError> +where + E1: Engine, + E2: Engine, + CS: ConstraintSystem, + SF: EnforcingStepCircuit, +{ + // Fold proof for previous state + let (mut state, transcript) = + AllocatedNIVCState::from_proof(cs.namespace(|| "verify self"), ro_consts, proof)?; + + let io = state.update_io(cs.namespace(|| "step"), step_circuit); + + transcript.inputize(cs.namespace(|| "inputize transcript"))?; + state.inputize(cs.namespace(|| "inputize state"))?; + + io +} +/// Circuit +pub fn synthesize_merge( + mut cs: CS, + ro_consts: &TranscriptConstants, + proof_L: NIVCUpdateProof, + proof_R: NIVCUpdateProof, + proof_merge: NIVCMergeProof, +) -> Result, SynthesisError> +where + E1: Engine, + E2: Engine, + CS: ConstraintSystem, +{ + // Verify L 
+ let (self_L, transcript_L) = + AllocatedNIVCState::from_proof(cs.namespace(|| "verify proof_L"), ro_consts, proof_L)?; + // Verify R + let (self_R, transcript_R) = + AllocatedNIVCState::from_proof(cs.namespace(|| "verify proof_R"), ro_consts, proof_R)?; + // Merge transcripts + let mut transcript = AllocatedTranscript::merge(transcript_L, transcript_R); + + // Merge states + let (state, io_native) = AllocatedNIVCState::merge( + cs.namespace(|| "merge"), + self_L, + self_R, + ro_consts, + proof_merge, + &mut transcript, + )?; + + transcript.inputize(cs.namespace(|| "inputize transcript"))?; + state.inputize(cs.namespace(|| "inputize state"))?; + + Ok(io_native) +} diff --git a/src/parafold/cycle_fold/circuit.rs b/src/parafold/cycle_fold/circuit.rs index 881cf770a..9efc88b5a 100644 --- a/src/parafold/cycle_fold/circuit.rs +++ b/src/parafold/cycle_fold/circuit.rs @@ -1,64 +1,125 @@ use bellpepper_core::num::AllocatedNum; use bellpepper_core::{ConstraintSystem, SynthesisError}; +use itertools::{chain, zip_eq}; -use crate::parafold::cycle_fold::{ - AllocatedHashedCommitment, AllocatedScalarMulAccumulator, AllocatedScalarMulFoldProof, - AllocatedScalarMulMergeProof, -}; -use crate::parafold::nifs_secondary::AllocatedSecondaryRelaxedR1CSInstance; +use crate::parafold::cycle_fold::AllocatedHashedCommitment; +use crate::parafold::nifs::circuit_secondary::AllocatedSecondaryRelaxedR1CSInstance; +use crate::parafold::nifs::FoldProof; use crate::parafold::transcript::circuit::AllocatedTranscript; +use crate::parafold::transcript::TranscriptConstants; use crate::traits::Engine; +use crate::Commitment; -impl AllocatedScalarMulAccumulator +#[derive(Debug, Clone)] +pub struct AllocatedScalarMulAccumulator { + constants: TranscriptConstants, + deferred: Vec>, +} + +impl AllocatedScalarMulAccumulator where E1: Engine, - E2: Engine, { + pub fn new(constants: TranscriptConstants) -> Self { + Self { + constants, + deferred: vec![], + } + } + + pub fn alloc_transcript( + &self, + mut cs: CS, + commitment: Commitment, + transcript: &mut AllocatedTranscript, + ) -> AllocatedHashedCommitment + where + CS: ConstraintSystem, + { + let c = AllocatedHashedCommitment::alloc(&mut cs, commitment, &self.constants); + transcript.absorb(c.as_preimage()); + c + } + /// Compute the result `C <- A + x * B` by folding a proof over the secondary curve. 
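+  ///
+  /// A sketch of the intended call pattern (namespace label and variable names are
+  /// illustrative only, not part of this change):
+  /// ```ignore
+  /// // Defers the instance (A, B, x, C); only the claimed result C is absorbed into the
+  /// // transcript here, and the actual proof is produced later by `finalize`.
+  /// let C = acc_sm.scalar_mul(cs.namespace(|| "C <- A + x*B"), A, B, x, &mut transcript)?;
+  /// ```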
pub fn scalar_mul( &mut self, - /*mut*/ cs: CS, + mut cs: CS, A: AllocatedHashedCommitment, B: AllocatedHashedCommitment, - _x: AllocatedNum, - proof: AllocatedScalarMulFoldProof, + x: AllocatedNum, transcript: &mut AllocatedTranscript, ) -> Result, SynthesisError> where CS: ConstraintSystem, { - let AllocatedScalarMulFoldProof { output, proof } = proof; - transcript.absorb(&output); - - let X_new = vec![ - A.hash, - B.hash, - // TODO: alloc x as big nat - // BigNat::(cs.namespace(|| "alloc x"), Some(x))?, - output.hash.clone(), - ]; + let A_value = A.value; + let B_value = B.value; + let x_value = x.get_value().ok_or(SynthesisError::AssignmentMissing)?; + let C_value = A_value + B_value * x_value; + let C = self.alloc_transcript(cs.namespace(|| "alloc output"), C_value, transcript); - self.acc.fold(cs, X_new, proof, transcript)?; + self.deferred.push(AllocatedScalarMulInstance { + A, + B, + x, + C: C.clone(), + }); - Ok(output) + Ok(C) } /// Merges another existing [AllocatedScalarMulAccumulator] into `self` - pub fn merge( - cs: CS, - self_L: Self, - self_R: Self, - proof: AllocatedScalarMulMergeProof, + pub fn merge(mut self_L: Self, mut self_R: Self) -> Self { + self_L.deferred.append(&mut self_R.deferred); + self_L + } + + pub fn finalize( + self, + mut cs: CS, + mut acc_cf: AllocatedSecondaryRelaxedR1CSInstance, + proofs: impl IntoIterator>, transcript: &mut AllocatedTranscript, - ) -> Result + ) -> Result, SynthesisError> where CS: ConstraintSystem, + E2: Engine, { - let Self { acc: acc_L } = self_L; - let Self { acc: acc_R } = self_R; - let AllocatedScalarMulMergeProof { proof } = proof; - let acc_next = - AllocatedSecondaryRelaxedR1CSInstance::merge(cs, acc_L, acc_R, proof, transcript)?; - Ok(Self { acc: acc_next }) + for (instance, proof) in zip_eq(self.deferred, proofs) { + let AllocatedScalarMulInstance { A, B, x, C } = instance; + let _X_tmp: Vec<_> = chain![A.as_preimage(), B.as_preimage(), [x], C.as_preimage()].collect(); + + // TODO: In order to avoid computing unnecessary proofs, we can check + // - x = 0 => C = A + + // Convert the elements in the instance to a bignum modulo E1::Base. + // Since |E1::Scalar| < |E1::Base|, we can create the limbs without an initial bound-check + // We should check here that the limbs are of the right size, but not-necessarily bound check them. 
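+      //
+      // A possible shape for the conversion (sketch only; the exact bignat gadget API is an
+      // assumption and may differ):
+      //   let X = _X_tmp
+      //     .iter()
+      //     .map(|x| BigNat::from_num(cs.namespace(|| "to bignat"), Num::from(x.clone()), BN_LIMB_WIDTH, BN_N_LIMBS))
+      //     .collect::<Result<Vec<_>, _>>()?;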
+ // X = [A.as_bignum(), B.as_bignum(), x.as_bignum(), C.as_bignum()] + let X = vec![]; + acc_cf.fold(cs.namespace(|| "fold cf instance"), X, proof, transcript)?; + } + + Ok(acc_cf) + } +} + +#[derive(Debug, Clone)] +pub struct AllocatedScalarMulInstance { + A: AllocatedHashedCommitment, + B: AllocatedHashedCommitment, + x: AllocatedNum, + C: AllocatedHashedCommitment, +} + +impl AllocatedScalarMulInstance { + pub fn as_preimage(&self) -> impl IntoIterator> + '_ { + chain![ + self.A.as_preimage(), + self.B.as_preimage(), + [self.x.clone()], + self.C.as_preimage() + ] } } diff --git a/src/parafold/cycle_fold/circuit_alloc.rs b/src/parafold/cycle_fold/circuit_alloc.rs deleted file mode 100644 index 5be745379..000000000 --- a/src/parafold/cycle_fold/circuit_alloc.rs +++ /dev/null @@ -1,96 +0,0 @@ -use bellpepper_core::{ConstraintSystem, SynthesisError}; - -use crate::constants::{BN_LIMB_WIDTH, BN_N_LIMBS}; -use crate::gadgets::nonnative::bignat::BigNat; -use crate::parafold::cycle_fold::AllocatedScalarMulAccumulator; -use crate::parafold::cycle_fold::{ - AllocatedHashedCommitment, AllocatedScalarMulFoldProof, AllocatedScalarMulMergeProof, - HashedCommitment, ScalarMulAccumulatorInstance, ScalarMulFoldProof, ScalarMulMergeProof, -}; -use crate::parafold::nifs_secondary::{ - AllocatedSecondaryFoldProof, AllocatedSecondaryMergeProof, AllocatedSecondaryRelaxedR1CSInstance, -}; -use crate::traits::Engine; - -impl AllocatedScalarMulAccumulator -where - E1: Engine, - E2: Engine, -{ - pub fn alloc_infallible(cs: CS, acc: FA) -> Self - where - CS: ConstraintSystem, - FA: FnOnce() -> ScalarMulAccumulatorInstance, - { - let ScalarMulAccumulatorInstance { acc } = acc(); - Self { - acc: AllocatedSecondaryRelaxedR1CSInstance::alloc_infallible(cs, || acc), - } - } - - pub fn to_native(&self) -> Result, SynthesisError> { - let acc = self.acc.to_native()?; - Ok(ScalarMulAccumulatorInstance { acc }) - } -} - -impl AllocatedHashedCommitment { - pub fn alloc_infallible(mut cs: CS, hashed_commitment: FH) -> Self - where - CS: ConstraintSystem, - FH: FnOnce() -> HashedCommitment, - { - let value = hashed_commitment(); - // TODO: Check if we need `max_word` - // TODO: handle error - let hash = BigNat::alloc_from_limbs( - cs.namespace(|| "alloc hash"), - || Ok(value.hash_limbs.to_vec()), - None, - BN_LIMB_WIDTH, - BN_N_LIMBS, - ) - .unwrap(); - Self { value, hash } - } - - pub fn to_native(&self) -> Result, SynthesisError> { - Ok(self.value.clone()) - } -} - -impl AllocatedScalarMulFoldProof -where - E1: Engine, - E2: Engine, -{ - pub fn alloc_infallible(mut cs: CS, proof: FF) -> Self - where - CS: ConstraintSystem, - FF: FnOnce() -> ScalarMulFoldProof, - { - let ScalarMulFoldProof { output, proof } = proof(); - let output = - AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc output"), || output); - let proof = - AllocatedSecondaryFoldProof::alloc_infallible(cs.namespace(|| "alloc proof"), || proof); - Self { output, proof } - } -} - -impl AllocatedScalarMulMergeProof -where - E1: Engine, - E2: Engine, -{ - pub fn alloc_infallible(mut cs: CS, proof: FP) -> Self - where - CS: ConstraintSystem, - FP: FnOnce() -> ScalarMulMergeProof, - { - let ScalarMulMergeProof { proof, .. 
} = proof(); - let proof = - AllocatedSecondaryMergeProof::alloc_infallible(cs.namespace(|| "alloc proof"), || proof); - Self { proof } - } -} diff --git a/src/parafold/cycle_fold/mod.rs b/src/parafold/cycle_fold/mod.rs index 114b835fb..d1079ffda 100644 --- a/src/parafold/cycle_fold/mod.rs +++ b/src/parafold/cycle_fold/mod.rs @@ -1,56 +1,38 @@ -use std::marker::PhantomData; - use bellpepper_core::num::AllocatedNum; -use ff::Field; +use bellpepper_core::ConstraintSystem; +use ff::{Field, PrimeFieldBits}; +use neptune::Poseidon; -use crate::constants::BN_N_LIMBS; -use crate::gadgets::nonnative::bignat::BigNat; -use crate::parafold::nifs_secondary::prover::SecondaryRelaxedR1CSInstance; -use crate::parafold::nifs_secondary::{ - AllocatedSecondaryFoldProof, AllocatedSecondaryMergeProof, AllocatedSecondaryRelaxedR1CSInstance, -}; -use crate::parafold::nifs_secondary::{SecondaryFoldProof, SecondaryMergeProof}; +use crate::constants::{BN_LIMB_WIDTH, BN_N_LIMBS}; +use crate::parafold::transcript::TranscriptConstants; use crate::traits::commitment::CommitmentTrait; use crate::traits::Engine; use crate::Commitment; -use crate::parafold::transcript; pub mod circuit; -mod circuit_alloc; pub mod prover; -/// A native group element for the [Engine] is given by `point = (x, y)` where the coordinates are over the base field. -/// Inside a circuit, it is represented only as the hash `h = H(x, y)`, where `H` is a hash function with -/// efficient arithmetization over the base field. The identity element is represented by the zero `hash`. -#[derive(Debug, Clone, Default, PartialEq)] -pub struct HashedCommitment { - point: Commitment, - // Poseidon hash of (x,y) = point. We set hash = 0 when `point` = infinity - hash: E1::Base, - // E1 representation of `BN_N_LIMBS` limbs with BN_LIMB_WIDTH bits. - hash_limbs: Vec, -} - -/// Circuit representation of a [GroupElement] +/// Compressed representation of a [Commitment] for a proof over the [Engine]'s scalar field. /// /// # Details /// Let F_r be the scalar field over which the circuit is defined, and F_q be the base field of the group G over which /// the proof is defined, whose scalar field is F_r. We will assume that r < q, which is the case when we instantiate /// the proof over the BN254/Grumpkin curve cycle. /// -/// A [GroupElement] corresponds to a group element C \in G, and would usually be represented by +/// A [HashedCommitment] corresponds to a group element C \in G, and would usually be represented by /// its coordinates (x,y) \in F_q, possibly with a boolean flag indicating whether it is equal to the identity element. /// /// Representing F_q scalars within a circuit over F_r is expensive since we need to encode these /// with range-checked limbs, and any operation performed over these non-native scalar require many constraints /// to properly constrain the modular reduction by q. /// -/// An important observation we can make is that the minimal operation we need to support over [GroupElement]s is +/// An important observation we can make is that the minimal operation we need to support over [HashedCommitment]s is /// "multiply-add", and that the internal of the group element are ignored by the recursive verifier. /// -/// We chose to represent the [GroupElement] C as the F_q element +/// We chose to represent the [HashedCommitment] C as the F_q element /// h_C = H(C) = H((x,y)), /// where H is a hash function with efficient arithmetization over F_q. +/// If C is the identity, then we define h_C = 0. 
/// /// The circuit on the secondary curve has IO (h_C, h_A, h_B, x) \in (F_q, F_q, F_q, F_r), /// with private inputs A, B \in G, and checks @@ -60,103 +42,93 @@ pub struct HashedCommitment { /// When folding a proof for the above IO on the primary curve, each IO elements leads to a non-native "multiply-add", /// so this additional hashing that occurs in the secondary circuit ensure we only need to perform this expensive /// operation 4 times. Moreover, the fact that r { - // hash = if let Some(point) = value { H_secondary(point) } else { 0 } - value: HashedCommitment, - hash: BigNat, -} - -impl transcript::circuit::TranscriptRepresentable - for AllocatedHashedCommitment -{ - fn to_field_vec(&self) -> Vec> { - // - todo!() - } +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct HashedCommitment { + point: Commitment, + // Poseidon hash of (x,y) = point. We set hash = 0 when `point` = infinity + hash: E1::Base, + // E1 representation of `BN_N_LIMBS` limbs with BN_LIMB_WIDTH bits. + hash_limbs: [E1::Scalar; BN_N_LIMBS], } impl HashedCommitment { - pub fn new(C: Commitment) -> Self { - let (_x, _y, infinity) = C.to_coordinates(); + /// Convert a [Commitment] to it's compressed representation. + /// + /// # TODO: + /// - The Poseidon constants for `H(x,y)` over F_q are defined by `constants.1`. + pub fn new(point: Commitment, constants: &TranscriptConstants) -> Self { + let (x, y, infinity) = point.to_coordinates(); if infinity { Self { - point: C, + point, hash: E1::Base::ZERO, - hash_limbs: vec![E1::Scalar::ZERO; BN_N_LIMBS], + hash_limbs: [E1::Scalar::ZERO; BN_N_LIMBS], } } else { - // TODO - // Compute hash = H(x,y) - // decompose hash into BN_N_LIMBS with BN_LIMB_WIDTH bits each + let hash = Poseidon::new_with_preimage(&[x, y], &constants.1).hash(); + let hash_limbs = hash + .to_le_bits() + .chunks_exact(BN_LIMB_WIDTH) + .map(|limb_bits| { + // TODO: Find more efficient trick + let mut limb = E1::Scalar::ZERO; + for bit in limb_bits.iter().rev() { + // double limb + limb += limb; + if *bit { + limb += E1::Scalar::ONE; + } + } + limb + }) + .collect::>(); + Self { - point: C, - hash: E1::Base::ZERO, - hash_limbs: vec![E1::Scalar::ZERO; BN_N_LIMBS], + point, + hash, + hash_limbs: hash_limbs.try_into().unwrap(), } } } -} -impl transcript::prover::TranscriptRepresentable for HashedCommitment { - fn to_field_vec(&self) -> Vec { - self.hash_limbs.clone() + pub fn as_preimage(&self) -> impl IntoIterator { + self.hash_limbs } } -#[derive(Debug, Clone, PartialEq)] -pub struct ScalarMulAccumulatorInstance { - acc: SecondaryRelaxedR1CSInstance, -} - -/// Circuit representation of a RelaxedR1CS accumulator of the non-native scalar multiplication circuit. -/// -/// # Future work -/// While the secondary circuit will be quite small, generating a proof for it may lead to bottlenecks in -/// the full prover pipeline, since each operation needs to be proved sequentially. The small size of the circuit -/// also prevents efficient use of parallelism. -/// -/// One way to side-step this issue is to defer all proving until the end of the interaction, while still ensuring -/// that the non-interactive instantiation of the protocol is safe. +/// Allocated [HashedCommitment] /// -/// Whenever `scalar_mul(A, B, x, transcript)` is called, the function will only compute the result `C <- A + x * B` -/// and add `C` to the `transcript`. This defines an instance `X = [A, B, C, x]` of the secondary circuit, -/// which can be appended to a list of deferred instances stored inside the mutable accumulator. 
-/// At the end of the protocol, the accumulator is "finalized" before being returned as output. This involves the prover -/// synthesizing and proving each instance of the circuit, until the list of deferred instances is empty. +/// # Details +/// Inside the primary circuit, a [Commitment] C is represented by the limbs of the hash `h_C = H(C.x, C.y)`. +/// The limbs of `h_C` are not range-checked and we assume this check occurs during the conversion to a big integer. /// -/// The `merge` operation can simply compute the actual merged folding accumulators, while concatenating the two lists -/// of deferred instances. +/// # TODO +/// - Investigate whether a `is_infinity` flag is needed. It could be used to avoid synthesizing secondary circuits +/// when the scalar multiplication is trivial. #[derive(Debug, Clone)] -pub struct AllocatedScalarMulAccumulator { - acc: AllocatedSecondaryRelaxedR1CSInstance, +pub struct AllocatedHashedCommitment { + value: Commitment, + // hash = if let Some(point) = value { H_secondary(point) } else { 0 } + hash_limbs: [AllocatedNum; BN_N_LIMBS], } -/// A proof for a non-native group operation C = A + x * B, where x is a native scalar -/// and A, B, C, are non-native group elements -#[derive(Debug, Clone, Default)] -pub struct ScalarMulFoldProof { - output: HashedCommitment, - proof: SecondaryFoldProof, -} +impl AllocatedHashedCommitment { + pub fn alloc(mut cs: CS, c: Commitment, constants: &TranscriptConstants) -> Self + where + CS: ConstraintSystem, + { + let hashed = HashedCommitment::::new(c, constants); + let hash_limbs = hashed + .hash_limbs + .map(|limb| AllocatedNum::alloc_infallible(cs.namespace(|| "alloc limb"), || limb)); -#[derive(Debug, Clone)] -pub struct AllocatedScalarMulFoldProof { - output: AllocatedHashedCommitment, - proof: AllocatedSecondaryFoldProof, -} - -/// -#[derive(Debug, Clone)] -pub struct ScalarMulMergeProof { - proof: SecondaryMergeProof, - _marker: PhantomData, -} + Self { + value: c, + hash_limbs, + } + } -#[derive(Debug, Clone)] -pub struct AllocatedScalarMulMergeProof { - proof: AllocatedSecondaryMergeProof, + pub fn as_preimage(&self) -> impl IntoIterator> { + self.hash_limbs.clone() + } } diff --git a/src/parafold/cycle_fold/prover.rs b/src/parafold/cycle_fold/prover.rs index 51e642660..08a8e2f20 100644 --- a/src/parafold/cycle_fold/prover.rs +++ b/src/parafold/cycle_fold/prover.rs @@ -1,51 +1,114 @@ -use crate::parafold::cycle_fold::{ - HashedCommitment, ScalarMulAccumulatorInstance, ScalarMulFoldProof, ScalarMulMergeProof, -}; +use bellpepper_core::ConstraintSystem; + +use crate::bellpepper::solver::SatisfyingAssignment; +use crate::parafold::cycle_fold::HashedCommitment; +use crate::parafold::nifs::prover::RelaxedR1CS; +use crate::parafold::nifs::FoldProof; use crate::parafold::transcript::prover::Transcript; +use crate::parafold::transcript::TranscriptConstants; +use crate::r1cs::R1CSShape; use crate::traits::Engine; +use crate::{Commitment, CommitmentKey}; -#[derive(Debug)] -pub struct ScalarMulAccumulator { - // ro consts secondary? - // used to hash the incoming point - instance: ScalarMulAccumulatorInstance, - W: Vec, - E: Vec, +/// A [ScalarMulAccumulator] represents a coprocessor for efficiently computing non-native ECC scalar multiplications +/// inside a circuit. +/// +/// # Details +/// During an interactive proof, all scalar multiplications operations are deferred and collected +/// into this data structure. 
Since the results of the operation are provided non-deterministically, it must be +/// added to the Fiat-Shamir transcript as it represents a value "provided by the prover". +/// +/// All operations are proved in a batch at the end of the circuit in order to minimize latency for the prover. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ScalarMulAccumulator { + constants: TranscriptConstants, + deferred: Vec>, } -impl ScalarMulAccumulator { +impl ScalarMulAccumulator { + pub fn new(constants: TranscriptConstants) -> Self { + Self { + constants, + deferred: vec![], + } + } + + /// Given two commitments `A`, `B` and a scalar `x`, compute `C <- A + x * B` /// - pub fn scalar_mul>( + /// # Details + /// Since the result `C` is computed by the prover, it is added to the transcript. + /// The tuple `[A, B, x, C]` is added to the `deferred` list which will be proved in a batch later on. + pub fn scalar_mul( &mut self, - _A: &HashedCommitment, - _B: &HashedCommitment, - _x: &E1::Scalar, - _transcript: &mut Transcript, - ) -> (HashedCommitment, ScalarMulFoldProof) { - // Compute C = A + x * B - // Compute W proof of this operation - // compute H(C) as the circuit representation of C, where H is Poseidon on the secondary curve - // Add C,W to the transcript - // Set X = [H(A), H(B), X, H(C)] and fold into self - // return proof - todo!() + A: Commitment, + B: Commitment, + x: E1::Scalar, + transcript: &mut Transcript, + ) -> Commitment { + let C_value = A + B * x; + + let C = self.add_to_transcript(C_value, transcript); + + self.deferred.push(ScalarMulInstance { + A: HashedCommitment::new(A, &self.constants), + B: HashedCommitment::new(B, &self.constants), + x, + C, + }); + C_value } - /// Compute - pub fn merge>( + /// Convert a [Commitment] to a [HashedCommitment] and add it to the transcript. + pub fn add_to_transcript( + &self, + C: Commitment, + transcript: &mut Transcript, + ) -> HashedCommitment { + let C = HashedCommitment::new(C, &self.constants); + transcript.absorb(C.hash_limbs); + C + } + + /// Consume all deferred scalar multiplication instances and create a folding proof for each result. + /// The proofs are folded into a mutable RelaxedR1CS for the corresponding circuit over the secondary curve. 
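+  ///
+  /// A sketch of the expected usage at the end of a folding step (variable names are
+  /// illustrative, not part of this change):
+  /// ```ignore
+  /// // One secondary-curve FoldProof is produced per deferred instance, in order.
+  /// let cf_proofs = acc_sm.finalize(&ck_cf, &shape_cf, &mut acc_cf, &mut transcript);
+  /// ```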
+ pub fn finalize( self, - _other: &Self, - _transcript: &mut Transcript, - ) -> (Self, ScalarMulMergeProof) { - // self and other will not need to be added to the transcript since they are obtained from an accumulator - // we need to compute the T cross term vector - // add T to transcript - // return linear combination of both accumulators - todo!() + ck: &CommitmentKey, + shape: &R1CSShape, + acc_cf: &mut RelaxedR1CS, + transcript: &mut Transcript, + ) -> Vec> + where + E2: Engine, + { + self + .deferred + .into_iter() + .map(|_instance| { + let cs = SatisfyingAssignment::::new(); + // TODO: synthesize the circuit that proves `instance` + let (X, W) = cs.to_assignments(); + acc_cf.fold_secondary(ck, shape, X, &W, transcript) + }) + .collect() } - /// Return the succinct instance of the accumulator - pub(crate) fn instance(&self) -> ScalarMulAccumulatorInstance { - self.instance.clone() + pub fn simulate_finalize(self, transcript: &mut Transcript) -> Vec> + where + E2: Engine, + { + self + .deferred + .into_iter() + .map(|_| RelaxedR1CS::::simulate_fold_secondary(transcript)) + .collect() } } + +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct ScalarMulInstance { + A: HashedCommitment, + B: HashedCommitment, + x: E1::Scalar, + C: HashedCommitment, +} diff --git a/src/parafold/ecc.rs b/src/parafold/ecc.rs index 4aa9b51a6..657e63dc4 100644 --- a/src/parafold/ecc.rs +++ b/src/parafold/ecc.rs @@ -11,8 +11,10 @@ use crate::gadgets::utils::{ select_num_or_one, select_num_or_zero, select_num_or_zero2, select_one_or_diff2, select_one_or_num2, select_zero_or_num2, }; -use crate::parafold::transcript::circuit::TranscriptRepresentable; -use crate::traits::Group; +use crate::parafold::transcript::circuit::AllocatedTranscript; +use crate::traits::commitment::CommitmentTrait; +use crate::traits::{Engine, Group}; +use crate::Commitment; /// `AllocatedPoint` provides an elliptic curve abstraction inside a circuit. #[derive(Debug, Clone)] @@ -23,18 +25,66 @@ pub struct AllocatedPoint { _marker: PhantomData, } -impl TranscriptRepresentable for AllocatedPoint { - fn to_field_vec(&self) -> Vec> { - todo!() - } -} - impl AllocatedPoint where F: PrimeField, - // G: Group, G: Group, { + pub fn select_default(self, mut cs: CS, is_default: &Boolean) -> Result + where + CS: ConstraintSystem, + { + let zero = alloc_zero(cs.namespace(|| "alloc 0")); + let one = alloc_one(cs.namespace(|| "alloc 1")); + let Self { + x, y, is_infinity, .. 
+ } = self; + let x = conditionally_select(cs.namespace(|| "select x"), &zero, &x, is_default)?; + let y = conditionally_select(cs.namespace(|| "select y"), &zero, &y, is_default)?; + let is_infinity = conditionally_select( + cs.namespace(|| "select is_infinity"), + &one, + &is_infinity, + is_default, + )?; + Ok(Self { + x, + y, + is_infinity, + _marker: Default::default(), + }) + } + + pub fn enforce_trivial(&self, mut cs: CS, is_trivial: &Boolean) + where + CS: ConstraintSystem, + { + // is_trivial => (is_identity == 1) + // is_trivial == is_identity + cs.enforce( + || "is_trivial - E.is_infinity = 0", + |lc| lc, + |lc| lc, + |_| is_trivial.lc(CS::one(), F::ONE) - self.is_infinity.get_variable(), + ); + } + + pub fn alloc_transcript( + mut cs: CS, + c: Commitment, + transcript: &mut AllocatedTranscript, + ) -> Self + where + CS: ConstraintSystem, + E1: Engine, + E2: Engine, + { + let c = Self::alloc(&mut cs, Some(c.to_coordinates())).unwrap(); + c.check_on_curve(cs.namespace(|| "check on curve")).unwrap(); + transcript.absorb([c.x.clone(), c.y.clone()]); + c + } + /// Allocates a new point on the curve using coordinates provided by `coords`. /// If coords = None, it allocates the default infinity point pub fn alloc(mut cs: CS, coords: Option<(F, F, bool)>) -> Result @@ -61,7 +111,7 @@ where x, y, is_infinity, - _marker: PhantomData::default(), + _marker: PhantomData, }) } @@ -112,19 +162,19 @@ where } /// Allocates a default point on the curve, set to the identity point. - pub fn default(mut cs: CS) -> Result + pub fn default(mut cs: CS) -> Self where CS: ConstraintSystem, { let zero = alloc_zero(cs.namespace(|| "zero")); let one = alloc_one(cs.namespace(|| "one")); - Ok(Self { + Self { x: zero.clone(), y: zero, is_infinity: one, - _marker: PhantomData::default(), - }) + _marker: PhantomData, + } } /// Returns coordinates associated with the point. 
@@ -150,7 +200,7 @@ where x: self.x.clone(), y, is_infinity: self.is_infinity.clone(), - _marker: PhantomData::default(), + _marker: PhantomData, }) } @@ -369,7 +419,7 @@ where x, y, is_infinity, - _marker: PhantomData::default(), + _marker: PhantomData, }) } @@ -479,7 +529,7 @@ where x, y, is_infinity, - _marker: PhantomData::default(), + _marker: PhantomData, }) } @@ -521,7 +571,7 @@ where // convert back to AllocatedPoint let res = { // we set acc.is_infinity = self.is_infinity - let acc = acc.to_allocated_point(&self.is_infinity)?; + let acc = acc.to_allocated_point(&self.is_infinity); // we remove the initial slack if bits[0] is as not as assumed (i.e., it is not 1) let acc_minus_initial = { @@ -539,7 +589,7 @@ where // when self.is_infinity = 1, return the default point, else return res // we already set res.is_infinity to be self.is_infinity, so we do not need to set it here - let default = Self::default(cs.namespace(|| "default"))?; + let default = Self::default(cs.namespace(|| "default")); let x = conditionally_select2( cs.namespace(|| "check if self.is_infinity is zero (x)"), &default.x, @@ -559,9 +609,9 @@ where x, y, is_infinity: res.is_infinity, - _marker: PhantomData::default(), + _marker: PhantomData, }; - let mut p_complete = p.to_allocated_point(&self.is_infinity)?; + let mut p_complete = p.to_allocated_point(&self.is_infinity); for (i, bit) in complete_bits.iter().enumerate() { let temp = acc.add(cs.namespace(|| format!("add_complete {i}")), &p_complete)?; @@ -603,7 +653,7 @@ where x, y, is_infinity, - _marker: PhantomData::default(), + _marker: PhantomData, }) } @@ -630,9 +680,13 @@ where x, y, is_infinity, - _marker: PhantomData::default(), + _marker: PhantomData, }) } + + pub fn as_preimage(&self) -> impl IntoIterator> { + [self.x.clone(), self.y.clone(), self.is_infinity.clone()] + } } #[derive(Clone)] @@ -653,7 +707,7 @@ where Self { x, y, - _marker: PhantomData::default(), + _marker: PhantomData, } } @@ -672,7 +726,7 @@ where Ok(Self { x, y, - _marker: PhantomData::default(), + _marker: PhantomData, }) } @@ -681,21 +735,18 @@ where Self { x: p.x.clone(), y: p.y.clone(), - _marker: PhantomData::default(), + _marker: PhantomData, } } /// Returns an `AllocatedPoint` from an `AllocatedPointNonInfinity` - pub fn to_allocated_point( - &self, - is_infinity: &AllocatedNum, - ) -> Result, SynthesisError> { - Ok(AllocatedPoint { + pub fn to_allocated_point(&self, is_infinity: &AllocatedNum) -> AllocatedPoint { + AllocatedPoint { x: self.x.clone(), y: self.y.clone(), is_infinity: is_infinity.clone(), - _marker: PhantomData::default(), - }) + _marker: PhantomData, + } } /// Returns coordinates associated with the point. 
@@ -765,7 +816,7 @@ where Ok(Self { x, y, - _marker: PhantomData::default(), + _marker: PhantomData, }) } @@ -826,7 +877,7 @@ where Ok(Self { x, y, - _marker: PhantomData::default(), + _marker: PhantomData, }) } @@ -843,7 +894,7 @@ where Ok(Self { x, y, - _marker: PhantomData::default(), + _marker: PhantomData, }) } } diff --git a/src/parafold/mod.rs b/src/parafold/mod.rs index 5611e6917..e0f0c9a26 100644 --- a/src/parafold/mod.rs +++ b/src/parafold/mod.rs @@ -3,15 +3,14 @@ mod cycle_fold; #[allow(dead_code)] mod ecc; #[allow(dead_code)] -mod nifs_primary; -#[allow(dead_code)] -mod nifs_secondary; +mod nifs; #[allow(dead_code)] mod nivc; #[allow(dead_code)] mod prover; #[allow(dead_code)] mod transcript; +mod circuit; // pub struct ProvingKey { // /// Commitment Key diff --git a/src/parafold/nifs_primary/circuit.rs b/src/parafold/nifs/circuit.rs similarity index 54% rename from src/parafold/nifs_primary/circuit.rs rename to src/parafold/nifs/circuit.rs index de5af5f84..6cd7ad94d 100644 --- a/src/parafold/nifs_primary/circuit.rs +++ b/src/parafold/nifs/circuit.rs @@ -3,37 +3,40 @@ use bellpepper_core::{ConstraintSystem, SynthesisError}; use ff::PrimeField; use itertools::*; -use crate::parafold::cycle_fold::AllocatedScalarMulAccumulator; -use crate::parafold::nifs_primary::{ - AllocatedFoldProof, AllocatedMergeProof, AllocatedRelaxedR1CSInstance, -}; +use crate::parafold::cycle_fold::circuit::AllocatedScalarMulAccumulator; +use crate::parafold::cycle_fold::AllocatedHashedCommitment; +use crate::parafold::nifs::{FoldProof, MergeProof, RelaxedR1CSInstance}; use crate::parafold::transcript::circuit::AllocatedTranscript; +use crate::parafold::transcript::TranscriptConstants; use crate::traits::Engine; +/// Allocated [RelaxedR1CSInstance] +#[derive(Debug, Clone)] +pub struct AllocatedRelaxedR1CSInstance { + u: AllocatedNum, + X: Vec>, + W: AllocatedHashedCommitment, + E: AllocatedHashedCommitment, +} + impl AllocatedRelaxedR1CSInstance { /// Folds an R1CSInstance into `self` - pub fn fold>( - &mut self, + pub fn fold( + self, mut cs: CS, X_new: Vec>, - acc_sm: &mut AllocatedScalarMulAccumulator, - fold_proof: AllocatedFoldProof, + acc_sm: &mut AllocatedScalarMulAccumulator, + fold_proof: FoldProof, transcript: &mut AllocatedTranscript, - ) -> Result<(), SynthesisError> + ) -> Result where CS: ConstraintSystem, { - let AllocatedFoldProof { - W: W_new, - T, - W_sm_proof, - E_sm_proof, - } = fold_proof; + let FoldProof { W: W_new, T } = fold_proof; + let W_new = acc_sm.alloc_transcript(cs.namespace(|| "alloc W_new"), W_new, transcript); + let T = acc_sm.alloc_transcript(cs.namespace(|| "alloc E"), T, transcript); - transcript.absorb(&W_new); - transcript.absorb(&T); - - let r = transcript.squeeze(cs.namespace(|| "squeeze r"))?; + let r = transcript.squeeze(&mut cs.namespace(|| "squeeze r"))?; let Self { W: W_curr, @@ -55,7 +58,6 @@ impl AllocatedRelaxedR1CSInstance { W_curr.clone(), W_new.clone(), r.clone(), - W_sm_proof, transcript, )?; let E_next = acc_sm.scalar_mul( @@ -63,68 +65,57 @@ impl AllocatedRelaxedR1CSInstance { E_curr.clone(), T.clone(), r.clone(), - E_sm_proof, transcript, )?; - *self = Self { + Ok(Self { u: u_next, X: X_next, W: W_next, E: E_next, - }; - - Ok(()) + }) } - pub fn merge_many>( + /// Optimized merge over the primary curve, where the same `r` is used across many accumulators. 
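+  ///
+  /// For each pair `(acc_L, acc_R)` with cross-term commitment `T`, the merged accumulator is
+  ///   u'  = u_L + r * u_R
+  ///   X'  = X_L + r * X_R
+  ///   W'  = W_L + r * W_R
+  ///   E'  = E_L + r * (T + r * E_R)
+  /// where the commitment combinations for `W'` and `E'` are deferred to `acc_sm`.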
+ pub fn merge_many( mut cs: CS, accs_L: Vec, accs_R: Vec, - acc_sm: &mut AllocatedScalarMulAccumulator, - merge_proofs: Vec>, + acc_sm: &mut AllocatedScalarMulAccumulator, + proofs: Vec>, transcript: &mut AllocatedTranscript, ) -> Result, SynthesisError> where CS: ConstraintSystem, { - let (nifs_proofs, sm_proofs): (Vec<_>, Vec<_>) = merge_proofs + // Add all cross-term commitments to the transcript. + let Ts = proofs .into_iter() - .map(|merge_proof| { - let AllocatedMergeProof { - T, - W_sm_proof, - E1_sm_proof, - E2_sm_proof, - } = merge_proof; - (T, [W_sm_proof, E1_sm_proof, E2_sm_proof]) - }) - .unzip(); - - for nifs_proof in &nifs_proofs { - transcript.absorb(nifs_proof); - } + .map(|proof| acc_sm.alloc_transcript(cs.namespace(|| "alloc Ts"), proof.T, transcript)) + .collect::>(); + // Get common challenge let r = transcript.squeeze(cs.namespace(|| "squeeze r"))?; + // Merge all accumulators let accs_next = zip_eq(accs_L, accs_R) - .zip_eq(zip_eq(nifs_proofs, sm_proofs)) - .map(|((acc_L, acc_R), (T, sm_proof))| { + .zip_eq(Ts) + .map(|((acc_L, acc_R), T)| { let Self { u: u_L, X: X_L, W: W_L, E: E_L, + .. } = acc_L; let Self { u: u_R, X: X_R, W: W_R, E: E_R, + .. } = acc_R; - let [W_sm_proof, E1_sm_proof, E2_sm_proof] = sm_proof; - let u_next = mul_add(cs.namespace(|| "u_new"), &u_L, &u_R, &r)?; let X_next = zip_eq(X_L, X_R) .enumerate() @@ -135,7 +126,6 @@ impl AllocatedRelaxedR1CSInstance { W_L.clone(), W_R.clone(), r.clone(), - W_sm_proof, transcript, )?; let E1_next = acc_sm.scalar_mul( @@ -143,29 +133,73 @@ impl AllocatedRelaxedR1CSInstance { T.clone(), E_R.clone(), r.clone(), - E1_sm_proof, transcript, )?; let E_next = acc_sm.scalar_mul( - cs.namespace(|| "E2_next"), + cs.namespace(|| "E_next"), E_L.clone(), E1_next.clone(), r.clone(), - E2_sm_proof, transcript, )?; Ok::(Self { - W: W_next, - E: E_next, u: u_next, X: X_next, + W: W_next, + E: E_next, }) }) .collect::, _>>()?; Ok(accs_next) } + + /// Compute the hash of the accumulator over the primary curve. 
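+  ///
+  /// The hash is obtained by absorbing `[u, X, W, E]` (the output of `as_preimage`) into a
+  /// fresh transcript and squeezing a single challenge.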
+ pub fn hash( + &self, + mut cs: CS, + constants: &TranscriptConstants, + ) -> Result, SynthesisError> + where + CS: ConstraintSystem, + { + let mut transcript = AllocatedTranscript::::new(constants.clone()); + transcript.absorb(self.as_preimage()); + transcript.squeeze(&mut cs) + } + + pub fn alloc( + mut cs: CS, + instance: RelaxedR1CSInstance, + constants: &TranscriptConstants, + ) -> Self + where + CS: ConstraintSystem, + { + // TODO: Add the circuit digest + let RelaxedR1CSInstance { u, X, W, E } = instance; + let u = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc u"), || u); + let X = X + .into_iter() + .enumerate() + .map(|(i, X)| AllocatedNum::alloc_infallible(cs.namespace(|| format!("alloc X[{i}]")), || X)) + .collect(); + let W = AllocatedHashedCommitment::alloc(cs.namespace(|| "alloc W"), W, constants); + let E = AllocatedHashedCommitment::alloc(cs.namespace(|| "alloc E"), E, constants); + + Self { u, X, W, E } + } + + pub fn as_preimage(&self) -> impl IntoIterator> + '_ { + // TODO: Add the circuit digest + chain![ + [self.u.clone()], + self.X.iter().cloned(), + self.W.as_preimage(), + self.E.as_preimage() + ] + } } fn mul_add( diff --git a/src/parafold/nifs_secondary/circuit.rs b/src/parafold/nifs/circuit_secondary.rs similarity index 53% rename from src/parafold/nifs_secondary/circuit.rs rename to src/parafold/nifs/circuit_secondary.rs index f013289fe..f0c87ba4e 100644 --- a/src/parafold/nifs_secondary/circuit.rs +++ b/src/parafold/nifs/circuit_secondary.rs @@ -1,19 +1,29 @@ +use bellpepper_core::boolean::Boolean; +use bellpepper_core::num::AllocatedNum; use bellpepper_core::{ConstraintSystem, SynthesisError}; use ff::PrimeField; use itertools::zip_eq; use num_bigint::BigInt; -use num_traits::Num as numTraitsNum; +use num_traits::{Num as numTraitsNum, Zero}; use crate::constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_CHALLENGE_BITS}; use crate::gadgets::nonnative::bignat::BigNat; use crate::gadgets::nonnative::util::Num; -use crate::gadgets::utils::{alloc_bignat_constant, le_bits_to_num}; -use crate::parafold::nifs_secondary::{ - AllocatedSecondaryFoldProof, AllocatedSecondaryMergeProof, AllocatedSecondaryRelaxedR1CSInstance, -}; +use crate::gadgets::utils::{alloc_bignat_constant, conditionally_select_bignat, le_bits_to_num}; +use crate::parafold::ecc::AllocatedPoint; +use crate::parafold::nifs::{FoldProof, MergeProof, RelaxedR1CSInstance}; use crate::parafold::transcript::circuit::AllocatedTranscript; use crate::traits::Engine; +#[derive(Debug, Clone)] +pub struct AllocatedSecondaryRelaxedR1CSInstance { + pub u: BigNat, + pub X: Vec>, + pub W: AllocatedPoint, + pub E: AllocatedPoint, + // q: BigNat, // = E2::Base::MODULUS +} + impl AllocatedSecondaryRelaxedR1CSInstance where E1: Engine, @@ -23,7 +33,7 @@ where &mut self, mut cs: CS, X_new: Vec>, - fold_proof: AllocatedSecondaryFoldProof, + fold_proof: FoldProof, transcript: &mut AllocatedTranscript, ) -> Result<(), SynthesisError> where @@ -37,9 +47,15 @@ where BN_N_LIMBS, )?; - let AllocatedSecondaryFoldProof { W: W_new, T } = fold_proof; - transcript.absorb(&W_new); - transcript.absorb(&T); + let FoldProof { W: W_new, T } = fold_proof; + + let W_new = AllocatedPoint::alloc_transcript::<_, E1, E2>( + cs.namespace(|| "alloc W_new"), + W_new, + transcript, + ); + let T = + AllocatedPoint::alloc_transcript::<_, E1, E2>(cs.namespace(|| "alloc T"), T, transcript); let r_bits = transcript.squeeze_bits(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; @@ -64,7 +80,7 @@ where 
.map(|(i, (x_curr_bn, x_new_bn))| { add_mul_bn( cs.namespace(|| format!("x_next[{i}]")), - &x_curr_bn, + x_curr_bn, &x_new_bn, &r_bn, &q_bn, @@ -74,10 +90,10 @@ where let W_next = W_new .scalar_mul(cs.namespace(|| "r * W_new"), &r_bits)? - .add(cs.namespace(|| "W_curr + r * W_new"), &W_curr)?; + .add(cs.namespace(|| "W_curr + r * W_new"), W_curr)?; let E_next = T .scalar_mul(cs.namespace(|| "r * T"), &r_bits)? - .add(cs.namespace(|| "W_curr + r * T"), &E_curr)?; + .add(cs.namespace(|| "W_curr + r * T"), E_curr)?; *self = Self { u: u_next, @@ -92,7 +108,7 @@ where mut cs: CS, self_L: Self, self_R: Self, - merge_proof: AllocatedSecondaryMergeProof, + merge_proof: MergeProof, transcript: &mut AllocatedTranscript, ) -> Result where @@ -106,8 +122,11 @@ where BN_N_LIMBS, )?; - let AllocatedSecondaryMergeProof { T } = merge_proof; - transcript.absorb(&T); + let MergeProof { T } = merge_proof; + + let T = + AllocatedPoint::alloc_transcript::<_, E1, E2>(cs.namespace(|| "alloc T"), T, transcript); + transcript.absorb(T.as_preimage()); let r_bits = transcript.squeeze_bits(cs.namespace(|| "r bits"), NUM_CHALLENGE_BITS)?; let r = le_bits_to_num(cs.namespace(|| "r"), &r_bits)?; @@ -164,6 +183,83 @@ where E: E_next, }) } + + pub fn enforce_trivial(&self, mut cs: CS, is_trivial: &Boolean) + where + CS: ConstraintSystem, + { + // TODO: If is_trivial + // u = 0 + // X = [0, ..., 0] + self + .W + .enforce_trivial(cs.namespace(|| "enforce trivial W"), is_trivial); + self + .E + .enforce_trivial(cs.namespace(|| "enforce trivial E"), is_trivial); + } + + pub fn alloc(/*mut*/ _cs: CS, _instance: RelaxedR1CSInstance) -> Self + where + CS: ConstraintSystem, + { + // Both u, X need to be allocated as BigInt + todo!() + // let SecondaryRelaxedR1CSInstance { u, X, W, E } = instance(); + // let u = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc u"), || u); + // let X = X + // .into_iter() + // .enumerate() + // .map(|(i, X)| AllocatedNum::alloc_infallible(cs.namespace(|| format!("alloc X[{i}]")), || X)) + // .collect(); + // let W = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc W"), || W); + // let E = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc E"), || E); + // + // Self { + // u: BigNat::alloc_from_nat(), + // X: vec![], + // W: (), + // E: (), + // } + } + + pub fn alloc_transcript( + mut cs: CS, + instance: RelaxedR1CSInstance, + transcript: &mut AllocatedTranscript, + ) -> Self + where + CS: ConstraintSystem, + { + let instance = Self::alloc(&mut cs, instance); + transcript.absorb(instance.as_preimage()); + instance + } + + pub fn select_default(self, mut cs: CS, is_default: &Boolean) -> Result + where + CS: ConstraintSystem, + { + let bn_zero = alloc_bignat_constant( + cs.namespace(|| "alloc zero"), + &BigInt::zero(), + BN_LIMB_WIDTH, + BN_N_LIMBS, + )?; + let Self { u, X, W, E } = self; + let u = conditionally_select_bignat(cs.namespace(|| "select u"), &bn_zero, &u, is_default)?; + let X = X + .into_iter() + .map(|x| conditionally_select_bignat(cs.namespace(|| "select x"), &bn_zero, &x, is_default)) + .collect::, _>>()?; + let W = W.select_default(cs.namespace(|| "select W"), is_default)?; + let E = E.select_default(cs.namespace(|| "select E"), is_default)?; + Ok(Self { u, X, W, E }) + } + + pub fn as_preimage(&self) -> impl IntoIterator> { + vec![] + } } fn add_mul_bn( diff --git a/src/parafold/nifs/mod.rs b/src/parafold/nifs/mod.rs new file mode 100644 index 000000000..061bd9120 --- /dev/null +++ b/src/parafold/nifs/mod.rs @@ -0,0 +1,28 @@ +use 
crate::traits::Engine; +use crate::Commitment; +pub mod circuit; +pub mod circuit_secondary; +pub mod prover; + +/// Instance of a Relaxed-R1CS accumulator for a circuit. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RelaxedR1CSInstance { + // TODO: Add pp_digest for this circuit. + u: E1::Scalar, + X: Vec, + W: Commitment, + E: Commitment, +} + +/// A proof for folding a statement X of a circuit C into a Relaxed-R1CS circuit for the same circuit C +#[derive(Debug, Clone, Default)] +pub struct FoldProof { + W: Commitment, + T: Commitment, +} + +/// A proof for merging two valid Relaxed-R1CS accumulators for the same circuit C +#[derive(Debug, Clone)] +pub struct MergeProof { + T: Commitment, +} diff --git a/src/parafold/nifs/prover.rs b/src/parafold/nifs/prover.rs new file mode 100644 index 000000000..863bfec20 --- /dev/null +++ b/src/parafold/nifs/prover.rs @@ -0,0 +1,412 @@ +use ff::Field; +use itertools::*; +use rayon::prelude::*; + +use crate::constants::{BN_N_LIMBS, NUM_CHALLENGE_BITS}; +use crate::parafold::cycle_fold::prover::ScalarMulAccumulator; +use crate::parafold::cycle_fold::HashedCommitment; +use crate::parafold::nifs::{FoldProof, MergeProof, RelaxedR1CSInstance}; +use crate::parafold::transcript::prover::Transcript; +use crate::parafold::transcript::TranscriptConstants; +use crate::r1cs::R1CSShape; +use crate::traits::commitment::{CommitmentEngineTrait, CommitmentTrait}; +use crate::traits::Engine; +use crate::{zip_with, Commitment, CommitmentKey, CE}; + +/// A full Relaxed-R1CS accumulator for a circuit +/// # TODO: +/// It would make sense to store the [R1CSShape] here since +/// - There is only one accumulator per shape +/// - +#[derive(Debug)] +pub struct RelaxedR1CS { + instance: RelaxedR1CSInstance, + W: Vec, + E: Vec, + // TODO: store cache for Folding T +} + +impl RelaxedR1CS { + pub fn new(shape: &R1CSShape) -> Self { + Self { + instance: RelaxedR1CSInstance { + u: E::Scalar::ZERO, + X: vec![E::Scalar::ZERO; shape.num_io], + W: Commitment::::default(), + E: Commitment::::default(), + }, + W: vec![E::Scalar::ZERO; shape.num_vars], + E: vec![E::Scalar::ZERO; shape.num_cons], + } + } + pub fn instance(&self) -> &RelaxedR1CSInstance { + &self.instance + } + + pub fn simulate_fold_primary( + acc_sm: &mut ScalarMulAccumulator, + transcript: &mut Transcript, + ) -> FoldProof { + let W = Commitment::::default(); + let T = Commitment::::default(); + transcript.absorb_commitment_primary(W); + transcript.absorb_commitment_primary(T); + let r = transcript.squeeze(); + let _ = acc_sm.scalar_mul(W, W, r, transcript); + let _ = acc_sm.scalar_mul(T, T, r, transcript); + FoldProof { W, T } + } + + pub fn simulate_fold_secondary>( + transcript: &mut Transcript, + ) -> FoldProof { + let W = Commitment::::default(); + let T = Commitment::::default(); + transcript.absorb_commitment_secondary::(W); + transcript.absorb_commitment_secondary::(T); + let _r = transcript.squeeze(); + FoldProof { W, T } + } + + /// Given the public IO `X_new` for a circuit with R1CS representation `shape`, + /// along with a satisfying witness vector `W_new`, and assuming `self` is a valid accumulator for the same circuit, + /// this function will fold the statement into `self` and return a [FoldProof] that will allow the verifier to perform + /// the same transformation over the corresponding [RelaxedR1CSInstance] of the input `self`. + /// + /// # Warning + /// We assume the R1CS IO `X_new` has already been absorbed in some form into the transcript in order to avoid + /// unnecessary hashing. 
The caller is responsible for ensuring this assumption is valid. + pub fn fold_primary( + &mut self, + ck: &CommitmentKey, + shape: &R1CSShape, + X_new: Vec, + W_new: &[E::Scalar], + acc_sm: &mut ScalarMulAccumulator, + transcript: &mut Transcript, + ) -> FoldProof { + // TODO: Parallelize both of these operations + let W_comm_new = { E::CE::commit(ck, W_new) }; + let (T, T_comm) = { self.compute_fold_proof(ck, shape, None, &X_new, W_new) }; + + acc_sm.add_to_transcript(W_comm_new, transcript); + acc_sm.add_to_transcript(T_comm, transcript); + let r = transcript.squeeze(); + + self + .W + .par_iter_mut() + .zip_eq(W_new.par_iter()) + .for_each(|(w, w_new)| *w += r * w_new); + self + .E + .par_iter_mut() + .zip_eq(T.par_iter()) + .for_each(|(e, t)| *e += r * t); + + // For non-relaxed instances, u_new = 1 + self.instance.u += r; + self + .instance + .X + .iter_mut() + .zip_eq(X_new) + .for_each(|(x, x_new)| *x += r * x_new); + + // Compute scalar multiplications and resulting instances to be proved with the CycleFold circuit + // W_comm_next = W_comm_curr + r * W_comm_new + self.instance.W = acc_sm.scalar_mul(self.instance.W, W_comm_new, r, transcript); + + // E_comm_next = E_comm_curr + r * T + self.instance.E = acc_sm.scalar_mul(self.instance.E, T_comm, r, transcript); + + FoldProof { + W: W_comm_new, + T: T_comm, + } + } + + pub fn fold_secondary>( + &mut self, + ck: &CommitmentKey, + shape: &R1CSShape, + X_new: Vec, + W_new: &[E::Scalar], + transcript: &mut Transcript, + ) -> FoldProof { + // TODO: Parallelize both of these operations + let W_comm_new = { E::CE::commit(ck, W_new) }; + let (T, T_comm) = { self.compute_fold_proof(ck, shape, None, &X_new, W_new) }; + + transcript.absorb(comm_to_base::(&W_comm_new)); + transcript.absorb(comm_to_base::(&T_comm)); + // TODO: Squeeze + let r = transcript.squeeze_bits_secondary(NUM_CHALLENGE_BITS); + + self + .W + .par_iter_mut() + .zip_eq(W_new.par_iter()) + .for_each(|(w, w_new)| *w += r * w_new); + self + .E + .par_iter_mut() + .zip_eq(T.par_iter()) + .for_each(|(e, t)| *e += r * t); + + // For non-relaxed instances, u_new = 1 + self.instance.u += r; + self + .instance + .X + .iter_mut() + .zip_eq(X_new) + .for_each(|(x, x_new)| *x += r * x_new); + self.instance.W = self.instance.W + W_comm_new * r; + self.instance.E = self.instance.E + T_comm * r; + + FoldProof { + W: W_comm_new, + T: T_comm, + } + } + + /// Given two lists of [RelaxedR1CS] accumulators, + pub fn merge_many( + ck: &CommitmentKey, + shapes: &[R1CSShape], + mut accs_L: Vec, + accs_R: &[Self], + acc_sm: &mut ScalarMulAccumulator, + transcript: &mut Transcript, + ) -> (Vec, Vec>) { + // TODO: parallelize + let (Ts, T_comms): (Vec<_>, Vec<_>) = zip_with!( + (accs_L.iter_mut(), accs_R.iter(), shapes), + |acc_L, acc_R, shape| { + acc_L.compute_fold_proof( + ck, + shape, + Some(acc_R.instance.u), + &acc_R.instance.X, + &acc_R.W, + ) + } + ) + .unzip(); + + for T_comm in &T_comms { + transcript.absorb_commitment_primary(*T_comm); + } + let r = transcript.squeeze(); + + zip_with!( + ( + accs_L.into_iter(), + accs_R.iter(), + Ts.iter(), + T_comms.into_iter() + ), + |acc_L, acc_R, T, T_comm| { + let W = zip_with!( + (acc_L.W.into_par_iter(), acc_R.W.par_iter()), + |w_L, w_R| w_L + r * w_R + ) + .collect(); + + let E = zip_with!( + (acc_L.E.into_par_iter(), T.par_iter(), acc_R.E.par_iter()), + |e_L, t, e_R| e_L + r * (*t + r * e_R) + ) + .collect(); + + let instance = { + let u = acc_L.instance.u + r * acc_R.instance.u; + let X = zip_eq(acc_L.instance.X.into_iter(), 
acc_R.instance.X.iter()) + .map(|(x_L, x_R)| x_L + r * x_R) + .collect(); + + // Compute scalar multiplications and resulting instances to be proved with the CycleFold circuit + // W_next = W_L + r * W_R + let W = acc_sm.scalar_mul(acc_L.instance.W, acc_R.instance.W, r, transcript); + + let E_tmp = acc_sm.scalar_mul(T_comm, acc_R.instance.E, r, transcript); + // E_next = E_L + r * E1_next = E_L + r * T + r^2 * E_R + let E = acc_sm.scalar_mul(acc_L.instance.E, E_tmp, r, transcript); + + RelaxedR1CSInstance { u, X, W, E } + }; + + let acc = Self { instance, W, E }; + + let merge_proof = MergeProof { T: T_comm }; + + (acc, merge_proof) + } + ) + .unzip() + } + + /// Given two lists of [RelaxedR1CS] accumulators, + pub fn merge_secondary>( + ck: &CommitmentKey, + shape: &R1CSShape, + acc_L: Self, + acc_R: &Self, + transcript: &mut Transcript, + ) -> (Self, MergeProof) { + let (T, T_comm) = acc_L.compute_fold_proof( + ck, + shape, + Some(acc_R.instance.u), + &acc_R.instance.X, + &acc_R.W, + ); + + transcript.absorb(comm_to_base::(&T_comm)); + let r = transcript.squeeze_bits_secondary(NUM_CHALLENGE_BITS); + + let W = zip_with!( + (acc_L.W.into_par_iter(), acc_R.W.par_iter()), + |w_L, w_R| w_L + r * w_R + ) + .collect(); + + let E = zip_with!( + (acc_L.E.into_par_iter(), T.par_iter(), acc_R.E.par_iter()), + |e_L, t, e_R| e_L + r * (*t + r * e_R) + ) + .collect(); + + let instance = { + let u = acc_L.instance.u + r * acc_R.instance.u; + let X = zip_eq(acc_L.instance.X, &acc_R.instance.X) + .map(|(x_L, x_R)| x_L + r * x_R) + .collect(); + + let W = acc_L.instance.W + acc_R.instance.W * r; + let E_tmp = T_comm + acc_R.instance.E * r; + let E = acc_L.instance.E + E_tmp * r; + + RelaxedR1CSInstance { u, X, W, E } + }; + + let acc = Self { instance, W, E }; + + let merge_proof = MergeProof { T: T_comm }; + + (acc, merge_proof) + } + + fn compute_fold_proof( + &self, + ck: &CommitmentKey, + shape: &R1CSShape, + u_new: Option, + X_new: &[E::Scalar], + W_new: &[E::Scalar], + ) -> (Vec, Commitment) { + let u_1 = self.instance.u; + let u_2 = u_new.unwrap_or(E::Scalar::ONE); + let (AZ_1, BZ_1, CZ_1) = tracing::trace_span!("AZ_1, BZ_1, CZ_1") + .in_scope(|| shape.multiply_witness(&self.W, &u_1, &self.instance.X)) + .unwrap(); + + let (AZ_2, BZ_2, CZ_2) = tracing::trace_span!("AZ_2, BZ_2, CZ_2") + .in_scope(|| shape.multiply_witness(W_new, &u_2, X_new)) + .unwrap(); + + let (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) = + tracing::trace_span!("cross terms").in_scope(|| { + let AZ_1_circ_BZ_2 = (0..AZ_1.len()) + .into_par_iter() + .map(|i| AZ_1[i] * BZ_2[i]) + .collect::>(); + let AZ_2_circ_BZ_1 = (0..AZ_2.len()) + .into_par_iter() + .map(|i| AZ_2[i] * BZ_1[i]) + .collect::>(); + let u_1_cdot_CZ_2 = (0..CZ_2.len()) + .into_par_iter() + .map(|i| u_1 * CZ_2[i]) + .collect::>(); + // TODO: Avoid multiplication by u2 if it is 1 + let u_2_cdot_CZ_1 = (0..CZ_1.len()) + .into_par_iter() + .map(|i| u_2 * CZ_1[i]) + .collect::>(); + (AZ_1_circ_BZ_2, AZ_2_circ_BZ_1, u_1_cdot_CZ_2, u_2_cdot_CZ_1) + }); + + let T = tracing::trace_span!("T").in_scope(|| { + AZ_1_circ_BZ_2 + .par_iter() + .zip_eq(&AZ_2_circ_BZ_1) + .zip_eq(&u_1_cdot_CZ_2) + .zip_eq(&u_2_cdot_CZ_1) + .map(|(((a, b), c), d)| *a + *b - *c - *d) + .collect::>() + }); + + let comm_T = CE::::commit(ck, &T); + (T, comm_T) + } +} + +impl RelaxedR1CSInstance { + pub(crate) fn default(num_io: usize) -> Self { + Self { + u: E::Scalar::ZERO, + X: vec![E::Scalar::ZERO; num_io], + W: Commitment::::default(), + E: Commitment::::default(), + } + } +} + 
+impl RelaxedR1CSInstance { + pub(crate) fn as_preimage(&self) -> impl IntoIterator + '_ { + // TODO, decompose into real limbs + let u_limbs = [E2::Base::ZERO; BN_N_LIMBS]; + let X_limbs = self.X.iter().flat_map(|_x| [E2::Base::ZERO; BN_N_LIMBS]); + let W = comm_to_base::(&self.W); + let E = comm_to_base::(&self.E); + chain![u_limbs, X_limbs, W, E] + } + + pub fn io_size(&self) -> usize { + [ + BN_N_LIMBS, // u + self.X.len() * BN_N_LIMBS, // X + 2, // W + 2, // E + ] + .into_iter() + .sum() + } +} + +impl RelaxedR1CSInstance { + /// On the primary curve, the instances are stored as hashes in the recursive state. + pub fn hash(&self, transcript_constants: &TranscriptConstants) -> E1::Scalar { + let mut transcript = Transcript::::new(transcript_constants.clone()); + let Self { u, X, W, E } = self; + let W = HashedCommitment::::new(*W, transcript_constants); + let E = HashedCommitment::::new(*E, transcript_constants); + transcript.absorb(chain![ + [*u], + X.iter().cloned(), + W.as_preimage(), + E.as_preimage() + ]); + transcript.squeeze() + } +} + +/// Convert a commitment over the secondary curve to its coordinates to it can be added to a transcript defined +/// over the primary curve. +/// The `is_infinity` flag is not added since it is computed in the circuit and the coordinates are checked. +fn comm_to_base(comm: &Commitment) -> [E2::Base; 2] { + let (x, y, _) = comm.to_coordinates(); + [x, y] +} diff --git a/src/parafold/nifs_primary/circuit_alloc.rs b/src/parafold/nifs_primary/circuit_alloc.rs deleted file mode 100644 index 560346eb1..000000000 --- a/src/parafold/nifs_primary/circuit_alloc.rs +++ /dev/null @@ -1,118 +0,0 @@ -use bellpepper_core::num::AllocatedNum; -use bellpepper_core::{ConstraintSystem, SynthesisError}; - -use crate::parafold::cycle_fold::{AllocatedHashedCommitment, AllocatedScalarMulFoldProof}; -use crate::parafold::nifs_primary::{ - AllocatedFoldProof, AllocatedMergeProof, AllocatedRelaxedR1CSInstance, -}; -use crate::parafold::nifs_primary::{FoldProof, MergeProof, RelaxedR1CSInstance}; -use crate::traits::Engine; - -impl AllocatedRelaxedR1CSInstance { - pub fn alloc_infallible(mut cs: CS, instance: FI) -> Self - where - CS: ConstraintSystem, - FI: FnOnce() -> RelaxedR1CSInstance, - { - let RelaxedR1CSInstance { u, X, W, E } = instance(); - let u = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc u"), || u); - let X = X - .into_iter() - .enumerate() - .map(|(i, X)| AllocatedNum::alloc_infallible(cs.namespace(|| format!("alloc X[{i}]")), || X)) - .collect(); - let W = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc W"), || W); - let E = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc E"), || E); - - Self { u, X, W, E } - } - - pub fn to_native(&self) -> Result, SynthesisError> { - let u = self - .u - .get_value() - .ok_or(SynthesisError::AssignmentMissing)?; - let X = self - .X - .iter() - .map(|x| x.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::, _>>()?; - let W = self.W.to_native()?; - let E = self.E.to_native()?; - - Ok(RelaxedR1CSInstance { u, X, W, E }) - } -} - -impl AllocatedFoldProof -where - E1: Engine, - E2: Engine, -{ - pub fn alloc_infallible(mut cs: CS, fold_proof: FP) -> Self - where - CS: ConstraintSystem, - FP: FnOnce() -> FoldProof, - { - let FoldProof { - W, - T, - W_sm_proof, - E_sm_proof, - } = fold_proof(); - let W = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc W"), || W); - let T = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc T"), || T); - 
let W_sm_proof = - AllocatedScalarMulFoldProof::alloc_infallible(cs.namespace(|| "alloc W_sm_proof"), || { - W_sm_proof - }); - let E_sm_proof = - AllocatedScalarMulFoldProof::alloc_infallible(cs.namespace(|| "alloc E_sm_proof"), || { - E_sm_proof - }); - Self { - W, - T, - W_sm_proof, - E_sm_proof, - } - } -} - -impl AllocatedMergeProof -where - E1: Engine, - E2: Engine, -{ - pub fn alloc_infallible(mut cs: CS, merge_proof: FP) -> Self - where - CS: ConstraintSystem, - FP: FnOnce() -> MergeProof, - { - let MergeProof { - T, - W_sm_proof, - E1_sm_proof, - E2_sm_proof, - } = merge_proof(); - let T = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc T"), || T); - let W_sm_proof = - AllocatedScalarMulFoldProof::alloc_infallible(cs.namespace(|| "alloc W_sm_proof"), || { - W_sm_proof - }); - let E1_sm_proof = - AllocatedScalarMulFoldProof::alloc_infallible(cs.namespace(|| "alloc E1_sm_proof"), || { - E1_sm_proof - }); - let E2_sm_proof = - AllocatedScalarMulFoldProof::alloc_infallible(cs.namespace(|| "alloc E2_sm_proof"), || { - E2_sm_proof - }); - Self { - T, - W_sm_proof, - E1_sm_proof, - E2_sm_proof, - } - } -} diff --git a/src/parafold/nifs_primary/mod.rs b/src/parafold/nifs_primary/mod.rs deleted file mode 100644 index 69a37b067..000000000 --- a/src/parafold/nifs_primary/mod.rs +++ /dev/null @@ -1,64 +0,0 @@ -use bellpepper_core::num::AllocatedNum; - -use crate::parafold::cycle_fold::{AllocatedHashedCommitment, AllocatedScalarMulFoldProof}; -use crate::parafold::cycle_fold::{HashedCommitment, ScalarMulFoldProof}; -use crate::traits::Engine; - -pub mod circuit; -mod circuit_alloc; -pub mod prover; - -/// Instance of a Relaxed-R1CS accumulator for a circuit -#[derive(Debug, Clone, PartialEq)] -pub struct RelaxedR1CSInstance { - u: E1::Scalar, - X: Vec, - W: HashedCommitment, - E: HashedCommitment, -} - -/// Allocated [RelaxedR1CSInstance] -#[derive(Debug, Clone)] -pub struct AllocatedRelaxedR1CSInstance { - u: AllocatedNum, - X: Vec>, - W: AllocatedHashedCommitment, - E: AllocatedHashedCommitment, -} - - -/// A proof for folding a statement X of a circuit C into a Relaxed-R1CS circuit for the same circuit C -#[derive(Debug, Clone, Default)] -pub struct FoldProof { - W: HashedCommitment, - T: HashedCommitment, - W_sm_proof: ScalarMulFoldProof, - E_sm_proof: ScalarMulFoldProof, -} - -/// Allocated [FoldProof] -#[derive(Debug, Clone)] -pub struct AllocatedFoldProof { - pub W: AllocatedHashedCommitment, - pub T: AllocatedHashedCommitment, - pub W_sm_proof: AllocatedScalarMulFoldProof, - pub E_sm_proof: AllocatedScalarMulFoldProof, -} - -/// A proof for merging two valid Relaxed-R1CS accumulators for the same circuit C -#[derive(Debug, Clone)] -pub struct MergeProof { - T: HashedCommitment, - W_sm_proof: ScalarMulFoldProof, - E1_sm_proof: ScalarMulFoldProof, - E2_sm_proof: ScalarMulFoldProof, -} - -/// Allocated [MergeProof] -#[derive(Debug, Clone)] -pub struct AllocatedMergeProof { - T: AllocatedHashedCommitment, - W_sm_proof: AllocatedScalarMulFoldProof, - E1_sm_proof: AllocatedScalarMulFoldProof, - E2_sm_proof: AllocatedScalarMulFoldProof, -} diff --git a/src/parafold/nifs_primary/prover.rs b/src/parafold/nifs_primary/prover.rs deleted file mode 100644 index 266436aeb..000000000 --- a/src/parafold/nifs_primary/prover.rs +++ /dev/null @@ -1,234 +0,0 @@ -use itertools::*; -use rayon::prelude::*; - -use crate::parafold::cycle_fold::prover::ScalarMulAccumulator; -use crate::parafold::cycle_fold::HashedCommitment; -use crate::parafold::nifs_primary::{FoldProof, MergeProof, 
RelaxedR1CSInstance}; -use crate::parafold::prover::CommitmentKey; -use crate::parafold::transcript::prover::Transcript; -use crate::r1cs::R1CSShape; -use crate::traits::Engine; -use crate::zip_with; - -/// A full Relaxed-R1CS accumulator for a circuit -#[derive(Debug)] -pub struct RelaxedR1CS { - instance: RelaxedR1CSInstance, - W: Vec, - E: Vec, - // TODO: store cache for Folding T -} - -impl RelaxedR1CS { - // pub fn default(shape: &R1CSShape) -> Self { - // Self { - // instance: RelaxedR1CSInstance::default(shape), - // W: vec![E::Scalar::ZERO; shape.num_vars], - // E: vec![E::Scalar::ZERO; shape.num_cons], - // } - // } - - pub fn instance(&self) -> &RelaxedR1CSInstance { - &self.instance - } - - /// Given the public IO `X_new` for a circuit with R1CS representation `shape`, - /// along with a satisfying witness vector `W_new`, and assuming `self` is a valid accumulator for the same circuit, - /// this function will fold the statement into `self` and return a [FoldProof] that will allow the verifier to perform - /// the same transformation over the corresponding [RelaxedR1CSInstance] of the input `self`. - /// - /// # Warning - /// We assume the R1CS IO `X_new` has already been absorbed in some form into the transcript in order to avoid - /// unnecessary hashing. The caller is responsible for ensuring this assumption is valid. - pub fn fold( - &mut self, - ck: &CommitmentKey, - shape: &R1CSShape, - X_new: Vec, - W_new: &[E1::Scalar], - acc_sm: &mut ScalarMulAccumulator, - transcript: &mut Transcript, - ) -> FoldProof - where - E2: Engine, - { - // TODO: Parallelize both of these operations - let W_comm_new = { ck.commit(&W_new) }; - let (T, T_comm) = { self.compute_fold_proof(ck, shape, None, &X_new, &W_new) }; - - transcript.absorb(&W_comm_new); - transcript.absorb(&T_comm); - let r = transcript.squeeze(); - - self - .W - .par_iter_mut() - .zip_eq(W_new.par_iter()) - .for_each(|(w, w_new)| *w += r * w_new); - - self - .E - .par_iter_mut() - .zip_eq(T.par_iter()) - .for_each(|(e, t)| *e += r * t); - - let fold_proof = self - .instance - .fold_aux(acc_sm, X_new, W_comm_new, T_comm, r, transcript); - fold_proof - } - - /// Given two lists of [RelaxedR1CS] accumulators, - pub fn merge_many( - ck: &CommitmentKey, - shapes: &[R1CSShape], - mut accs_L: Vec, - accs_R: &[Self], - acc_sm: &mut ScalarMulAccumulator, - transcript: &mut Transcript, - ) -> (Vec, Vec>) - where - E2: Engine, - { - // TODO: parallelize - let (Ts, T_comms): (Vec<_>, Vec<_>) = zip_with!( - (accs_L.iter_mut(), accs_R.iter(), shapes), - |acc_L, acc_R, shape| { - acc_L.compute_fold_proof( - ck, - shape, - Some(acc_R.instance.u), - &acc_R.instance.X, - &acc_R.W, - ) - } - ) - .unzip(); - - for T_comm in &T_comms { - transcript.absorb(T_comm); - } - let r = transcript.squeeze(); - - zip_with!( - ( - accs_L.into_iter(), - accs_R.iter(), - Ts.iter(), - T_comms.into_iter() - ), - |acc_L, acc_R, T, T_comm| { - let W = zip_with!( - (acc_L.W.into_par_iter(), acc_R.W.par_iter()), - |w_L, w_R| w_L + r * w_R - ) - .collect(); - - let E = zip_with!( - (acc_L.E.into_par_iter(), T.par_iter(), acc_R.E.par_iter()), - |e_L, t, e_R| e_L + r * (*t + r * e_R) - ) - .collect(); - - let (instance, merge_proof) = RelaxedR1CSInstance::merge_aux( - acc_L.instance, - &acc_R.instance, - acc_sm, - T_comm, - r, - transcript, - ); - - let acc = Self { instance, W, E }; - - (acc, merge_proof) - } - ) - .unzip() - } - - fn compute_fold_proof( - &self, - _ck: &CommitmentKey, - _shape: &R1CSShape, - _u_new: Option, - _X_new: &[E1::Scalar], - _W_new: 
&[E1::Scalar], - ) -> (Vec, HashedCommitment) { - // let T_comm = CE::::commit(ck, &T); - todo!() - } -} - -impl RelaxedR1CSInstance { - pub fn fold_aux( - &mut self, - acc_sm: &mut ScalarMulAccumulator, - X_new: Vec, - W_new: HashedCommitment, - T: HashedCommitment, - r: E1::Scalar, - transcript: &mut Transcript, - ) -> FoldProof - where - E2: Engine, - { - // For non-relaxed instances, u_new = 1 - self.u += r; - self - .X - .iter_mut() - .zip_eq(X_new) - .for_each(|(x, x_new)| *x += r * x_new); - - // Compute scalar multiplications and resulting instances to be proved with the CycleFold circuit - // W_comm_next = W_comm_curr + r * W_comm_new - let (W, W_sm_proof) = acc_sm.scalar_mul(&self.W, &W_new, &r, transcript); - self.W = W; - - // E_comm_next = E_comm_curr + r * T - let (E, E_sm_proof) = acc_sm.scalar_mul(&self.E, &T, &r, transcript); - self.E = E; - - FoldProof { - W: W_new, - T, - W_sm_proof, - E_sm_proof, - } - } - - pub fn merge_aux( - acc_L: Self, - acc_R: &Self, - acc_sm: &mut ScalarMulAccumulator, - T: HashedCommitment, - r: E1::Scalar, - transcript: &mut Transcript, - ) -> (Self, MergeProof) - where - E2: Engine, - { - let u = acc_L.u + r * &acc_R.u; - let X = zip_eq(acc_L.X.into_iter(), acc_R.X.iter()) - .map(|(x_L, x_R)| x_L + r * x_R) - .collect(); - - // Compute scalar multiplications and resulting instances to be proved with the CycleFold circuit - // W_next = W_L + r * W_R - let (W, W_sm_proof) = acc_sm.scalar_mul::(&acc_L.W, &acc_R.W, &r, transcript); - - let (E1_next, E1_sm_proof) = acc_sm.scalar_mul::(&T, &acc_R.E, &r, transcript); - // E_next = E_L + r * E1_next = E_L + r * T + r^2 * E_R - let (E, E2_sm_proof) = acc_sm.scalar_mul::(&acc_L.E, &E1_next, &r, transcript); - let instance = Self { u, X, W, E }; - - let merge_proof = MergeProof { - T, - W_sm_proof, - E1_sm_proof, - E2_sm_proof, - }; - (instance, merge_proof) - } -} diff --git a/src/parafold/nifs_secondary/circuit_alloc.rs b/src/parafold/nifs_secondary/circuit_alloc.rs deleted file mode 100644 index 0a3c5cc56..000000000 --- a/src/parafold/nifs_secondary/circuit_alloc.rs +++ /dev/null @@ -1,89 +0,0 @@ -use bellpepper_core::{ConstraintSystem, SynthesisError}; - -use crate::parafold::ecc::AllocatedPoint; -use crate::parafold::nifs_secondary::prover::SecondaryRelaxedR1CSInstance; -use crate::parafold::nifs_secondary::{ - AllocatedSecondaryFoldProof, AllocatedSecondaryMergeProof, AllocatedSecondaryRelaxedR1CSInstance, -}; -use crate::parafold::nifs_secondary::{SecondaryFoldProof, SecondaryMergeProof}; -use crate::traits::commitment::CommitmentTrait; -use crate::traits::Engine; - -impl AllocatedSecondaryRelaxedR1CSInstance { - pub fn alloc_infallible(/*mut*/ _cs: CS, _instance: FI) -> Self - where - CS: ConstraintSystem, - FI: FnOnce() -> SecondaryRelaxedR1CSInstance, - { - // Both u, X need to be allocated as BigInt - todo!() - // let SecondaryRelaxedR1CSInstance { u, X, W, E } = instance(); - // let u = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc u"), || u); - // let X = X - // .into_iter() - // .enumerate() - // .map(|(i, X)| AllocatedNum::alloc_infallible(cs.namespace(|| format!("alloc X[{i}]")), || X)) - // .collect(); - // let W = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc W"), || W); - // let E = AllocatedHashedCommitment::alloc_infallible(cs.namespace(|| "alloc E"), || E); - // - // Self { - // u: BigNat::alloc_from_nat(), - // X: vec![], - // W: (), - // E: (), - // } - } - - pub fn to_native(&self) -> Result, SynthesisError> { - todo!() - // let u = self - // .u - // 
.get_value() - // .ok_or(SynthesisError::AssignmentMissing)?; - // let X = self - // .X - // .iter() - // .map(|x| x.get_value().ok_or(SynthesisError::AssignmentMissing)) - // .collect::, _>>()?; - // let W = self.W.to_native()?; - // let E = self.W.to_native()?; - // - // Ok(RelaxedR1CSInstance { u, X, W, E }) - } -} - -impl AllocatedSecondaryFoldProof -where - E1: Engine, - E2: Engine, -{ - pub fn alloc_infallible(mut cs: CS, fold_proof: FP) -> Self - where - CS: ConstraintSystem, - FP: FnOnce() -> SecondaryFoldProof, - { - let SecondaryFoldProof { W, T } = fold_proof(); - let W = AllocatedPoint::alloc(cs.namespace(|| "alloc W"), Some(W.to_coordinates())).unwrap(); - let T = AllocatedPoint::alloc(cs.namespace(|| "alloc T"), Some(T.to_coordinates())).unwrap(); - - Self { W, T } - } -} - -impl AllocatedSecondaryMergeProof -where - E1: Engine, - E2: Engine, -{ - pub fn alloc_infallible(mut cs: CS, merge_proof: FP) -> Self - where - CS: ConstraintSystem, - FP: FnOnce() -> SecondaryMergeProof, - { - let SecondaryMergeProof { T } = merge_proof(); - let T = AllocatedPoint::alloc(cs.namespace(|| "alloc T"), Some(T.to_coordinates())).unwrap(); - - Self { T } - } -} diff --git a/src/parafold/nifs_secondary/mod.rs b/src/parafold/nifs_secondary/mod.rs deleted file mode 100644 index d77f849f8..000000000 --- a/src/parafold/nifs_secondary/mod.rs +++ /dev/null @@ -1,41 +0,0 @@ -use crate::gadgets::nonnative::bignat::BigNat; -use crate::parafold::ecc::AllocatedPoint; -use crate::traits::Engine; -use crate::Commitment; - -pub mod circuit; -pub mod circuit_alloc; -pub mod prover; - -#[derive(Debug, Clone)] -pub struct AllocatedSecondaryRelaxedR1CSInstance { - pub u: BigNat, - pub X: Vec>, - pub W: AllocatedPoint, - pub E: AllocatedPoint, - // q: BigNat, // = E2::Base::MODULUS -} - -/// A proof for folding a statement X of a circuit C into a Relaxed-R1CS circuit for the same circuit C -#[derive(Debug, Clone, Default)] -pub struct SecondaryFoldProof { - W: Commitment, - T: Commitment, -} - -#[derive(Debug, Clone)] -pub struct AllocatedSecondaryFoldProof { - W: AllocatedPoint, - T: AllocatedPoint, -} - -/// A proof for merging two valid Relaxed-R1CS accumulators for the same circuit C -#[derive(Debug, Clone)] -pub struct SecondaryMergeProof { - T: Commitment, -} - -#[derive(Debug, Clone)] -pub struct AllocatedSecondaryMergeProof { - pub T: AllocatedPoint, -} diff --git a/src/parafold/nifs_secondary/prover.rs b/src/parafold/nifs_secondary/prover.rs deleted file mode 100644 index d9769cd71..000000000 --- a/src/parafold/nifs_secondary/prover.rs +++ /dev/null @@ -1,17 +0,0 @@ -use crate::provider::pedersen::Commitment; -use crate::traits::Engine; - -#[derive(Debug, Clone)] -pub struct SecondaryRelaxedR1CS { - instance: SecondaryRelaxedR1CSInstance, - W: Vec, - E: Vec, -} - -#[derive(Debug, Clone, PartialEq)] -pub struct SecondaryRelaxedR1CSInstance { - pub u: E2::Scalar, - pub X: Vec, - pub W: Commitment, - pub E: Commitment, -} diff --git a/src/parafold/nivc/circuit.rs b/src/parafold/nivc/circuit.rs index da8839b65..3c3de6950 100644 --- a/src/parafold/nivc/circuit.rs +++ b/src/parafold/nivc/circuit.rs @@ -1,305 +1,468 @@ -use bellpepper_core::boolean::AllocatedBit; +use bellpepper_core::boolean::{AllocatedBit, Boolean}; use bellpepper_core::num::AllocatedNum; -use bellpepper_core::{ConstraintSystem, SynthesisError}; +use bellpepper_core::{ConstraintSystem, LinearCombination, SynthesisError}; use ff::{Field, PrimeField}; -use itertools::zip_eq; +use itertools::{chain, zip_eq, Itertools}; -use 
crate::gadgets::utils::{alloc_num_equals, alloc_zero}; -use crate::parafold::cycle_fold::AllocatedScalarMulAccumulator; -use crate::parafold::nifs_primary::AllocatedRelaxedR1CSInstance; -use crate::parafold::nivc::hash::{AllocatedNIVCHasher, NIVCHasher}; +use crate::gadgets::utils::{alloc_num_equals, alloc_zero, conditionally_select}; +use crate::parafold::cycle_fold::circuit::AllocatedScalarMulAccumulator; +use crate::parafold::nifs::circuit::AllocatedRelaxedR1CSInstance; +use crate::parafold::nifs::circuit_secondary::AllocatedSecondaryRelaxedR1CSInstance; +use crate::parafold::nifs::{FoldProof, RelaxedR1CSInstance}; use crate::parafold::nivc::{ - AllocatedNIVCIO, AllocatedNIVCMergeProof, AllocatedNIVCState, AllocatedNIVCUpdateProof, - NIVCMergeProof, NIVCStateInstance, NIVCUpdateProof, + AllocatedNIVCIO, NIVCMergeProof, NIVCStateInstance, NIVCUpdateProof, NIVCIO, }; use crate::parafold::transcript::circuit::AllocatedTranscript; +use crate::parafold::transcript::TranscriptConstants; use crate::traits::circuit_supernova::EnforcingStepCircuit; use crate::traits::Engine; +/// A representation of a NIVC state, where `io` represents the computations inputs and outputs, +/// and the `accs` are the accumulators for each step function that was used to produce this result. +#[derive(Debug, Clone)] +pub struct AllocatedNIVCState { + io: AllocatedNIVCIO, + accs_hash: Vec>, + acc_cf: AllocatedSecondaryRelaxedR1CSInstance, +} + impl AllocatedNIVCState where E1: Engine, E2: Engine, { - /// Compute the hash of the parts of the state - fn hash( - &self, - mut cs: CS, - _hasher: &AllocatedNIVCHasher, - ) -> Result, SynthesisError> - where - CS: ConstraintSystem, - { - // FIXME: Real hash - Ok(AllocatedNum::alloc_infallible( - cs.namespace(|| "alloc hash"), - || E1::Scalar::ONE, - )) - } - /// Loads a previously proved state from a proof of its correctness. /// /// # Details /// /// - fn from_proof( + pub fn from_proof( mut cs: CS, - hasher: &NIVCHasher, - proof: AllocatedNIVCUpdateProof, - transcript: &mut AllocatedTranscript, - ) -> Result< - ( - AllocatedNIVCIO, - Vec>, - AllocatedScalarMulAccumulator, - AllocatedNum, - ), - SynthesisError, - > + ro_consts: &TranscriptConstants, + proof: NIVCUpdateProof, + ) -> Result<(Self, AllocatedTranscript), SynthesisError> where CS: ConstraintSystem, { - // `state_prev` is the output of the previous iteration, that was produced by the circuit - // at index `index_prev`, where the inputs were `h_L, h_R`. - // `fold_proof` proves this computation, but also includes auxiliary proof data to update the accumulators - // in `state_prev`. 
- let AllocatedNIVCUpdateProof { - state: state_prev, - index: index_prev, - fold_proof, + let NIVCUpdateProof { + transcript_init, + state, + acc_prev, + index_prev, + nifs_fold_proof, + sm_fold_proofs, } = proof; - // The actual public output of the circuit is the hash of `state_prev`, - // so we recompute it to derive the full R1CS IO `X_prev` - let h_prev = state_prev.hash(cs.namespace(|| "hash_prev"), hasher)?; + // Initialize transcript with the state of the transcript in the previous iteration + let (mut transcript, transcript_init) = AllocatedTranscript::new_init( + cs.namespace(|| "init transcript"), + transcript_init, + ro_consts.clone(), + ); - let AllocatedNIVCState { - hash_input: [h_L, h_R], - io, - accs: accs_prev, - mut acc_sm, - } = state_prev; + // Load the initial state from the proof, adding each field to the transcript + let mut state = Self::alloc_transcript(cs.namespace(|| "alloc state"), state, &mut transcript); - // Handle base case, which is active when `h_L == h_R == 0`. - { + // Define the base case as transcript_init == 0 + let is_base_case: Boolean = { let zero = alloc_zero(cs.namespace(|| "alloc zero")); - let is_init = { - let h_L_is_zero = alloc_num_equals(cs.namespace(|| "h_L == 0"), &h_L, &zero)?; - let h_R_is_zero = alloc_num_equals(cs.namespace(|| "h_R == 0"), &h_R, &zero)?; - AllocatedBit::and( - cs.namespace(|| "is_init = (h_L == 0) & (h_R == 0)"), - &h_L_is_zero, - &h_R_is_zero, - )? - }; - - // We only need to enforce that the NIVC IO is trivial. - // We do not need to check that `accs` and `acc_sm` are trivial, the only requirement is that they are - // valid RelaxedR1CS accumulators. In practice, we do actually supply trivial accumulators. - io.enforce_trivial(cs.namespace(|| "is_init => (io.in == io.out)"), &is_init); + alloc_num_equals( + cs.namespace(|| "transcript_init == 0"), + &transcript_init, + &zero, + )? 
} - - // Manually add the R1CS IO to the transcript to bind the inputs - let X_prev = vec![h_prev.clone()]; - for x_prev in &X_prev { - transcript.absorb(x_prev); - } - - // FIXME: Use selector - // let index = io_new.program_counter(); - // let selector = AllocatedSelector::new(index, accs_curr.len()); - // let acc_curr = selector.get(accs) - let mut acc = accs_prev[index_prev].clone(); - - acc.fold( - cs.namespace(|| "fold"), - X_prev, + .into(); + + // Enforce base case on loaded state + state.enforce_base_case(cs.namespace(|| "base case"), &is_base_case); + + // Initialize scalar mul accumulator for folding + let mut acc_sm = AllocatedScalarMulAccumulator::new(ro_consts.clone()); + + state.update_accs( + cs.namespace(|| "update accs"), + ro_consts, + transcript_init, + acc_prev, + index_prev, + nifs_fold_proof, + &is_base_case, &mut acc_sm, - fold_proof, - transcript, + &mut transcript, + )?; + + // Prove all scalar multiplication by updating the secondary curve accumulator + state.acc_cf = acc_sm.finalize( + cs.namespace(|| "finalize acc_sm"), + state.acc_cf, + sm_fold_proofs, + &mut transcript, )?; - // let accs_next = selector.update(acc_curr); - let mut accs_next = accs_prev.clone(); - accs_next[index_prev] = acc; + state.acc_cf = state + .acc_cf + .select_default(cs.namespace(|| "enforce trivial acc_cf"), &is_base_case)?; - Ok((io, accs_next, acc_sm, h_prev)) + Ok((state, transcript)) } - pub fn new_step( + pub fn update_io( + &mut self, mut cs: CS, - hasher: &NIVCHasher, - proof: NIVCUpdateProof, - step_circuit: SF, - ) -> Result, SynthesisError> + step_circuit: &SF, + ) -> Result, SynthesisError> where CS: ConstraintSystem, SF: EnforcingStepCircuit, { - let mut transcript = AllocatedTranscript::new(); - let proof = - AllocatedNIVCUpdateProof::alloc_infallible(cs.namespace(|| "alloc proof"), || proof); - // Fold proof for previous state - let (io_prev, accs_next, acc_sm_next, hash_prev) = Self::from_proof( - cs.namespace(|| "verify self"), - hasher, - proof, - &mut transcript, - )?; + // Run the step circuit + let cs_step = &mut cs.namespace(|| "synthesize"); - let AllocatedNIVCIO { - pc_in: pc_init, - z_in: z_init, - pc_out: pc_prev, - z_out: z_prev, - } = io_prev; + let (pc_curr, z_curr) = (Some(&self.io.pc_out), self.io.z_out.as_slice()); - // Run the step circuit - let (pc_next, z_next) = { - let cs_step = &mut cs.namespace(|| "synthesize"); + let (pc_next, z_next) = step_circuit.synthesize(cs_step, pc_curr, z_curr)?; + + self.io.pc_out = pc_next.ok_or(SynthesisError::AssignmentMissing)?; + self.io.z_out = z_next; - let (pc_next, z_next) = step_circuit.synthesize(cs_step, Some(&pc_prev), &z_prev)?; - let pc_next = pc_next.ok_or(SynthesisError::AssignmentMissing)?; - (pc_next, z_next) + self.io.to_native() + } + + pub fn merge( + mut cs: CS, + self_L: Self, + self_R: Self, + ro_consts: &TranscriptConstants, + proof: NIVCMergeProof, + transcript: &mut AllocatedTranscript, + ) -> Result<(Self, NIVCIO), SynthesisError> + where + CS: ConstraintSystem, + { + let mut acc_sm = AllocatedScalarMulAccumulator::new(ro_consts.clone()); + + let Self { + io: io_L, + accs_hash: accs_hash_L, + acc_cf: acc_cf_L, + } = self_L; + let Self { + io: io_R, + accs_hash: accs_hash_R, + acc_cf: acc_cf_R, + } = self_R; + + let io = AllocatedNIVCIO::merge(cs.namespace(|| "io merge"), io_L, io_R); + + // Load the preimages of the accumulators in each state + let (accs_L, accs_R) = { + let accs_L = Self::load_accs( + cs.namespace(|| "load accs_R"), + proof.accs_L, + accs_hash_L, + ro_consts, + )?; + let 
accs_R = Self::load_accs( + cs.namespace(|| "load accs_R"), + proof.accs_R, + accs_hash_R, + ro_consts, + )?; + (accs_L, accs_R) }; - // Set the new IO state - let io_next = AllocatedNIVCIO { - pc_in: pc_init, - z_in: z_init, - pc_out: pc_next, - z_out: z_next, + // Merge the two lists of accumulators and return their hashes + let accs_hash = { + let accs = AllocatedRelaxedR1CSInstance::merge_many( + cs.namespace(|| "accs"), + accs_L, + accs_R, + &mut acc_sm, + proof.nivc_merge_proof, + transcript, + )?; + + accs + .into_iter() + .map(|acc| acc.hash(cs.namespace(|| "hash acc"), ro_consts)) + .collect::, _>>()? }; - // Define output state - let nivc_next = AllocatedNIVCState { - hash_input: [hash_prev.clone(), hash_prev], - io: io_next, - accs: accs_next, - acc_sm: acc_sm_next, + // Merge the secondary curve accumulators + let acc_cf = AllocatedSecondaryRelaxedR1CSInstance::merge( + cs.namespace(|| "merge acc_cf"), + acc_cf_L, + acc_cf_R, + proof.cf_merge_proof, + transcript, + )?; + + // Prove all scalar multiplications by folding the result into the secondary curve accumulator + let acc_cf = acc_sm.finalize( + cs.namespace(|| "acc_sm finalize"), + acc_cf, + proof.sm_fold_proofs, + transcript, + )?; + let state = Self { + io, + accs_hash, + acc_cf, }; + let io = state.io.to_native()?; - // For succinctness, we only output the hash of the next state - let hash_next = nivc_next.hash(cs.namespace(|| "hash nivc_next"), hasher)?; + Ok((state, io)) + } - // To ensure both step and merge circuits have the same IO, we inputize the previous output twice - hash_next.inputize(cs.namespace(|| "inputize hash_next"))?; + pub fn inputize(&self, mut cs: CS) -> Result<(), SynthesisError> + where + CS: ConstraintSystem, + { + for x in self.as_preimage() { + x.inputize(cs.namespace(|| "inputize"))? + } + Ok(()) + } + + fn alloc_transcript( + mut cs: CS, + state: NIVCStateInstance, + transcript: &mut AllocatedTranscript, + ) -> Self + where + CS: ConstraintSystem, + { + let NIVCStateInstance { + io, + accs_hash, + acc_cf, + } = state; + + let io = AllocatedNIVCIO::alloc_transcript(cs.namespace(|| "alloc io"), io, transcript); + + let accs_hash = accs_hash + .into_iter() + .map(|acc_hash| { + let acc_hash = + AllocatedNum::alloc_infallible(cs.namespace(|| "alloc acc_hash"), || acc_hash); + transcript.absorb([acc_hash.clone()]); + acc_hash + }) + .collect::>(); + + let acc_cf = AllocatedSecondaryRelaxedR1CSInstance::alloc_transcript( + cs.namespace(|| "alloc acc_cf"), + acc_cf, + transcript, + ); - nivc_next.to_native() + Self { + io, + accs_hash, + acc_cf, + } } - /// Circuit - pub fn new_merge( + fn enforce_base_case(&self, mut cs: CS, is_base_case: &Boolean) + where + CS: ConstraintSystem, + { + // We only need to enforce that the NIVC IO is trivial. + // We do not need to check that `accs` and `acc_sm` are trivial, the only requirement is that they are + // valid RelaxedR1CS accumulators. In practice, we do actually supply trivial accumulators. 
+ self.io.enforce_trivial( + cs.namespace(|| "is_init => (io.in == io.out)"), + is_base_case, + ); + + self.acc_cf.enforce_trivial( + cs.namespace(|| "is_init => acc_cf.is_trivial"), + is_base_case, + ); + } + + fn update_accs( + &mut self, mut cs: CS, - hasher: &NIVCHasher, - proof: NIVCMergeProof, - ) -> Result, SynthesisError> + ro_consts: &TranscriptConstants, + transcript_init: AllocatedNum, + acc_prev: RelaxedR1CSInstance, + index_prev: Option, + nifs_fold_proof: FoldProof, + is_base_case: &Boolean, + acc_sm: &mut AllocatedScalarMulAccumulator, + transcript: &mut AllocatedTranscript, + ) -> Result<(), SynthesisError> where CS: ConstraintSystem, { - let mut transcript = AllocatedTranscript::new(); - - let proof = AllocatedNIVCMergeProof::alloc_infallible(cs.namespace(|| "alloc proof"), || proof); - let AllocatedNIVCMergeProof { - proof_L, - proof_R, - sm_merge_proof, - nivc_merge_proofs, - } = proof; + let (acc_prev_hash, acc_curr_hash) = { + // Load pre-image of accumulator to be updated + let acc_prev = + AllocatedRelaxedR1CSInstance::alloc(cs.namespace(|| "alloc acc_prev"), acc_prev, ro_consts); - // - let (io_L_prev, accs_L_next, acc_sm_L_next, hash_L_prev) = Self::from_proof( - cs.namespace(|| "verify proof_L"), - hasher, - proof_L, - &mut transcript, - )?; - let (io_R_prev, accs_R_next, acc_sm_R_next, hash_R_prev) = Self::from_proof( - cs.namespace(|| "verify proof_R"), - hasher, - proof_R, - &mut transcript, - )?; + // Compute its hash + let acc_prev_hash = acc_prev.hash(cs.namespace(|| "hash acc_prev"), ro_consts)?; - let mut acc_sm_next = AllocatedScalarMulAccumulator::merge( - cs.namespace(|| "merge acc_sm_L acc_sm_R"), - acc_sm_L_next, - acc_sm_R_next, - sm_merge_proof, - &mut transcript, - )?; + // Set the R1CS IO as the transcript init followed by the state + let X_prev = chain![[transcript_init], self.as_preimage()].collect::>(); - // merge the accumulators from both states. 
- let accs_next = AllocatedRelaxedR1CSInstance::merge_many( - cs.namespace(|| "merge many"), - accs_L_next, - accs_R_next, - &mut acc_sm_next, - nivc_merge_proofs, - &mut transcript, - )?; + let acc_curr = acc_prev.fold( + cs.namespace(|| "fold"), + X_prev, + acc_sm, + nifs_fold_proof, + transcript, + )?; - let io_next = io_L_prev.merge(cs.namespace(|| "merge io"), io_R_prev)?; + let acc_curr_hash = acc_curr.hash(cs.namespace(|| "hash acc_curr"), ro_consts)?; - let nivc_next = Self { - hash_input: [hash_L_prev, hash_R_prev], - io: io_next, - accs: accs_next, - acc_sm: acc_sm_next, + (acc_prev_hash, acc_curr_hash) }; - let hash_next = nivc_next.hash(cs.namespace(|| "hash nivc_next"), hasher)?; - hash_next.inputize(cs.namespace(|| "inputize hash_next"))?; + // Create selector for acc_prev_hash and ensure it is contained in accs_hash + let accs_hash_selector = { + let bits = self + .accs_hash + .iter() + .enumerate() + .map(|(index, acc_hash)| { + // Allocate a bit which + let bit = AllocatedBit::alloc(cs.namespace(|| "alloc selector"), { + let bit = if let Some(index_prev) = index_prev { + index_prev == index + } else { + false + }; + Some(bit) + }) + .unwrap(); + + // Ensure acc_hash[index_prev] = acc_prev_hash + cs.enforce( + || "bit * (acc_hash - acc_prev_hash) = 0", + |lc| lc + bit.get_variable(), + |lc| lc + acc_hash.get_variable() - acc_prev_hash.get_variable(), + |lc| lc, + ); + + bit + }) + .collect::>(); + + let lc_sum = bits + .iter() + .fold(LinearCombination::zero(), |lc, bit| lc + bit.get_variable()); + + // Ensure only 1 selection bit is true, except in the base case where all bits are 0 + cs.enforce( + || "is_base.not = ∑_i bits[i]", + |lc| lc, + |lc| lc, + |_| is_base_case.not().lc(CS::one(), E1::Scalar::ONE) - &lc_sum, + ); + + bits + }; + + // Update hashes of accumulators in state + self + .accs_hash + .iter_mut() + .zip_eq(accs_hash_selector) + .for_each(|(acc_hash, bit)| { + *acc_hash = conditionally_select( + cs.namespace(|| "accs_hash_curr"), + &acc_curr_hash, + acc_hash, + &Boolean::Is(bit), + ) + .unwrap(); + }); + Ok(()) + } + + fn as_preimage(&self) -> impl IntoIterator> + '_ { + chain![ + self.io.as_preimage(), + self.accs_hash.iter().cloned(), + self.acc_cf.as_preimage() + ] + } - nivc_next.to_native() + fn load_accs( + mut cs: CS, + accs_native: Vec>, + accs_hash: Vec>, + ro_consts: &TranscriptConstants, + ) -> Result>, SynthesisError> + where + CS: ConstraintSystem, + { + zip_eq(accs_native, accs_hash) + .map( + |(acc_native, acc_hash): (RelaxedR1CSInstance, AllocatedNum)| { + let acc = AllocatedRelaxedR1CSInstance::alloc( + cs.namespace(|| "alloc acc"), + acc_native, + ro_consts, + ); + let acc_hash_real = acc.hash(cs.namespace(|| "hash acc"), ro_consts)?; + + // Ensure the loaded accumulator's hash matches the one from the state + cs.enforce( + || "acc_hash_real == acc_hash", + |lc| lc, + |lc| lc, + |lc| lc + acc_hash_real.get_variable() - acc_hash.get_variable(), + ); + Ok::<_, SynthesisError>(acc) + }, + ) + .collect::, _>>() } } impl AllocatedNIVCIO { - pub fn merge(self, mut cs: CS, other: Self) -> Result + pub fn merge(mut cs: CS, io_L: Self, io_R: Self) -> Self where CS: ConstraintSystem, { - // self.pc_out = other.pc_in + // io_L.pc_out = io_R.pc_in cs.enforce( - || "self.pc_out = other.pc_in", + || "io_L.pc_out = io_R.pc_in", |lc| lc, |lc| lc, - |lc| lc + self.pc_out.get_variable() - other.pc_in.get_variable(), + |lc| lc + io_L.pc_out.get_variable() - io_R.pc_in.get_variable(), ); - // self.z_out = other.z_in - zip_eq(&self.z_out, &other.z_in) + // 
io_L.z_out = io_R.z_in + zip_eq(&io_L.z_out, &io_R.z_in) .enumerate() .for_each(|(i, (z_L_i, z_R_i))| { cs.enforce( - || format!("self.z_out[{i}] = other.z_in[{i}]"), + || format!("io_L.z_out[{i}] = io_R.z_in[{i}]"), |lc| lc, |lc| lc, |lc| lc + z_L_i.get_variable() - z_R_i.get_variable(), ); }); - Ok(Self { - pc_in: self.pc_in, - z_in: self.z_in, - pc_out: other.pc_out, - z_out: other.z_out, - }) + Self { + pc_in: io_L.pc_in, + z_in: io_L.z_in, + pc_out: io_R.pc_out, + z_out: io_R.z_out, + } } - pub fn enforce_trivial(&self, mut cs: CS, is_trivial: &AllocatedBit) + pub fn enforce_trivial(&self, mut cs: CS, is_trivial: &Boolean) where CS: ConstraintSystem, { - // (is_trivial) * (pc_in - pc_out) = 0 - cs.enforce( - || "(is_trivial) * (pc_in - pc_out) = 0", - |lc| lc + is_trivial.get_variable(), - |lc| lc + self.pc_in.get_variable() - self.pc_out.get_variable(), - |lc| lc, - ); + let is_trivial = is_trivial.lc(CS::one(), F::ONE); // (is_trivial) * (z_in - z_out) = 0 zip_eq(&self.z_in, &self.z_out) @@ -307,10 +470,96 @@ impl AllocatedNIVCIO { .for_each(|(i, (z_in_i, z_out_i))| { cs.enforce( || format!("(is_trivial) * (z_in[{i}] - z_out[{i}]) = 0"), - |lc| lc + is_trivial.get_variable(), + |_| is_trivial.clone(), |lc| lc + z_in_i.get_variable() - z_out_i.get_variable(), |lc| lc, ); }); + + // (is_trivial) * (pc_in - pc_out) = 0 + cs.enforce( + || "(is_trivial) * (pc_in - pc_out) = 0", + |_| is_trivial, + |lc| lc + self.pc_in.get_variable() - self.pc_out.get_variable(), + |lc| lc, + ); + } + + pub fn as_preimage(&self) -> impl IntoIterator> + '_ { + chain![ + [self.pc_in.clone()], + self.z_in.iter().cloned(), + [self.pc_out.clone()], + self.z_out.iter().cloned() + ] + } + + pub fn alloc_transcript>( + mut cs: CS, + state: NIVCIO, + transcript: &mut AllocatedTranscript, + ) -> Self + where + CS: ConstraintSystem, + { + let NIVCIO { + pc_in, + z_in, + pc_out, + z_out, + } = state; + + let pc_in = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc pc_in"), || pc_in); + let z_in = z_in + .into_iter() + .enumerate() + .map(|(i, z)| { + AllocatedNum::alloc_infallible(cs.namespace(|| format!("alloc z_in[{i}]")), || z) + }) + .collect(); + let pc_out = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc pc_out"), || pc_out); + let z_out = z_out + .into_iter() + .enumerate() + .map(|(i, z)| { + AllocatedNum::alloc_infallible(cs.namespace(|| format!("alloc z_out[{i}]")), || z) + }) + .collect(); + + let io = Self { + pc_in, + z_in, + pc_out, + z_out, + }; + transcript.absorb(io.as_preimage()); + io + } + + pub fn to_native(&self) -> Result, SynthesisError> { + let pc_in = self + .pc_in + .get_value() + .ok_or(SynthesisError::AssignmentMissing)?; + let z_in = self + .z_in + .iter() + .map(|z| z.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect::, _>>()?; + let pc_out = self + .pc_out + .get_value() + .ok_or(SynthesisError::AssignmentMissing)?; + let z_out = self + .z_out + .iter() + .map(|z| z.get_value().ok_or(SynthesisError::AssignmentMissing)) + .collect::, _>>()?; + Ok(NIVCIO { + pc_in, + z_in, + pc_out, + z_out, + }) } } diff --git a/src/parafold/nivc/circuit_alloc.rs b/src/parafold/nivc/circuit_alloc.rs deleted file mode 100644 index 1a61375b4..000000000 --- a/src/parafold/nivc/circuit_alloc.rs +++ /dev/null @@ -1,169 +0,0 @@ -use bellpepper_core::num::AllocatedNum; -use bellpepper_core::{ConstraintSystem, SynthesisError}; -use ff::PrimeField; - -use crate::parafold::cycle_fold::AllocatedScalarMulAccumulator; -use crate::parafold::nifs_primary::{AllocatedFoldProof, 
AllocatedRelaxedR1CSInstance}; -use crate::parafold::nivc::{AllocatedNIVCIO, AllocatedNIVCState, AllocatedNIVCUpdateProof}; -use crate::parafold::nivc::{NIVCStateInstance, NIVCUpdateProof, NIVCIO}; -use crate::traits::Engine; - -impl AllocatedNIVCIO { - pub fn alloc_infallible(mut cs: CS, state: N) -> Self - where - CS: ConstraintSystem, - N: FnOnce() -> NIVCIO, - { - let NIVCIO { - pc_in, - z_in, - pc_out, - z_out, - } = state(); - - let pc_in = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc pc_in"), || pc_in); - let z_in = z_in - .into_iter() - .enumerate() - .map(|(i, z)| { - AllocatedNum::alloc_infallible(cs.namespace(|| format!("alloc z_in[{i}]")), || z) - }) - .collect(); - let pc_out = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc pc_out"), || pc_out); - let z_out = z_out - .into_iter() - .enumerate() - .map(|(i, z)| { - AllocatedNum::alloc_infallible(cs.namespace(|| format!("alloc z_out[{i}]")), || z) - }) - .collect(); - - Self { - pc_in, - z_in, - pc_out, - z_out, - } - } - - pub fn to_native(&self) -> Result, SynthesisError> { - let pc_in = self - .pc_in - .get_value() - .ok_or(SynthesisError::AssignmentMissing)?; - let pc_out = self - .pc_out - .get_value() - .ok_or(SynthesisError::AssignmentMissing)?; - let z_in = self - .z_in - .iter() - .map(|z| z.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::, _>>()?; - let z_out = self - .z_out - .iter() - .map(|z| z.get_value().ok_or(SynthesisError::AssignmentMissing)) - .collect::, _>>()?; - - Ok(NIVCIO { - pc_in, - z_in, - pc_out, - z_out, - }) - } -} - -impl AllocatedNIVCState -where - E1: Engine, - E2: Engine, -{ - pub fn alloc_infallible(mut cs: CS, state: FI) -> Self - where - CS: ConstraintSystem, - FI: FnOnce() -> NIVCStateInstance, - { - let NIVCStateInstance:: { - hash_input: [h_L, h_R], - io, - accs, - acc_sm, - .. 
- } = state(); - - let h_L = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc h_L"), || h_L); - let h_R = AllocatedNum::alloc_infallible(cs.namespace(|| "alloc h_R"), || h_R); - let io = AllocatedNIVCIO::alloc_infallible(cs.namespace(|| "alloc io"), || io); - let accs = accs - .into_iter() - .enumerate() - .map(|(i, acc)| { - AllocatedRelaxedR1CSInstance::alloc_infallible( - cs.namespace(|| format!("alloc acc[{i}]")), - || acc, - ) - }) - .collect(); - let acc_sm = - AllocatedScalarMulAccumulator::::alloc_infallible(cs.namespace(|| "alloc W"), || { - acc_sm - }); - - Self { - hash_input: [h_L, h_R], - io, - accs, - acc_sm, - } - } - - pub fn to_native(&self) -> Result, SynthesisError> { - let [h_L, h_R] = &self.hash_input; - let h_L = h_L.get_value().ok_or(SynthesisError::AssignmentMissing)?; - let h_R = h_R.get_value().ok_or(SynthesisError::AssignmentMissing)?; - let io = self.io.to_native()?; - let accs = self - .accs - .iter() - .map(|acc| acc.to_native()) - .collect::, _>>()?; - let acc_sm = self.acc_sm.to_native()?; - - Ok(NIVCStateInstance { - hash_input: [h_L, h_R], - io, - accs, - acc_sm, - }) - } -} - -impl AllocatedNIVCUpdateProof -where - E1: Engine, - E2: Engine, -{ - pub fn alloc_infallible(mut cs: CS, proof: FP) -> Self - where - CS: ConstraintSystem, - FP: FnOnce() -> NIVCUpdateProof, - { - let NIVCUpdateProof { - state, - index, - nifs_fold_proof, - } = proof(); - - let state = AllocatedNIVCState::alloc_infallible(cs.namespace(|| "alloc state"), || state); - let fold_proof = - AllocatedFoldProof::alloc_infallible(cs.namespace(|| "alloc fold_proof"), || nifs_fold_proof); - - Self { - state, - index, - fold_proof, - } - } -} diff --git a/src/parafold/nivc/hash.rs b/src/parafold/nivc/hash.rs deleted file mode 100644 index 1951c84e1..000000000 --- a/src/parafold/nivc/hash.rs +++ /dev/null @@ -1,26 +0,0 @@ -use crate::traits::{Engine, ROConstants, ROConstantsCircuit}; -use bellpepper_core::num::AllocatedNum; - -pub struct NIVCHasher { - ro_consts: ROConstants, - pp: E::Scalar, - arity: usize, -} - -pub struct AllocatedNIVCHasher { - ro_consts: ROConstantsCircuit, - pp: AllocatedNum, - arity: usize, -} - -impl NIVCHasher { - pub fn new(ro_consts: ROConstantsCircuit, pp: AllocatedNum, arity: usize) -> Self { - Self { - ro_consts, - pp, - arity, - } - } -} - -impl AllocatedNIVCHasher {} diff --git a/src/parafold/nivc/mod.rs b/src/parafold/nivc/mod.rs index deffd5a47..6e9b54d54 100644 --- a/src/parafold/nivc/mod.rs +++ b/src/parafold/nivc/mod.rs @@ -1,21 +1,13 @@ -use crate::parafold::cycle_fold::{ - AllocatedScalarMulAccumulator, AllocatedScalarMulMergeProof, ScalarMulAccumulatorInstance, - ScalarMulMergeProof, -}; -use crate::parafold::nifs_primary::{ - AllocatedFoldProof, AllocatedMergeProof, AllocatedRelaxedR1CSInstance, FoldProof, MergeProof, - RelaxedR1CSInstance, -}; -use crate::traits::Engine; use bellpepper_core::num::AllocatedNum; use ff::PrimeField; +use crate::parafold::nifs::{FoldProof, MergeProof, RelaxedR1CSInstance}; +use crate::traits::Engine; + pub mod circuit; -mod circuit_alloc; -pub mod hash; pub mod prover; -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct NIVCIO { pc_in: F, z_in: Vec, @@ -23,6 +15,13 @@ pub struct NIVCIO { z_out: Vec, } +#[derive(Clone, Debug)] +pub struct NIVCStateInstance { + io: NIVCIO, + accs_hash: Vec, + acc_cf: RelaxedR1CSInstance, +} + /// The input and output of a NIVC computation over one or more steps. 
/// /// # Note @@ -35,57 +34,26 @@ pub struct AllocatedNIVCIO { z_out: Vec>, } -#[derive(Debug, Clone, PartialEq)] -pub struct NIVCStateInstance { - hash_input: [E1::Scalar; 2], - io: NIVCIO, - accs: Vec>, - acc_sm: ScalarMulAccumulatorInstance, -} - -/// A representation of a NIVC state, where `io` represents the computations inputs and outputs, -/// and the `accs` are the accumulators for each step function that was used to produce this result. -#[derive(Debug, Clone)] -pub struct AllocatedNIVCState { - hash_input: [AllocatedNum; 2], - io: AllocatedNIVCIO, - accs: Vec>, - acc_sm: AllocatedScalarMulAccumulator, -} - #[derive(Debug, Clone)] pub struct NIVCUpdateProof { + transcript_init: E1::Scalar, + state: NIVCStateInstance, - index: usize, - nifs_fold_proof: FoldProof, -} -/// A proved NIVC step for a particular step function. Can be folded into an existing [`AllocatedNIVCState']. -#[derive(Debug, Clone)] -pub struct AllocatedNIVCUpdateProof { - /// Output of the previous step - state: AllocatedNIVCState, - /// Index of the circuits that produced `state` - index: usize, - /// Proof for folding the previous circuit into `state.accs[index_prev]` - fold_proof: AllocatedFoldProof, + acc_prev: RelaxedR1CSInstance, + index_prev: Option, + nifs_fold_proof: FoldProof, + + sm_fold_proofs: [FoldProof; 2], } #[derive(Debug, Clone)] pub struct NIVCMergeProof { - proof_L: NIVCUpdateProof, - proof_R: NIVCUpdateProof, - sm_merge_proof: ScalarMulMergeProof, - nivc_merge_proof: Vec>, -} + accs_L: Vec>, + accs_R: Vec>, + nivc_merge_proof: Vec>, -/// A proved NIVC step for a particular step function. Can be folded into an existing [`AllocatedNIVCState']. -#[derive(Debug, Clone)] -pub struct AllocatedNIVCMergeProof { - proof_L: AllocatedNIVCUpdateProof, - proof_R: AllocatedNIVCUpdateProof, - /// Proof for merging the scalar multiplication accumulators from two different states. - sm_merge_proof: AllocatedScalarMulMergeProof, - /// Proofs for merging each accumulator in [AllocatedNIVCState.accs] from two different states - nivc_merge_proofs: Vec>, + cf_merge_proof: MergeProof, + + sm_fold_proofs: Vec>, } diff --git a/src/parafold/nivc/prover.rs b/src/parafold/nivc/prover.rs index ac0455b2e..b4f826099 100644 --- a/src/parafold/nivc/prover.rs +++ b/src/parafold/nivc/prover.rs @@ -1,24 +1,26 @@ -use ff::PrimeField; +use ff::{Field, PrimeField}; +use itertools::chain; use crate::parafold::cycle_fold::prover::ScalarMulAccumulator; -use crate::parafold::nifs_primary::prover::RelaxedR1CS; -use crate::parafold::nivc::hash::{AllocatedNIVCHasher, NIVCHasher}; +use crate::parafold::nifs::prover::RelaxedR1CS; +use crate::parafold::nifs::{FoldProof, RelaxedR1CSInstance}; use crate::parafold::nivc::{NIVCMergeProof, NIVCStateInstance, NIVCUpdateProof, NIVCIO}; -use crate::parafold::prover::CommitmentKey; use crate::parafold::transcript::prover::Transcript; +use crate::parafold::transcript::TranscriptConstants; use crate::r1cs::R1CSShape; use crate::traits::Engine; +use crate::CommitmentKey; #[derive(Debug)] pub struct NIVCState { + transcript: Transcript, + io: NIVCIO, accs: Vec>, - acc_sm: ScalarMulAccumulator, + acc_cf: RelaxedR1CS, } #[derive(Debug)] -pub struct NIVCUpdateWitness { - // - pub(crate) state: NIVCStateInstance, +pub struct NIVCUpdateWitness { pub(crate) index: usize, pub(crate) W: Vec, } @@ -26,95 +28,249 @@ pub struct NIVCUpdateWitness { impl NIVCState where E1: Engine, - E2: Engine, + E2: Engine, { - pub fn update( + /// Initialize the prover state and create a default proof for the first iteration. 
+ /// + /// # Details + /// + /// In the first iteration, the circuit verifier checks the base-case conditions, but does not update any + /// of the accumulators. To ensure uniformity with the non-base case path, the transcript will be updated + /// in the normal way, albeit with dummy proof data. + /// + /// + pub fn init( + shapes: &[R1CSShape], + shape_cf: &R1CSShape, + ro_consts: &TranscriptConstants, + pc_init: usize, + z_init: Vec, + ) -> (Self, NIVCUpdateProof) { + let transcript_init = E1::Scalar::ZERO; + let mut state = Self { + transcript: Transcript::new_init(transcript_init, ro_consts.clone()), + io: NIVCIO::new(pc_init, z_init), + accs: shapes.iter().map(|shape| RelaxedR1CS::new(shape)).collect(), + acc_cf: RelaxedR1CS::new(shape_cf), + }; + + let state_instance = state.instance(ro_consts); + + let num_io = state_instance.io_size(); + state.transcript.absorb(state_instance.as_preimage()); + let acc_prev = RelaxedR1CSInstance::default(num_io); + + let mut acc_sm = ScalarMulAccumulator::new(ro_consts.clone()); + let nifs_fold_proof = RelaxedR1CS::simulate_fold_primary(&mut acc_sm, &mut state.transcript); + let sm_fold_proofs: [FoldProof; 2] = acc_sm + .simulate_finalize(&mut state.transcript) + .try_into() + .unwrap(); + + let proof = NIVCUpdateProof { + transcript_init, + state: state_instance, + acc_prev, + index_prev: None, + nifs_fold_proof, + sm_fold_proofs, + }; + + (state, proof) + } + + fn update( &mut self, ck: &CommitmentKey, + ck_cf: &CommitmentKey, + ro_consts: &TranscriptConstants, shapes: &[R1CSShape], - hasher: &AllocatedNIVCHasher, - witness_prev: &NIVCUpdateWitness, - transcript: &mut Transcript, + shape_cf: &R1CSShape, + witness_prev: &NIVCUpdateWitness, ) -> NIVCUpdateProof { - let Self { accs, acc_sm } = self; + let mut acc_sm = ScalarMulAccumulator::::new(ro_consts.clone()); + let transcript_init = self.transcript.seal(); + + let state = self.instance(ro_consts); + + let X_prev = chain![[transcript_init], state.as_preimage()].collect(); let NIVCUpdateWitness { - state: instance_prev, index: index_prev, W: W_prev, } = witness_prev; let index_prev = *index_prev; - let hash_prev = instance_prev.hash(hasher); + let acc_prev = self.accs[index_prev].instance().clone(); let shape_prev = &shapes[index_prev]; - // Add the R1CS IO to the transcript - transcript.absorb(&hash_prev); - let X_prev = vec![hash_prev]; - // Fold the proof for the previous iteration into the correct accumulator - let nifs_fold_proof = accs[index_prev].fold(ck, shape_prev, X_prev, W_prev, acc_sm, transcript); + let nifs_fold_proof = self.accs[index_prev].fold_primary( + ck, + shape_prev, + X_prev, + W_prev, + &mut acc_sm, + &mut self.transcript, + ); - let nivc_fold_proof = NIVCUpdateProof { - state: instance_prev.clone(), - index: index_prev, - nifs_fold_proof, - }; + let sm_fold_proofs: [FoldProof; 2] = acc_sm + .finalize(ck_cf, shape_cf, &mut self.acc_cf, &mut self.transcript) + .try_into() + .unwrap(); - nivc_fold_proof + NIVCUpdateProof { + transcript_init, + state, + acc_prev, + index_prev: Some(index_prev), + nifs_fold_proof, + sm_fold_proofs, + } } pub fn merge( ck: &CommitmentKey, + ck_cf: &CommitmentKey, shapes: &[R1CSShape], - mut self_L: Self, + shape_cf: &R1CSShape, + ro_consts: &TranscriptConstants, + self_L: Self, self_R: &Self, - proof_L: NIVCUpdateProof, - proof_R: NIVCUpdateProof, - transcript: &mut Transcript, ) -> (Self, NIVCMergeProof) { - let Self { + transcript: transcript_L, + io: io_L, accs: accs_L, - acc_sm: acc_sm_L, + acc_cf: acc_cf_L, } = self_L; let Self { + 
transcript: transcript_R, + io: io_R, accs: accs_R, - acc_sm: acc_sm_R, + acc_cf: acc_cf_R, } = self_R; - let (mut acc_sm, sm_merge_proof) = ScalarMulAccumulator::merge(acc_sm_L, acc_sm_R, transcript); + let mut acc_sm = ScalarMulAccumulator::new(ro_consts.clone()); + let mut transcript = Transcript::merge(transcript_L, transcript_R); + + let io = NIVCIO::merge(io_L, io_R.clone()); + + let accs_L_instance = accs_L + .iter() + .map(|acc| acc.instance()) + .cloned() + .collect::>(); + let accs_R_instance = accs_R + .iter() + .map(|acc| acc.instance()) + .cloned() + .collect::>(); let (accs, nivc_merge_proof) = - RelaxedR1CS::merge_many(ck, shapes, accs_L, accs_R, &mut acc_sm, transcript); + RelaxedR1CS::merge_many(ck, shapes, accs_L, accs_R, &mut acc_sm, &mut transcript); + + let (mut acc_cf, cf_merge_proof) = + RelaxedR1CS::merge_secondary(ck_cf, shape_cf, acc_cf_L, acc_cf_R, &mut transcript); + + let sm_fold_proofs = acc_sm.finalize(ck_cf, shape_cf, &mut acc_cf, &mut transcript); + + let self_next = Self { + transcript, + io, + accs, + acc_cf, + }; let merge_proof = NIVCMergeProof { - proof_L, - proof_R, - sm_merge_proof, + accs_L: accs_L_instance, + accs_R: accs_R_instance, nivc_merge_proof, + cf_merge_proof, + sm_fold_proofs, }; - let self_next = Self { accs, acc_sm }; (self_next, merge_proof) } + + pub fn instance(&self, ro_consts: &TranscriptConstants) -> NIVCStateInstance { + let accs_hash = self + .accs + .iter() + .map(|acc| acc.instance().hash(ro_consts)) + .collect(); + + NIVCStateInstance { + io: self.io.clone(), + accs_hash, + acc_cf: self.acc_cf.instance().clone(), + } + } } -impl NIVCStateInstance { - /// compute the hash of the state to be passed as public input/output - fn hash(&self, _hasher: &NIVCHasher) -> E1::Scalar { - todo!() +impl NIVCStateInstance +where + E1: Engine, + E2: Engine, +{ + pub fn as_preimage(&self) -> impl IntoIterator + '_ { + chain![ + self.io.as_preimage(), + self.accs_hash.iter().cloned(), + self.acc_cf.as_preimage() + ] + } + + pub fn io_size(&self) -> usize { + [ + 1, // transcript init + self.io.io_size(), // io + self.accs_hash.len(), // accs_hash + self.acc_cf.io_size(), // acc_cf + ] + .into_iter() + .sum() } } impl NIVCIO { - pub fn default(arity: usize) -> Self { + pub fn new(pc_init: usize, z_init: Vec) -> Self { Self { - pc_in: F::ZERO, - z_in: vec![F::default(); arity], - pc_out: F::ZERO, - z_out: vec![F::default(); arity], + pc_in: F::from(pc_init as u64), + z_in: z_init.clone(), + pc_out: F::from(pc_init as u64), + z_out: z_init, } } + + pub fn merge(self_L: Self, self_R: Self) -> Self { + assert_eq!(self_L.pc_out, self_R.pc_in); + assert_eq!(self_L.z_out, self_R.z_in); + Self { + pc_in: self_L.pc_in, + z_in: self_L.z_in, + pc_out: self_R.pc_out, + z_out: self_R.z_out, + } + } + pub fn as_preimage(&self) -> impl IntoIterator + '_ { + chain![ + [self.pc_in], + self.z_in.iter().cloned(), + [self.pc_out], + self.z_out.iter().cloned() + ] + } + + pub fn io_size(&self) -> usize { + [ + 1, // pc_in + self.z_in.len(), // z_in + 1, // pc_out + self.z_out.len(), // z_out + ] + .into_iter() + .sum() + } } diff --git a/src/parafold/prover.rs b/src/parafold/prover.rs index 35bffe4e7..60233b988 100644 --- a/src/parafold/prover.rs +++ b/src/parafold/prover.rs @@ -1,34 +1,15 @@ -use bellpepper_core::ConstraintSystem; - -use crate::bellpepper::solver::SatisfyingAssignment; -use crate::parafold::cycle_fold::HashedCommitment; -use crate::parafold::nivc::hash::NIVCHasher; -use crate::parafold::nivc::prover::{NIVCState, NIVCUpdateWitness}; -use 
crate::parafold::nivc::{AllocatedNIVCState, NIVCUpdateProof}; -use crate::parafold::transcript::prover::Transcript; +use crate::parafold::nivc::prover::NIVCState; +use crate::parafold::nivc::NIVCUpdateProof; +use crate::parafold::transcript::TranscriptConstants; use crate::r1cs::R1CSShape; -use crate::traits::circuit_supernova::StepCircuit; -use crate::traits::commitment::CommitmentEngineTrait; -use crate::Engine; - -pub struct CommitmentKey { - // TODO: ro for secondary circuit to compute the hash of the point - // this should only be a 2-to-1 hash since we are hashing the coordinates - ck: crate::CommitmentKey, -} - -impl CommitmentKey { - pub fn commit(&self, elements: &[E::Scalar]) -> HashedCommitment { - let c = E::CE::commit(&self.ck, elements); - HashedCommitment::new(c) - } -} - -pub struct ProvingKey { +use crate::{CommitmentKey, Engine}; +pub struct ProvingKey { // public params ck: CommitmentKey, - nivc_hasher: NIVCHasher, + ck_cf: CommitmentKey, shapes: Vec>, + shape_cf: R1CSShape, + ro_consts: TranscriptConstants, } pub struct RecursiveSNARK { @@ -40,81 +21,32 @@ pub struct RecursiveSNARK { impl RecursiveSNARK where E1: Engine, - E2: Engine, + E2: Engine, { - // pub fn new(pc_init: usize, shapes: &[R1CSShape]) -> Self { - // let num_circuits = shapes.len(); - // let arity = assert!(pc_init < num_circuits); - // } - - pub fn prove_step>( - mut self, - pk: &ProvingKey, - step_circuit: C, - ) -> Self { - let Self { mut state, proof } = self; - let circuit_index = step_circuit.circuit_index(); - let mut cs = SatisfyingAssignment::::new(); - let state_instance = - AllocatedNIVCState::new_step(&mut cs, &pk.nivc_hasher, proof, step_circuit).unwrap(); - let W = cs.aux_assignment(); - // assert state_instance == state.instance - let witness = NIVCUpdateWitness { - state: state_instance, - index: circuit_index, - W: W.to_vec(), - }; - - let mut transcript = Transcript::new(); - - let proof = state.update( - &pk.ck, - &pk.shapes, - &pk.nivc_hasher, - &witness, - &mut transcript, - ); + pub fn new(pk: &ProvingKey, pc_init: usize, z_init: Vec) -> Self { + let num_circuits = pk.shapes.len(); + assert!(pc_init < num_circuits); + let (state, proof) = NIVCState::init(&pk.shapes, &pk.shape_cf, &pk.ro_consts, pc_init, z_init); Self { state, proof } } - // pub fn merge>( - // pk: &ProvingKey, - // self_L: Self, - // self_R: &Self, + + // pub fn prove_step>( + // &mut self, + // pk: &ProvingKey, + // step_circuit: &C, // ) -> Self { - // let Self { - // state: state_L, - // proof: proof_L, - // } = self_L; - // let Self { - // state: state_R, - // proof: proof_R, - // } = self_R; - // - // let mut transcript = Transcript::new(); - // - // let (state, proof) = NIVCState::merge( - // &pk.ck, - // &pk.shapes, - // state_L, - // state_R, - // proof_L, - // proof_R.clone(), - // &mut transcript, - // ); - // - // let circuit_index = pk.shapes.len(); + // let Self { state, proof } = self; + // let circuit_index = step_circuit.circuit_index(); // let mut cs = SatisfyingAssignment::::new(); - // let state_instance = AllocatedNIVCState::new_merge(&mut cs, &pk.nivc_hasher, proof).unwrap(); + // let io = synthesize_step(&mut cs, &pk.ro_consts, proof, step_circuit).unwrap(); // let W = cs.aux_assignment(); // // assert state_instance == state.instance // let witness = NIVCUpdateWitness { - // state: state_instance, // index: circuit_index, // W: W.to_vec(), // }; // - // let mut transcript = Transcript::new(); // // let proof = state.update( // &pk.ck, @@ -126,6 +58,56 @@ where // // Self { state, proof } // 
} + + // pub fn merge>( + // pk: &ProvingKey, + // self_L: Self, + // self_R: &Self, + // ) -> Self { + // let Self { + // state: state_L, + // proof: proof_L, + // } = self_L; + // let Self { + // state: state_R, + // proof: proof_R, + // } = self_R; + // + // let mut transcript = Transcript::new(); + // + // let (state, proof) = NIVCState::merge( + // &pk.ck, + // &pk.shapes, + // state_L, + // state_R, + // proof_L, + // proof_R.clone(), + // &mut transcript, + // ); + // + // let circuit_index = pk.shapes.len(); + // let mut cs = SatisfyingAssignment::::new(); + // let state_instance = AllocatedNIVCState::new_merge(&mut cs, &pk.nivc_hasher, proof).unwrap(); + // let W = cs.aux_assignment(); + // // assert state_instance == state.instance + // let witness = NIVCUpdateWitness { + // state: state_instance, + // index: circuit_index, + // W: W.to_vec(), + // }; + // + // let mut transcript = Transcript::new(); + // + // let proof = state.update( + // &pk.ck, + // &pk.shapes, + // &pk.nivc_hasher, + // &witness, + // &mut transcript, + // ); + // + // Self { state, proof } + // } } -pub struct CompressedSNARK {} +// pub struct CompressedSNARK {} diff --git a/src/parafold/transcript/circuit.rs b/src/parafold/transcript/circuit.rs index 38ef8bc4f..431858c6b 100644 --- a/src/parafold/transcript/circuit.rs +++ b/src/parafold/transcript/circuit.rs @@ -1,75 +1,118 @@ -use bellpepper_core::boolean::AllocatedBit; +use bellpepper_core::boolean::{AllocatedBit, Boolean}; use bellpepper_core::num::AllocatedNum; use bellpepper_core::{ConstraintSystem, SynthesisError}; -use ff::PrimeField; +use neptune::circuit2::Elt; +use neptune::sponge::api::{IOPattern, SpongeAPI, SpongeOp}; +use neptune::sponge::circuit::SpongeCircuit; +use neptune::sponge::vanilla::Mode::Simplex; +use neptune::sponge::vanilla::SpongeTrait; -use crate::traits::{Engine, ROConstantsCircuit}; +use crate::parafold::transcript::TranscriptConstants; +use crate::traits::Engine; -pub trait TranscriptRepresentable { - fn to_field_vec(&self) -> Vec>; +pub struct AllocatedTranscript { + constants: TranscriptConstants, + state: Vec>, } -impl TranscriptRepresentable for AllocatedNum { - fn to_field_vec(&self) -> Vec> { - vec![self.clone()] - } -} - -pub struct AllocatedTranscript< E1: Engine> { - ro_consts: ROConstantsCircuit, - state: Vec>, -} - -impl AllocatedTranscript -{ - pub fn new(ro_consts: ROConstantsCircuit) -> Self { +impl AllocatedTranscript { + pub fn new(constants: TranscriptConstants) -> Self { Self { - ro_consts, + constants, state: vec![], } } - pub fn absorb(&mut self, element: &T) + pub fn new_init( + mut cs: CS, + init: E::Scalar, + constants: TranscriptConstants, + ) -> (Self, AllocatedNum) where - T: TranscriptRepresentable, + CS: ConstraintSystem, { - self.state.extend(element.to_field_vec()); + let init = AllocatedNum::alloc_infallible(&mut cs, || init); + let init_elt = Elt::Allocated(init.clone()); + ( + Self { + constants, + state: vec![init_elt], + }, + init, + ) } - pub fn squeeze(&mut self, /*mut*/ _cs: CS) -> Result, SynthesisError> + pub fn absorb(&mut self, elements: impl IntoIterator>) { + self.state.extend(elements.into_iter().map(Elt::Allocated)); + } + + pub(crate) fn inputize(&self, mut cs: CS) -> Result<(), SynthesisError> where - CS: ConstraintSystem, + CS: ConstraintSystem, { - todo!() - // let mut ro = E::ROCircuit::new(self.ro_consts.clone(), self.state.len()); - // for e in self.state.drain(..) 
{ - // ro.absorb(&e); - // } - // // FIXME: We only need small challenges when folding secondary circuits - // let output_bits = ro.squeeze(cs.namespace(|| "squeeze"), 128)?; - // let output = le_bits_to_num(cs.namespace(|| "bits to num"), &output_bits)?; - // - // self.state.extend([output.clone()]); - // Ok(output) + assert_eq!(self.state.len(), 1); + let state = self.state[0].ensure_allocated(&mut cs, false)?; + state.inputize(&mut cs) + } + + pub fn squeeze(&mut self, mut cs: CS) -> Result, SynthesisError> + where + CS: ConstraintSystem, + { + let num_absorbs = self.state.len() as u32; + + let pattern = IOPattern(vec![SpongeOp::Absorb(num_absorbs), SpongeOp::Squeeze(1u32)]); + + let acc = &mut cs.namespace(|| "squeeze"); + + let mut sponge = SpongeCircuit::new_with_constants(&self.constants.0, Simplex); + sponge.start(pattern, None, acc); + // sponge.start(pattern, None, &mut cs.namespace(|| "start")); + SpongeAPI::absorb( + &mut sponge, + num_absorbs, + &self.state, + acc, + // &mut cs.namespace(|| "absorb"), + ); + + self.state = SpongeAPI::squeeze(&mut sponge, 1, acc); + // self.state = SpongeAPI::squeeze(&mut sponge, 1, &mut cs.namespace(|| "squeeze")); + sponge.finish(acc).unwrap(); + // sponge.finish(&mut cs.namespace(|| "finish")).unwrap(); + + let hash = self.state[0].ensure_allocated(acc, false)?; + + Ok(hash) } pub fn squeeze_bits( &mut self, - /*mut*/ _cs: CS, - _num_bits: usize, + mut cs: CS, + num_bits: usize, ) -> Result, SynthesisError> where - CS: ConstraintSystem, + CS: ConstraintSystem, { - todo!() - // let mut ro = E::ROCircuit::new(self.ro_consts.clone(), self.state.len()); - // for e in self.state.drain(..) { - // ro.absorb(&e); - // } - // // FIXME: We only need small challenges when folding secondary circuits - // let output_bits = ro.squeeze(cs.namespace(|| "squeeze"), 128)?; - // let output = le_bits_to_num(cs.namespace(|| "bits to num"), &output_bits)?; - // - // self.state.extend([output.clone()]); - // Ok(output) + let hash = self.squeeze(&mut cs)?; + + let bits = hash + .to_bits_le_strict(cs.namespace(|| "hash to bits"))? + .into_iter() + .take(num_bits) + .map(|boolean| match boolean { + Boolean::Is(x) => x, + _ => unreachable!("Wrong type of input. We should have never reached there"), + }) + .collect::>(); + + Ok(bits) + } + + /// Combine two transcripts + pub fn merge(mut self_L: Self, self_R: Self) -> Self { + assert_eq!(self_L.state.len(), 1); + assert_eq!(self_R.state.len(), 1); + self_L.state.extend(self_R.state); + self_L } } diff --git a/src/parafold/transcript/mod.rs b/src/parafold/transcript/mod.rs index 632afbe14..65fe6dc04 100644 --- a/src/parafold/transcript/mod.rs +++ b/src/parafold/transcript/mod.rs @@ -1,3 +1,14 @@ +use generic_array::typenum::{U2, U24}; +use neptune::poseidon::PoseidonConstants; + +use crate::traits::Engine; pub mod circuit; pub mod prover; + +/// Poseidon constants for hashing. First element is used for the Fiat-Shamir transcript, +/// second is used for hashing points on the primary curve. 
+pub type TranscriptConstants = ( + PoseidonConstants<::Scalar, U24>, + PoseidonConstants<::Base, U2>, +); diff --git a/src/parafold/transcript/prover.rs b/src/parafold/transcript/prover.rs index b7e8a295a..84c2c379d 100644 --- a/src/parafold/transcript/prover.rs +++ b/src/parafold/transcript/prover.rs @@ -1,55 +1,91 @@ -use crate::traits::Engine; -use ff::PrimeField; +use ff::{Field, PrimeFieldBits}; +use neptune::sponge::api::{IOPattern, SpongeAPI, SpongeOp}; +use neptune::sponge::vanilla::Mode::Simplex; +use neptune::sponge::vanilla::{Sponge, SpongeTrait}; -pub trait TranscriptRepresentable { - // With Rust 1.75 we can return impl Iterator - fn to_field_vec(&self) -> Vec; -} +use crate::parafold::cycle_fold::HashedCommitment; +use crate::parafold::transcript::TranscriptConstants; +use crate::traits::commitment::CommitmentTrait; +use crate::traits::Engine; +use crate::Commitment; -pub struct Transcript { - // ro_consts: ROConstants, - state: Vec, +#[derive(Clone, Debug)] +pub struct Transcript { + constants: TranscriptConstants, + state: Vec, } -impl TranscriptRepresentable for F { - fn to_field_vec(&self) -> Vec { - vec![*self] +impl Transcript { + pub fn new(constants: TranscriptConstants) -> Self { + Self { + constants, + state: vec![], + } } -} -impl Transcript { - pub fn new() -> Self { - todo!() + pub fn new_init(init: E::Scalar, constants: TranscriptConstants) -> Self { + Self { + constants, + state: vec![init], + } } - pub fn absorb(&mut self, element: &T) + + pub fn absorb(&mut self, elements: I) where - T: TranscriptRepresentable, + I: IntoIterator, { - self.state.extend(element.to_field_vec().into_iter()); - } - - pub fn squeeze(&mut self) -> E1::Scalar { - todo!() - // let mut ro = E::RO::new(self.ro_consts.clone(), self.state.len()); - // for e in self.state.drain(..) { - // ro.absorb(&e); - // } - // // FIXME: We only need small challenges when folding secondary circuits - // let output = ro.squeeze(128); - // - // self.state.extend([output.clone()]); - // Ok(output) - } - pub fn squeeze_bits(&mut self, _num_bits: usize) -> E1::Scalar { - todo!() - // let mut ro = E::RO::new(self.ro_consts.clone(), self.state.len()); - // for e in self.state.drain(..) 
{ - // ro.absorb(&e); - // } - // // FIXME: We only need small challenges when folding secondary circuits - // let output = ro.squeeze(128); - // - // self.state.extend([output.clone()]); - // Ok(output) + self.state.extend(elements); + } + + pub fn absorb_commitment_primary(&mut self, c: Commitment) { + let c_hash = HashedCommitment::::new(c, &self.constants); + self.absorb(c_hash.as_preimage()); + } + + pub fn absorb_commitment_secondary>(&mut self, c: Commitment) { + let (x, y, _) = c.to_coordinates(); + self.absorb([x, y]); + } + + pub fn squeeze(&mut self) -> E::Scalar { + let mut sponge = Sponge::new_with_constants(&self.constants.0, Simplex); + let num_absorbs = self.state.len() as u32; + let acc = &mut (); + let parameter = IOPattern(vec![SpongeOp::Absorb(num_absorbs), SpongeOp::Squeeze(1u32)]); + sponge.start(parameter, None, acc); + SpongeAPI::absorb(&mut sponge, num_absorbs, &self.state, acc); + let hash = SpongeAPI::squeeze(&mut sponge, 1, acc); + sponge.finish(acc).unwrap(); + let output = hash[0]; + self.state = hash; + output + } + + pub fn squeeze_bits_secondary(&mut self, num_bits: usize) -> E::Base { + let hash = self.squeeze(); + + // Only return `num_bits` + let bits = hash.to_le_bits(); + let mut res = E::Base::ZERO; + let mut coeff = E::Base::ONE; + for bit in bits.into_iter().take(num_bits) { + if bit { + res += coeff; + } + coeff += coeff; + } + res + } + + pub fn seal(&self) -> E::Scalar { + assert_eq!(self.state.len(), 1); + self.state[0] + } + + pub fn merge(mut self_L: Self, self_R: &Self) -> Self { + assert_eq!(self_L.state.len(), 1); + assert_eq!(self_R.state.len(), 1); + self_L.state.extend(self_R.state.iter().cloned()); + self_L } }
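
A minimal usage sketch of the prover-side transcript introduced above, assuming the Poseidon constants are instantiated with neptune's PoseidonConstants::new() and writing the alias generically as TranscriptConstants<E1>; the helper build_constants, the function fold_challenge_example, and the input commitment comm are hypothetical and only illustrate the absorb/squeeze flow that the circuit-side AllocatedTranscript is expected to mirror.

use neptune::poseidon::PoseidonConstants;

use crate::parafold::transcript::prover::Transcript;
use crate::parafold::transcript::TranscriptConstants;
use crate::traits::Engine;
use crate::Commitment;

/// Hypothetical helper: both Poseidon instances are determined by their arity
/// (U24 for the Fiat-Shamir sponge, U2 for hashing primary-curve points),
/// matching the tuple alias added in transcript/mod.rs.
fn build_constants<E1: Engine>() -> TranscriptConstants<E1> {
  (PoseidonConstants::new(), PoseidonConstants::new())
}

/// Hypothetical example: derive one full-width primary challenge and one
/// 128-bit secondary challenge from a scalar and a primary commitment.
fn fold_challenge_example<E1: Engine>(
  x: E1::Scalar,
  comm: Commitment<E1>,
) -> (E1::Scalar, E1::Base) {
  let mut transcript = Transcript::<E1>::new(build_constants::<E1>());

  // Absorb a scalar directly, and a primary-curve commitment via its
  // Poseidon hash (HashedCommitment uses the second set of constants).
  transcript.absorb([x]);
  transcript.absorb_commitment_primary(comm);

  // Squeezing replaces the state with the single hash output, so later
  // absorptions are chained to everything squeezed so far.
  let r_primary: E1::Scalar = transcript.squeeze();

  // Short challenge reinterpreted in the base field, as used when folding
  // the secondary (cycle-fold) instances.
  let r_secondary: E1::Base = transcript.squeeze_bits_secondary(128);

  (r_primary, r_secondary)
}

Since squeeze collapses the state to its single output, seal and merge can assert state.len() == 1; the in-circuit AllocatedTranscript runs the same IOPattern (Absorb(n), Squeeze(1)), so the native and allocated challenges agree as long as both sides absorb the same elements in the same order.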