diff --git a/benches/common/fib.rs b/benches/common/fib.rs
index aee157ca1..3d84501e0 100644
--- a/benches/common/fib.rs
+++ b/benches/common/fib.rs
@@ -38,7 +38,7 @@ pub(crate) fn fib_limit(n: usize, rc: usize) -> usize {
     rc * (frame / rc + usize::from(frame % rc != 0))
 }
 
-fn lurk_fib<F: LurkField>(store: &Store<F>, n: usize) -> Ptr {
+fn lurk_fib<F: LurkField>(store: &Store<F>, n: usize) -> &Ptr {
     let frame_idx = fib_frame(n);
     let limit = frame_idx;
     let fib_expr = fib_expr(store);
@@ -59,7 +59,7 @@ fn lurk_fib<F: LurkField>(store: &Store<F>, n: usize) -> Ptr {
     // body: (.lurk.user.fib), continuation: Outermost
     }
 
     let [_, _, rest_bindings] = store.pop_binding(target_env).unwrap();
-    let [_, val, _] = store.pop_binding(&rest_bindings).unwrap();
+    let [_, val, _] = store.pop_binding(rest_bindings).unwrap();
     val
 }
@@ -109,7 +109,7 @@ pub(crate) fn test_fib_io_matches() {
     let fib_9 = store.num_u64(34);
     let fib_10 = store.num_u64(55);
     let fib_11 = store.num_u64(89);
-    assert_eq!(fib_9, lurk_fib(&store, 9));
-    assert_eq!(fib_10, lurk_fib(&store, 10));
-    assert_eq!(fib_11, lurk_fib(&store, 11));
+    assert_eq!(&fib_9, lurk_fib(&store, 9));
+    assert_eq!(&fib_10, lurk_fib(&store, 10));
+    assert_eq!(&fib_11, lurk_fib(&store, 11));
 }
diff --git a/chain-server/src/server.rs b/chain-server/src/server.rs
index c0def2dde..f2dad2872 100644
--- a/chain-server/src/server.rs
+++ b/chain-server/src/server.rs
@@ -173,8 +173,8 @@ where
         // produce the data for the response
         let chain_response_data = ser(ChainResponseData::new(
-            &result,
-            &next_callable,
+            result,
+            next_callable,
             &self.store,
             compressed_proof,
         ))
@@ -182,13 +182,13 @@ where
 
         // save the session
         if let Some(session) = &self.session {
-            let session_data = SessionData::pack_standalone(self, &next_callable);
+            let session_data = SessionData::pack_standalone(self, next_callable);
             dump(session_data, session).map_err(|e| Status::internal(e.to_string()))?;
         }
 
         // now it's safe to set the new callable state since no error
         // has occurred so far
-        *callable_state = next_callable;
+        *callable_state = *next_callable;
 
         Ok(Response::new(ChainResponse {
             chain_response_data,
@@ -371,8 +371,8 @@ where
         // produce the data for the response
         let chain_response_data = ser(ChainResponseData::new(
-            &result,
-            &next_callable,
+            result,
+            next_callable,
             &self.store,
             compressed_proof,
         ))
@@ -382,16 +382,16 @@ where
         if let Some(session) = &self.session {
             let session_data = SessionData::pack_stream(
                 self,
-                &next_callable,
-                Some((&result, recursive_proof.clone())),
+                next_callable,
+                Some((result, recursive_proof.clone())),
             );
             dump(session_data, session).map_err(|e| Status::internal(e.to_string()))?;
         }
 
         // now it's safe to set the new state since no error has occurred so far
         *state = StreamState {
-            callable: next_callable,
-            result_and_proof: Some((result, recursive_proof)),
+            callable: *next_callable,
+            result_and_proof: Some((*result, recursive_proof)),
         };
 
         Ok(Response::new(ChainResponse {
@@ -643,7 +643,7 @@ fn get_service_and_address<
     let callable = if init_args.comm {
         let hash_ptr = store.read_with_default_state(&init_args.callable)?;
         let hash = store
-            .fetch_f(&hash_ptr)
+            .fetch_f_by_val(hash_ptr.val())
            .ok_or("Failed to parse callable hash")?;
         fetch_comm(hash, &store)?;
         hash_ptr.cast(Tag::Expr(lurk::tag::ExprTag::Comm))
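Note for orientation: most of the mechanical churn in this patch comes from two renames, `Ptr::raw()`/`RawPtr` becoming `Ptr::val()`/`IVal`, and `ZPtr::value()` becoming `ZPtr::hash()` (with `fetch_f` becoming `fetch_f_by_val`, as above). Below is a minimal, hypothetical sketch of the shapes these accessors appear to have after the refactor; none of these definitions are taken from the patch itself.

```rust
// Hypothetical stand-ins for orientation only, not the actual lurk types.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Tag { Num, Cons }

/// `IVal` replaces `RawPtr`: an index into the store's internal tables.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum IVal {
    Atom(usize),    // index of a field element
    Tuple2(usize),  // index of a pair of child pointers
    Tuple3(usize),
    Tuple4(usize),
    Compact(usize), // 3 children hashed with a 4-element preimage
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Ptr { tag: Tag, val: IVal }

impl Ptr {
    fn val(&self) -> &IVal { &self.val }
    fn parts(&self) -> (&Tag, &IVal) { (&self.tag, &self.val) }
}

/// The LEM `ZPtr` exposes its field-element payload as `hash()`,
/// where the old `z_ptr::ZPtr` called the same accessor `value()`.
struct ZPtr<F> { tag: Tag, hash: F }

impl<F> ZPtr<F> {
    fn tag(&self) -> &Tag { &self.tag }
    fn hash(&self) -> &F { &self.hash }
}

fn main() {
    let p = Ptr { tag: Tag::Num, val: IVal::Atom(0) };
    assert!(matches!(p.parts(), (Tag::Num, IVal::Atom(0))));
    let z = ZPtr { tag: Tag::Cons, hash: 42u64 };
    assert_eq!(*z.hash(), 42);
}
```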
diff --git a/examples/keccak.rs b/examples/keccak.rs
index 5495cf403..df93a0234 100644
--- a/examples/keccak.rs
+++ b/examples/keccak.rs
@@ -309,7 +309,7 @@ impl<F: LurkField> CircomGadget<F> for CircomKeccak<F> {
         let bytes_to_hash = bits_to_bytes(
             &z_bits
                 .iter()
-                .map(|ptr| ptr.value() != &F::ZERO)
+                .map(|ptr| ptr.hash() != &F::ZERO)
                 .collect::<Vec<_>>(),
         );
@@ -371,7 +371,7 @@ fn main() {
         .unwrap()
         .0
         .iter()
-        .map(|ptr| ptr.raw().get_atom().unwrap() == 1)
+        .map(|ptr| ptr.val().get_atom_idx().unwrap() == 1)
         .collect::<Vec<_>>(),
     );
diff --git a/foil/src/coil.rs b/foil/src/coil.rs
index 9a808a76c..73deb76ac 100644
--- a/foil/src/coil.rs
+++ b/foil/src/coil.rs
@@ -85,8 +85,8 @@ fn lookup_vertex_id<F: LurkField>(store: &Store<F>, env: &Ptr, var: &Ptr) -> Res
         return Ok(None);
     };
 
-    if *var == bound_var {
-        match store.fetch_num(&id) {
+    if var == bound_var {
+        match store.fetch_num(id) {
             Some(f) => Ok(f.to_u64().map(|u| {
                 let n = u as Id;
                 info!("found {n}");
@@ -97,7 +97,7 @@ fn lookup_vertex_id<F: LurkField>(store: &Store<F>, env: &Ptr, var: &Ptr) -> Res
             }
         }
     } else {
-        lookup_vertex_id(store, &rest_env, var)
+        lookup_vertex_id(store, rest_env, var)
     }
 }
 
@@ -127,9 +127,9 @@ impl Context {
         let [_var, id, rest_env] = store
             .pop_binding(&self.env)
             .ok_or(anyhow!("failed to pop binding"))?;
-        self.env = rest_env;
+        self.env = *rest_env;
 
-        Ok(match store.fetch_num(&id) {
+        Ok(match store.fetch_num(id) {
             Some(f) => f
                 .to_u64()
                 .map(|u| u as Id)
diff --git a/src/circuit/gadgets/pointer.rs b/src/circuit/gadgets/pointer.rs
index cb92f242c..c3c44695a 100644
--- a/src/circuit/gadgets/pointer.rs
+++ b/src/circuit/gadgets/pointer.rs
@@ -5,8 +5,9 @@ use ff::PrimeField;
 
 use crate::{
     field::LurkField,
+    lem::pointers::ZPtr,
     tag::{ExprTag, Tag},
-    z_ptr::{ZContPtr, ZExprPtr, ZPtr},
+    z_ptr::{ZContPtr, ZExprPtr},
 };
 
 use super::{
@@ -50,17 +51,17 @@ impl<F: LurkField> Debug for AllocatedPtr<F> {
 }
 
 impl<F: LurkField> AllocatedPtr<F> {
-    pub fn alloc<CS: ConstraintSystem<F>, Fo, T: Tag>(
+    pub fn alloc<CS: ConstraintSystem<F>, Fo>(
         cs: &mut CS,
         value: Fo,
     ) -> Result<Self, SynthesisError>
     where
-        Fo: FnOnce() -> Result<ZPtr<T, F>, SynthesisError>,
+        Fo: FnOnce() -> Result<ZPtr<F>, SynthesisError>,
     {
         let mut hash = None;
         let alloc_tag = AllocatedNum::alloc(ns!(cs, "tag"), || {
             let ptr = value()?;
-            hash = Some(*ptr.value());
+            hash = Some(*ptr.hash());
             Ok(ptr.tag_field())
         })?;
 
@@ -74,14 +75,14 @@ impl<F: LurkField> AllocatedPtr<F> {
     })
     }
 
-    pub fn alloc_infallible<CS: ConstraintSystem<F>, Fo, T: Tag>(cs: &mut CS, value: Fo) -> Self
+    pub fn alloc_infallible<CS: ConstraintSystem<F>, Fo>(cs: &mut CS, value: Fo) -> Self
     where
-        Fo: FnOnce() -> ZPtr<T, F>,
+        Fo: FnOnce() -> ZPtr<F>,
     {
         let mut hash = None;
         let alloc_tag = AllocatedNum::alloc_infallible(ns!(cs, "tag"), || {
             let ptr = value();
-            hash = Some(*ptr.value());
+            hash = Some(*ptr.hash());
             ptr.tag_field()
         });
 
@@ -106,12 +107,12 @@ impl<F: LurkField> AllocatedPtr<F> {
         })
     }
 
-    pub fn alloc_constant<CS: ConstraintSystem<F>, T: Tag>(
+    pub fn alloc_constant<CS: ConstraintSystem<F>>(
         cs: &mut CS,
-        value: ZPtr<T, F>,
+        value: ZPtr<F>,
     ) -> Result<Self, SynthesisError> {
         let alloc_tag = allocate_constant(ns!(cs, "tag"), value.tag_field());
-        let alloc_hash = allocate_constant(ns!(cs, "hash"), *value.value());
+        let alloc_hash = allocate_constant(ns!(cs, "hash"), *value.hash());
 
         Ok(AllocatedPtr {
             tag: alloc_tag,
@@ -131,7 +132,7 @@ impl<F: LurkField> AllocatedPtr<F> {
         &self.hash
     }
 
-    pub fn get_value<T: Tag>(&self) -> Option<ZPtr<T, F>> {
+    pub fn get_value(&self) -> Option<ZPtr<F>> {
         self.tag.get_value().and_then(|tag| {
             self.hash
                 .get_value()
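Because `lem::pointers::ZPtr<F>` carries its tag as a value rather than a type parameter, `alloc`, `alloc_infallible`, `alloc_constant`, and `get_value` all lose their `T: Tag` parameter here, and call sites drop the turbofish (see the `toplevel.rs` test hunk further down). A hedged sketch of a caller after the change, with the constraint-system setup assumed for illustration:

```rust
use bellpepper_core::{test_cs::TestConstraintSystem, SynthesisError};
use lurk::{circuit::gadgets::pointer::AllocatedPtr, field::LurkField, lem::pointers::ZPtr};

// Hypothetical call site: before this patch the same call would have
// needed a tag type, e.g. `AllocatedPtr::alloc::<_, _, Tag>(...)`.
fn alloc_example<F: LurkField>(
    cs: &mut TestConstraintSystem<F>,
    z_ptr: ZPtr<F>,
) -> Result<AllocatedPtr<F>, SynthesisError> {
    AllocatedPtr::alloc(cs, || Ok(z_ptr))
}
```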
diff --git a/src/cli/repl/meta_cmd.rs b/src/cli/repl/meta_cmd.rs
index 5c99ada51..fa3ce761d 100644
--- a/src/cli/repl/meta_cmd.rs
+++ b/src/cli/repl/meta_cmd.rs
@@ -19,8 +19,7 @@ use crate::{
     field::LurkField,
     lem::{
         eval::evaluate_with_env_and_cont,
-        pointers::{Ptr, RawPtr},
-        store::expect_ptrs,
+        pointers::{IVal, Ptr},
         tag::Tag,
     },
     package::{Package, SymbolRef},
@@ -272,7 +271,7 @@ where
             let second_io = repl
                 .eval_expr(second)
                 .with_context(|| "evaluating second arg")?;
-            let (Tag::Expr(ExprTag::Num), RawPtr::Atom(secret)) = first_io[0].parts() else {
+            let (Tag::Expr(ExprTag::Num), IVal::Atom(secret)) = first_io[0].parts() else {
                 bail!(
                     "Secret must be a number. Got {}",
                     first_io[0].fmt_to_string(&repl.store, &repl.state.borrow())
@@ -589,7 +588,7 @@ where
                 .store
                 .fetch_cons(result)
                 .ok_or_else(|| anyhow!("Chained function must return a cons expression"))?;
-            let (Tag::Expr(ExprTag::Comm), RawPtr::Atom(hash)) = comm.parts() else {
+            let (Tag::Expr(ExprTag::Comm), IVal::Atom(hash)) = comm.parts() else {
                 bail!("Second component of a chain must be a commitment")
             };
             let hash = *repl.store.expect_f(*hash);
@@ -598,7 +597,7 @@ where
                 .store
                 .open(hash)
                 .expect("data must have been committed");
-            repl.hide(*secret, *fun)
+            repl.hide(secret.0, *fun)
         },
     };
 
@@ -822,7 +821,7 @@ where
             };
 
             let (car, _) = repl.store.car_cdr_simple(&rest)?;
-            let (Tag::Expr(ExprTag::Num), RawPtr::Atom(rc_idx)) = car.parts() else {
+            let (Tag::Expr(ExprTag::Num), IVal::Atom(rc_idx)) = car.parts() else {
                 bail!("Reduction count must be a Num")
             };
             let Some(rc) = repl.store.expect_f(*rc_idx).to_u64().map(|u| u as usize) else {
@@ -868,13 +867,13 @@ where
                 .eval_expr_with_env(apply_call, repl.store.intern_empty_env())
                 .with_context(|| "evaluating protocol function call")?;
 
-            let (Tag::Expr(ExprTag::Cons), RawPtr::Hash4(idx)) = &io[0].parts() else {
+            let (Tag::Expr(ExprTag::Cons), IVal::Tuple2(idx)) = &io[0].parts() else {
                 bail!(
                     "Protocol function must return a pair. Got {}",
                     io[0].fmt_to_string(&repl.store, &repl.state.borrow())
                 )
             };
-            let [pre_verify, post_verify] = &expect_ptrs!(repl.store, 2, *idx);
+            let [pre_verify, post_verify] = repl.store.expect_tuple2(*idx);
 
             if pre_verify.is_nil() {
                 bail!("Pre-verification predicate rejected the input")
diff --git a/src/cli/repl/mod.rs b/src/cli/repl/mod.rs
index 9f43afd62..29658ded8 100644
--- a/src/cli/repl/mod.rs
+++ b/src/cli/repl/mod.rs
@@ -29,7 +29,7 @@ use crate::{
             make_eval_step_from_config, EvalConfig,
         },
         interpreter::Frame,
-        pointers::{Ptr, RawPtr},
+        pointers::{IVal, Ptr},
         store::Store,
         tag::Tag,
         Func,
@@ -322,7 +322,10 @@ where
             &self.store,
             (input[0], output[0]),
             (input[1], output[1]),
-            (cont.parts(), cont_out.parts()),
+            (
+                (cont.tag_field(), *cont.hash()),
+                (cont_out.tag_field(), *cont_out.hash()),
+            ),
         );
         let claim_comm = Commitment::new(None, claim, &self.store);
@@ -555,8 +558,7 @@ where
     /// Errors when `hash_expr` doesn't reduce to a Num or Comm pointer
     fn get_comm_hash(&mut self, hash_expr: Ptr) -> Result<&F> {
         let io = self.eval_expr(hash_expr)?;
-        let (Tag::Expr(ExprTag::Num | ExprTag::Comm), RawPtr::Atom(hash_idx)) = io[0].parts()
-        else {
+        let (Tag::Expr(ExprTag::Num | ExprTag::Comm), IVal::Atom(hash_idx)) = io[0].parts() else {
             bail!("Commitment hash expression must reduce to a Num or Comm pointer")
         };
         Ok(self.store.expect_f(*hash_idx))
diff --git a/src/cli/zstore.rs b/src/cli/zstore.rs
index dbaa3112e..f409e3e98 100644
--- a/src/cli/zstore.rs
+++ b/src/cli/zstore.rs
@@ -6,11 +6,10 @@ use std::collections::{BTreeMap, HashMap, HashSet};
 use crate::{
     field::{FWrap, LurkField},
     lem::{
-        pointers::{Ptr, RawPtr, ZPtr},
-        store::{expect_ptrs, intern_ptrs_hydrated, Store},
-        tag::Tag,
+        pointers::{IVal, Ptr, ZPtr},
+        store::Store,
+        store_core::StoreHasher,
     },
-    tag::ExprTag::Env,
 };
 
 use super::field_data::HasFieldModulus;
@@ -24,7 +23,7 @@ pub(crate) enum ZPtrType<F: LurkField> {
     Tuple2(ZPtr<F>, ZPtr<F>),
     Tuple3(ZPtr<F>, ZPtr<F>, ZPtr<F>),
     Tuple4(ZPtr<F>, ZPtr<F>, ZPtr<F>, ZPtr<F>),
-    Env(ZPtr<F>, ZPtr<F>, ZPtr<F>),
+    Compact(ZPtr<F>, ZPtr<F>, ZPtr<F>),
 }
 
 /// Holds a mapping from `ZPtr`s to their `ZPtrType`s
@@ -43,86 +42,54 @@ impl<F: LurkField> ZDag<F> {
             *z_ptr
         } else {
             let tag = ptr.tag();
-            let z_ptr = match ptr.raw() {
-                RawPtr::Atom(idx) => {
+            let 
z_ptr = match ptr.val() { + IVal::Atom(idx) => { let f = store.expect_f(*idx); let z_ptr = ZPtr::from_parts(*tag, *f); self.0.insert(z_ptr, ZPtrType::Atom); z_ptr } - RawPtr::Hash4(idx) => { - if matches!(tag, Tag::Expr(Env)) { - let [sym, val, env] = store.expect_env_components(*idx); - let sym = self.populate_with(&sym, store, cache); - let val = self.populate_with(&val, store, cache); - let env = self.populate_with(&env, store, cache); - let z_ptr = ZPtr::from_parts( - *tag, - store.poseidon_cache.hash4(&[ - *sym.value(), - val.tag_field(), - *val.value(), - *env.value(), - ]), - ); - self.0.insert(z_ptr, ZPtrType::Env(sym, val, env)); - z_ptr - } else { - let [a, b] = expect_ptrs!(store, 2, *idx); - let a = self.populate_with(&a, store, cache); - let b = self.populate_with(&b, store, cache); - let z_ptr = ZPtr::from_parts( - *tag, - store.poseidon_cache.hash4(&[ - a.tag_field(), - *a.value(), - b.tag_field(), - *b.value(), - ]), - ); - self.0.insert(z_ptr, ZPtrType::Tuple2(a, b)); - z_ptr - } + IVal::Tuple2(idx) => { + let [a, b] = store.expect_tuple2(*idx); + let a = self.populate_with(a, store, cache); + let b = self.populate_with(b, store, cache); + let z_ptr = ZPtr::new(*tag, store.core.hasher.hash_ptrs(vec![a, b])); + self.0.insert(z_ptr, ZPtrType::Tuple2(a, b)); + z_ptr } - RawPtr::Hash6(idx) => { - let [a, b, c] = expect_ptrs!(store, 3, *idx); - let a = self.populate_with(&a, store, cache); - let b = self.populate_with(&b, store, cache); - let c = self.populate_with(&c, store, cache); - let z_ptr = ZPtr::from_parts( - *tag, - store.poseidon_cache.hash6(&[ - a.tag_field(), - *a.value(), - b.tag_field(), - *b.value(), - c.tag_field(), - *c.value(), - ]), - ); + IVal::Tuple3(idx) => { + let [a, b, c] = store.expect_tuple3(*idx); + let a = self.populate_with(a, store, cache); + let b = self.populate_with(b, store, cache); + let c = self.populate_with(c, store, cache); + let z_ptr = ZPtr::new(*tag, store.core.hasher.hash_ptrs(vec![a, b, c])); self.0.insert(z_ptr, ZPtrType::Tuple3(a, b, c)); z_ptr } - RawPtr::Hash8(idx) => { - let [a, b, c, d] = expect_ptrs!(store, 4, *idx); - let a = self.populate_with(&a, store, cache); - let b = self.populate_with(&b, store, cache); - let c = self.populate_with(&c, store, cache); - let d = self.populate_with(&d, store, cache); - let z_ptr = ZPtr::from_parts( + IVal::Tuple4(idx) => { + let [a, b, c, d] = store.expect_tuple4(*idx); + let a = self.populate_with(a, store, cache); + let b = self.populate_with(b, store, cache); + let c = self.populate_with(c, store, cache); + let d = self.populate_with(d, store, cache); + let z_ptr = ZPtr::new(*tag, store.core.hasher.hash_ptrs(vec![a, b, c, d])); + self.0.insert(z_ptr, ZPtrType::Tuple4(a, b, c, d)); + z_ptr + } + IVal::Compact(idx) => { + let [a, b, c] = store.expect_tuple3(*idx); + let a = self.populate_with(a, store, cache); + let b = self.populate_with(b, store, cache); + let c = self.populate_with(c, store, cache); + let (b_tag, b_val) = b.into_parts(); + let z_ptr = ZPtr::new( *tag, - store.poseidon_cache.hash8(&[ - a.tag_field(), - *a.value(), - b.tag_field(), - *b.value(), - c.tag_field(), - *c.value(), - d.tag_field(), - *d.value(), - ]), + store + .core + .hasher + .hash_compact(*a.val(), b_tag, b_val, *c.val()), ); - self.0.insert(z_ptr, ZPtrType::Tuple4(a, b, c, d)); + self.0.insert(z_ptr, ZPtrType::Compact(a, b, c)); z_ptr } }; @@ -134,7 +101,7 @@ impl ZDag { let mut stack = vec![*ptr]; macro_rules! 
feed_loop { ($x:expr) => { - if $x.raw().is_hash() { + if $x.val().is_compound() { if !cache.contains_key(&$x) { if dag.insert($x) { stack.push($x); @@ -144,27 +111,21 @@ impl ZDag { }; } while let Some(ptr) = stack.pop() { - match ptr.raw() { - RawPtr::Atom(..) => (), - RawPtr::Hash4(idx) => { - if matches!(ptr.tag(), Tag::Expr(Env)) { - for ptr in store.expect_env_components(*idx) { - feed_loop!(ptr) - } - } else { - for ptr in expect_ptrs!(store, 2, *idx) { - feed_loop!(ptr) - } + match ptr.val() { + IVal::Atom(..) => (), + IVal::Tuple2(idx) => { + for ptr in store.expect_tuple2(*idx) { + feed_loop!(*ptr) } } - RawPtr::Hash6(idx) => { - for ptr in expect_ptrs!(store, 3, *idx) { - feed_loop!(ptr) + IVal::Tuple3(idx) | IVal::Compact(idx) => { + for ptr in store.expect_tuple3(*idx) { + feed_loop!(*ptr) } } - RawPtr::Hash8(idx) => { - for ptr in expect_ptrs!(store, 4, *idx) { - feed_loop!(ptr) + IVal::Tuple4(idx) => { + for ptr in store.expect_tuple4(*idx) { + feed_loop!(*ptr) } } } @@ -191,7 +152,9 @@ impl ZDag { None => bail!("Couldn't find ZPtr on ZStore"), Some(ZPtrType::Atom) => Ok(vec![]), Some(ZPtrType::Tuple2(z1, z2)) => Ok(vec![z1, z2]), - Some(ZPtrType::Tuple3(z1, z2, z3) | ZPtrType::Env(z1, z2, z3)) => Ok(vec![z1, z2, z3]), + Some(ZPtrType::Tuple3(z1, z2, z3) | ZPtrType::Compact(z1, z2, z3)) => { + Ok(vec![z1, z2, z3]) + } Some(ZPtrType::Tuple4(z1, z2, z3, z4)) => Ok(vec![z1, z2, z3, z4]), } } @@ -208,35 +171,34 @@ impl ZDag { } else { let ptr = match self.get_type(z_ptr) { None => bail!("Couldn't find ZPtr on ZStore"), - Some(ZPtrType::Atom) => store.intern_atom(*z_ptr.tag(), *z_ptr.value()), + Some(ZPtrType::Atom) => store.intern_atom(*z_ptr.tag(), *z_ptr.hash()), Some(ZPtrType::Tuple2(z1, z2)) => { let ptr1 = self.populate_store(z1, store, cache)?; let ptr2 = self.populate_store(z2, store, cache)?; - intern_ptrs_hydrated!(store, *z_ptr.tag(), *z_ptr, ptr1, ptr2) + store.intern_tuple2([ptr1, ptr2], *z_ptr.tag(), Some(*z_ptr.hash())) } Some(ZPtrType::Tuple3(z1, z2, z3)) => { let ptr1 = self.populate_store(z1, store, cache)?; let ptr2 = self.populate_store(z2, store, cache)?; let ptr3 = self.populate_store(z3, store, cache)?; - intern_ptrs_hydrated!(store, *z_ptr.tag(), *z_ptr, ptr1, ptr2, ptr3) + store.intern_tuple3([ptr1, ptr2, ptr3], *z_ptr.tag(), Some(*z_ptr.hash())) } Some(ZPtrType::Tuple4(z1, z2, z3, z4)) => { let ptr1 = self.populate_store(z1, store, cache)?; let ptr2 = self.populate_store(z2, store, cache)?; let ptr3 = self.populate_store(z3, store, cache)?; let ptr4 = self.populate_store(z4, store, cache)?; - intern_ptrs_hydrated!(store, *z_ptr.tag(), *z_ptr, ptr1, ptr2, ptr3, ptr4) + store.intern_tuple4( + [ptr1, ptr2, ptr3, ptr4], + *z_ptr.tag(), + Some(*z_ptr.hash()), + ) } - Some(ZPtrType::Env(sym, val, env)) => { - let (_, sym_raw) = self.populate_store(sym, store, cache)?.into_parts(); - let (val_tag, val_raw) = - self.populate_store(val, store, cache)?.into_parts(); - let (_, env_raw) = self.populate_store(env, store, cache)?.into_parts(); - let raw = store.intern_raw_ptrs_hydrated( - [sym_raw, store.tag(val_tag), val_raw, env_raw], - FWrap(*z_ptr.value()), - ); - Ptr::new(Tag::Expr(Env), raw) + Some(ZPtrType::Compact(z1, z2, z3)) => { + let ptr1 = self.populate_store(z1, store, cache)?; + let ptr2 = self.populate_store(z2, store, cache)?; + let ptr3 = self.populate_store(z3, store, cache)?; + store.intern_compact([ptr1, ptr2, ptr3], *z_ptr.tag(), Some(*z_ptr.hash())) } }; cache.insert(*z_ptr, ptr); @@ -308,11 +270,11 @@ impl ZDag { self.populate_z_dag(z4, z_dag, 
cache)?;
                     z_dag.0.insert(*z_ptr, ZPtrType::Tuple4(*z1, *z2, *z3, *z4));
                 }
-                Some(ZPtrType::Env(sym, val, env)) => {
-                    self.populate_z_dag(sym, z_dag, cache)?;
-                    self.populate_z_dag(val, z_dag, cache)?;
-                    self.populate_z_dag(env, z_dag, cache)?;
-                    z_dag.0.insert(*z_ptr, ZPtrType::Env(*sym, *val, *env));
+                Some(ZPtrType::Compact(z1, z2, z3)) => {
+                    self.populate_z_dag(z1, z_dag, cache)?;
+                    self.populate_z_dag(z2, z_dag, cache)?;
+                    self.populate_z_dag(z3, z_dag, cache)?;
+                    z_dag.0.insert(*z_ptr, ZPtrType::Compact(*z1, *z2, *z3));
                 }
             };
             cache.insert(*z_ptr);
@@ -389,9 +351,9 @@ impl<F: LurkField> ZStore<F> {
     ) -> (Self, ZPtr<F>, HashMap<Ptr, ZPtr<F>>) {
         let mut z_store = ZStore::default();
         let mut cache = HashMap::default();
-        for (FWrap(hash), img) in store.comms.clone().into_tuple_vec() {
+        for (FWrap(hash), img) in store.core.comms.clone().into_tuple_vec() {
             let payload = z_store.populate_with(&img.1, store, &mut cache);
-            z_store.add_comm(hash, img.0, payload)
+            z_store.add_comm(hash, img.0 .0, payload)
         }
         let z_ptr = z_store.populate_with(ptr, store, &mut cache);
         (z_store, z_ptr, cache)
@@ -440,11 +402,7 @@ mod tests {
 
     use crate::{
         field::LurkField,
-        lem::{
-            pointers::Ptr,
-            store::{intern_ptrs, Store},
-            tag::Tag,
-        },
+        lem::{pointers::Ptr, store::Store, tag::Tag},
         tag::{
             ContTag, ExprTag, Op1, Op2, CONT_TAG_INIT, EXPR_TAG_INIT, OP1_TAG_INIT, OP2_TAG_INIT,
         },
@@ -466,40 +424,46 @@ mod tests {
             3 => Tag::Op2(Op2::try_from((rnd % Op2::COUNT) as u16 + OP2_TAG_INIT).unwrap()),
             _ => unreachable!(),
         };
-        if matches!(tag, Tag::Expr(ExprTag::Env)) {
-            let mut env = store.intern_empty_env();
-            for _ in 0..max_depth {
-                let sym = store.intern_user_symbol("foo");
-                let val = rng_interner(rng, max_depth - 1, store);
-                env = store.push_binding(sym, val, env);
-            }
-            return env;
-        }
         if max_depth == 0 {
             store.intern_atom(tag, Bn::from_u64(rnd.try_into().unwrap()))
         } else {
-            match rnd % 4 {
+            match rnd % 5 {
                 0 => store.intern_atom(tag, Bn::from_u64(rnd.try_into().unwrap())),
-                1 => intern_ptrs!(
-                    store,
+                1 => store.intern_tuple2(
+                    [
+                        rng_interner(rng, max_depth - 1, store),
+                        rng_interner(rng, max_depth - 1, store),
+                    ],
                     tag,
-                    rng_interner(rng, max_depth - 1, store),
-                    rng_interner(rng, max_depth - 1, store)
+                    None,
                 ),
-                2 => intern_ptrs!(
-                    store,
+                2 => store.intern_tuple3(
+                    [
+                        rng_interner(rng, max_depth - 1, store),
+                        rng_interner(rng, max_depth - 1, store),
+                        rng_interner(rng, max_depth - 1, store),
+                    ],
                     tag,
-                    rng_interner(rng, max_depth - 1, store),
-                    rng_interner(rng, max_depth - 1, store),
-                    rng_interner(rng, max_depth - 1, store)
+                    None,
                 ),
-                3 => intern_ptrs!(
-                    store,
+                3 => store.intern_tuple4(
+                    [
+                        rng_interner(rng, max_depth - 1, store),
+                        rng_interner(rng, max_depth - 1, store),
+                        rng_interner(rng, max_depth - 1, store),
+                        rng_interner(rng, max_depth - 1, store),
+                    ],
                     tag,
-                    rng_interner(rng, max_depth - 1, store),
-                    rng_interner(rng, max_depth - 1, store),
-                    rng_interner(rng, max_depth - 1, store),
-                    rng_interner(rng, max_depth - 1, store)
+                    None,
                 ),
+                4 => store.intern_compact(
+                    [
+                        rng_interner(rng, max_depth - 1, store),
+                        rng_interner(rng, max_depth - 1, store),
+                        rng_interner(rng, max_depth - 1, store),
+                    ],
+                    tag,
+                    None,
+                ),
                 _ => unreachable!(),
             }
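The removed `Env` special case generalizes into `Compact`: a 3-tuple whose first and last children contribute only their values (their tags are implied by the parent) while the middle child contributes both tag and value, keeping the preimage 4 field elements wide. A sketch of the digest this corresponds to, with `poseidon4` standing in for the store's Poseidon-4 hasher (this helper is illustrative, not the store's actual code):

```rust
// Mirrors the call `hasher.hash_compact(*a.val(), b_tag, b_val, *c.val())`
// in the `IVal::Compact` arm above, and the removed env hashing
// `hash4(&[*sym.value(), val.tag_field(), *val.value(), *env.value()])`.
fn hash_compact_sketch<F>(
    poseidon4: impl Fn([F; 4]) -> F,
    a_val: F,  // value of the first child; its tag is implied (e.g. Sym)
    b_tag: F,  // tag of the middle child, hashed explicitly
    b_val: F,  // value of the middle child
    c_val: F,  // value of the third child; its tag is implied (e.g. Env)
) -> F {
    poseidon4([a_val, b_tag, b_val, c_val])
}
```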
diff --git a/src/coprocessor/gadgets.rs b/src/coprocessor/gadgets.rs
index fdebd0b82..1beb9a660 100644
--- a/src/coprocessor/gadgets.rs
+++ b/src/coprocessor/gadgets.rs
@@ -13,7 +13,7 @@ use crate::{
     lem::{
         circuit::GlobalAllocator,
         pointers::{Ptr, ZPtr},
-        store::{expect_ptrs, Store},
+        store::Store,
         tag,
     },
     tag::{ExprTag, Tag},
@@ -38,7 +38,7 @@ pub(crate) fn construct_tuple2<F: LurkField, CS: ConstraintSystem<F>, T: Tag>(
             b.tag().clone(),
             b.hash().clone(),
         ],
-        store.poseidon_cache.constants.c4(),
+        store.core.hasher.poseidon_cache.constants.c4(),
     )?;
 
     Ok(AllocatedPtr::from_parts(tag, hash))
@@ -67,7 +67,7 @@ pub(crate) fn construct_tuple3<F: LurkField, CS: ConstraintSystem<F>, T: Tag>(
             c.tag().clone(),
             c.hash().clone(),
         ],
-        store.poseidon_cache.constants.c6(),
+        store.core.hasher.poseidon_cache.constants.c6(),
     )?;
 
     Ok(AllocatedPtr::from_parts(tag, hash))
@@ -99,7 +99,7 @@ pub(crate) fn construct_tuple4<F: LurkField, CS: ConstraintSystem<F>, T: Tag>(
             d.tag().clone(),
             d.hash().clone(),
         ],
-        store.poseidon_cache.constants.c8(),
+        store.core.hasher.poseidon_cache.constants.c8(),
     )?;
 
     Ok(AllocatedPtr::from_parts(tag, hash))
@@ -167,7 +167,7 @@ pub(crate) fn construct_env<F: LurkField, CS: ConstraintSystem<F>>(
             val.hash().clone(),
             next_env.clone(),
         ],
-        store.poseidon_cache.constants.c4(),
+        store.core.hasher.poseidon_cache.constants.c4(),
     )?;
 
     Ok(AllocatedPtr::from_parts(tag, hash))
@@ -192,7 +192,7 @@ pub(crate) fn construct_provenance<F: LurkField, CS: ConstraintSystem<F>>(
             result.hash().clone(),
             deps.clone(),
         ],
-        store.poseidon_cache.constants.c4(),
+        store.core.hasher.poseidon_cache.constants.c4(),
     )?;
 
     Ok(AllocatedPtr::from_parts(tag, hash))
@@ -213,14 +213,14 @@ pub(crate) fn deconstruct_env<F: LurkField, CS: ConstraintSystem<F>>(
     let (a, b, c, d) = {
         if let Some([v, val, new_env]) = s.pop_binding(&env_ptr) {
-            let v_zptr = s.hash_ptr(&v);
-            let val_zptr = s.hash_ptr(&val);
-            let new_env_zptr = s.hash_ptr(&new_env);
+            let v_zptr = s.hash_ptr(v);
+            let val_zptr = s.hash_ptr(val);
+            let new_env_zptr = s.hash_ptr(new_env);
             (
-                *v_zptr.value(),
+                *v_zptr.hash(),
                 val_zptr.tag().to_field::<F>(),
-                *val_zptr.value(),
-                *new_env_zptr.value(),
+                *val_zptr.hash(),
+                *new_env_zptr.hash(),
             )
         } else {
             (F::ZERO, F::ZERO, F::ZERO, F::ZERO)
@@ -240,7 +240,7 @@ pub(crate) fn deconstruct_env<F: LurkField, CS: ConstraintSystem<F>>(
             val_hash.clone(),
             new_env_hash.clone(),
         ],
-        s.poseidon_cache.constants.c4(),
+        s.core.hasher.poseidon_cache.constants.c4(),
     )?;
 
     let val = AllocatedPtr::from_parts(val_tag, val_hash);
@@ -264,14 +264,14 @@ pub(crate) fn deconstruct_provenance<F: LurkField, CS: ConstraintSystem<F>>(
     let (a, b, c, d) = {
         if let Some([q, res, deps]) = s.deconstruct_provenance(&prov_ptr) {
-            let q_zptr = s.hash_ptr(&q);
-            let res_zptr = s.hash_ptr(&res);
-            let deps_zptr = s.hash_ptr(&deps);
+            let q_zptr = s.hash_ptr(q);
+            let res_zptr = s.hash_ptr(res);
+            let deps_zptr = s.hash_ptr(deps);
             (
-                *q_zptr.value(),
+                *q_zptr.hash(),
                 res_zptr.tag().to_field::<F>(),
-                *res_zptr.value(),
-                *deps_zptr.value(),
+                *res_zptr.hash(),
+                *deps_zptr.hash(),
             )
         } else {
             (F::ZERO, F::ZERO, F::ZERO, F::ZERO)
@@ -291,7 +291,7 @@ pub(crate) fn deconstruct_provenance<F: LurkField, CS: ConstraintSystem<F>>(
             res_hash.clone(),
             deps_tuple_hash.clone(),
         ],
-        s.poseidon_cache.constants.c4(),
+        s.core.hasher.poseidon_cache.constants.c4(),
     )?;
 
     let res = AllocatedPtr::from_parts(res_tag, res_hash);
@@ -333,7 +333,7 @@ pub fn deconstruct_tuple2<F: LurkField, CS: ConstraintSystem<F>>(
 ) -> Result<(AllocatedPtr<F>, AllocatedPtr<F>), SynthesisError> {
     let (a, b) = if not_dummy.get_value() == Some(true) {
         let idx = get_ptr(tuple, store)?.get_index2().expect("invalid Ptr");
-        let [a, b] = &expect_ptrs!(store, 2, idx);
+        let [a, b] = store.expect_tuple2(idx);
         (store.hash_ptr(a), store.hash_ptr(b))
     } else {
         (ZPtr::dummy(), ZPtr::dummy())
@@ -350,7 +350,7 @@ pub fn deconstruct_tuple2<F: LurkField, CS: ConstraintSystem<F>>(
             b.tag().clone(),
             b.hash().clone(),
         ],
-        store.poseidon_cache.constants.c4(),
+        store.core.hasher.poseidon_cache.constants.c4(),
     )?;
 
     implies_equal(ns!(cs, "hash equality"), not_dummy, tuple.hash(), &hash);
@@ -371,7 +371,7 @@ pub(crate) fn deconstruct_tuple3<F: LurkField, CS: ConstraintSystem<F>>(
 ) -> Result<(AllocatedPtr<F>, AllocatedPtr<F>, AllocatedPtr<F>), SynthesisError> {
     let (a, b, c) = if not_dummy.get_value() == Some(true) {
         let idx = get_ptr(tuple, store)?.get_index3().expect("invalid Ptr");
-        let [a, b, c] = &expect_ptrs!(store, 3, idx);
+        let [a, b, c] = store.expect_tuple3(idx);
         (store.hash_ptr(a), store.hash_ptr(b), store.hash_ptr(c))
     } else {
         (ZPtr::dummy(), ZPtr::dummy(), ZPtr::dummy())
@@ -391,7 +391,7 @@ pub(crate) fn deconstruct_tuple3<F: LurkField, CS: ConstraintSystem<F>>(
             c.tag().clone(),
             c.hash().clone(),
         ],
-        store.poseidon_cache.constants.c6(),
+        store.core.hasher.poseidon_cache.constants.c6(),
     )?;
 
     implies_equal(ns!(cs, "hash equality"), not_dummy, tuple.hash(), &hash);
@@ -420,7 +420,7 @@ pub(crate) fn deconstruct_tuple4<F: LurkField, CS: ConstraintSystem<F>>(
 > {
     let (a, b, c, d) = if not_dummy.get_value() == Some(true) {
         let idx = get_ptr(tuple, store)?.get_index4().expect("invalid Ptr");
-        let [a, b, c, d] = &expect_ptrs!(store, 4, idx);
+        let [a, b, c, d] = store.expect_tuple4(idx);
         (
             store.hash_ptr(a),
             store.hash_ptr(b),
@@ -448,7 +448,7 @@ pub(crate) fn deconstruct_tuple4<F: LurkField, CS: ConstraintSystem<F>>(
             d.tag().clone(),
             d.hash().clone(),
         ],
-        store.poseidon_cache.constants.c8(),
+        store.core.hasher.poseidon_cache.constants.c8(),
     )?;
 
     implies_equal(ns!(cs, "hash equality"), not_dummy, tuple.hash(), &hash);
@@ -558,7 +558,7 @@ pub(crate) fn car_cdr<F: LurkField, CS: ConstraintSystem<F>>(
             cdr.tag().clone(),
             cdr.hash().clone(),
         ],
-        store.poseidon_cache.constants.c4(),
+        store.core.hasher.poseidon_cache.constants.c4(),
     )?;
 
     implies_equal(
@@ -620,7 +620,7 @@ pub(crate) fn car_cdr_simple<F: LurkField, CS: ConstraintSystem<F>>(
             cdr.tag().clone(),
             cdr.hash().clone(),
         ],
-        store.poseidon_cache.constants.c4(),
+        store.core.hasher.poseidon_cache.constants.c4(),
    )?;
 
     implies_equal(
@@ -719,17 +719,11 @@ pub fn chain_car_cdr<F: LurkField, CS: ConstraintSystem<F>>(
 }
 
 #[inline]
-pub fn a_ptr_as_z_ptr<F: LurkField>(
-    a: &AllocatedPtr<F>,
-) -> Option<crate::z_ptr::ZPtr<Tag, F>> {
+pub fn a_ptr_as_z_ptr<F: LurkField>(a: &AllocatedPtr<F>) -> Option<ZPtr<F>> {
     a.tag()
         .get_value()
         .and_then(|t| Tag::from_field(&t))
-        .and_then(|tag| {
-            a.hash()
-                .get_value()
-                .map(|hash| crate::z_ptr::ZPtr::from_parts(tag, hash))
-        })
+        .and_then(|tag| a.hash().get_value().map(|hash| ZPtr::from_parts(tag, hash)))
 }
 
 #[cfg(test)]
@@ -745,10 +739,7 @@ mod test {
             deconstruct_tuple4,
         },
         field::LurkField,
-        lem::{
-            circuit::GlobalAllocator,
-            store::{intern_ptrs, Store},
-        },
+        lem::{circuit::GlobalAllocator, store::Store},
     };
 
     use super::{a_ptr_as_z_ptr, chain_car_cdr, construct_list, deconstruct_tuple2};
@@ -763,13 +754,13 @@ mod test {
         let a_nil = g.alloc_ptr(&mut cs, &nil, &store);
 
         let nil2 = construct_tuple2(ns!(cs, "nil2"), &g, &store, nil_tag, &a_nil, &a_nil).unwrap();
-        let nil2_ptr = intern_ptrs!(store, *nil_tag, nil, nil);
+        let nil2_ptr = store.intern_tuple2([nil, nil], *nil_tag, None);
         let z_nil2_ptr = store.hash_ptr(&nil2_ptr);
         assert_eq!(a_ptr_as_z_ptr(&nil2), Some(z_nil2_ptr));
 
         let nil3 =
             construct_tuple3(ns!(cs, "nil3"), &g, &store, nil_tag, &a_nil, &a_nil, &a_nil).unwrap();
-        let nil3_ptr = intern_ptrs!(store, *nil_tag, nil, nil, nil);
+        let nil3_ptr = store.intern_tuple3([nil, nil, nil], *nil_tag, None);
         let z_nil3_ptr = store.hash_ptr(&nil3_ptr);
         assert_eq!(a_ptr_as_z_ptr(&nil3), Some(z_nil3_ptr));
 
@@ -784,7 +775,7 @@ mod test {
             &a_nil,
         )
         .unwrap();
-        let nil4_ptr = intern_ptrs!(store, *nil_tag, nil, nil, nil, nil);
+        let nil4_ptr = store.intern_tuple4([nil, nil, nil, nil], *nil_tag, None);
         let z_nil4_ptr = store.hash_ptr(&nil4_ptr);
         assert_eq!(a_ptr_as_z_ptr(&nil4), Some(z_nil4_ptr));
     }
@@ -818,7 +809,7 @@ mod test {
         let nil_tag = *nil.tag();
         let not_dummy = Boolean::Constant(true);
 
-        let tuple2 = intern_ptrs!(store, nil_tag, nil, nil);
+        let tuple2 = store.intern_tuple2([nil, nil], nil_tag, None);
         let z_tuple2 = store.hash_ptr(&tuple2);
         let a_tuple2 = AllocatedPtr::alloc_infallible(ns!(cs, "tuple2"), || z_tuple2);
         let (a, b) =
@@ -827,7 +818,7 @@ mod test {
         assert_eq!(a_ptr_as_z_ptr(&a), Some(z_nil));
         assert_eq!(a_ptr_as_z_ptr(&b), Some(z_nil));
 
-        let tuple3 = intern_ptrs!(store, nil_tag, nil, nil, nil);
+        let tuple3 = store.intern_tuple3([nil, nil, nil], nil_tag, None);
         let z_tuple3 = store.hash_ptr(&tuple3);
         let a_tuple3 = AllocatedPtr::alloc_infallible(ns!(cs, "tuple3"), || z_tuple3);
         let (a, b, c) =
@@ -837,7 +828,7 @@ mod test {
         assert_eq!(a_ptr_as_z_ptr(&a), Some(z_nil));
         assert_eq!(a_ptr_as_z_ptr(&b), Some(z_nil));
         assert_eq!(a_ptr_as_z_ptr(&c), Some(z_nil));
 
-        let tuple4 = intern_ptrs!(store, nil_tag, nil, nil, nil, nil);
+        let tuple4 = store.intern_tuple4([nil, nil, nil, nil], nil_tag, None);
         let z_tuple4 = store.hash_ptr(&tuple4);
         let a_tuple4 = AllocatedPtr::alloc_infallible(ns!(cs, "tuple4"), || z_tuple4);
         let (a, b, c, d) =
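Across the patch, the `intern_ptrs!`/`expect_ptrs!`/`fetch_ptrs!` macro family is replaced by arity-specific store methods whose last argument is an optional precomputed digest, with `Some(hash)` playing the role of the old `_hydrated` variants. The assumed shape, inferred from the call sites and written as a free-standing trait so the sketch compiles on its own; it is not lurk's actual code:

```rust
/// Inferred API shape: `store.intern_tuple2([a, b], tag, None)` hashes as
/// needed, while passing `Some(*z_ptr.hash())` reuses a known digest and
/// skips the Poseidon call (the old `intern_ptrs_hydrated!` case).
trait TupleStore<F, Ptr, Tag> {
    fn intern_tuple2(&self, ptrs: [Ptr; 2], tag: Tag, hash: Option<F>) -> Ptr;
    fn intern_tuple3(&self, ptrs: [Ptr; 3], tag: Tag, hash: Option<F>) -> Ptr;
    fn intern_tuple4(&self, ptrs: [Ptr; 4], tag: Tag, hash: Option<F>) -> Ptr;
    /// Three children, hashed with a 4-element preimage (`IVal::Compact`).
    fn intern_compact(&self, ptrs: [Ptr; 3], tag: Tag, hash: Option<F>) -> Ptr;
    fn fetch_tuple2(&self, idx: usize) -> Option<&[Ptr; 2]>;
    fn expect_tuple2(&self, idx: usize) -> &[Ptr; 2];
    // ...and the corresponding fetch/expect methods at arities 3 and 4.
}
```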
"tuple2"), || z_tuple2); let (a, b) = @@ -827,7 +818,7 @@ mod test { assert_eq!(a_ptr_as_z_ptr(&a), Some(z_nil)); assert_eq!(a_ptr_as_z_ptr(&b), Some(z_nil)); - let tuple3 = intern_ptrs!(store, nil_tag, nil, nil, nil); + let tuple3 = store.intern_tuple3([nil, nil, nil], nil_tag, None); let z_tuple3 = store.hash_ptr(&tuple3); let a_tuple3 = AllocatedPtr::alloc_infallible(ns!(cs, "tuple3"), || z_tuple3); let (a, b, c) = @@ -837,7 +828,7 @@ mod test { assert_eq!(a_ptr_as_z_ptr(&b), Some(z_nil)); assert_eq!(a_ptr_as_z_ptr(&c), Some(z_nil)); - let tuple4 = intern_ptrs!(store, nil_tag, nil, nil, nil, nil); + let tuple4 = store.intern_tuple4([nil, nil, nil, nil], nil_tag, None); let z_tuple4 = store.hash_ptr(&tuple4); let a_tuple4 = AllocatedPtr::alloc_infallible(ns!(cs, "tuple4"), || z_tuple4); let (a, b, c, d) = diff --git a/src/coprocessor/mod.rs b/src/coprocessor/mod.rs index 7c1a5d5c9..83fcefa71 100644 --- a/src/coprocessor/mod.rs +++ b/src/coprocessor/mod.rs @@ -120,7 +120,7 @@ pub(crate) mod test { use super::*; use crate::circuit::gadgets::constraints::{alloc_equal, mul}; - use crate::lem::{pointers::RawPtr, tag::Tag as LEMTag}; + use crate::lem::{pointers::IVal, tag::Tag as LEMTag}; use crate::tag::{ExprTag, Tag}; use std::marker::PhantomData; @@ -214,11 +214,11 @@ pub(crate) mod test { } fn evaluate(&self, s: &Store, args: &[Ptr], env: &Ptr, cont: &Ptr) -> Vec { - let (LEMTag::Expr(ExprTag::Num), RawPtr::Atom(a)) = args[0].parts() else { + let (LEMTag::Expr(ExprTag::Num), IVal::Atom(a)) = args[0].parts() else { return vec![args[0], *env, s.cont_error()]; }; let a = s.expect_f(*a); - let (LEMTag::Expr(ExprTag::Num), RawPtr::Atom(b)) = args[1].parts() else { + let (LEMTag::Expr(ExprTag::Num), IVal::Atom(b)) = args[1].parts() else { return vec![args[1], *env, s.cont_error()]; }; let b = s.expect_f(*b); diff --git a/src/coprocessor/sha256.rs b/src/coprocessor/sha256.rs index bfcea4440..2f184df9b 100644 --- a/src/coprocessor/sha256.rs +++ b/src/coprocessor/sha256.rs @@ -9,9 +9,11 @@ use crate::{ self as lurk, circuit::gadgets::pointer::AllocatedPtr, field::LurkField, - lem::{pointers::Ptr, store::Store}, + lem::{ + pointers::{Ptr, ZPtr}, + store::Store, + }, tag::{ExprTag, Tag}, - z_ptr::ZPtr, }; use super::{CoCircuit, Coprocessor}; @@ -61,14 +63,14 @@ fn synthesize_sha256>( ) } -fn compute_sha256(n: usize, z_ptrs: &[ZPtr]) -> F { +fn compute_sha256(n: usize, z_ptrs: &[ZPtr]) -> F { let mut hasher = Sha256::new(); let mut input = vec![0u8; 64 * n]; for (i, z_ptr) in z_ptrs.iter().enumerate() { let tag_zptr: F = z_ptr.tag().to_field(); - let hash_zptr = z_ptr.value(); + let hash_zptr = z_ptr.hash(); input[(64 * i)..(64 * i + 32)].copy_from_slice(&tag_zptr.to_bytes()); input[(64 * i + 32)..(64 * (i + 1))].copy_from_slice(&hash_zptr.to_bytes()); } diff --git a/src/coprocessor/trie/mod.rs b/src/coprocessor/trie/mod.rs index 41a0c19f8..06be8f7f2 100644 --- a/src/coprocessor/trie/mod.rs +++ b/src/coprocessor/trie/mod.rs @@ -56,7 +56,8 @@ pub struct NewCoprocessor { impl Coprocessor for NewCoprocessor { fn evaluate_simple(&self, s: &Store, _args: &[Ptr]) -> Ptr { - let trie: StandardTrie<'_, F> = Trie::new(&s.poseidon_cache, &s.inverse_poseidon_cache); + let trie: StandardTrie<'_, F> = + Trie::new(&s.core.hasher.poseidon_cache, &s.inverse_poseidon_cache); // TODO: Use a custom type. 
diff --git a/src/coprocessor/trie/mod.rs b/src/coprocessor/trie/mod.rs
index 41a0c19f8..06be8f7f2 100644
--- a/src/coprocessor/trie/mod.rs
+++ b/src/coprocessor/trie/mod.rs
@@ -56,7 +56,8 @@ pub struct NewCoprocessor<F> {
 
 impl<F: LurkField> Coprocessor<F> for NewCoprocessor<F> {
     fn evaluate_simple(&self, s: &Store<F>, _args: &[Ptr]) -> Ptr {
-        let trie: StandardTrie<'_, F> = Trie::new(&s.poseidon_cache, &s.inverse_poseidon_cache);
+        let trie: StandardTrie<'_, F> =
+            Trie::new(&s.core.hasher.poseidon_cache, &s.inverse_poseidon_cache);
 
         // TODO: Use a custom type.
         s.num(trie.root)
@@ -81,7 +82,8 @@ impl<F: LurkField> CoCircuit<F> for NewCoprocessor<F> {
         _not_dummy: &Boolean,
         _args: &[AllocatedPtr<F>],
     ) -> Result<AllocatedPtr<F>, SynthesisError> {
-        let trie: StandardTrie<'_, F> = Trie::new(&s.poseidon_cache, &s.inverse_poseidon_cache);
+        let trie: StandardTrie<'_, F> =
+            Trie::new(&s.core.hasher.poseidon_cache, &s.inverse_poseidon_cache);
 
         // TODO: Use a custom type.
         let root = s.num(trie.root);
@@ -102,10 +104,13 @@ impl<F: LurkField> Coprocessor<F> for LookupCoprocessor<F> {
         let key_ptr = &args[1];
 
         // TODO: Check tags.
-        let root_scalar = *s.hash_ptr(root_ptr).value();
-        let key_scalar = *s.hash_ptr(key_ptr).value();
-        let trie: StandardTrie<'_, F> =
-            Trie::new_with_root(&s.poseidon_cache, &s.inverse_poseidon_cache, root_scalar);
+        let root_scalar = *s.hash_ptr(root_ptr).hash();
+        let key_scalar = *s.hash_ptr(key_ptr).hash();
+        let trie: StandardTrie<'_, F> = Trie::new_with_root(
+            &s.core.hasher.poseidon_cache,
+            &s.inverse_poseidon_cache,
+            root_scalar,
+        );
 
         s.comm(trie.lookup_aux(key_scalar).unwrap())
     }
@@ -176,7 +181,7 @@ impl<F: LurkField> CoCircuit<F> for LookupCoprocessor<F> {
             root_ptr,
             key_ptr,
             not_dummy,
-            &s.poseidon_cache,
+            &s.core.hasher.poseidon_cache,
             &s.inverse_poseidon_cache,
         )?;
 
@@ -208,11 +213,14 @@ impl<F: LurkField> Coprocessor<F> for InsertCoprocessor<F> {
         let root_ptr = &args[0];
         let key_ptr = &args[1];
         let val_ptr = &args[2];
-        let root_scalar = *s.hash_ptr(root_ptr).value();
-        let key_scalar = *s.hash_ptr(key_ptr).value();
-        let val_scalar = *s.hash_ptr(val_ptr).value();
-        let mut trie: StandardTrie<'_, F> =
-            Trie::new_with_root(&s.poseidon_cache, &s.inverse_poseidon_cache, root_scalar);
+        let root_scalar = *s.hash_ptr(root_ptr).hash();
+        let key_scalar = *s.hash_ptr(key_ptr).hash();
+        let val_scalar = *s.hash_ptr(val_ptr).hash();
+        let mut trie: StandardTrie<'_, F> = Trie::new_with_root(
+            &s.core.hasher.poseidon_cache,
+            &s.inverse_poseidon_cache,
+            root_scalar,
+        );
         trie.insert(key_scalar, val_scalar).unwrap();
 
         s.num(trie.root)
@@ -290,7 +298,7 @@ impl<F: LurkField> CoCircuit<F> for InsertCoprocessor<F> {
             key_ptr,
             val_ptr,
             not_dummy,
-            &s.poseidon_cache,
+            &s.core.hasher.poseidon_cache,
             &s.inverse_poseidon_cache,
         )?;
diff --git a/src/coroutine/memoset/demo.rs b/src/coroutine/memoset/demo.rs
index 787c9e7e8..f39c6fdb0 100644
--- a/src/coroutine/memoset/demo.rs
+++ b/src/coroutine/memoset/demo.rs
@@ -32,7 +32,7 @@ impl<F: LurkField> Query<F> for DemoQuery<F> {
         match self {
             Self::Factorial(n) => {
                 let n_zptr = scope.store.hash_ptr(n);
-                let n = n_zptr.value();
+                let n = n_zptr.hash();
 
                 if *n == F::ZERO {
                     scope.store.num(F::ONE)
@@ -40,7 +40,7 @@ impl<F: LurkField> Query<F> for DemoQuery<F> {
                     let sub_query = Self::Factorial(scope.store.num(*n - F::ONE));
                     let m_ptr = self.recursive_eval(scope, sub_query);
                     let m_zptr = scope.store.hash_ptr(&m_ptr);
-                    let m = m_zptr.value();
+                    let m = m_zptr.hash();
 
                     scope.store.num(*n * m)
                 }
*s.hash_ptr(var).value()); + AllocatedNum::alloc_infallible(ns!(cs, "var"), || *s.hash_ptr(var).hash()); let allocated_env = - AllocatedNum::alloc_infallible(ns!(cs, "env"), || *s.hash_ptr(env).value()); + AllocatedNum::alloc_infallible(ns!(cs, "env"), || *s.hash_ptr(env).hash()); Self::CQ::Lookup(allocated_var, allocated_env) } _ => unreachable!(), diff --git a/src/coroutine/memoset/mod.rs b/src/coroutine/memoset/mod.rs index 16ea867eb..263985aed 100644 --- a/src/coroutine/memoset/mod.rs +++ b/src/coroutine/memoset/mod.rs @@ -52,12 +52,11 @@ use crate::field::LurkField; use crate::lem::circuit::GlobalAllocator; use crate::lem::tag::Tag; use crate::lem::{ - pointers::Ptr, + pointers::{Ptr, ZPtr}, store::{Store, WithStore}, }; use crate::symbol::Symbol; use crate::tag::{ExprTag, Tag as XTag}; -use crate::z_ptr::ZPtr; use multiset::MultiSet; pub use query::{CircuitQuery, Query}; @@ -107,7 +106,7 @@ impl Transcript { fn r(&self, s: &Store) -> F { let z_ptr = s.hash_ptr(&self.acc); assert_eq!(Tag::Expr(ExprTag::Cons), *z_ptr.tag()); - *z_ptr.value() + *z_ptr.hash() } #[allow(dead_code)] @@ -183,7 +182,7 @@ impl CircuitTranscript { #[allow(dead_code)] fn dbg(&self, s: &Store) { - let z = self.acc.get_value::().unwrap(); + let z = self.acc.get_value().unwrap(); let transcript = s.to_ptr(&z); tracing::debug!("transcript: {}", transcript.fmt_to_string_simple(s)); @@ -328,7 +327,7 @@ pub struct Scope, M, F: LurkField> { /// unique keys: query-index -> [key] unique_inserted_keys: IndexMap>, // This may become an explicit map or something allowing more fine-grained control. - provenances: OnceCell, ZPtr>>, + provenances: OnceCell, ZPtr>>, default_rc: usize, pub(crate) store: Arc>, pub(crate) runtime_data: Q::RD, @@ -424,7 +423,7 @@ pub struct CircuitScope<'a, F: LurkField, CM, RD> { /// k -> allocated v transcript: CircuitTranscript, /// k -> prov - provenances: Option<&'a IndexMap, ZPtr>>, + provenances: Option<&'a IndexMap, ZPtr>>, acc: Option>, pub(crate) runtime_data: RD, } @@ -444,7 +443,7 @@ pub struct CoroutineCircuit<'a, F: LurkField, M, Q: Query> { struct WitnessData<'a, F: LurkField, M> { keys: &'a [Ptr], memoset: &'a M, - provenances: &'a IndexMap, ZPtr>, + provenances: &'a IndexMap, ZPtr>, next_query_index: usize, } @@ -631,7 +630,7 @@ impl> Scope, F> { transcript } - fn provenances(&self) -> &IndexMap, ZPtr> { + fn provenances(&self) -> &IndexMap, ZPtr> { self.provenances.get_or_init(|| self.compute_provenances()) } @@ -647,7 +646,7 @@ impl> Scope, F> { // } // } - // fn dbg_provenances_zptrs(store: &Store, provenances: &IndexMap, ZPtr>) { + // fn dbg_provenances_zptrs(store: &Store, provenances: &IndexMap, ZPtr>) { // for (q, provenance) in provenances { // dbg!( // store.to_ptr(q).fmt_to_string_simple(store), @@ -656,7 +655,7 @@ impl> Scope, F> { // } // } - fn compute_provenances(&self) -> IndexMap, ZPtr> { + fn compute_provenances(&self) -> IndexMap, ZPtr> { let mut provenances = IndexMap::default(); let mut ready = HashSet::new(); @@ -924,7 +923,7 @@ impl<'a, F: LurkField, RD> CircuitScope<'a, F, LogMemoCircuit<'a, F>, RD> { g: &GlobalAllocator, s: &Store, memoset: LogMemoCircuit<'a, F>, - provenances: Option<&'a IndexMap, ZPtr>>, + provenances: Option<&'a IndexMap, ZPtr>>, runtime_data: RD, ) -> Self { Self { @@ -1381,7 +1380,7 @@ impl LogMemo { fn acc_add(&self, acc: &Ptr, kv: &Ptr, store: &Store) -> Ptr { let acc_num = store.expect_f(acc.get_atom().unwrap()); - let kv_num = store.hash_raw_ptr(kv.raw()).0; + let kv_num = store.hash_ptr_val(kv.val()).0; let element = 
diff --git a/src/field.rs b/src/field.rs
index b53d7939e..356659931 100644
--- a/src/field.rs
+++ b/src/field.rs
@@ -285,6 +285,15 @@ impl LurkField for GrumpkinScalar {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct FWrap<F>(pub F);
 
+impl<F> FWrap<F> {
+    /// Returns a reference to the wrapped value
+    #[inline]
+    pub fn get(&self) -> &F {
+        let Self(f) = self;
+        f
+    }
+}
+
 impl<F: LurkField> Copy for FWrap<F> {}
 
 #[cfg(not(target_arch = "wasm32"))]
diff --git a/src/lem/circuit.rs b/src/lem/circuit.rs
index 7eb86b97f..ab683ef09 100644
--- a/src/lem/circuit.rs
+++ b/src/lem/circuit.rs
@@ -145,8 +145,8 @@ impl<F: LurkField> GlobalAllocator<F> {
         cs: &mut CS,
         z_ptr: ZPtr<F>,
     ) -> AllocatedPtr<F> {
-        let crate::z_ptr::ZPtr(tag, hash) = z_ptr;
-        self.alloc_z_ptr_from_parts(cs, &tag, hash)
+        let (tag, hash) = z_ptr.into_parts();
+        self.alloc_z_ptr_from_parts(cs, &tag, hash.0)
     }
 
     #[inline]
@@ -221,22 +221,22 @@ fn allocate_img_for_slot<F: LurkField, CS: ConstraintSystem<F>>(
             SlotType::Hash4 => AllocatedVal::Number(poseidon_hash(
                 cs,
                 preallocated_preimg,
-                store.poseidon_cache.constants.c4(),
+                store.core.hasher.poseidon_cache.constants.c4(),
             )?),
             SlotType::Hash6 => AllocatedVal::Number(poseidon_hash(
                 cs,
                 preallocated_preimg,
-                store.poseidon_cache.constants.c6(),
+                store.core.hasher.poseidon_cache.constants.c6(),
             )?),
             SlotType::Hash8 => AllocatedVal::Number(poseidon_hash(
                 cs,
                 preallocated_preimg,
-                store.poseidon_cache.constants.c8(),
+                store.core.hasher.poseidon_cache.constants.c8(),
             )?),
             SlotType::Commitment => AllocatedVal::Number(poseidon_hash(
                 cs,
                 preallocated_preimg,
-                store.poseidon_cache.constants.c3(),
+                store.core.hasher.poseidon_cache.constants.c3(),
             )?),
             SlotType::BitDecomp => {
                 AllocatedVal::Bits(preallocated_preimg[0].to_bits_le_strict(cs)?)
@@ -275,8 +275,8 @@ pub(crate) fn allocate_slot<F: LurkField, CS: ConstraintSystem<F>>(
                     preallocated_preimg.push(alloc_ptr.tag().clone());
                     preallocated_preimg.push(alloc_ptr.hash().clone());
                 }
-                Val::Num(raw) => {
-                    let f = store.hash_raw_ptr(raw).0;
+                Val::Num(ptr_val) => {
+                    let f = store.hash_ptr_val(ptr_val).0;
                     preallocated_preimg.push(AllocatedNum::alloc_infallible(
                         ns!(cs, format!("component {component_idx} slot {slot}")),
                         || f,
@@ -690,7 +690,7 @@ fn synthesize_block<F: LurkField, CS: ConstraintSystem<F>, C: Coprocessor<F>>(
                     if not_dummy_and_not_blank {
                         let z_ptr = &collected_z_ptrs[i];
                         if ptr.tag().get_value() != Some(z_ptr.tag_field())
-                            || ptr.hash().get_value() != Some(*z_ptr.value())
+                            || ptr.hash().get_value() != Some(*z_ptr.hash())
                         {
                             bail!("Mismatch between evaluate and synthesize outputs for coprocessor {sym} (pointer {i})")
                         }
@@ -1346,7 +1346,7 @@ fn synthesize_block<F: LurkField, CS: ConstraintSystem<F>, C: Coprocessor<F>>(
                 let mut cases_vec = Vec::with_capacity(cases.len());
                 for (lit, block) in cases {
                     let lit_ptr = lit.to_ptr(ctx.store);
-                    let lit_hash = *ctx.store.hash_ptr(&lit_ptr).value();
+                    let lit_hash = *ctx.store.hash_ptr(&lit_ptr).hash();
                     cases_vec.push((lit_hash, block));
                 }
 
@@ -1572,7 +1572,7 @@ impl Func {
                     let lit_ptr = lit.to_ptr(store);
                     let lit_z_ptr = store.hash_ptr(&lit_ptr);
                     globals.insert(FWrap(lit_z_ptr.tag_field()));
-                    globals.insert(FWrap(*lit_z_ptr.value()));
+                    globals.insert(FWrap(*lit_z_ptr.hash()));
                 }
                 Op::Cast(_, tag, _) => {
                     globals.insert(FWrap(tag.to_field()));
diff --git a/src/lem/coroutine/eval.rs b/src/lem/coroutine/eval.rs
index 1347c1268..7be98b6a6 100644
--- a/src/lem/coroutine/eval.rs
+++ b/src/lem/coroutine/eval.rs
@@ -4,9 +4,8 @@ use super::toplevel::ToplevelQuery;
 
 use crate::coroutine::memoset::{LogMemo, Query, Scope};
 use crate::field::LurkField;
-use crate::lem::pointers::{Ptr, RawPtr};
+use crate::lem::pointers::{IVal, 
Ptr}; use crate::lem::slot::Val; -use crate::lem::store::{fetch_ptrs, intern_ptrs}; use crate::lem::tag::Tag; use crate::lem::var_map::VarMap; use crate::lem::{Block, Ctrl, Func, Lit, Op}; @@ -85,7 +84,7 @@ fn run( let b = bindings.get_ptr(b)?; // In order to compare Ptrs, we *must* resolve the hashes. Otherwise, we risk failing to recognize equality of // compound data with opaque data in either element's transitive closure. - let c = scope.store.hash_ptr(&a).value() == scope.store.hash_ptr(&b).value(); + let c = scope.store.hash_ptr(&a).hash() == scope.store.hash_ptr(&b).hash(); bindings.insert_bool(tgt.clone(), c); } Op::Not(tgt, a) => { @@ -103,9 +102,9 @@ fn run( bindings.insert_bool(tgt.clone(), a || b); } Op::Add(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let c = if let (RawPtr::Atom(f), RawPtr::Atom(g)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let c = if let (IVal::Atom(f), IVal::Atom(g)) = (a, b) { let (f, g) = (scope.store.expect_f(f), scope.store.expect_f(g)); scope.store.intern_atom(Tag::Expr(Num), *f + *g) } else { @@ -114,9 +113,9 @@ fn run( bindings.insert_ptr(tgt.clone(), c); } Op::Sub(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let c = if let (RawPtr::Atom(f), RawPtr::Atom(g)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let c = if let (IVal::Atom(f), IVal::Atom(g)) = (a, b) { let (f, g) = (scope.store.expect_f(f), scope.store.expect_f(g)); scope.store.intern_atom(Tag::Expr(Num), *f - *g) } else { @@ -125,9 +124,9 @@ fn run( bindings.insert_ptr(tgt.clone(), c); } Op::Mul(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let c = if let (RawPtr::Atom(f), RawPtr::Atom(g)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let c = if let (IVal::Atom(f), IVal::Atom(g)) = (a, b) { let (f, g) = (scope.store.expect_f(f), scope.store.expect_f(g)); scope.store.intern_atom(Tag::Expr(Num), *f * *g) } else { @@ -136,9 +135,9 @@ fn run( bindings.insert_ptr(tgt.clone(), c); } Op::Div(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let c = if let (RawPtr::Atom(f), RawPtr::Atom(g)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let c = if let (IVal::Atom(f), IVal::Atom(g)) = (a, b) { let (f, g) = (scope.store.expect_f(f), scope.store.expect_f(g)); if g == &F::ZERO { bail!("Can't divide by zero") @@ -152,9 +151,9 @@ fn run( bindings.insert_ptr(tgt.clone(), c); } Op::Lt(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let c = if let (RawPtr::Atom(f_idx), RawPtr::Atom(g_idx)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let c = if let (IVal::Atom(f_idx), IVal::Atom(g_idx)) = (a, b) { let f = *scope.store.expect_f(f_idx); let g = *scope.store.expect_f(g_idx); let f = BaseNum::Scalar(f); @@ -167,8 +166,8 @@ fn run( } Op::Trunc(tgt, a, n) => { assert!(*n <= 64); - let a = *bindings.get_ptr(a)?.raw(); - let c = if let RawPtr::Atom(f_idx) = a { + let a = *bindings.get_ptr(a)?.val(); + let c = if let IVal::Atom(f_idx) = a { let f = *scope.store.expect_f(f_idx); let b = if *n < 64 { (1 << *n) - 1 } else { u64::MAX }; scope @@ -180,9 +179,9 @@ fn run( bindings.insert_ptr(tgt.clone(), c); } Op::DivRem64(tgt, a, b) => { - let a = 
*bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let (c1, c2) = if let (RawPtr::Atom(f), RawPtr::Atom(g)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let (c1, c2) = if let (IVal::Atom(f), IVal::Atom(g)) = (a, b) { let f = *scope.store.expect_f(f); let g = *scope.store.expect_f(g); if g == F::ZERO { @@ -207,29 +206,32 @@ fn run( Op::Recv(_) => todo!("not supported yet"), Op::Cons2(img, tag, preimg) => { let preimg_ptrs = bindings.get_many_ptr(preimg)?; - let tgt_ptr = intern_ptrs!(scope.store, *tag, preimg_ptrs[0], preimg_ptrs[1]); + let tgt_ptr = + scope + .store + .intern_tuple2([preimg_ptrs[0], preimg_ptrs[1]], *tag, None); bindings.insert_ptr(img.clone(), tgt_ptr); } Op::Cons3(img, tag, preimg) => { let preimg_ptrs = bindings.get_many_ptr(preimg)?; - let tgt_ptr = intern_ptrs!( - scope.store, + let tgt_ptr = scope.store.intern_tuple3( + [preimg_ptrs[0], preimg_ptrs[1], preimg_ptrs[2]], *tag, - preimg_ptrs[0], - preimg_ptrs[1], - preimg_ptrs[2] + None, ); bindings.insert_ptr(img.clone(), tgt_ptr); } Op::Cons4(img, tag, preimg) => { let preimg_ptrs = bindings.get_many_ptr(preimg)?; - let tgt_ptr = intern_ptrs!( - scope.store, + let tgt_ptr = scope.store.intern_tuple4( + [ + preimg_ptrs[0], + preimg_ptrs[1], + preimg_ptrs[2], + preimg_ptrs[3], + ], *tag, - preimg_ptrs[0], - preimg_ptrs[1], - preimg_ptrs[2], - preimg_ptrs[3] + None, ); bindings.insert_ptr(img.clone(), tgt_ptr); } @@ -238,7 +240,7 @@ fn run( let Some(idx) = img_ptr.get_index2() else { bail!("{img} isn't a Tree2 pointer"); }; - let Some(preimg_ptrs) = fetch_ptrs!(scope.store, 2, idx) else { + let Some(preimg_ptrs) = scope.store.fetch_tuple2(idx) else { bail!("Couldn't fetch {img}'s children") }; for (var, ptr) in preimg.iter().zip(preimg_ptrs.iter()) { @@ -250,7 +252,7 @@ fn run( let Some(idx) = img_ptr.get_index3() else { bail!("{img} isn't a Tree3 pointer"); }; - let Some(preimg_ptrs) = fetch_ptrs!(scope.store, 3, idx) else { + let Some(preimg_ptrs) = scope.store.fetch_tuple3(idx) else { bail!("Couldn't fetch {img}'s children") }; for (var, ptr) in preimg.iter().zip(preimg_ptrs.iter()) { @@ -262,7 +264,7 @@ fn run( let Some(idx) = img_ptr.get_index4() else { bail!("{img} isn't a Tree4 pointer"); }; - let Some(preimg_ptrs) = fetch_ptrs!(scope.store, 4, idx) else { + let Some(preimg_ptrs) = scope.store.fetch_tuple4(idx) else { bail!("Couldn't fetch {img}'s children") }; for (var, ptr) in preimg.iter().zip(preimg_ptrs.iter()) { @@ -283,14 +285,14 @@ fn run( .store .pop_binding(&img_ptr) .context("cannot extract {img}'s binding")?; - for (var, ptr) in preimg.iter().zip(preimg_ptrs.into_iter()) { - bindings.insert_ptr(var.clone(), ptr); + for (var, ptr) in preimg.iter().zip(preimg_ptrs.iter()) { + bindings.insert_ptr(var.clone(), *ptr); } } Op::Hide(tgt, sec, src) => { let src_ptr = bindings.get_ptr(src)?; let sec_ptr = bindings.get_ptr(sec)?; - let (Tag::Expr(Num), RawPtr::Atom(secret_idx)) = sec_ptr.parts() else { + let (Tag::Expr(Num), IVal::Atom(secret_idx)) = sec_ptr.parts() else { bail!("{sec} is not a numeric pointer") }; let secret = *scope.store.expect_f(*secret_idx); @@ -299,7 +301,7 @@ fn run( } Op::Open(tgt_secret, tgt_ptr, comm) => { let comm_ptr = bindings.get_ptr(comm)?; - let (Tag::Expr(Comm), RawPtr::Atom(hash)) = comm_ptr.parts() else { + let (Tag::Expr(Comm), IVal::Atom(hash)) = comm_ptr.parts() else { bail!("{comm} is not a comm pointer") }; let hash = *scope.store.expect_f(*hash); @@ -309,7 +311,7 @@ fn run( 
bindings.insert_ptr(tgt_ptr.clone(), *ptr);
                 bindings.insert_ptr(
                     tgt_secret.clone(),
-                    scope.store.intern_atom(Tag::Expr(Num), *secret),
+                    scope.store.intern_atom(Tag::Expr(Num), secret.0),
                 );
             }
             Op::Unit(f) => f(),
diff --git a/src/lem/coroutine/synthesis.rs b/src/lem/coroutine/synthesis.rs
index 3d23274ab..1c416aed5 100644
--- a/src/lem/coroutine/synthesis.rs
+++ b/src/lem/coroutine/synthesis.rs
@@ -17,6 +17,7 @@ use crate::circuit::gadgets::constraints::{
 use crate::circuit::gadgets::pointer::AllocatedPtr;
 use crate::coroutine::memoset::{CircuitQuery, CircuitScope, LogMemoCircuit};
 use crate::lem::circuit::{BoundAllocations, GlobalAllocator};
+use crate::lem::pointers::{IVal, Ptr};
 use crate::lem::store::Store;
 use crate::lem::tag::Tag;
 use crate::lem::{Block, Ctrl, Func, Op, Var};
@@ -54,7 +55,7 @@ fn allocate_return<F: LurkField, CS: ConstraintSystem<F>>(
     let (_, selected_branch_output) = &branches[selected_index.unwrap_or(0)];
     let mut output = Vec::with_capacity(selected_branch_output.len());
     for (i, z) in selected_branch_output.iter().enumerate() {
-        let z_ptr = || z.get_value::<Tag>().ok_or(AssignmentMissing);
+        let z_ptr = || z.get_value().ok_or(AssignmentMissing);
         let ptr = AllocatedPtr::alloc(ns!(cs, format!("matched output {i}")), z_ptr)?;
         output.push(ptr);
     }
@@ -194,7 +195,7 @@ fn synthesize_run<'a, F: LurkField, CS: ConstraintSystem<F>>(
                 let res = poseidon_hash(
                     ns!(cs, "poseidon_hash"),
                     preimg,
-                    store.poseidon_cache.constants.c4(),
+                    store.core.hasher.poseidon_cache.constants.c4(),
                 )?;
                 let tag = g.alloc_tag_cloned(&mut cs, &Env);
                 let ptr = AllocatedPtr::from_parts(tag, res);
@@ -202,17 +203,17 @@ fn synthesize_run<'a, F: LurkField, CS: ConstraintSystem<F>>(
             }
             Op::PopBinding(preimg, img) => {
                 let img = bound_allocations.get_ptr(img)?;
-                let preimg_alloc_nums = if let Some(img_val) = img.get_value::<Tag>() {
-                    let raw_ptr = store.to_raw_ptr(&FWrap(*img_val.value()));
-                    let idx = raw_ptr.get_hash4().unwrap();
-                    store
-                        .expect_raw_ptrs::<4>(idx)
-                        .iter()
+                let preimg_alloc_nums = if let Some(img_val) = img.get_value() {
+                    let [a, b, c] = store.pop_binding(&store.to_ptr(&img_val)).unwrap();
+                    let a = store.hash_ptr(a);
+                    let b = store.hash_ptr(b);
+                    let c = store.hash_ptr(c);
+                    [*a.hash(), b.tag_field(), *b.hash(), *c.hash()]
+                        .into_iter()
                         .enumerate()
-                        .map(|(i, raw)| {
+                        .map(|(i, f)| {
                             let cs = ns!(cs, format!("preimg {i}"));
-                            let z = store.hash_raw_ptr(raw).0;
-                            AllocatedNum::alloc_infallible(cs, || z)
+                            AllocatedNum::alloc_infallible(cs, || f)
                         })
                         .collect::<Vec<_>>()
                 } else {
@@ -227,7 +228,7 @@ fn synthesize_run<'a, F: LurkField, CS: ConstraintSystem<F>>(
                 let hash = poseidon_hash(
                     ns!(cs, "poseidon_hash"),
                     preimg_alloc_nums.clone(),
-                    store.poseidon_cache.constants.c6(),
+                    store.core.hasher.poseidon_cache.constants.c6(),
                 )?;
 
                 implies_equal(
@@ -468,7 +469,7 @@ fn synthesize_run<'a, F: LurkField, CS: ConstraintSystem<F>>(
                 let hash = poseidon_hash(
                     ns!(cs, "poseidon_hash"),
                     preimg,
-                    store.poseidon_cache.constants.c3(),
+                    store.core.hasher.poseidon_cache.constants.c3(),
                 )?;
 
                 implies_equal(
@@ -483,35 +484,32 @@ fn synthesize_run<'a, F: LurkField, CS: ConstraintSystem<F>>(
             }
             Op::Open(sec, pay, comm) => {
                 let comm = bound_allocations.get_ptr(comm)?;
-                let comm_tag = g.alloc_tag(&mut cs, &Comm);
-                let preimg_alloc_nums = if let Some(comm_val) = comm.get_value::<Tag>() {
-                    let raw_ptr = store.to_raw_ptr(&FWrap(*comm_val.value()));
-                    let idx = raw_ptr.get_hash4().unwrap();
-                    store
-                        .expect_raw_ptrs::<4>(idx)
-                        .iter()
-                        .enumerate()
-                        .map(|(i, raw)| {
-                            let cs = ns!(cs, format!("preimg {i}"));
-                            let z = store.hash_raw_ptr(raw).0;
-                            AllocatedNum::alloc_infallible(cs, || z)
-                        })
-                        .collect::<Vec<_>>()
+                let preimg_alloc_nums = if let Some(hash) = comm.hash().get_value() {
+                    let (FWrap(secret), payload) = store.open(hash).unwrap();
+                    let (payload_tag, FWrap(payload_hash)) = store.hash_ptr(payload).into_parts();
+                    let mut alloc = |idx, f| {
+                        AllocatedNum::alloc_infallible(ns!(cs, format!("preimg {idx}")), || f)
+                    };
+                    let secret = alloc(0, *secret);
+                    let payload_tag = alloc(1, payload_tag.to_field());
+                    let payload_hash = alloc(2, payload_hash);
+                    vec![secret, payload_tag, payload_hash]
                 } else {
-                    (0..4)
+                    (0..3)
                         .map(|i| {
                             let cs = ns!(cs, format!("preimg {i}"));
                             AllocatedNum::alloc_infallible(cs, || F::ZERO)
                         })
-                        .collect::<Vec<_>>()
+                        .collect()
                 };
 
                 let hash = poseidon_hash(
                     ns!(cs, "poseidon_hash"),
                     preimg_alloc_nums.clone(),
-                    store.poseidon_cache.constants.c6(),
+                    store.core.hasher.poseidon_cache.constants.c3(),
                 )?;
 
+                let comm_tag = g.alloc_tag(&mut cs, &Comm);
                 implies_equal(
                     ns!(cs, "implies equal comm.tag"),
                     not_dummy,
@@ -691,7 +689,7 @@ fn synthesize_run<'a, F: LurkField, CS: ConstraintSystem<F>>(
                 let mut cases_vec = Vec::with_capacity(cases.len());
                 for (lit, block) in cases {
                     let lit_ptr = lit.to_ptr(store);
-                    let lit_hash = *store.hash_ptr(&lit_ptr).value();
+                    let lit_hash = *store.hash_ptr(&lit_ptr).hash();
                     cases_vec.push((lit_hash, block));
                 }
 
@@ -732,7 +730,7 @@ fn synthesize_cons<const N: usize, F: LurkField, CS: ConstraintSystem<F>>(
     g: &GlobalAllocator<F>,
 ) -> Result<()> {
     let preimg = retrieve_nums(preimg, bound_allocations)?;
-    let constants = &store.poseidon_cache.constants;
+    let constants = &store.core.hasher.poseidon_cache.constants;
     let hash = match N {
         2 => poseidon_hash(ns!(cs, "poseidon_hash"), preimg, constants.c4())?,
         3 => poseidon_hash(ns!(cs, "poseidon_hash"), preimg, constants.c6())?,
@@ -755,30 +753,38 @@ fn synthesize_decons<const N: usize, F: LurkField, CS: ConstraintSystem<F>>(
 ) -> Result<()> {
     let img = bound_allocations.get_ptr(img)?;
     let preimg_alloc_nums = if Some(true) == not_dummy.get_value() {
-        let img_val = img.get_value::<Tag>().unwrap();
-        let raw_ptr = store.to_raw_ptr(&FWrap(*img_val.value()));
-        let idx = raw_ptr.get_hash4().unwrap();
-        store
-            .expect_raw_ptrs::<4>(idx)
-            .iter()
-            .enumerate()
-            .map(|(i, raw)| {
-                let cs = ns!(cs, format!("preimg {i}"));
-                let z = store.hash_raw_ptr(raw).0;
-                AllocatedNum::alloc_infallible(cs, || z)
-            })
-            .collect::<Vec<_>>()
+        let img_z_ptr = img.get_value().unwrap();
+        let ptrs: &[Ptr] = match store.to_ptr_val(img_z_ptr.val()) {
+            IVal::Tuple2(idx) => store.expect_tuple2(idx),
+            IVal::Tuple3(idx) => store.expect_tuple3(idx),
+            IVal::Tuple4(idx) => store.expect_tuple4(idx),
+            _ => panic!("Invalid decons"),
+        };
+        assert_eq!(ptrs.len(), N);
+        let mut allocations = Vec::with_capacity(2 * N);
+        for (i, ptr) in ptrs.iter().enumerate() {
+            let (tag, FWrap(hash)) = store.hash_ptr(ptr).into_parts();
+            allocations.push(AllocatedNum::alloc_infallible(
+                ns!(cs, format!("preimg {}", 2 * i)),
+                || tag.to_field(),
+            ));
+            allocations.push(AllocatedNum::alloc_infallible(
+                ns!(cs, format!("preimg {}", 2 * i + 1)),
+                || hash,
+            ));
+        }
+        allocations
     } else {
-        (0..4)
+        (0..2 * N)
             .map(|i| {
                 let cs = ns!(cs, format!("preimg {i}"));
                 AllocatedNum::alloc_infallible(cs, || F::ZERO)
             })
-            .collect::<Vec<_>>()
+            .collect()
     };
 
     let preimg_clone = preimg_alloc_nums.clone();
-    let constants = &store.poseidon_cache.constants;
+    let constants = &store.core.hasher.poseidon_cache.constants;
     let hash = match N {
         2 => poseidon_hash(ns!(cs, "poseidon_hash"), preimg_clone, constants.c4())?,
         3 => poseidon_hash(ns!(cs, "poseidon_hash"), preimg_clone, constants.c6())?,
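The `Op::Open` rewrite above is more than a rename: the opened commitment's preimage is now the canonical 3-element form (secret, payload tag, payload hash) hashed with the 3-ary constants, where the old code rebuilt a 4-element raw-pointer preimage under `c6`. This matches `SlotType::Commitment` using `c3()` in `src/lem/circuit.rs`. A hedged sketch of the native relation the gadget enforces, with `poseidon3` standing in for the store's Poseidon cache under the `c3()` constants:

```rust
/// The invariant checked in-circuit:
/// comm_hash == poseidon3([secret, payload_tag, payload_hash]).
struct Opening<F> {
    secret: F,
    payload_tag: F,
    payload_hash: F,
}

fn commitment_digest<F: Copy>(poseidon3: impl Fn([F; 3]) -> F, o: &Opening<F>) -> F {
    poseidon3([o.secret, o.payload_tag, o.payload_hash])
}
```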
diff --git a/src/lem/coroutine/toplevel.rs b/src/lem/coroutine/toplevel.rs
index 06b45a2c3..9623e82a6 100644
--- a/src/lem/coroutine/toplevel.rs
+++ b/src/lem/coroutine/toplevel.rs
@@ -274,7 +274,6 @@ mod test {
     use super::*;
     use crate::coroutine::memoset::prove::MemosetProver;
     use crate::coroutine::memoset::CoroutineCircuit;
-    use crate::lem::tag::Tag;
     use crate::proof::RecursiveSNARKTrait;
     use crate::{func, state::user_sym};
 
@@ -393,12 +392,12 @@ mod test {
             CoroutineCircuit::<_, _, ToplevelQuery<_>>::blank(index, 1, &s, &toplevel);
         let mut cs = BenchCS::new();
         let dummy = [
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
         ];
         factorial_circuit
             .supernova_synthesize(&mut cs, &dummy)
@@ -410,12 +409,12 @@ mod test {
             CoroutineCircuit::<_, _, ToplevelQuery<_>>::blank(index, 1, &s, &toplevel);
         let mut cs = BenchCS::new();
         let dummy = [
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
-            AllocatedPtr::alloc_infallible::<_, _, Tag>(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
+            AllocatedPtr::alloc_infallible(&mut cs, || unreachable!()),
         ];
         even_circuit.supernova_synthesize(&mut cs, &dummy).unwrap();
         expect!("1772").assert_eq(&cs.num_constraints().to_string());
diff --git a/src/lem/eval.rs b/src/lem/eval.rs
index 94948f07f..7db66e7bc 100644
--- a/src/lem/eval.rs
+++ b/src/lem/eval.rs
@@ -23,8 +23,8 @@ use crate::{
 
 use super::{
     interpreter::{Frame, Hints},
-    pointers::{Ptr, RawPtr},
-    store::{fetch_ptrs, Store},
+    pointers::{IVal, Ptr},
+    store::Store,
     Ctrl, Func, Lit, LitType, Op, Tag, Var,
 };
 
@@ -43,9 +43,8 @@ fn get_pc<F: LurkField, C: Coprocessor<F>>(
     lang: &Lang<F, C>,
 ) -> usize {
     match expr.parts() {
-        (Tag::Expr(Cproc), RawPtr::Hash4(idx)) => {
-            let [cproc, _] =
-                &fetch_ptrs!(store, 2, *idx).expect("Coprocessor expression is not interned");
+        (Tag::Expr(Cproc), IVal::Tuple2(idx)) => {
+            let [cproc, _] = store.expect_tuple2(*idx);
             let cproc_sym = store
                 .fetch_symbol(cproc)
                 .expect("Coprocessor expression is not interned");
diff --git a/src/lem/interpreter.rs b/src/lem/interpreter.rs
index 3d4712203..d2e51519f 100644
--- a/src/lem/interpreter.rs
+++ b/src/lem/interpreter.rs
@@ -1,9 +1,9 @@
 use anyhow::{anyhow, bail, Context, Result};
 
 use super::{
-    pointers::{Ptr, RawPtr},
+    pointers::{IVal, Ptr},
slot::{SlotData, Val}, - store::{fetch_ptrs, intern_ptrs, Store}, + store::Store, tag::Tag, var_map::VarMap, Block, Ctrl, Func, Lit, Op, Var, @@ -206,7 +206,7 @@ impl Block { let b = bindings.get_ptr(b)?; // In order to compare Ptrs, we *must* resolve the hashes. Otherwise, we risk failing to recognize equality of // compound data with opaque data in either element's transitive closure. - let c = store.hash_ptr(&a).value() == store.hash_ptr(&b).value(); + let c = store.hash_ptr(&a).val() == store.hash_ptr(&b).val(); bindings.insert_bool(tgt.clone(), c); } Op::Not(tgt, a) => { @@ -224,9 +224,9 @@ impl Block { bindings.insert_bool(tgt.clone(), a || b); } Op::Add(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let c = if let (RawPtr::Atom(f), RawPtr::Atom(g)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let c = if let (IVal::Atom(f), IVal::Atom(g)) = (a, b) { let (f, g) = (store.expect_f(f), store.expect_f(g)); store.intern_atom(Tag::Expr(Num), *f + *g) } else { @@ -235,9 +235,9 @@ impl Block { bindings.insert_ptr(tgt.clone(), c); } Op::Sub(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let c = if let (RawPtr::Atom(f), RawPtr::Atom(g)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let c = if let (IVal::Atom(f), IVal::Atom(g)) = (a, b) { let (f, g) = (store.expect_f(f), store.expect_f(g)); store.intern_atom(Tag::Expr(Num), *f - *g) } else { @@ -246,9 +246,9 @@ impl Block { bindings.insert_ptr(tgt.clone(), c); } Op::Mul(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let c = if let (RawPtr::Atom(f), RawPtr::Atom(g)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let c = if let (IVal::Atom(f), IVal::Atom(g)) = (a, b) { let (f, g) = (store.expect_f(f), store.expect_f(g)); store.intern_atom(Tag::Expr(Num), *f * *g) } else { @@ -257,9 +257,9 @@ impl Block { bindings.insert_ptr(tgt.clone(), c); } Op::Div(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let c = if let (RawPtr::Atom(f), RawPtr::Atom(g)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let c = if let (IVal::Atom(f), IVal::Atom(g)) = (a, b) { let (f, g) = (store.expect_f(f), store.expect_f(g)); if g == &F::ZERO { bail!("Can't divide by zero") @@ -271,20 +271,20 @@ impl Block { bindings.insert_ptr(tgt.clone(), c); } Op::Lt(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let c = if let (RawPtr::Atom(f_idx), RawPtr::Atom(g_idx)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let c = if let (IVal::Atom(f_idx), IVal::Atom(g_idx)) = (a, b) { let f = *store.expect_f(f_idx); let g = *store.expect_f(g_idx); let diff = f - g; hints.bit_decomp.push(Some(SlotData { - vals: vec![Val::Num(RawPtr::Atom(store.intern_f(f + f).0))], + vals: vec![Val::Num(IVal::Atom(store.intern_f(f + f).0))], })); hints.bit_decomp.push(Some(SlotData { - vals: vec![Val::Num(RawPtr::Atom(store.intern_f(g + g).0))], + vals: vec![Val::Num(IVal::Atom(store.intern_f(g + g).0))], })); hints.bit_decomp.push(Some(SlotData { - vals: vec![Val::Num(RawPtr::Atom(store.intern_f(diff + diff).0))], + vals: vec![Val::Num(IVal::Atom(store.intern_f(diff + diff).0))], })); let f = BaseNum::Scalar(f); let g = BaseNum::Scalar(g); @@ -296,11 
+296,11 @@ impl Block { } Op::Trunc(tgt, a, n) => { assert!(*n <= 64); - let a = *bindings.get_ptr(a)?.raw(); - let c = if let RawPtr::Atom(f_idx) = a { + let a = *bindings.get_ptr(a)?.val(); + let c = if let IVal::Atom(f_idx) = a { let f = *store.expect_f(f_idx); hints.bit_decomp.push(Some(SlotData { - vals: vec![Val::Num(RawPtr::Atom(f_idx))], + vals: vec![Val::Num(IVal::Atom(f_idx))], })); let b = if *n < 64 { (1 << *n) - 1 } else { u64::MAX }; store.intern_atom(Tag::Expr(Num), F::from_u64(f.to_u64_unchecked() & b)) @@ -310,9 +310,9 @@ impl Block { bindings.insert_ptr(tgt.clone(), c); } Op::DivRem64(tgt, a, b) => { - let a = *bindings.get_ptr(a)?.raw(); - let b = *bindings.get_ptr(b)?.raw(); - let (c1, c2) = if let (RawPtr::Atom(f), RawPtr::Atom(g)) = (a, b) { + let a = *bindings.get_ptr(a)?.val(); + let b = *bindings.get_ptr(b)?.val(); + let (c1, c2) = if let (IVal::Atom(f), IVal::Atom(g)) = (a, b) { let f = *store.expect_f(f); let g = *store.expect_f(g); if g == F::ZERO { @@ -339,28 +339,33 @@ impl Block { } Op::Cons2(img, tag, preimg) => { let preimg_ptrs = bindings.get_many_ptr(preimg)?; - let tgt_ptr = intern_ptrs!(store, *tag, preimg_ptrs[0], preimg_ptrs[1]); + let tgt_ptr = store.intern_tuple2([preimg_ptrs[0], preimg_ptrs[1]], *tag, None); bindings.insert_ptr(img.clone(), tgt_ptr); let vals = preimg_ptrs.into_iter().map(Val::Pointer).collect(); hints.hash4.push(Some(SlotData { vals })); } Op::Cons3(img, tag, preimg) => { let preimg_ptrs = bindings.get_many_ptr(preimg)?; - let tgt_ptr = - intern_ptrs!(store, *tag, preimg_ptrs[0], preimg_ptrs[1], preimg_ptrs[2]); + let tgt_ptr = store.intern_tuple3( + [preimg_ptrs[0], preimg_ptrs[1], preimg_ptrs[2]], + *tag, + None, + ); bindings.insert_ptr(img.clone(), tgt_ptr); let vals = preimg_ptrs.into_iter().map(Val::Pointer).collect(); hints.hash6.push(Some(SlotData { vals })); } Op::Cons4(img, tag, preimg) => { let preimg_ptrs = bindings.get_many_ptr(preimg)?; - let tgt_ptr = intern_ptrs!( - store, + let tgt_ptr = store.intern_tuple4( + [ + preimg_ptrs[0], + preimg_ptrs[1], + preimg_ptrs[2], + preimg_ptrs[3], + ], *tag, - preimg_ptrs[0], - preimg_ptrs[1], - preimg_ptrs[2], - preimg_ptrs[3] + None, ); bindings.insert_ptr(img.clone(), tgt_ptr); let vals = preimg_ptrs.into_iter().map(Val::Pointer).collect(); @@ -371,13 +376,13 @@ impl Block { let Some(idx) = img_ptr.get_index2() else { bail!("{img} isn't a Tree2 pointer"); }; - let Some(preimg_ptrs) = fetch_ptrs!(store, 2, idx) else { + let Some(preimg_ptrs) = store.fetch_tuple2(idx) else { bail!("Couldn't fetch {img}'s children") }; for (var, ptr) in preimg.iter().zip(preimg_ptrs.iter()) { bindings.insert_ptr(var.clone(), *ptr); } - let vals = preimg_ptrs.into_iter().map(Val::Pointer).collect(); + let vals = preimg_ptrs.iter().map(|x| Val::Pointer(*x)).collect(); hints.hash4.push(Some(SlotData { vals })); } Op::Decons3(preimg, img) => { @@ -385,13 +390,13 @@ impl Block { let Some(idx) = img_ptr.get_index3() else { bail!("{img} isn't a Tree3 pointer"); }; - let Some(preimg_ptrs) = fetch_ptrs!(store, 3, idx) else { + let Some(preimg_ptrs) = store.fetch_tuple3(idx) else { bail!("Couldn't fetch {img}'s children") }; for (var, ptr) in preimg.iter().zip(preimg_ptrs.iter()) { bindings.insert_ptr(var.clone(), *ptr); } - let vals = preimg_ptrs.into_iter().map(Val::Pointer).collect(); + let vals = preimg_ptrs.iter().map(|x| Val::Pointer(*x)).collect(); hints.hash6.push(Some(SlotData { vals })); } Op::Decons4(preimg, img) => { @@ -399,13 +404,13 @@ impl Block { let Some(idx) = img_ptr.get_index4() else 
{ bail!("{img} isn't a Tree4 pointer"); }; - let Some(preimg_ptrs) = fetch_ptrs!(store, 4, idx) else { + let Some(preimg_ptrs) = store.fetch_tuple4(idx) else { bail!("Couldn't fetch {img}'s children") }; for (var, ptr) in preimg.iter().zip(preimg_ptrs.iter()) { bindings.insert_ptr(var.clone(), *ptr); } - let vals = preimg_ptrs.into_iter().map(Val::Pointer).collect(); + let vals = preimg_ptrs.iter().map(|x| Val::Pointer(*x)).collect(); hints.hash8.push(Some(SlotData { vals })); } Op::PushBinding(img, preimg) => { @@ -414,9 +419,9 @@ impl Block { store.push_binding(preimg_ptrs[0], preimg_ptrs[1], preimg_ptrs[2]); bindings.insert_ptr(img.clone(), tgt_ptr); let vals = vec![ - Val::Num(*preimg_ptrs[0].raw()), + Val::Num(*preimg_ptrs[0].val()), Val::Pointer(preimg_ptrs[1]), - Val::Num(*preimg_ptrs[2].raw()), + Val::Num(*preimg_ptrs[2].val()), ]; hints.hash4.push(Some(SlotData { vals })); } @@ -429,27 +434,27 @@ impl Block { bindings.insert_ptr(var.clone(), *ptr); } let vals = vec![ - Val::Num(*preimg_ptrs[0].raw()), + Val::Num(*preimg_ptrs[0].val()), Val::Pointer(preimg_ptrs[1]), - Val::Num(*preimg_ptrs[2].raw()), + Val::Num(*preimg_ptrs[2].val()), ]; hints.hash4.push(Some(SlotData { vals })); } Op::Hide(tgt, sec, src) => { let src_ptr = bindings.get_ptr(src)?; let sec_ptr = bindings.get_ptr(sec)?; - let (Tag::Expr(Num), RawPtr::Atom(secret_idx)) = sec_ptr.parts() else { + let (Tag::Expr(Num), IVal::Atom(secret_idx)) = sec_ptr.parts() else { bail!("{sec} is not a numeric pointer") }; let secret = *store.expect_f(*secret_idx); let tgt_ptr = store.hide(secret, src_ptr); - let vals = vec![Val::Num(RawPtr::Atom(*secret_idx)), Val::Pointer(src_ptr)]; + let vals = vec![Val::Num(IVal::Atom(*secret_idx)), Val::Pointer(src_ptr)]; hints.commitment.push(Some(SlotData { vals })); bindings.insert_ptr(tgt.clone(), tgt_ptr); } Op::Open(tgt_secret, tgt_ptr, comm) => { let comm_ptr = bindings.get_ptr(comm)?; - let (Tag::Expr(Comm), RawPtr::Atom(hash)) = comm_ptr.parts() else { + let (Tag::Expr(Comm), IVal::Atom(hash)) = comm_ptr.parts() else { bail!("{comm} is not a comm pointer") }; let hash = *store.expect_f(*hash); @@ -459,10 +464,10 @@ impl Block { bindings.insert_ptr(tgt_ptr.clone(), *ptr); bindings.insert_ptr( tgt_secret.clone(), - store.intern_atom(Tag::Expr(Num), *secret), + store.intern_atom(Tag::Expr(Num), secret.0), ); - let secret_idx = store.intern_f(*secret).0; - let vals = vec![Val::Num(RawPtr::Atom(secret_idx)), Val::Pointer(*ptr)]; + let secret_idx = store.intern_f(secret.0).0; + let vals = vec![Val::Num(IVal::Atom(secret_idx)), Val::Pointer(*ptr)]; hints.commitment.push(Some(SlotData { vals })); } Op::Unit(f) => f(), diff --git a/src/lem/mod.rs b/src/lem/mod.rs index 25186d7fe..a2749cc64 100644 --- a/src/lem/mod.rs +++ b/src/lem/mod.rs @@ -68,6 +68,7 @@ pub mod multiframe; pub mod pointers; mod slot; pub mod store; +pub mod store_core; pub mod tag; mod var_map; diff --git a/src/lem/multiframe.rs b/src/lem/multiframe.rs index 2133acf31..75deed91f 100644 --- a/src/lem/multiframe.rs +++ b/src/lem/multiframe.rs @@ -485,7 +485,7 @@ fn assert_eq_ptrs_aptrs( return Err(SynthesisError::AssignmentMissing); }; assert_eq!(alloc_ptr_tag, z_ptr.tag().to_field()); - assert_eq!(&alloc_ptr_hash, z_ptr.value()); + assert_eq!(&alloc_ptr_hash, z_ptr.hash()); } } Ok(()) @@ -754,7 +754,7 @@ impl> Circuit for MultiFrame { let allocated_hash = AllocatedNum::alloc_infallible( ns!(cs, format!("allocated hash for input {i}")), - || *z_ptr.value(), + || *z_ptr.hash(), ); allocated_hash.inputize(ns!(cs, format!("inputized 
hash for input {i}")))?; @@ -773,7 +773,7 @@ impl> Circuit for MultiFrame { let allocated_hash = AllocatedNum::alloc_infallible( ns!(cs, format!("allocated hash for output {i}")), - || *z_ptr.value(), + || *z_ptr.hash(), ); allocated_hash.inputize(ns!(cs, format!("inputized hash for output {i}")))?; @@ -831,12 +831,12 @@ impl> Provable for MultiFrame { for ptr in input { let z_ptr = store.hash_ptr(ptr); res.push(z_ptr.tag().to_field()); - res.push(*z_ptr.value()); + res.push(*z_ptr.hash()); } for ptr in output { let z_ptr = store.hash_ptr(ptr); res.push(z_ptr.tag().to_field()); - res.push(*z_ptr.value()); + res.push(*z_ptr.hash()); } res } @@ -1085,7 +1085,7 @@ mod tests { let z_ptr = store.hash_ptr(ptr); let allocated_tag = AllocatedNum::alloc_infallible(&mut cs, || z_ptr.tag_field()); allocated_tag.inputize(&mut cs).unwrap(); - let allocated_hash = AllocatedNum::alloc_infallible(&mut cs, || *z_ptr.value()); + let allocated_hash = AllocatedNum::alloc_infallible(&mut cs, || *z_ptr.hash()); allocated_hash.inputize(&mut cs).unwrap(); input.push(AllocatedPtr::from_parts(allocated_tag, allocated_hash)); } diff --git a/src/lem/pointers.rs b/src/lem/pointers.rs index 50b4cb3d1..b82ff128b 100644 --- a/src/lem/pointers.rs +++ b/src/lem/pointers.rs @@ -1,102 +1,102 @@ +use match_opt::match_opt; use serde::{Deserialize, Serialize}; use crate::{ - field::LurkField, - tag::ExprTag::{Cons, Fun, Nil, Num, Str, Sym}, + field::{FWrap, LurkField}, + tag::{ + ExprTag::{Cons, Fun, Nil, Num, Str, Sym}, + Tag as TagTrait, + }, }; use super::Tag; -/// `RawPtr` is the basic pointer type of the LEM store. An `Atom` points to a field -/// element, and a `HashN` points to `N` children, which are also raw pointers. Thus, -/// they are a building block for graphs that represent Lurk data. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, Hash)] -pub enum RawPtr { - Atom(usize), - Hash4(usize), - Hash6(usize), - Hash8(usize), +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] +pub struct GPtr { + pub tag: Tag, + pub val: Val, } -impl RawPtr { +impl GPtr { #[inline] - pub fn is_hash(&self) -> bool { - matches!( - self, - RawPtr::Hash4(..) | RawPtr::Hash6(..) | RawPtr::Hash8(..) - ) + pub fn new(tag: Tag, val: Val) -> Self { + Self { tag, val } } #[inline] - pub fn get_atom(&self) -> Option { - match self { - RawPtr::Atom(x) => Some(*x), - _ => None, - } + pub fn tag(&self) -> &Tag { + &self.tag } #[inline] - pub fn get_hash4(&self) -> Option { - match self { - RawPtr::Hash4(x) => Some(*x), - _ => None, - } + pub fn val(&self) -> &Val { + &self.val } #[inline] - pub fn get_hash6(&self) -> Option { - match self { - RawPtr::Hash6(x) => Some(*x), - _ => None, - } + pub fn parts(&self) -> (&Tag, &Val) { + let Self { tag, val } = self; + (tag, val) } #[inline] - pub fn get_hash8(&self) -> Option { - match self { - RawPtr::Hash8(x) => Some(*x), - _ => None, - } + pub fn into_parts(self) -> (Tag, Val) { + let Self { tag, val } = self; + (tag, val) } } -/// `Ptr` is a tagged pointer. The tag is there to say what kind of data it encodes. -/// Since tags can be encoded as field elements, they are also able to be expressed -/// as raw pointers. A `Ptr` can thus be seen as a tuple of `RawPtr`s. 
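// --- Editor's note (illustrative sketch, not part of the diff) ---------------
// With this change, `Ptr` and `ZPtr` become two instantiations of one generic
// struct: `Ptr = GPtr<Tag, IVal>` pairs a tag with an index into the store's
// internal tables, while `ZPtr = GPtr<Tag, FWrap<F>>` pairs a tag with a field
// element digest, so `tag()`, `val()`, `parts()` and `into_parts()` are shared.
// The helper below is hypothetical; the `IVal` variants are exactly the ones
// introduced just below in this diff.
use crate::lem::pointers::{IVal, Ptr}; // paths as per this diff
fn describe(ptr: &Ptr) -> &'static str {
    match ptr.val() {
        IVal::Atom(_) => "atom: index into the store's field elements",
        IVal::Tuple2(_) => "parent of 2 tagged children",
        IVal::Tuple3(_) => "parent of 3 tagged children",
        IVal::Tuple4(_) => "parent of 4 tagged children",
        IVal::Compact(_) => "3 children, with the first and third tags elided",
    }
}
// ------------------------------------------------------------------------------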
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, Hash)] -pub struct Ptr { - tag: Tag, - raw: RawPtr, +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub enum IVal { + Atom(usize), + Tuple2(usize), + Tuple3(usize), + Tuple4(usize), + Compact(usize), } -impl Ptr { +impl IVal { #[inline] - pub fn new(tag: Tag, raw: RawPtr) -> Self { - Ptr { tag, raw } + pub fn is_atom(&self) -> bool { + matches!(self, IVal::Atom(_)) } #[inline] - pub fn tag(&self) -> &Tag { - &self.tag + pub fn is_compound(&self) -> bool { + !self.is_atom() + } + + #[inline] + pub fn get_atom_idx(&self) -> Option { + match_opt!(self, IVal::Atom(idx) => *idx) + } + + #[inline] + pub fn get_tuple2_idx(&self) -> Option { + match_opt!(self, IVal::Tuple2(idx) => *idx) } #[inline] - pub fn raw(&self) -> &RawPtr { - &self.raw + pub fn get_tuple3_idx(&self) -> Option { + match_opt!(self, IVal::Tuple3(idx) => *idx) } #[inline] - pub fn parts(&self) -> (&Tag, &RawPtr) { - let Ptr { tag, raw } = self; - (tag, raw) + pub fn get_tuple4_idx(&self) -> Option { + match_opt!(self, IVal::Tuple4(idx) => *idx) } #[inline] - pub fn into_parts(self) -> (Tag, RawPtr) { - let Ptr { tag, raw } = self; - (tag, raw) + pub fn get_compact_idx(&self) -> Option { + match_opt!(self, IVal::Compact(idx) => *idx) } +} + +pub type IPtr = GPtr; +pub type Ptr = IPtr; + +impl Ptr { #[inline] pub fn has_tag(&self, tag: &Tag) -> bool { self.tag() == tag @@ -144,34 +144,34 @@ impl Ptr { #[inline] pub fn cast(self, tag: Tag) -> Self { - Ptr { tag, raw: self.raw } + Ptr { tag, val: self.val } } #[inline] pub fn get_atom(&self) -> Option { - self.raw().get_atom() + self.val().get_atom_idx() } #[inline] pub fn get_index2(&self) -> Option { - self.raw().get_hash4() + self.val().get_tuple2_idx() } #[inline] pub fn get_index3(&self) -> Option { - self.raw().get_hash6() + self.val().get_tuple3_idx() } #[inline] pub fn get_index4(&self) -> Option { - self.raw().get_hash8() + self.val().get_tuple4_idx() } #[inline] pub fn atom(tag: Tag, idx: usize) -> Ptr { Ptr { tag, - raw: RawPtr::Atom(idx), + val: IVal::Atom(idx), } } } @@ -189,11 +189,28 @@ impl Ptr { /// note that content-addressing can be expensive, especially in the context of /// interpretation, because of the Poseidon hashes. That's why we operate on `Ptr`s /// when interpreting LEMs and delay the need for `ZPtr`s as much as possible. -pub type ZPtr = crate::z_data::z_ptr::ZPtr; +pub type ZPtr = GPtr>; impl ZPtr { #[inline] pub fn dummy() -> Self { - Self(Tag::Expr(Nil), F::ZERO) + GPtr::new(Tag::Expr(Nil), FWrap(F::ZERO)) + } + + #[inline] + pub fn hash(&self) -> &F { + self.val().get() + } + + #[inline] + pub fn from_parts(tag: Tag, hash: F) -> Self { + Self::new(tag, FWrap(hash)) + } +} + +impl GPtr> { + #[inline] + pub fn tag_field(&self) -> F { + self.tag().to_field() } } diff --git a/src/lem/slot.rs b/src/lem/slot.rs index 7fa336e8e..91ce1bc7b 100644 --- a/src/lem/slot.rs +++ b/src/lem/slot.rs @@ -106,7 +106,7 @@ use match_opt::match_opt; use super::{ - pointers::{Ptr, RawPtr}, + pointers::{IVal, Ptr}, Block, Ctrl, Op, }; @@ -246,7 +246,7 @@ impl Block { /// take only 0 or 1 values. 
pub enum Val { Pointer(Ptr), - Num(RawPtr), + Num(IVal), Boolean(bool), } diff --git a/src/lem/store.rs b/src/lem/store.rs index f83c40b11..22f3ff2c9 100644 --- a/src/lem/store.rs +++ b/src/lem/store.rs @@ -1,16 +1,9 @@ use anyhow::{bail, Context, Result}; -use arc_swap::ArcSwap; use bellpepper::util_cs::witness_cs::SizedWitness; -use elsa::{ - sync::index_set::FrozenIndexSet, - sync::{FrozenMap, FrozenVec}, -}; -use indexmap::IndexSet; +use elsa::sync::FrozenMap; use match_opt::match_opt; use neptune::Poseidon; use nom::{sequence::preceded, Parser}; -use rayon::prelude::{IntoParallelRefIterator, ParallelIterator}; -use std::sync::Arc; use crate::{ field::{FWrap, LurkField}, @@ -29,7 +22,77 @@ use crate::{ }, }; -use super::pointers::{Ptr, RawPtr, ZPtr}; +use super::{ + pointers::{IVal, Ptr, ZPtr}, + store_core::{StoreCore, StoreHasher}, +}; + +#[derive(Default, Debug)] +pub struct PoseidonStoreHasher<F: LurkField> { + pub poseidon_cache: PoseidonCache<F>, +} + +impl<F: LurkField> StoreHasher<Tag, FWrap<F>> for PoseidonStoreHasher<F> { + fn hash_ptrs(&self, mut ptrs: Vec<ZPtr<F>>) -> FWrap<F> { + match ptrs.len() { + 2 => { + let (b_tag, b_hash) = ptrs.pop().unwrap().into_parts(); + let (a_tag, a_hash) = ptrs.pop().unwrap().into_parts(); + FWrap(self.poseidon_cache.hash4(&[ + a_tag.to_field(), + a_hash.0, + b_tag.to_field(), + b_hash.0, + ])) + } + 3 => { + let (c_tag, c_hash) = ptrs.pop().unwrap().into_parts(); + let (b_tag, b_hash) = ptrs.pop().unwrap().into_parts(); + let (a_tag, a_hash) = ptrs.pop().unwrap().into_parts(); + FWrap(self.poseidon_cache.hash6(&[ + a_tag.to_field(), + a_hash.0, + b_tag.to_field(), + b_hash.0, + c_tag.to_field(), + c_hash.0, + ])) + } + 4 => { + let (d_tag, d_hash) = ptrs.pop().unwrap().into_parts(); + let (c_tag, c_hash) = ptrs.pop().unwrap().into_parts(); + let (b_tag, b_hash) = ptrs.pop().unwrap().into_parts(); + let (a_tag, a_hash) = ptrs.pop().unwrap().into_parts(); + FWrap(self.poseidon_cache.hash8(&[ + a_tag.to_field(), + a_hash.0, + b_tag.to_field(), + b_hash.0, + c_tag.to_field(), + c_hash.0, + d_tag.to_field(), + d_hash.0, + ])) + } + _ => unimplemented!(), + } + } + + fn hash_commitment(&self, secret: FWrap<F>, payload: ZPtr<F>) -> FWrap<F> { + let (tag, hash) = payload.into_parts(); + FWrap( + self.poseidon_cache + .hash3(&[secret.0, tag.to_field(), hash.0]), + ) + } + + fn hash_compact(&self, d1: FWrap<F>, t2: Tag, d2: FWrap<F>, d3: FWrap<F>) -> FWrap<F> { + FWrap( + self.poseidon_cache + .hash4(&[d1.0, t2.to_field(), d2.0, d3.0]), + ) + } +} /// The `Store` is a crucial part of Lurk's implementation and tries to be a /// versatile data structure for many parts of Lurk's data pipeline. @@ -43,10 +106,7 @@ use super::pointers::{Ptr, RawPtr, ZPtr}; /// This data is saved in `string_ptr_cache` and `symbol_ptr_cache`.
#[derive(Debug)] pub struct Store { - f_elts: FrozenIndexSet>>, - hash4: FrozenIndexSet>, - hash6: FrozenIndexSet>, - hash8: FrozenIndexSet>, + pub core: StoreCore, PoseidonStoreHasher>, string_ptr_cache: FrozenMap>, symbol_ptr_cache: FrozenMap>, @@ -54,15 +114,8 @@ pub struct Store { ptr_string_cache: FrozenMap, ptr_symbol_cache: FrozenMap>, - pub comms: FrozenMap, Box<(F, Ptr)>>, // hash -> (secret, src) - - pub poseidon_cache: PoseidonCache, pub inverse_poseidon_cache: InversePoseidonCache, - dehydrated: ArcSwap>>, - z_cache: FrozenMap>>, - inverse_z_cache: FrozenMap, Box>, - // cached indices for the hashes of 3, 4, 6 and 8 padded zeros pub hash3zeros_idx: usize, pub hash4zeros_idx: usize, @@ -89,43 +142,24 @@ impl<'a, F: LurkField, T> WithStore<'a, F, T> { impl Default for Store { fn default() -> Self { - let poseidon_cache = PoseidonCache::default(); - let hash3zeros = poseidon_cache.hash3(&[F::ZERO; 3]); - let hash4zeros = poseidon_cache.hash4(&[F::ZERO; 4]); - let hash6zeros = poseidon_cache.hash6(&[F::ZERO; 6]); - let hash8zeros = poseidon_cache.hash8(&[F::ZERO; 8]); - - // Since tags are used very often, we will allocate them at the beginning - // in order, so that we do not need to use the `f_elts` when we have a tag - // This is similar to the `hashNzeros` optimization - let f_elts = FrozenIndexSet::default(); - let mut i = 0; - while let Some(tag) = Tag::pos(i) { - let (j, _) = f_elts.insert_probe(FWrap(tag.to_field()).into()); - // This is to make sure the indices are ordered - assert_eq!(i, j); - i += 1; - } - let (hash3zeros_idx, _) = f_elts.insert_probe(FWrap(hash3zeros).into()); - let (hash4zeros_idx, _) = f_elts.insert_probe(FWrap(hash4zeros).into()); - let (hash6zeros_idx, _) = f_elts.insert_probe(FWrap(hash6zeros).into()); - let (hash8zeros_idx, _) = f_elts.insert_probe(FWrap(hash8zeros).into()); + let core: StoreCore, PoseidonStoreHasher> = StoreCore::default(); + let hash3zeros = core.hasher.poseidon_cache.hash3(&[F::ZERO; 3]); + let hash4zeros = core.hasher.poseidon_cache.hash4(&[F::ZERO; 4]); + let hash6zeros = core.hasher.poseidon_cache.hash6(&[F::ZERO; 6]); + let hash8zeros = core.hasher.poseidon_cache.hash8(&[F::ZERO; 8]); + + let (hash3zeros_idx, _) = core.intern_digest(FWrap(hash3zeros)); + let (hash4zeros_idx, _) = core.intern_digest(FWrap(hash4zeros)); + let (hash6zeros_idx, _) = core.intern_digest(FWrap(hash6zeros)); + let (hash8zeros_idx, _) = core.intern_digest(FWrap(hash8zeros)); Self { - f_elts, - hash4: Default::default(), - hash6: Default::default(), - hash8: Default::default(), + core, string_ptr_cache: Default::default(), symbol_ptr_cache: Default::default(), ptr_string_cache: Default::default(), ptr_symbol_cache: Default::default(), - comms: Default::default(), - poseidon_cache, inverse_poseidon_cache: Default::default(), - dehydrated: Default::default(), - z_cache: Default::default(), - inverse_z_cache: Default::default(), hash3zeros_idx, hash4zeros_idx, hash6zeros_idx, @@ -134,67 +168,29 @@ impl Default for Store { } } -// These are utility macros for store methods on `Ptr`s, especially because -// they contain two const generic variables (more on this later) -macro_rules! count { - () => (0); - ( $_x:tt $($xs:tt)* ) => (1 + crate::lem::store::count!($($xs)*)); -} -pub(crate) use count; - -macro_rules! intern_ptrs { - ($store:expr, $tag:expr, $($ptrs:expr),*) => {{ - const N: usize = crate::lem::store::count!($($ptrs)*); - ($store).intern_ptrs::<{2*N}, N>($tag, [$($ptrs),*]) - }} -} -pub(crate) use intern_ptrs; - -macro_rules! 
intern_ptrs_hydrated { - ($store:expr, $tag:expr, $z:expr, $($ptrs:expr),*) => {{ - const N: usize = crate::lem::store::count!($($ptrs)*); - ($store).intern_ptrs_hydrated::<{2*N}, N>($tag, [$($ptrs),*], $z) - }} -} -pub(crate) use intern_ptrs_hydrated; - -macro_rules! fetch_ptrs { - ($store:expr, $n:expr, $idx:expr) => {{ - ($store).fetch_ptrs::<{ 2 * $n }, $n>($idx) - }}; -} -pub(crate) use fetch_ptrs; - -macro_rules! expect_ptrs { - ($store:expr, $n:expr, $idx:expr) => {{ - ($store).expect_ptrs::<{ 2 * $n }, $n>($idx) - }}; -} -pub(crate) use expect_ptrs; - impl Store { /// Cost of poseidon hash with arity 3, including the input #[inline] pub fn hash3_cost(&self) -> usize { - Poseidon::new(self.poseidon_cache.constants.c3()).num_aux() + 1 + Poseidon::new(self.core.hasher.poseidon_cache.constants.c3()).num_aux() + 1 } /// Cost of poseidon hash with arity 4, including the input #[inline] pub fn hash4_cost(&self) -> usize { - Poseidon::new(self.poseidon_cache.constants.c4()).num_aux() + 1 + Poseidon::new(self.core.hasher.poseidon_cache.constants.c4()).num_aux() + 1 } /// Cost of poseidon hash with arity 6, including the input #[inline] pub fn hash6_cost(&self) -> usize { - Poseidon::new(self.poseidon_cache.constants.c6()).num_aux() + 1 + Poseidon::new(self.core.hasher.poseidon_cache.constants.c6()).num_aux() + 1 } /// Cost of poseidon hash with arity 8, including the input #[inline] pub fn hash8_cost(&self) -> usize { - Poseidon::new(self.poseidon_cache.constants.c8()).num_aux() + 1 + Poseidon::new(self.core.hasher.poseidon_cache.constants.c8()).num_aux() + 1 } /// Retrieves the hash of 3 padded zeros @@ -221,217 +217,100 @@ impl Store { self.expect_f(self.hash8zeros_idx) } - /// Converts array of pointers of size `P` to array of raw pointers of size `N` such that `P = N * 2`. - /// Since the `generic_const_exprs` feature is still unstable, we cannot substitute `N * 2` - /// for generic const `P` and remove it completely, so we must keep it and do a dynamic assertion - /// that it equals `N * 2`. This is not very ergonomic though, since we must add turbofishes - /// like `::<6, 3>` instead of the simpler `::<3>`. - #[inline] - pub fn ptrs_to_raw_ptrs(&self, ptrs: &[Ptr; P]) -> [RawPtr; N] { - assert_eq!(P * 2, N); - let mut raw_ptrs = [self.raw_zero(); N]; - for i in 0..P { - raw_ptrs[2 * i] = self.tag(*ptrs[i].tag()); - raw_ptrs[2 * i + 1] = *ptrs[i].raw(); - } - raw_ptrs - } - - /// Tries to convert array of raw pointers of size `N` to array of pointers of size `P = N * 2`. - /// It might fail since not all raw pointers represent valid tags. 
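// --- Editor's note (illustrative sketch, not part of the diff) ---------------
// The `P = N * 2` const-generic workaround described above disappears: the new
// store exposes fixed-arity `intern_tupleN`/`fetch_tupleN`/`expect_tupleN`
// methods (introduced further down in this diff), so no `::<4, 2>`-style
// turbofish is needed. A hypothetical round trip; imports assumed per this diff:
use crate::{
    field::LurkField,
    lem::{pointers::Ptr, store::Store, Tag},
    tag::ExprTag::Cons,
};
fn cons_roundtrip<F: LurkField>(store: &Store<F>, car: Ptr, cdr: Ptr) {
    // old: intern_ptrs!(store, Tag::Expr(Cons), car, cdr)
    //      i.e. store.intern_ptrs::<4, 2>(Tag::Expr(Cons), [car, cdr])
    let pair = store.intern_tuple2([car, cdr], Tag::Expr(Cons), None);
    let idx = pair.val().get_tuple2_idx().expect("backed by a 2-tuple");
    // old: expect_ptrs!(store, 2, idx), i.e. store.expect_ptrs::<4, 2>(idx)
    assert_eq!(store.expect_tuple2(idx), &[car, cdr]);
}
// ------------------------------------------------------------------------------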
- #[inline] - pub fn raw_ptrs_to_ptrs( - &self, - raw_ptrs: &[RawPtr; N], - ) -> Option<[Ptr; P]> { - assert_eq!(P * 2, N); - let mut ptrs = [self.dummy(); P]; - for i in 0..P { - let tag = self.fetch_tag(&raw_ptrs[2 * i])?; - ptrs[i] = Ptr::new(tag, raw_ptrs[2 * i + 1]) - } - Some(ptrs) - } - #[inline] pub fn intern_f(&self, f: F) -> (usize, bool) { - self.f_elts.insert_probe(Box::new(FWrap(f))) - } - - /// Creates an atom `RawPtr` which points to a cached element of the finite - /// field `F` - pub fn intern_raw_atom(&self, f: F) -> RawPtr { - let (idx, _) = self.intern_f(f); - RawPtr::Atom(idx) + self.core.intern_digest(FWrap(f)) } pub fn intern_atom(&self, tag: Tag, f: F) -> Ptr { - Ptr::new(tag, self.intern_raw_atom(f)) - } - - /// Creates a `RawPtr` that's a parent of `N` children - pub fn intern_raw_ptrs(&self, ptrs: [RawPtr; N]) -> RawPtr { - let (ptr, inserted) = self.intern_raw_ptrs_internal::(ptrs); - if inserted { - // this is for `hydrate_z_cache` - self.dehydrated.load().push(Box::new(ptr)); - } - ptr - } - - /// Similar to `intern_raw_ptrs` but doesn't add the resulting pointer to - /// `dehydrated`. This function is used when converting a `ZStore` to a - /// `Store`. - pub fn intern_raw_ptrs_hydrated( - &self, - ptrs: [RawPtr; N], - z: FWrap, - ) -> RawPtr { - let (ptr, _) = self.intern_raw_ptrs_internal::(ptrs); - self.z_cache.insert(ptr, Box::new(z)); - self.inverse_z_cache.insert(z, Box::new(ptr)); - ptr - } - - #[inline] - fn intern_raw_ptrs_internal(&self, ptrs: [RawPtr; N]) -> (RawPtr, bool) { - macro_rules! intern { - ($Hash:ident, $hash:ident, $n:expr) => {{ - let ptrs = unsafe { std::mem::transmute::<&[RawPtr; N], &[RawPtr; $n]>(&ptrs) }; - let (idx, inserted) = self.$hash.insert_probe(Box::new(*ptrs)); - (RawPtr::$Hash(idx), inserted) - }}; - } - match N { - 4 => intern!(Hash4, hash4, 4), - 6 => intern!(Hash6, hash6, 6), - 8 => intern!(Hash8, hash8, 8), - _ => unimplemented!(), - } + self.core.intern_atom(tag, FWrap(f)) } - /// Creates a `Ptr` that's a parent of `N` children - pub fn intern_ptrs(&self, tag: Tag, ptrs: [Ptr; P]) -> Ptr { - let raw_ptrs = self.ptrs_to_raw_ptrs::(&ptrs); - let payload = self.intern_raw_ptrs::(raw_ptrs); - Ptr::new(tag, payload) + #[inline] + pub fn fetch_f(&self, idx: usize) -> Option<&F> { + self.core.fetch_digest(idx).map(|fw| &fw.0) } - /// Similar to `intern_ptrs` but doesn't add the resulting pointer to - /// `dehydrated`. This function is used when converting a `ZStore` to a - /// `Store`. 
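// --- Editor's note (illustrative sketch, not part of the diff) ---------------
// The `_hydrated` variants survive as the `Option` digest argument on the new
// tuple methods: passing `Some(digest)` records the digest in the z-caches
// directly instead of enqueueing the pointer in `dehydrated`. Hypothetical
// usage, e.g. when rebuilding a `Store` from a `ZStore` whose digests are
// already known; names other than this diff's API are assumptions:
fn intern_pair_with_digest<F: LurkField>(
    store: &Store<F>,
    a: Ptr,
    b: Ptr,
    known_digest: F,
) -> Ptr {
    let ptr = store.intern_tuple2([a, b], Tag::Expr(Cons), Some(known_digest));
    // hashing this pointer should now be a cache hit: no Poseidon work redone
    debug_assert_eq!(store.hash_ptr(&ptr).val().0, known_digest);
    ptr
}
// ------------------------------------------------------------------------------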
- pub fn intern_ptrs_hydrated( - &self, - tag: Tag, - ptrs: [Ptr; P], - z: ZPtr, - ) -> Ptr { - let raw_ptrs = self.ptrs_to_raw_ptrs::(&ptrs); - let payload = self.intern_raw_ptrs_hydrated::(raw_ptrs, FWrap(*z.value())); - Ptr::new(tag, payload) + #[inline] + pub fn expect_f(&self, idx: usize) -> &F { + self.core.expect_digest(idx).get() } #[inline] - pub fn fetch_f_by_idx(&self, idx: usize) -> Option<&F> { - self.f_elts.get_index(idx).map(|fw| &fw.0) + pub fn fetch_f_by_val(&self, ptr_val: &IVal) -> Option<&F> { + ptr_val.get_atom_idx().and_then(|idx| self.fetch_f(idx)) } #[inline] - pub fn fetch_f(&self, ptr: &Ptr) -> Option<&F> { - ptr.raw() - .get_atom() - .and_then(|idx| self.fetch_f_by_idx(idx)) + pub fn intern_tuple2(&self, ptrs: [Ptr; 2], tag: Tag, hash: Option) -> Ptr { + self.core.intern_tuple2(ptrs, tag, hash.map(FWrap)) } #[inline] - pub fn expect_f(&self, idx: usize) -> &F { - self.fetch_f_by_idx(idx).expect("Index missing from f_elts") + pub fn fetch_tuple2(&self, idx: usize) -> Option<&[Ptr; 2]> { + self.core.fetch_tuple2(idx) } #[inline] - pub fn fetch_raw_ptrs(&self, idx: usize) -> Option<&[RawPtr; N]> { - macro_rules! fetch { - ($hash:ident, $n:expr) => {{ - let ptrs = self.$hash.get_index(idx)?; - let ptrs = unsafe { std::mem::transmute::<&[RawPtr; $n], &[RawPtr; N]>(ptrs) }; - Some(ptrs) - }}; - } - match N { - 4 => fetch!(hash4, 4), - 6 => fetch!(hash6, 6), - 8 => fetch!(hash8, 8), - _ => unimplemented!(), - } + pub fn expect_tuple2(&self, idx: usize) -> &[Ptr; 2] { + self.core.expect_tuple2(idx) } #[inline] - pub fn expect_raw_ptrs(&self, idx: usize) -> &[RawPtr; N] { - self.fetch_raw_ptrs::(idx) - .expect("Index missing from store") + pub fn intern_tuple3(&self, ptrs: [Ptr; 3], tag: Tag, hash: Option) -> Ptr { + self.core.intern_tuple3(ptrs, tag, hash.map(FWrap)) } #[inline] - pub fn fetch_ptrs(&self, idx: usize) -> Option<[Ptr; P]> { - assert_eq!(P * 2, N); - let raw_ptrs = self.fetch_raw_ptrs::(idx)?; - self.raw_ptrs_to_ptrs(raw_ptrs) + pub fn fetch_tuple3(&self, idx: usize) -> Option<&[Ptr; 3]> { + self.core.fetch_tuple3(idx) } #[inline] - pub fn expect_ptrs(&self, idx: usize) -> [Ptr; P] { - self.fetch_ptrs::(idx) - .expect("Index missing from store") + pub fn expect_tuple3(&self, idx: usize) -> &[Ptr; 3] { + self.core.expect_tuple3(idx) } #[inline] - pub fn tag(&self, tag: Tag) -> RawPtr { - // Tags are interned in order, so their index is the store index - RawPtr::Atom(tag.index()) + pub fn intern_tuple4(&self, ptrs: [Ptr; 4], tag: Tag, hash: Option) -> Ptr { + self.core.intern_tuple4(ptrs, tag, hash.map(FWrap)) } #[inline] - pub fn fetch_tag(&self, ptr: &RawPtr) -> Option { - ptr.get_atom().and_then(Tag::pos) + pub fn fetch_tuple4(&self, idx: usize) -> Option<&[Ptr; 4]> { + self.core.fetch_tuple4(idx) } #[inline] - pub fn raw_to_ptr(&self, tag: &RawPtr, raw: RawPtr) -> Option { - self.fetch_tag(tag).map(|tag| Ptr::new(tag, raw)) + pub fn expect_tuple4(&self, idx: usize) -> &[Ptr; 4] { + self.core.expect_tuple4(idx) } - /// Interns 3 pointers using only 4 raw pointers by ignoring the tags of the - /// first and third pointers - fn intern_compact(&self, a: Ptr, b: Ptr, c: Ptr, tag: Tag) -> Ptr { - let (_, a_raw) = a.into_parts(); - let (b_tag, b_raw) = b.into_parts(); - let (_, c_raw) = c.into_parts(); - let raw = self.intern_raw_ptrs([a_raw, self.tag(b_tag), b_raw, c_raw]); - Ptr::new(tag, raw) + #[inline] + pub fn intern_compact(&self, ptrs: [Ptr; 3], tag: Tag, hash: Option) -> Ptr { + self.core.intern_compact(ptrs, tag, hash.map(FWrap)) } /// Fetches 3 
pointers that were interned with `intern_compact` - fn fetch_compact(&self, ptr: &Ptr, a_tag: Tag, c_tag: Tag) -> Option<[Ptr; 3]> { - let idx = ptr.get_index2()?; - let [a_raw, b_tag, b_raw, c_raw] = self.fetch_raw_ptrs(idx)?; - let b_tag = self.fetch_tag(b_tag)?; - let a = Ptr::new(a_tag, *a_raw); - let b = Ptr::new(b_tag, *b_raw); - let c = Ptr::new(c_tag, *c_raw); - Some([a, b, c]) + fn fetch_compact(&self, ptr: &Ptr) -> Option<&[Ptr; 3]> { + self.core.fetch_compact_by_val(ptr.val()) } #[inline] - pub fn push_binding(&self, sym: Ptr, val: Ptr, env: Ptr) -> Ptr { + pub fn push_binding(&self, sym: Ptr, v: Ptr, env: Ptr) -> Ptr { assert_eq!(sym.tag(), &Tag::Expr(Sym)); assert_eq!(env.tag(), &Tag::Expr(Env)); - self.intern_compact(sym, val, env, Tag::Expr(Env)) + self.core + .intern_compact([sym, v, env], Tag::Expr(Env), None) } #[inline] - pub fn pop_binding(&self, env: &Ptr) -> Option<[Ptr; 3]> { + pub fn pop_binding(&self, env: &Ptr) -> Option<&[Ptr; 3]> { assert_eq!(env.tag(), &Tag::Expr(Env)); - self.fetch_compact(env, Tag::Expr(Sym), Tag::Expr(Env)) + let ptrs = self.fetch_compact(env)?; + assert_eq!(ptrs[0].tag(), &Tag::Expr(Sym)); + assert_eq!(ptrs[2].tag(), &Tag::Expr(Env)); + Some(ptrs) } #[inline] @@ -441,13 +320,14 @@ impl Store { // arguably okay, but it means that in order to recover the preimage we will need to know the expected arity // based on the query. assert!(matches!(deps.tag(), Tag::Expr(Prov | Cons | Nil))); - self.intern_compact(query, val, deps, Tag::Expr(Prov)) + self.core + .intern_compact([query, val, deps], Tag::Expr(Prov), None) } #[inline] - pub fn deconstruct_provenance(&self, prov: &Ptr) -> Option<[Ptr; 3]> { + pub fn deconstruct_provenance(&self, prov: &Ptr) -> Option<&[Ptr; 3]> { assert_eq!(prov.tag(), &Tag::Expr(Prov)); - self.fetch_compact(prov, Tag::Expr(Cons), Tag::Expr(Cons)) + self.fetch_compact(prov) } #[inline] @@ -462,7 +342,7 @@ impl Store { #[inline] pub fn fetch_num(&self, ptr: &Ptr) -> Option<&F> { - match_opt!(ptr.tag(), Tag::Expr(Num) => self.fetch_f(ptr)?) + match_opt!(ptr.tag(), Tag::Expr(Num) => self.fetch_f_by_val(ptr.val())?) } #[inline] @@ -477,7 +357,7 @@ impl Store { #[inline] pub fn fetch_u64(&self, ptr: &Ptr) -> Option { - match_opt!(ptr.tag(), Tag::Expr(U64) => self.fetch_f(ptr).and_then(F::to_u64)?) + match_opt!(ptr.tag(), Tag::Expr(U64) => self.fetch_f_by_val(ptr.val()).and_then(F::to_u64)?) } #[inline] @@ -487,7 +367,7 @@ impl Store { #[inline] pub fn fetch_char(&self, ptr: &Ptr) -> Option { - match_opt!(ptr.tag(), Tag::Expr(Char) => self.fetch_f(ptr).and_then(F::to_char)?) + match_opt!(ptr.tag(), Tag::Expr(Char) => self.fetch_f_by_val(ptr.val()).and_then(F::to_char)?) } #[inline] @@ -495,43 +375,38 @@ impl Store { self.intern_atom(Tag::Expr(Comm), hash) } - #[inline] - pub fn raw_zero(&self) -> RawPtr { - self.intern_raw_atom(F::ZERO) - } - #[inline] pub fn zero(&self, tag: Tag) -> Ptr { - Ptr::new(tag, self.raw_zero()) + self.core.intern_atom(tag, FWrap(F::ZERO)) } - pub fn is_zero(&self, raw: &RawPtr) -> bool { + pub fn is_zero(&self, raw: &IVal) -> bool { match raw { - RawPtr::Atom(idx) => self.fetch_f_by_idx(*idx) == Some(&F::ZERO), + IVal::Atom(idx) => self.fetch_f(*idx) == Some(&F::ZERO), _ => false, } } #[inline] pub fn dummy(&self) -> Ptr { - Ptr::new(Tag::Expr(Nil), self.raw_zero()) + self.zero(Tag::Expr(Nil)) } /// Creates an atom pointer from a `ZPtr`, with its hash. 
Hashing /// such pointer will result on the same original `ZPtr` #[inline] pub fn opaque(&self, z: ZPtr) -> Ptr { - let crate::z_ptr::ZPtr(tag, value) = z; - self.intern_atom(tag, value) + self.core.opaque(z) } pub fn intern_string(&self, s: &str) -> Ptr { if let Some(ptr) = self.string_ptr_cache.get(s) { *ptr } else { - let empty_str = Ptr::new(Tag::Expr(Str), self.raw_zero()); + let empty_str = self.zero(Tag::Expr(Str)); let ptr = s.chars().rev().fold(empty_str, |acc, c| { - intern_ptrs!(self, Tag::Expr(Str), self.char(c), acc) + self.core + .intern_tuple2([self.char(c), acc], Tag::Expr(Str), None) }); self.string_ptr_cache.insert(s.to_string(), Box::new(ptr)); self.ptr_string_cache.insert(ptr, s.to_string()); @@ -549,24 +424,24 @@ impl Store { return None; } loop { - match *ptr.raw() { - RawPtr::Atom(idx) => { - if self.fetch_f_by_idx(idx)? == &F::ZERO { + match *ptr.val() { + IVal::Atom(idx) => { + if self.fetch_f(idx)? == &F::ZERO { self.ptr_string_cache.insert(ptr, string.clone()); return Some(string); } else { return None; } } - RawPtr::Hash4(idx) => { - let [car_tag, car, cdr_tag, cdr] = self.fetch_raw_ptrs(idx)?; - assert_eq!(*car_tag, self.tag(Tag::Expr(Char))); - assert_eq!(*cdr_tag, self.tag(Tag::Expr(Str))); - match car { - RawPtr::Atom(idx) => { - let f = self.fetch_f_by_idx(*idx)?; + IVal::Tuple2(idx) => { + let [car, cdr] = self.core.fetch_tuple2(idx)?; + assert_eq!(car.tag(), &Tag::Expr(Char)); + assert_eq!(cdr.tag(), &Tag::Expr(Str)); + match car.val() { + IVal::Atom(idx) => { + let f = self.fetch_f(*idx)?; string.push(f.to_char().expect("malformed char pointer")); - ptr = Ptr::new(Tag::Expr(Str), *cdr) + ptr = *cdr; } _ => return None, } @@ -578,9 +453,10 @@ impl Store { } pub fn intern_symbol_path(&self, path: &[String]) -> Ptr { - let zero_sym = Ptr::new(Tag::Expr(Sym), self.raw_zero()); + let zero_sym = self.zero(Tag::Expr(Sym)); path.iter().fold(zero_sym, |acc, s| { - intern_ptrs!(self, Tag::Expr(Sym), self.intern_string(s), acc) + self.core + .intern_tuple2([self.intern_string(s), acc], Tag::Expr(Sym), None) }) } @@ -590,9 +466,9 @@ impl Store { } else { let path_ptr = self.intern_symbol_path(sym.path()); let sym_ptr = if sym == &lurk_sym("nil") { - Ptr::new(Tag::Expr(Nil), *path_ptr.raw()) + Ptr::new(Tag::Expr(Nil), *path_ptr.val()) } else if sym.is_keyword() { - Ptr::new(Tag::Expr(Key), *path_ptr.raw()) + Ptr::new(Tag::Expr(Key), *path_ptr.val()) } else { path_ptr }; @@ -606,22 +482,22 @@ impl Store { fn fetch_symbol_path(&self, mut idx: usize) -> Option> { let mut path = vec![]; loop { - let [car_tag, car, cdr_tag, cdr] = self.fetch_raw_ptrs(idx)?; - assert_eq!(*car_tag, self.tag(Tag::Expr(Str))); - assert_eq!(*cdr_tag, self.tag(Tag::Expr(Sym))); - let string = self.fetch_string(&Ptr::new(Tag::Expr(Str), *car))?; + let [car, cdr] = self.core.fetch_tuple2(idx)?; + assert_eq!(car.tag(), &Tag::Expr(Str)); + assert_eq!(cdr.tag(), &Tag::Expr(Sym)); + let string = self.fetch_string(car)?; path.push(string); - match cdr { - RawPtr::Atom(idx) => { - if self.fetch_f_by_idx(*idx)? == &F::ZERO { + match cdr.val() { + IVal::Atom(idx) => { + if self.fetch_f(*idx)? == &F::ZERO { path.reverse(); return Some(path); } else { return None; } } - RawPtr::Hash4(idx_cdr) => idx = *idx_cdr, + IVal::Tuple2(idx_cdr) => idx = *idx_cdr, _ => return None, } } @@ -631,9 +507,9 @@ impl Store { if let Some(sym) = self.ptr_symbol_cache.get(ptr) { Some(sym.clone()) } else { - match (ptr.tag(), ptr.raw()) { - (Tag::Expr(Sym), RawPtr::Atom(idx)) => { - if self.fetch_f_by_idx(*idx)? 
== &F::ZERO { + match (ptr.tag(), ptr.val()) { + (Tag::Expr(Sym), IVal::Atom(idx)) => { + if self.fetch_f(*idx)? == &F::ZERO { let sym = Symbol::root_sym(); self.ptr_symbol_cache.insert(*ptr, Box::new(sym.clone())); Some(sym) @@ -641,8 +517,8 @@ impl Store { None } } - (Tag::Expr(Key), RawPtr::Atom(idx)) => { - if self.fetch_f_by_idx(*idx)? == &F::ZERO { + (Tag::Expr(Key), IVal::Atom(idx)) => { + if self.fetch_f(*idx)? == &F::ZERO { let key = Symbol::root_key(); self.ptr_symbol_cache.insert(*ptr, Box::new(key.clone())); Some(key) @@ -650,13 +526,13 @@ impl Store { None } } - (Tag::Expr(Sym | Nil), RawPtr::Hash4(idx)) => { + (Tag::Expr(Sym | Nil), IVal::Tuple2(idx)) => { let path = self.fetch_symbol_path(*idx)?; let sym = Symbol::sym_from_vec(path); self.ptr_symbol_cache.insert(*ptr, Box::new(sym.clone())); Some(sym) } - (Tag::Expr(Key), RawPtr::Hash4(idx)) => { + (Tag::Expr(Key), IVal::Tuple2(idx)) => { let path = self.fetch_symbol_path(*idx)?; let key = Symbol::key_from_vec(path); self.ptr_symbol_cache.insert(*ptr, Box::new(key.clone())); @@ -704,8 +580,7 @@ impl Store { #[inline] pub fn add_comm(&self, hash: F, secret: F, payload: Ptr) { - self.comms - .insert(FWrap::(hash), Box::new((secret, payload))); + self.core.add_comm(FWrap(hash), FWrap(secret), payload) } #[inline] @@ -713,13 +588,10 @@ impl Store { self.comm(self.hide_and_return_z_payload(secret, payload).0) } + #[inline] pub fn hide_and_return_z_payload(&self, secret: F, payload: Ptr) -> (F, ZPtr) { - let z_ptr = self.hash_ptr(&payload); - let hash = self - .poseidon_cache - .hash3(&[secret, z_ptr.tag_field(), *z_ptr.value()]); - self.add_comm(hash, secret, payload); - (hash, z_ptr) + let (digest, z_ptr) = self.core.hide(FWrap(secret), payload); + (digest.0, z_ptr) } #[inline] @@ -728,23 +600,24 @@ impl Store { } #[inline] - pub fn open(&self, hash: F) -> Option<&(F, Ptr)> { - self.comms.get(&FWrap(hash)) + pub fn open(&self, hash: F) -> Option<&(FWrap, Ptr)> { + self.core.open(&FWrap(hash)) } #[inline] pub fn cons(&self, car: Ptr, cdr: Ptr) -> Ptr { - intern_ptrs!(self, Tag::Expr(Cons), car, cdr) + self.core.intern_tuple2([car, cdr], Tag::Expr(Cons), None) } #[inline] - pub fn intern_fun(&self, arg: Ptr, body: Ptr, env: Ptr) -> Ptr { - intern_ptrs!(self, Tag::Expr(Fun), arg, body, env, self.dummy()) + pub fn intern_fun(&self, args: Ptr, body: Ptr, env: Ptr) -> Ptr { + self.core + .intern_tuple4([args, body, env, self.dummy()], Tag::Expr(Fun), None) } #[inline] fn cont_atom(&self, cont_tag: ContTag) -> Ptr { - Ptr::new(Tag::Cont(cont_tag), RawPtr::Atom(self.hash8zeros_idx)) + Ptr::new(Tag::Cont(cont_tag), IVal::Atom(self.hash8zeros_idx)) } #[inline] @@ -773,9 +646,9 @@ impl Store { } /// Function specialized on deconstructing `Cons` pointers into their car/cdr - pub fn fetch_cons(&self, ptr: &Ptr) -> Option<(Ptr, Ptr)> { - match_opt!((ptr.tag(), ptr.raw()), (Tag::Expr(Cons), RawPtr::Hash4(idx)) => { - let [car, cdr] = fetch_ptrs!(self, 2, *idx)?; + pub fn fetch_cons(&self, ptr: &Ptr) -> Option<(&Ptr, &Ptr)> { + match_opt!((ptr.tag(), ptr.val()), (Tag::Expr(Cons), IVal::Tuple2(idx)) => { + let [car, cdr] = self.core.expect_tuple2(*idx); (car, cdr) }) } @@ -786,22 +659,25 @@ impl Store { /// * If applied on the empty string, returns `(nil, "")` /// * If applied on a string `"abc..."`, returns `('a', "bc...")` pub fn car_cdr(&self, ptr: &Ptr) -> Result<(Ptr, Ptr)> { - match (ptr.tag(), ptr.raw()) { + match (ptr.tag(), *ptr.val()) { (Tag::Expr(Nil), _) => { let nil = self.intern_nil(); Ok((nil, nil)) } - (Tag::Expr(Str), 
RawPtr::Atom(idx)) => { - if self.fetch_f_by_idx(*idx) == Some(&F::ZERO) { - let empty_str = Ptr::new(Tag::Expr(Str), self.raw_zero()); + (Tag::Expr(Str), IVal::Atom(idx)) => { + if self.fetch_f(idx) == Some(&F::ZERO) { + let empty_str = self.zero(Tag::Expr(Str)); Ok((self.intern_nil(), empty_str)) } else { bail!("Invalid empty string pointer") } } - (Tag::Expr(Cons | Str), RawPtr::Hash4(idx)) => { - let [car, cdr] = fetch_ptrs!(self, 2, *idx).context("couldn't fetch car/cdr")?; - Ok((car, cdr)) + (Tag::Expr(Cons | Str), IVal::Tuple2(idx)) => { + let [car, cdr] = self + .core + .fetch_tuple2(idx) + .context("couldn't fetch car/cdr")?; + Ok((*car, *cdr)) } _ => bail!("invalid pointer to extract car/cdr from"), } @@ -809,14 +685,17 @@ impl Store { /// Simpler version of `car_cdr` that doesn't deconstruct strings pub fn car_cdr_simple(&self, ptr: &Ptr) -> Result<(Ptr, Ptr)> { - match (ptr.tag(), ptr.raw()) { + match (ptr.tag(), ptr.val()) { (Tag::Expr(Nil), _) => { let nil = self.intern_nil(); Ok((nil, nil)) } - (Tag::Expr(Cons), RawPtr::Hash4(idx)) => { - let [car, cdr] = fetch_ptrs!(self, 2, *idx).context("couldn't fetch car/cdr")?; - Ok((car, cdr)) + (Tag::Expr(Cons), IVal::Tuple2(idx)) => { + let [car, cdr] = self + .core + .fetch_tuple2(*idx) + .context("couldn't fetch car/cdr")?; + Ok((*car, *cdr)) } _ => bail!("invalid pointer to extract car/cdr (simple) from"), } @@ -865,22 +744,20 @@ impl Store { if *ptr == self.intern_nil() { return Some((vec![], None)); } - match (ptr.tag(), ptr.raw()) { + match (ptr.tag(), ptr.val()) { (Tag::Expr(Nil), _) => panic!("Malformed nil expression"), - (Tag::Expr(Cons), RawPtr::Hash4(mut idx)) => { + (Tag::Expr(Cons), IVal::Tuple2(mut idx)) => { let mut list = vec![]; let mut last = None; - while let Some([car_tag, car, cdr_tag, cdr]) = self.fetch_raw_ptrs(idx) { - let car_ptr = self.raw_to_ptr(car_tag, *car)?; - let cdr_ptr = self.raw_to_ptr(cdr_tag, *cdr)?; - list.push(car_ptr); - match cdr_ptr.tag() { + while let Some([car, cdr]) = self.core.fetch_tuple2(idx) { + list.push(*car); + match cdr.tag() { Tag::Expr(Nil) => break, Tag::Expr(Cons) => { - idx = cdr.get_hash4()?; + idx = cdr.get_index2()?; } _ => { - last = Some(cdr_ptr); + last = Some(*cdr); break; } } @@ -891,17 +768,6 @@ impl Store { } } - pub fn expect_env_components(&self, idx: usize) -> [Ptr; 3] { - let [sym_pay, val_tag, val_pay, env_pay] = self.expect_raw_ptrs(idx); - let sym = Ptr::new(Tag::Expr(Sym), *sym_pay); - let val = Ptr::new( - self.fetch_tag(val_tag).expect("Couldn't fetch tag"), - *val_pay, - ); - let env = Ptr::new(Tag::Expr(Env), *env_pay); - [sym, val, env] - } - /// Fetches an environment pub fn fetch_env(&self, ptr: &Ptr) -> Option> { if ptr.tag() != &Tag::Expr(Env) { @@ -910,16 +776,17 @@ impl Store { if ptr == &self.intern_empty_env() { return Some(vec![]); } - let mut idx = ptr.raw().get_hash4()?; + + let mut env_val_mut = ptr.val(); let mut list = vec![]; - while let Some([sym_pay, val_tag, val_pay, env_pay]) = self.fetch_raw_ptrs(idx) { - let sym = Ptr::new(Tag::Expr(Sym), *sym_pay); - let val = self.raw_to_ptr(val_tag, *val_pay)?; - list.push((sym, val)); - if env_pay == self.intern_empty_env().raw() { + let empty_env_val = *self.intern_empty_env().val(); + while let Some([sym, v, env]) = self.core.fetch_compact_by_val(env_val_mut) { + list.push((*sym, *v)); + let env_val = env.val(); + if env_val == &empty_env_val { break; } - idx = env_pay.get_hash4().unwrap(); + env_val_mut = env_val; } Some(list) } @@ -930,20 +797,18 @@ impl Store { return None; } - let idx = 
ptr.raw().get_hash4()?; - self.fetch_raw_ptrs(idx) - .and_then(|[query_pay, val_tag, val_pay, deps_pay]| { - let query = Ptr::new(Tag::Expr(Cons), *query_pay); - let val = self.raw_to_ptr(val_tag, *val_pay)?; - + self.core + .fetch_compact_by_val(ptr.val()) + .map(|[query, v, deps]| { let nil = self.intern_nil(); - let deps = if deps_pay == nil.raw() { + let deps_val = deps.val(); + let deps = if deps_val == nil.val() { nil } else { - Ptr::new(Tag::Expr(Prov), *deps_pay) + Ptr::new(Tag::Expr(Prov), *deps_val) }; - Some((query, val, deps)) + (*query, *v, deps) }) } @@ -1003,126 +868,23 @@ impl Store { self.read(State::init_lurk_state().rccell(), input) } - /// Recursively hashes the children of a `Ptr` in order to obtain its - /// corresponding `ZPtr`. While traversing a `Ptr` tree, it consults the - /// cache of `Ptr`s that have already been hydrated and also populates this - /// cache for the new `Ptr`s. - /// - /// Warning: without cache hits, this function might blow up Rust's recursion - /// depth limit. This limitation is circumvented by calling `hydrate_z_cache` - /// beforehand or by using `hash_raw_ptr` instead, which is slightly slower. - fn hash_raw_ptr_unsafe(&self, ptr: &RawPtr) -> FWrap { - macro_rules! hash_raw { - ($hash:ident, $n:expr, $idx:expr) => {{ - if let Some(z) = self.z_cache.get(ptr) { - *z - } else { - let children_ptrs = self.expect_raw_ptrs::<$n>($idx); - let mut children_zs = [F::ZERO; $n]; - for (idx, child_ptr) in children_ptrs.iter().enumerate() { - children_zs[idx] = self.hash_raw_ptr_unsafe(child_ptr).0; - } - let z = FWrap(self.poseidon_cache.$hash(&children_zs)); - self.z_cache.insert(*ptr, Box::new(z)); - self.inverse_z_cache.insert(z, Box::new(*ptr)); - z - } - }}; - } - match ptr { - RawPtr::Atom(idx) => FWrap(*self.expect_f(*idx)), - RawPtr::Hash4(idx) => hash_raw!(hash4, 4, *idx), - RawPtr::Hash6(idx) => hash_raw!(hash6, 6, *idx), - RawPtr::Hash8(idx) => hash_raw!(hash8, 8, *idx), - } - } - - /// Hashes pointers in parallel, consuming chunks of length 256, which is a - /// reasonably safe limit. The danger of longer chunks is that the rightmost - /// pointers are the ones which are more likely to reach the recursion depth - /// limit in `hash_raw_ptr_unsafe`. So we move in smaller chunks from left to - /// right, populating the `z_cache`, which can rescue `hash_raw_ptr_unsafe` from - /// dangerously deep recursions - fn hydrate_z_cache_with_ptrs(&self, ptrs: &[&RawPtr]) { - ptrs.chunks(256).for_each(|chunk| { - chunk.par_iter().for_each(|ptr| { - self.hash_raw_ptr_unsafe(ptr); - }); - }); - } - /// Hashes enqueued `RawPtr` trees from the bottom to the top, avoiding deep /// recursions in `hash_raw_ptr`. Resets the `dehydrated` queue afterwards. + #[inline] pub fn hydrate_z_cache(&self) { - self.hydrate_z_cache_with_ptrs(&self.dehydrated.load().iter().collect::>()); - self.dehydrated.swap(Arc::new(FrozenVec::default())); + self.core.hydrate_z_cache() } - /// Whether the length of the dehydrated queue is within the safe limit. - /// Note: these values are experimental and may be machine dependant. 
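// --- Editor's note (illustrative sketch, not part of the diff) ---------------
// The stack-safety machinery removed here (threshold check, explicit work list,
// chunked parallel hydration) moves into `StoreCore`; the store keeps only thin
// entry points. Per the test added at the bottom of this diff, `hash_ptr_val`
// is the stack-safe variant, and `hydrate_z_cache` can be called first to do
// the bulk of the hashing bottom-up and in parallel. Hedged sketch:
fn digest_of_deep_ptr<F: LurkField>(store: &Store<F>, ptr: &Ptr) -> FWrap<F> {
    // optional: hydrate bottom-up first so later hashing is mostly cache hits
    store.hydrate_z_cache();
    // safe even for very deep structures, unlike `hash_ptr_val_unsafe`
    store.hash_ptr_val(ptr.val())
}
// ------------------------------------------------------------------------------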
#[inline] - fn is_below_safe_threshold(&self) -> bool { - if cfg!(debug_assertions) { - // not release mode - self.dehydrated.load().len() < 443 - } else { - // release mode - self.dehydrated.load().len() < 2497 - } - } - - /// Safe version of `hash_raw_ptr_unsafe` that doesn't hit a stack overflow by - /// precomputing the pointers that need to be hashed in order to hash the - /// provided `ptr` - pub fn hash_raw_ptr(&self, ptr: &RawPtr) -> FWrap { - if self.is_below_safe_threshold() { - // just run `hash_raw_ptr_unsafe` for extra speed when the dehydrated - // queue is small enough - return self.hash_raw_ptr_unsafe(ptr); - } - let mut ptrs: IndexSet<&RawPtr> = IndexSet::default(); - let mut stack = vec![ptr]; - macro_rules! feed_loop { - ($x:expr) => { - if $x.is_hash() { - if self.z_cache.get($x).is_none() { - if ptrs.insert($x) { - stack.push($x); - } - } - } - }; - } - while let Some(ptr) = stack.pop() { - match ptr { - RawPtr::Atom(..) => (), - RawPtr::Hash4(idx) => { - for ptr in self.expect_raw_ptrs::<4>(*idx) { - feed_loop!(ptr) - } - } - RawPtr::Hash6(idx) => { - for ptr in self.expect_raw_ptrs::<6>(*idx) { - feed_loop!(ptr) - } - } - RawPtr::Hash8(idx) => { - for ptr in self.expect_raw_ptrs::<8>(*idx) { - feed_loop!(ptr) - } - } - } - } - ptrs.reverse(); - self.hydrate_z_cache_with_ptrs(&ptrs.into_iter().collect::>()); - // Now it's okay to call `hash_raw_ptr_unsafe` - self.hash_raw_ptr_unsafe(ptr) + pub fn hash_ptr_val(&self, ptr_val: &IVal) -> FWrap { + self.core.hash_ptr_val(ptr_val) } /// Hydrates a `Ptr`. That is, creates a `ZPtr` with the tag of the pointer /// and the hash of its raw pointer + #[inline] pub fn hash_ptr(&self, ptr: &Ptr) -> ZPtr { - ZPtr::from_parts(*ptr.tag(), self.hash_raw_ptr(ptr.raw()).0) + self.core.hash_ptr(ptr) } /// Constructs a vector of scalars that correspond to tags and hashes computed @@ -1130,45 +892,32 @@ impl Store { pub fn to_scalar_vector(&self, ptrs: &[Ptr]) -> Vec { ptrs.iter() .fold(Vec::with_capacity(2 * ptrs.len()), |mut acc, ptr| { - let tag = ptr.tag().to_field(); - let payload = self.hash_raw_ptr(ptr.raw()).0; + let z_ptr = self.hash_ptr(ptr); + let tag = z_ptr.tag().to_field(); + let payload = z_ptr.val().0; acc.push(tag); acc.push(payload); acc }) } - pub fn to_scalar_vector_raw(&self, ptrs: &[RawPtr]) -> Vec { - ptrs.iter().map(|ptr| self.hash_raw_ptr(ptr).0).collect() - } - - /// Equality of the content-addressed versions of two pointers - #[inline] - pub fn raw_ptr_eq(&self, a: &RawPtr, b: &RawPtr) -> bool { - self.hash_raw_ptr(a) == self.hash_raw_ptr(b) - } - #[inline] pub fn ptr_eq(&self, a: &Ptr, b: &Ptr) -> bool { - self.hash_ptr(a) == self.hash_ptr(b) + self.core.ptr_eq(a, b) } - /// Attempts to recover the `RawPtr` that corresponds to a field element `z` - /// from `inverse_z_cache`. If the mapping is not there, returns a raw atom - /// pointer with value + /// Attempts to recover the `Ptr` that corresponds to a `ZPtr`. If the mapping + /// is not there, returns an atom pointer with the same tag and value #[inline] - pub fn to_raw_ptr(&self, z: &FWrap) -> RawPtr { - self.inverse_z_cache - .get(z) - .cloned() - .unwrap_or_else(|| self.intern_raw_atom(z.0)) + pub fn to_ptr_val(&self, hash: &FWrap) -> IVal { + self.core.to_ptr_val(hash) } /// Attempts to recover the `Ptr` that corresponds to a `ZPtr`. 
If the mapping /// is not there, returns an atom pointer with the same tag and value #[inline] pub fn to_ptr(&self, z_ptr: &ZPtr) -> Ptr { - Ptr::new(*z_ptr.tag(), self.to_raw_ptr(&FWrap(*z_ptr.value()))) + self.core.to_ptr(z_ptr) } } @@ -1201,7 +950,7 @@ impl Ptr { format!("\"{str}\"") } Char => { - if let Some(c) = store.fetch_f(self).and_then(F::to_char) { + if let Some(c) = store.fetch_f_by_val(self.val()).and_then(F::to_char) { format!("\'{c}\'") } else { "".into() @@ -1225,7 +974,7 @@ impl Ptr { ) } Num => { - let Some(f) = store.fetch_f(self) else { + let Some(f) = store.fetch_f_by_val(self.val()) else { return "".into(); }; let Some(u) = f.to_u64() else { @@ -1234,17 +983,17 @@ impl Ptr { u.to_string() } U64 => { - if let Some(u) = store.fetch_f(self).and_then(F::to_u64) { + if let Some(u) = store.fetch_f_by_val(self.val()).and_then(F::to_u64) { format!("{u}u64") } else { "".into() } } Fun => { - let Some(idx) = self.raw().get_hash8() else { + let Some(idx) = self.val().get_tuple4_idx() else { return "".into(); }; - let Some([vars, body, _, _]) = fetch_ptrs!(store, 4, idx) else { + let Some([vars, body, _, _]) = store.core.fetch_tuple4(idx) else { return "".into(); }; match vars.tag() { @@ -1262,10 +1011,10 @@ impl Ptr { } } Rec => { - let Some(idx) = self.raw().get_hash8() else { + let Some(idx) = self.val().get_tuple4_idx() else { return "".into(); }; - let Some([vars, body, _, _]) = fetch_ptrs!(store, 4, idx) else { + let Some([vars, body, _, _]) = store.core.fetch_tuple4(idx) else { return "".into(); }; match vars.tag() { @@ -1283,10 +1032,10 @@ impl Ptr { } } Thunk => { - let Some(idx) = self.raw().get_hash4() else { + let Some(idx) = self.val().get_tuple2_idx() else { return "".into(); }; - let Some([val, cont]) = fetch_ptrs!(store, 2, idx) else { + let Some([val, cont]) = store.core.fetch_tuple2(idx) else { return "".into(); }; format!( @@ -1296,21 +1045,21 @@ impl Ptr { ) } Comm => { - let Some(idx) = self.raw().get_atom() else { + let Some(idx) = self.val().get_atom_idx() else { return "".into(); }; let f = store.expect_f(idx); - if store.comms.get(&FWrap(*f)).is_some() { + if store.core.can_open(&FWrap(*f)) { format!("(comm 0x{})", f.hex_digits()) } else { format!("", f.hex_digits()) } } Cproc => { - let Some(idx) = self.raw().get_hash4() else { + let Some(idx) = self.val().get_tuple2_idx() else { return "".into(); }; - let Some([cproc_name, args]) = fetch_ptrs!(store, 2, idx) else { + let Some([cproc_name, args]) = store.core.fetch_tuple2(idx) else { return "".into(); }; format!( @@ -1411,10 +1160,10 @@ impl Ptr { state: &State, ) -> String { { - let Some(idx) = self.raw().get_hash8() else { + let Some(idx) = self.val().get_tuple4_idx() else { return format!(""); }; - let Some([a, cont, _, _]) = fetch_ptrs!(store, 4, idx) else { + let Some([a, cont, _, _]) = store.core.fetch_tuple4(idx) else { return format!(""); }; format!( @@ -1433,10 +1182,10 @@ impl Ptr { state: &State, ) -> String { { - let Some(idx) = self.raw().get_hash8() else { + let Some(idx) = self.val().get_tuple4_idx() else { return format!(""); }; - let Some([a, b, cont, _]) = fetch_ptrs!(store, 4, idx) else { + let Some([a, b, cont, _]) = store.core.fetch_tuple4(idx) else { return format!(""); }; let (fa, fb) = fields; @@ -1457,10 +1206,10 @@ impl Ptr { state: &State, ) -> String { { - let Some(idx) = self.raw().get_hash8() else { + let Some(idx) = self.val().get_tuple4_idx() else { return format!(""); }; - let Some([a, b, c, cont]) = fetch_ptrs!(store, 4, idx) else { + let Some([a, b, c, cont]) = 
store.core.fetch_tuple4(idx) else { return format!(""); }; let (fa, fb, fc) = fields; @@ -1484,7 +1233,7 @@ mod tests { use crate::{ field::LurkField, - lem::Tag, + lem::{pointers::IVal, Tag}, parser::position::Pos, state::{initial_lurk_state, lurk_sym}, syntax::Syntax, @@ -1492,7 +1241,25 @@ mod tests { Num, Symbol, }; - use super::{Ptr, RawPtr, Store}; + use super::{Ptr, Store}; + + #[test] + fn test_ptr_hashing_safety() { + let string = String::from_utf8(vec![b'0'; 4096]).unwrap(); + let store = Store::::default(); + let ptr = store.intern_string(&string); + // `hash_ptr_val_unsafe` would overflow the stack, whereas `hash_ptr_val` works + let x = store.core.hash_ptr_val(ptr.val()); + + let store = Store::::default(); + let ptr = store.intern_string(&string); + store.hydrate_z_cache(); + // but `hash_ptr_val_unsafe` works just fine after manual hydration + let y = store.core.hash_ptr_val_unsafe(ptr.val()); + + // and, of course, `y` and `x` should be equal + assert_eq!(x, y); + } #[test] fn test_car_cdr() { @@ -1571,29 +1338,40 @@ mod tests { let z_foo = store.hash_ptr(&foo); assert_eq!(z_foo.tag(), &zero_tag); - assert_eq!(z_foo.value(), &zero); + assert_eq!(z_foo.val().0, zero); let comm = store.hide(zero, foo); assert_eq!(comm.tag(), &Tag::Expr(ExprTag::Comm)); assert_eq!( store.expect_f(comm.get_atom().unwrap()), - &store.poseidon_cache.hash3(&[zero; 3]) + &store.core.hasher.poseidon_cache.hash3(&[zero; 3]) ); - let ptr2 = intern_ptrs!(store, zero_tag, foo, foo); + let ptr2 = store.core.intern_tuple2([foo, foo], zero_tag, None); let z_ptr2 = store.hash_ptr(&ptr2); assert_eq!(z_ptr2.tag(), &zero_tag); - assert_eq!(z_ptr2.value(), &store.poseidon_cache.hash4(&[zero; 4])); + assert_eq!( + z_ptr2.val().0, + store.core.hasher.poseidon_cache.hash4(&[zero; 4]) + ); - let ptr3 = intern_ptrs!(store, zero_tag, foo, foo, foo); + let ptr3 = store.core.intern_tuple3([foo, foo, foo], zero_tag, None); let z_ptr3 = store.hash_ptr(&ptr3); assert_eq!(z_ptr3.tag(), &zero_tag); - assert_eq!(z_ptr3.value(), &store.poseidon_cache.hash6(&[zero; 6])); + assert_eq!( + z_ptr3.val().0, + store.core.hasher.poseidon_cache.hash6(&[zero; 6]) + ); - let ptr4 = intern_ptrs!(store, zero_tag, foo, foo, foo, foo); + let ptr4 = store + .core + .intern_tuple4([foo, foo, foo, foo], zero_tag, None); let z_ptr4 = store.hash_ptr(&ptr4); assert_eq!(z_ptr4.tag(), &zero_tag); - assert_eq!(z_ptr4.value(), &store.poseidon_cache.hash8(&[zero; 8])); + assert_eq!( + z_ptr4.val().0, + store.core.hasher.poseidon_cache.hash8(&[zero; 8]) + ); } #[test] @@ -1623,34 +1401,16 @@ mod tests { assert!(z_list == z_list1 || z_list == z_list2); } - #[test] - fn test_ptr_hashing_safety() { - let string = String::from_utf8(vec![b'0'; 4096]).unwrap(); - let store = Store::::default(); - let ptr = store.intern_string(&string); - // `hash_raw_ptr_unsafe` would overflow the stack, whereas `hash_raw_ptr` works - let x = store.hash_raw_ptr(ptr.raw()); - - let store = Store::::default(); - let ptr = store.intern_string(&string); - store.hydrate_z_cache(); - // but `hash_raw_ptr_unsafe` works just fine after manual hydration - let y = store.hash_raw_ptr_unsafe(ptr.raw()); - - // and, of course, those functions result on the same `ZPtr` - assert_eq!(x, y); - } - #[test] fn string_hashing() { let s = &Store::::default(); let hi_ptr = s.intern_string("hi"); - let hi_hash_manual = s.poseidon_cache.hash4(&[ + let hi_hash_manual = s.core.hasher.poseidon_cache.hash4(&[ ExprTag::Char.to_field(), Fr::from_char('h'), ExprTag::Str.to_field(), - 
+            s.core.hasher.poseidon_cache.hash4(&[
                 ExprTag::Char.to_field(),
                 Fr::from_char('i'),
                 ExprTag::Str.to_field(),
@@ -1658,7 +1418,7 @@
             ]),
         ]);

-        let hi_hash = s.hash_ptr(&hi_ptr).1;
+        let hi_hash = s.hash_ptr(&hi_ptr).val().0;
         assert_eq!(hi_hash, hi_hash_manual);
     }

@@ -1672,42 +1432,42 @@
         let foo_z_ptr = s.hash_ptr(&foo_ptr);
         let bar_z_ptr = s.hash_ptr(&bar_ptr);

-        let foo_bar_hash_manual = s.poseidon_cache.hash4(&[
+        let foo_bar_hash_manual = s.core.hasher.poseidon_cache.hash4(&[
             ExprTag::Str.to_field(),
-            bar_z_ptr.1,
+            bar_z_ptr.val().0,
             ExprTag::Sym.to_field(),
-            s.poseidon_cache.hash4(&[
+            s.core.hasher.poseidon_cache.hash4(&[
                 ExprTag::Str.to_field(),
-                foo_z_ptr.1,
+                foo_z_ptr.val().0,
                 ExprTag::Sym.to_field(),
                 Fr::ZERO,
             ]),
         ]);

-        let foo_bar_hash = s.hash_ptr(&foo_bar_ptr).1;
+        let foo_bar_hash = s.hash_ptr(&foo_bar_ptr).val().0;
         assert_eq!(foo_bar_hash, foo_bar_hash_manual);
     }

     // helper function to test syntax interning roundtrip
     fn fetch_syntax(ptr: Ptr, store: &Store<Fr>) -> Syntax<Fr> {
         match ptr.parts() {
-            (Tag::Expr(ExprTag::Num), RawPtr::Atom(idx)) => {
+            (Tag::Expr(ExprTag::Num), IVal::Atom(idx)) => {
                 Syntax::Num(Pos::No, Num::Scalar(*store.expect_f(*idx)))
             }
-            (Tag::Expr(ExprTag::Char), RawPtr::Atom(idx)) => {
+            (Tag::Expr(ExprTag::Char), IVal::Atom(idx)) => {
                 Syntax::Char(Pos::No, store.expect_f(*idx).to_char().unwrap())
             }
-            (Tag::Expr(ExprTag::U64), RawPtr::Atom(idx)) => Syntax::UInt(
+            (Tag::Expr(ExprTag::U64), IVal::Atom(idx)) => Syntax::UInt(
                 Pos::No,
                 crate::UInt::U64(store.expect_f(*idx).to_u64_unchecked()),
             ),
-            (Tag::Expr(ExprTag::Sym | ExprTag::Key), RawPtr::Atom(_) | RawPtr::Hash4(_)) => {
+            (Tag::Expr(ExprTag::Sym | ExprTag::Key), IVal::Atom(_) | IVal::Tuple2(_)) => {
                 Syntax::Symbol(Pos::No, store.fetch_symbol(&ptr).unwrap().into())
             }
-            (Tag::Expr(ExprTag::Str), RawPtr::Atom(_) | RawPtr::Hash4(_)) => {
+            (Tag::Expr(ExprTag::Str), IVal::Atom(_) | IVal::Tuple2(_)) => {
                 Syntax::String(Pos::No, store.fetch_string(&ptr).unwrap())
             }
-            (Tag::Expr(ExprTag::Cons), RawPtr::Hash4(_)) => {
+            (Tag::Expr(ExprTag::Cons), IVal::Tuple2(_)) => {
                 let (elts, last) = store.fetch_list(&ptr).unwrap();
                 let elts = elts
                     .into_iter()
@@ -1719,7 +1479,7 @@
                     Syntax::List(Pos::No, elts)
                 }
             }
-            (Tag::Expr(ExprTag::Nil), RawPtr::Hash4(_)) => {
+            (Tag::Expr(ExprTag::Nil), IVal::Tuple2(_)) => {
                 Syntax::Symbol(Pos::No, lurk_sym("nil").into())
             }
             _ => unreachable!(),
diff --git a/src/lem/store_core.rs b/src/lem/store_core.rs
new file mode 100644
index 000000000..cba62126d
--- /dev/null
+++ b/src/lem/store_core.rs
@@ -0,0 +1,398 @@
+use arc_swap::ArcSwap;
+use elsa::sync::{index_set::FrozenIndexSet, FrozenMap, FrozenVec};
+use indexmap::IndexSet;
+use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
+use std::sync::Arc;
+
+use super::pointers::{GPtr, IPtr, IVal};
+
+pub trait StoreHasher<T, D> {
+    fn hash_ptrs(&self, ptrs: Vec<GPtr<T, D>>) -> D;
+    fn hash_compact(&self, d1: D, t2: T, d2: D, d3: D) -> D;
+    fn hash_commitment(&self, secret: D, payload: GPtr<T, D>) -> D;
+}
+
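+// NOTE (illustrative): any (tag, digest, hasher) triple satisfying the bounds
+// below can instantiate `StoreCore`. Lurk's `Store<F>` plugs in a Poseidon-backed
+// `StoreHasher` from `store.rs`; the tests at the bottom of this file sketch a
+// toy instantiation over `u16` tags and `u64` digests.
+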
+#[derive(Debug)]
+pub struct StoreCore<T, D, H: StoreHasher<T, D>> {
+    atom: FrozenIndexSet<Box<D>>,
+    tuple2: FrozenIndexSet<Box<[IPtr<T>; 2]>>,
+    tuple3: FrozenIndexSet<Box<[IPtr<T>; 3]>>,
+    tuple4: FrozenIndexSet<Box<[IPtr<T>; 4]>>,
+
+    pub hasher: H,
+
+    pub comms: FrozenMap<D, Box<(D, IPtr<T>)>>, // hash -> (secret, payload)
+
+    dehydrated: ArcSwap<FrozenVec<Box<IVal>>>,
+    z_cache: FrozenMap<IVal, Box<D>>,
+    inverse_z_cache: FrozenMap<D, Box<IVal>>,
+}
+
+impl<
+        T: PartialEq + std::cmp::Eq + std::hash::Hash,
+        D: PartialEq + std::cmp::Eq + std::hash::Hash,
+        H: StoreHasher<T, D> + Default,
+    > Default for StoreCore<T, D, H>
+{
+    fn default() -> Self {
+        Self {
+            atom: Default::default(),
+            tuple2: Default::default(),
+            tuple3: Default::default(),
+            tuple4: Default::default(),
+            hasher: Default::default(),
+            comms: Default::default(),
+            dehydrated: Default::default(),
+            z_cache: Default::default(),
+            inverse_z_cache: Default::default(),
+        }
+    }
+}
+
+pub trait HasZero {
+    fn zero() -> Self;
+}
+
+impl<
+        T: Copy + PartialEq + std::cmp::Eq + std::hash::Hash + Send + Sync,
+        D: Copy + PartialEq + std::cmp::Eq + std::hash::Hash + Send + Sync,
+        H: StoreHasher<T, D> + Sync,
+    > StoreCore<T, D, H>
+{
+    #[inline]
+    pub fn intern_digest(&self, d: D) -> (usize, bool) {
+        self.atom.insert_probe(Box::new(d))
+    }
+
+    #[inline]
+    pub fn fetch_digest(&self, idx: usize) -> Option<&D> {
+        self.atom.get_index(idx)
+    }
+
+    #[inline]
+    pub fn expect_digest(&self, idx: usize) -> &D {
+        self.fetch_digest(idx).expect("Digest wasn't interned")
+    }
+
+    pub fn intern_tuple2(&self, ptrs: [IPtr<T>; 2], tag: T, digest: Option<D>) -> IPtr<T> {
+        let (idx, inserted) = self.tuple2.insert_probe(Box::new(ptrs));
+        let ptr = IPtr::new(tag, IVal::Tuple2(idx));
+        if let Some(digest) = digest {
+            let val = *ptr.val();
+            self.z_cache.insert(val, Box::new(digest));
+            self.inverse_z_cache.insert(digest, Box::new(val));
+        } else if inserted {
+            self.dehydrated.load().push(Box::new(*ptr.val()));
+        }
+        ptr
+    }
+
+    #[inline]
+    pub fn fetch_tuple2(&self, idx: usize) -> Option<&[IPtr<T>; 2]> {
+        self.tuple2.get_index(idx)
+    }
+
+    #[inline]
+    pub fn expect_tuple2(&self, idx: usize) -> &[IPtr<T>; 2] {
+        self.fetch_tuple2(idx).expect("Tuple2 not interned")
+    }
+
+    pub fn intern_tuple3(&self, ptrs: [IPtr<T>; 3], tag: T, digest: Option<D>) -> IPtr<T> {
+        let (idx, inserted) = self.tuple3.insert_probe(Box::new(ptrs));
+        let ptr = IPtr::new(tag, IVal::Tuple3(idx));
+        if let Some(digest) = digest {
+            let val = *ptr.val();
+            self.z_cache.insert(val, Box::new(digest));
+            self.inverse_z_cache.insert(digest, Box::new(val));
+        } else if inserted {
+            self.dehydrated.load().push(Box::new(*ptr.val()));
+        }
+        ptr
+    }
+
+    #[inline]
+    pub fn fetch_tuple3(&self, idx: usize) -> Option<&[IPtr<T>; 3]> {
+        self.tuple3.get_index(idx)
+    }
+
+    #[inline]
+    pub fn expect_tuple3(&self, idx: usize) -> &[IPtr<T>; 3] {
+        self.fetch_tuple3(idx).expect("Tuple3 not interned")
+    }
+
+    pub fn intern_tuple4(&self, ptrs: [IPtr<T>; 4], tag: T, digest: Option<D>) -> IPtr<T> {
+        let (idx, inserted) = self.tuple4.insert_probe(Box::new(ptrs));
+        let ptr = IPtr::new(tag, IVal::Tuple4(idx));
+        if let Some(digest) = digest {
+            let val = *ptr.val();
+            self.z_cache.insert(val, Box::new(digest));
+            self.inverse_z_cache.insert(digest, Box::new(val));
+        } else if inserted {
+            self.dehydrated.load().push(Box::new(*ptr.val()));
+        }
+        ptr
+    }
+
+    #[inline]
+    pub fn fetch_tuple4(&self, idx: usize) -> Option<&[IPtr<T>; 4]> {
+        self.tuple4.get_index(idx)
+    }
+
+    #[inline]
+    pub fn expect_tuple4(&self, idx: usize) -> &[IPtr<T>; 4] {
+        self.fetch_tuple4(idx).expect("Tuple4 not interned")
+    }
+
+    pub fn intern_compact(&self, ptrs: [IPtr<T>; 3], tag: T, digest: Option<D>) -> IPtr<T> {
+        let (idx, inserted) = self.tuple3.insert_probe(Box::new(ptrs));
+        let ptr = IPtr::new(tag, IVal::Compact(idx));
+        if let Some(digest) = digest {
+            let val = *ptr.val();
+            self.z_cache.insert(val, Box::new(digest));
+            self.inverse_z_cache.insert(digest, Box::new(val));
+        } else if inserted {
+            self.dehydrated.load().push(Box::new(*ptr.val()));
+        }
+        ptr
+    }
+
+    #[inline]
+    pub fn fetch_compact(&self, idx: usize) -> Option<&[IPtr<T>; 3]> {
+        self.fetch_tuple3(idx)
+    }
+
+    #[inline]
+    pub fn fetch_compact_by_val(&self, ptr_val: &IVal) -> Option<&[IPtr<T>; 3]> {
+        ptr_val
+            .get_compact_idx()
+            .and_then(|idx| self.fetch_compact(idx))
+    }
+
+    #[inline]
+    pub fn expect_compact(&self, idx: usize) -> &[IPtr<T>; 3] {
+        self.fetch_compact(idx).expect("Compact not interned")
+    }
+
+    /// Recursively hashes the children of a `Ptr` in order to obtain its
+    /// corresponding digest. While traversing a `Ptr` tree, it consults the
+    /// cache of `Ptr`s that have already been hydrated and also populates this
+    /// cache for the new `Ptr`s.
+    ///
+    /// Warning: without cache hits, this function may exceed Rust's recursion
+    /// depth limit and overflow the stack. This limitation is circumvented by
+    /// calling `hydrate_z_cache` beforehand or by using `hash_ptr_val` instead,
+    /// which is slightly slower.
+    pub fn hash_ptr_val_unsafe(&self, ptr_val: &IVal) -> D {
+        if let Some(digest) = self.z_cache.get(ptr_val) {
+            *digest
+        } else {
+            let digest = match *ptr_val {
+                IVal::Atom(idx) => *self.atom.get_index(idx).expect("Atom not interned"),
+                IVal::Tuple2(idx) => {
+                    let [a, b] = self.expect_tuple2(idx);
+                    let a_digest = self.hash_ptr_val_unsafe(a.val());
+                    let b_digest = self.hash_ptr_val_unsafe(b.val());
+                    let a = GPtr::new(*a.tag(), a_digest);
+                    let b = GPtr::new(*b.tag(), b_digest);
+                    self.hasher.hash_ptrs(vec![a, b])
+                }
+                IVal::Tuple3(idx) => {
+                    let [a, b, c] = self.expect_tuple3(idx);
+                    let a_digest = self.hash_ptr_val_unsafe(a.val());
+                    let b_digest = self.hash_ptr_val_unsafe(b.val());
+                    let c_digest = self.hash_ptr_val_unsafe(c.val());
+                    let a = GPtr::new(*a.tag(), a_digest);
+                    let b = GPtr::new(*b.tag(), b_digest);
+                    let c = GPtr::new(*c.tag(), c_digest);
+                    self.hasher.hash_ptrs(vec![a, b, c])
+                }
+                IVal::Tuple4(idx) => {
+                    let [a, b, c, d] = self.expect_tuple4(idx);
+                    let a_digest = self.hash_ptr_val_unsafe(a.val());
+                    let b_digest = self.hash_ptr_val_unsafe(b.val());
+                    let c_digest = self.hash_ptr_val_unsafe(c.val());
+                    let d_digest = self.hash_ptr_val_unsafe(d.val());
+                    let a = GPtr::new(*a.tag(), a_digest);
+                    let b = GPtr::new(*b.tag(), b_digest);
+                    let c = GPtr::new(*c.tag(), c_digest);
+                    let d = GPtr::new(*d.tag(), d_digest);
+                    self.hasher.hash_ptrs(vec![a, b, c, d])
+                }
+                IVal::Compact(idx) => {
+                    let [a, b, c] = self.expect_compact(idx);
+                    let a_digest = self.hash_ptr_val_unsafe(a.val());
+                    let b_digest = self.hash_ptr_val_unsafe(b.val());
+                    let c_digest = self.hash_ptr_val_unsafe(c.val());
+                    self.hasher
+                        .hash_compact(a_digest, *b.tag(), b_digest, c_digest)
+                }
+            };
+            self.z_cache.insert(*ptr_val, Box::new(digest));
+            self.inverse_z_cache.insert(digest, Box::new(*ptr_val));
+            digest
+        }
+    }
+
+    /// Hashes pointers in parallel, consuming chunks of length 256, which is a
+    /// reasonably safe limit. The danger of longer chunks is that the rightmost
+    /// pointers are the ones which are more likely to reach the recursion depth
+    /// limit in `hash_ptr_val_unsafe`. So we move in smaller chunks from left to
+    /// right, populating the `z_cache`, which can rescue `hash_ptr_val_unsafe`
+    /// from dangerous deep recursions.
+    fn hydrate_z_cache_with_ptr_vals(&self, ptrs: &[&IVal]) {
+        ptrs.chunks(256).for_each(|chunk| {
+            chunk.par_iter().for_each(|ptr| {
+                self.hash_ptr_val_unsafe(ptr);
+            });
+        });
+    }
+
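+    // For intuition, the intended call pattern looks like this (toy types and
+    // `ToyHasher` refer to the illustrative tests at the bottom of this file):
+    //
+    //     let core: StoreCore<u16, u64, ToyHasher> = StoreCore::default();
+    //     let ptr = core.intern_tuple2([a, b], 0, None); // enqueued as dehydrated
+    //     core.hydrate_z_cache();                        // digests cached in chunks
+    //     let digest = core.hash_ptr_val_unsafe(ptr.val()); // guaranteed cache hit
+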
+    /// Hashes enqueued `Ptr` trees in chunks, avoiding deep recursions in
+    /// `hash_ptr_val_unsafe`. Resets the `dehydrated` queue afterwards.
+    pub fn hydrate_z_cache(&self) {
+        self.hydrate_z_cache_with_ptr_vals(&self.dehydrated.load().iter().collect::<Vec<_>>());
+        self.dehydrated.swap(Arc::new(FrozenVec::default()));
+    }
+
+    /// Whether the length of the dehydrated queue is within the safe limit.
+    /// Note: these values are experimental and may be machine-dependent.
+    #[inline]
+    fn is_below_safe_threshold(&self) -> bool {
+        if cfg!(debug_assertions) {
+            // not release mode
+            self.dehydrated.load().len() < 443
+        } else {
+            // release mode
+            self.dehydrated.load().len() < 2497
+        }
+    }
+
+    /// Safe version of `hash_ptr_val_unsafe` that doesn't hit a stack overflow
+    /// by precomputing the pointers that need to be hashed in order to hash the
+    /// provided `ptr_val`.
+    pub fn hash_ptr_val(&self, ptr_val: &IVal) -> D {
+        if self.is_below_safe_threshold() {
+            // just run `hash_ptr_val_unsafe` for extra speed when the dehydrated
+            // queue is small enough
+            return self.hash_ptr_val_unsafe(ptr_val);
+        }
+        let mut ptrs_vals: IndexSet<&IVal> = IndexSet::default();
+        let mut stack = vec![ptr_val];
+        macro_rules! feed_loop {
+            ($x:expr) => {
+                if $x.is_compound() {
+                    if self.z_cache.get($x).is_none() {
+                        if ptrs_vals.insert($x) {
+                            stack.push($x);
+                        }
+                    }
+                }
+            };
+        }
+        while let Some(ptr_val) = stack.pop() {
+            match ptr_val {
+                IVal::Atom(..) => (),
+                IVal::Tuple2(idx) => {
+                    for ptr in self.expect_tuple2(*idx) {
+                        feed_loop!(ptr.val())
+                    }
+                }
+                IVal::Tuple3(idx) | IVal::Compact(idx) => {
+                    for ptr in self.expect_tuple3(*idx) {
+                        feed_loop!(ptr.val())
+                    }
+                }
+                IVal::Tuple4(idx) => {
+                    for ptr in self.expect_tuple4(*idx) {
+                        feed_loop!(ptr.val())
+                    }
+                }
+            }
+        }
+        ptrs_vals.reverse();
+        self.hydrate_z_cache_with_ptr_vals(&ptrs_vals.into_iter().collect::<Vec<_>>());
+        // Now it's okay to call `hash_ptr_val_unsafe`
+        self.hash_ptr_val_unsafe(ptr_val)
+    }
+
+    #[inline]
+    pub fn hash_ptr(&self, ptr: &IPtr<T>) -> GPtr<T, D> {
+        GPtr::new(*ptr.tag(), self.hash_ptr_val(ptr.val()))
+    }
+
+    #[inline]
+    pub fn add_comm(&self, digest: D, secret: D, payload: IPtr<T>) {
+        self.comms.insert(digest, Box::new((secret, payload)));
+    }
+
+    pub fn hide(&self, secret: D, payload: IPtr<T>) -> (D, GPtr<T, D>) {
+        let z = self.hash_ptr(&payload);
+        let digest = self.hasher.hash_commitment(secret, z);
+        self.add_comm(digest, secret, payload);
+        (digest, z)
+    }
+
+    #[inline]
+    pub fn open(&self, digest: &D) -> Option<&(D, IPtr<T>)> {
+        self.comms.get(digest)
+    }
+
+    #[inline]
+    pub fn can_open(&self, digest: &D) -> bool {
+        self.open(digest).is_some()
+    }
+
+    /// `Ptr` equality w.r.t. content-addresses
+    #[inline]
+    pub fn ptr_eq(&self, a: &IPtr<T>, b: &IPtr<T>) -> bool {
+        self.hash_ptr(a) == self.hash_ptr(b)
+    }
+
+    #[inline]
+    pub fn intern_atom(&self, tag: T, d: D) -> IPtr<T> {
+        IPtr::new(tag, IVal::Atom(self.intern_digest(d).0))
+    }
+
+    /// Creates an atom pointer from a ZPtr, with its hash. Hashing such a
+    /// pointer will result in the same original ZPtr
+    #[inline]
+    pub fn opaque(&self, z: GPtr<T, D>) -> IPtr<T> {
+        let (tag, value) = z.into_parts();
+        self.intern_atom(tag, value)
+    }
+
+    #[inline]
+    pub fn to_ptr_val(&self, digest: &D) -> IVal {
+        self.inverse_z_cache
+            .get(digest)
+            .cloned()
+            .unwrap_or_else(|| IVal::Atom(self.intern_digest(*digest).0))
+    }
+
+    /// Attempts to recover the `Ptr` from `inverse_z_cache`. If the mapping is
+    /// not there, returns the corresponding opaque pointer instead.
+    #[inline]
+    pub fn to_ptr(&self, z: &GPtr<T, D>) -> IPtr<T> {
+        GPtr::new(*z.tag(), self.to_ptr_val(z.val()))
+    }
+}
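+
+// Illustrative only: `ToyHasher` is a made-up `StoreHasher` over `u16` tags and
+// `u64` digests, here just to exercise `StoreCore` in isolation. Lurk's real
+// hasher (in `store.rs`) is Poseidon-based; nothing below is part of its API.
+#[cfg(test)]
+mod tests {
+    use super::{GPtr, StoreCore, StoreHasher};
+
+    #[derive(Default)]
+    struct ToyHasher;
+
+    impl StoreHasher<u16, u64> for ToyHasher {
+        fn hash_ptrs(&self, ptrs: Vec<GPtr<u16, u64>>) -> u64 {
+            // order-sensitive mixing of (tag, digest) pairs; arbitrary but deterministic
+            ptrs.iter().fold(7u64, |acc, ptr| {
+                acc.wrapping_mul(31)
+                    .wrapping_add(u64::from(*ptr.tag()))
+                    .wrapping_add(*ptr.val())
+            })
+        }
+
+        fn hash_compact(&self, d1: u64, t2: u16, d2: u64, d3: u64) -> u64 {
+            self.hash_ptrs(vec![GPtr::new(0, d1), GPtr::new(t2, d2), GPtr::new(0, d3)])
+        }
+
+        fn hash_commitment(&self, secret: u64, payload: GPtr<u16, u64>) -> u64 {
+            secret.wrapping_add(self.hash_ptrs(vec![payload]))
+        }
+    }
+
+    #[test]
+    fn toy_store_core_roundtrip() {
+        let core: StoreCore<u16, u64, ToyHasher> = StoreCore::default();
+        let a = core.intern_atom(0, 42);
+        let b = core.intern_atom(0, 43);
+        let pair = core.intern_tuple2([a, b], 1, None);
+
+        // the safe traversal and the recursive variant agree on the digest
+        let digest = core.hash_ptr_val(pair.val());
+        core.hydrate_z_cache();
+        assert_eq!(digest, core.hash_ptr_val_unsafe(pair.val()));
+
+        // `inverse_z_cache` recovers the interned pointer value from its digest
+        assert!(core.to_ptr_val(&digest) == *pair.val());
+
+        // commitments produced by `hide` can be opened by their digest
+        let (comm_digest, _) = core.hide(123, pair);
+        assert!(core.can_open(&comm_digest));
+    }
+}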
diff --git a/src/lem/tag.rs b/src/lem/tag.rs
index 80ab7bf4f..63a4bc112 100644
--- a/src/lem/tag.rs
+++ b/src/lem/tag.rs
@@ -1,17 +1,13 @@
 use anyhow::{bail, Result};
 use serde::{Deserialize, Serialize};
-use strum::EnumCount;

 use crate::{
     field::LurkField,
-    tag::{
-        ContTag, ExprTag, Op1, Op2, Tag as TagTrait, CONT_TAG_INIT, EXPR_TAG_INIT, OP1_TAG_INIT,
-        OP2_TAG_INIT,
-    },
+    tag::{ContTag, ExprTag, Op1, Op2, Tag as TagTrait},
 };

 /// The LEM `Tag` is a wrapper around other types that are used as tags
-#[derive(Copy, Debug, PartialEq, Clone, Eq, Hash, Serialize, Deserialize)]
+#[derive(Copy, Debug, PartialEq, Clone, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
 pub enum Tag {
     Expr(ExprTag),
     Cont(ContTag),
@@ -68,47 +64,6 @@ impl Tag {
             Self::Op2(tag) => tag.to_field(),
         }
     }
-
-    pub fn pos(i: usize) -> Option<Self> {
-        let mut last = 0;
-        if (last..last + ExprTag::COUNT).contains(&i) {
-            let j = i + EXPR_TAG_INIT as usize - last;
-            let expr_tag = (j as u16).try_into().expect("unreachable");
-            return Some(Tag::Expr(expr_tag));
-        }
-        last += ExprTag::COUNT;
-        if (last..last + ContTag::COUNT).contains(&i) {
-            let j = i + CONT_TAG_INIT as usize - last;
-            let cont_tag = (j as u16).try_into().expect("unreachable");
-            return Some(Tag::Cont(cont_tag));
-        }
-        last += ContTag::COUNT;
-        if (last..last + Op1::COUNT).contains(&i) {
-            let j = i + OP1_TAG_INIT as usize - last;
-            let op1_tag = (j as u16).try_into().expect("unreachable");
-            return Some(Tag::Op1(op1_tag));
-        }
-        last += Op1::COUNT;
-        if (last..last + Op2::COUNT).contains(&i) {
-            let j = i + OP2_TAG_INIT as usize - last;
-            let op2_tag = (j as u16).try_into().expect("unreachable");
-            return Some(Tag::Op2(op2_tag));
-        }
-        None
-    }
-
-    pub fn index(&self) -> usize {
-        match self {
-            Self::Expr(tag) => *tag as usize - EXPR_TAG_INIT as usize,
-            Self::Cont(tag) => *tag as usize - CONT_TAG_INIT as usize + ExprTag::COUNT,
-            Self::Op1(tag) => {
-                *tag as usize - OP1_TAG_INIT as usize + ExprTag::COUNT + ContTag::COUNT
-            }
-            Self::Op2(tag) => {
-                *tag as usize - OP2_TAG_INIT as usize + ExprTag::COUNT + ContTag::COUNT + Op1::COUNT
-            }
-        }
-    }
 }

 impl std::fmt::Display for Tag {
@@ -122,44 +77,3 @@ impl std::fmt::Display for Tag {
         }
     }
 }
-
-#[cfg(test)]
-pub(crate) mod tests {
-    use super::*;
-    use strum::IntoEnumIterator;
-
-    #[test]
-    fn pos_index_roundtrip() {
-        for i in 0.. {
-            let Some(tag) = Tag::pos(i) else {
-                break;
-            };
-            let j = tag.index();
-            assert_eq!(i, j);
-        }
-
-        for expr_tag in ExprTag::iter() {
-            let tag = Tag::Expr(expr_tag);
-            let tag_2 = Tag::pos(tag.index()).unwrap();
-            assert_eq!(tag, tag_2);
-        }
-
-        for cont_tag in ContTag::iter() {
-            let tag = Tag::Cont(cont_tag);
-            let tag_2 = Tag::pos(tag.index()).unwrap();
-            assert_eq!(tag, tag_2);
-        }
-
-        for op1_tag in Op1::iter() {
-            let tag = Tag::Op1(op1_tag);
-            let tag_2 = Tag::pos(tag.index()).unwrap();
-            assert_eq!(tag, tag_2);
-        }
-
-        for op2_tag in Op2::iter() {
-            let tag = Tag::Op2(op2_tag);
-            let tag_2 = Tag::pos(tag.index()).unwrap();
-            assert_eq!(tag, tag_2);
-        }
-    }
-}
diff --git a/src/lem/tests/eval_tests.rs b/src/lem/tests/eval_tests.rs
index 74f4181c1..bba861e00 100644
--- a/src/lem/tests/eval_tests.rs
+++ b/src/lem/tests/eval_tests.rs
@@ -1879,7 +1879,7 @@ fn hide_opaque_open_available() {
     let (output, ..) =
         evaluate_simple::<Fr, Coproc<Fr>>(None, expr, s, 10, &dummy_terminal()).unwrap();

-    let c = *s.hash_ptr(&output[0]).value();
+    let c = *s.hash_ptr(&output[0]).hash();
     let comm = s.comm(c);

     let open = s.intern_lurk_symbol("open");
@@ -1914,7 +1914,7 @@ fn hide_opaque_open_unavailable() {
     let (output, ..) =
         evaluate_simple::<Fr, Coproc<Fr>>(None, expr, s, 10, &dummy_terminal()).unwrap();

-    let c = *s.hash_ptr(&output[0]).value();
+    let c = *s.hash_ptr(&output[0]).hash();

     let s2 = &Store::<Fr>::default();
     let comm = s.comm(c);
@@ -3433,11 +3433,11 @@ fn test_sym_hash_values() {

     // Symbol and keyword scalar hash values are the same as
     // those of the name string consed onto the parent symbol.
-    assert_eq!(cons_z_ptr.value(), sym_z_ptr.value());
-    assert_eq!(cons_z_ptr.value(), key_z_ptr.value());
+    assert_eq!(cons_z_ptr.hash(), sym_z_ptr.hash());
+    assert_eq!(cons_z_ptr.hash(), key_z_ptr.hash());

     // Toplevel symbols also have this property, and their parent symbol is the root symbol.
-    assert_eq!(consed_with_root_z_ptr.value(), toplevel_z_ptr.value());
+    assert_eq!(consed_with_root_z_ptr.hash(), toplevel_z_ptr.hash());

     // The tags differ though.
use crate::tag::ExprTag::{Key, Sym}; diff --git a/src/lem/tests/nivc_steps.rs b/src/lem/tests/nivc_steps.rs index f31b6ff09..c8f08d3c7 100644 --- a/src/lem/tests/nivc_steps.rs +++ b/src/lem/tests/nivc_steps.rs @@ -6,7 +6,7 @@ use crate::{ lang::Lang, lem::{ eval::{evaluate, make_cprocs_funcs_from_lang, make_eval_step_from_config, EvalConfig}, - store::{expect_ptrs, intern_ptrs, Store}, + store::Store, Tag, }, state::user_sym, @@ -90,13 +90,12 @@ fn test_nivc_steps() { let expr = cproc_input.pop().unwrap(); let idx = expr.get_index2().unwrap(); - let [_, args] = expect_ptrs!(store, 2, idx); + let [_, args] = store.expect_tuple2(idx); let new_name = user_sym("cproc-dumb-not"); - let new_expr = intern_ptrs!( - store, + let new_expr = store.intern_tuple2( + [store.intern_symbol(&new_name), *args], Tag::Expr(ExprTag::Cproc), - store.intern_symbol(&new_name), - args + None, ); // `cproc` can't reduce the altered cproc input (with the wrong name) diff --git a/src/lem/tests/stream.rs b/src/lem/tests/stream.rs index 2ab913772..7fc6ca695 100644 --- a/src/lem/tests/stream.rs +++ b/src/lem/tests/stream.rs @@ -29,7 +29,7 @@ fn assert_start_stream( callable: Ptr, arg: Ptr, store: &Store, - expected_result: Ptr, + expected_result: &Ptr, expected_iterations: &Expect, ) -> Vec { let (t1, t2) = pair_terminals(); @@ -47,7 +47,7 @@ fn assert_resume_stream( input: Vec, arg: Ptr, store: &Store, - expected_result: Ptr, + expected_result: &Ptr, expected_iterations: &Expect, ) -> Vec { let (t1, t2) = pair_terminals(); @@ -76,21 +76,21 @@ fn test_comm_callable() { callable, store.num_u64(123), &store, - store.num_u64(123), + &store.num_u64(123), expected_iterations, ); let output = assert_resume_stream( output, store.num_u64(321), &store, - store.num_u64(444), + &store.num_u64(444), expected_iterations, ); assert_resume_stream( output, store.num_u64(111), &store, - store.num_u64(555), + &store.num_u64(555), expected_iterations, ); } @@ -109,21 +109,21 @@ fn test_fun_callable() { callable, store.num_u64(123), &store, - store.num_u64(123), + &store.num_u64(123), expected_iterations, ); let output = assert_resume_stream( output, store.num_u64(321), &store, - store.num_u64(444), + &store.num_u64(444), expected_iterations, ); assert_resume_stream( output, store.num_u64(111), &store, - store.num_u64(555), + &store.num_u64(555), expected_iterations, ); } diff --git a/src/proof/tests/nova_tests.rs b/src/proof/tests/nova_tests.rs index 222791138..f5672b9f2 100644 --- a/src/proof/tests/nova_tests.rs +++ b/src/proof/tests/nova_tests.rs @@ -4,10 +4,7 @@ use std::sync::Arc; use crate::{ lang::{Coproc, Lang}, - lem::{ - store::{intern_ptrs, Store}, - tag::Tag, - }, + lem::{store::Store, tag::Tag}, num::Num, state::{user_sym, State, StateRcCell}, tag::{ExprTag, Op, Op1, Op2}, @@ -3886,7 +3883,7 @@ fn test_prove_call_literal_fun() { let empty_env = s.intern_empty_env(); let args = s.list(vec![s.intern_user_symbol("x")]); let body = s.read_with_default_state("(+ x 1)").unwrap(); - let fun = intern_ptrs!(s, Tag::Expr(ExprTag::Fun), args, body, empty_env, s.dummy()); + let fun = s.intern_fun(args, body, empty_env); let input = s.num_u64(9); let expr = s.list(vec![fun, input]); let res = s.num_u64(10); diff --git a/src/proof/tests/stream.rs b/src/proof/tests/stream.rs index ead0406f6..03d3938f5 100644 --- a/src/proof/tests/stream.rs +++ b/src/proof/tests/stream.rs @@ -56,7 +56,7 @@ fn test_continued_proof() { expect_eq(frames.len(), expected_iterations); let output = &frames.last().unwrap().output; let (result, _) = 
store.fetch_cons(&output[0]).unwrap(); - assert_eq!(result, store.num_u64(123)); + assert_eq!(result, &store.num_u64(123)); let (proof, ..) = prover .prove_from_frames(&pp, &frames, &store, None) @@ -78,7 +78,7 @@ fn test_continued_proof() { expect_eq(frames.len(), expected_iterations); let output = &frames.last().unwrap().output; let (result, _) = store.fetch_cons(&output[0]).unwrap(); - assert_eq!(result, store.num_u64(444)); + assert_eq!(result, &store.num_u64(444)); let (proof, ..) = prover .prove_from_frames(&pp, &frames, &store, base_snark) diff --git a/src/tag.rs b/src/tag.rs index 949cda5bd..574036656 100644 --- a/src/tag.rs +++ b/src/tag.rs @@ -3,7 +3,7 @@ use lurk_macros::TryFromRepr; use proptest_derive::Arbitrary; use serde_repr::{Deserialize_repr, Serialize_repr}; use std::{convert::TryFrom, fmt}; -use strum::{EnumCount, EnumIter}; +use strum::EnumCount; use crate::field::LurkField; @@ -30,11 +30,12 @@ pub(crate) const EXPR_TAG_INIT: u16 = 0b0000_0000_0000_0000; PartialEq, Eq, Hash, + PartialOrd, + Ord, Serialize_repr, Deserialize_repr, TryFromRepr, EnumCount, - EnumIter, )] #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] #[repr(u16)] @@ -118,9 +119,10 @@ pub(crate) const CONT_TAG_INIT: u16 = 0b0001_0000_0000_0000; PartialEq, Eq, Hash, + PartialOrd, + Ord, TryFromRepr, EnumCount, - EnumIter, )] #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] #[repr(u16)] @@ -209,14 +211,14 @@ pub(crate) const OP1_TAG_INIT: u16 = 0b0010_0000_0000_0000; Clone, Debug, PartialEq, - PartialOrd, Eq, Hash, + PartialOrd, + Ord, Serialize_repr, Deserialize_repr, TryFromRepr, EnumCount, - EnumIter, )] #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] #[repr(u16)] @@ -341,14 +343,14 @@ pub(crate) const OP2_TAG_INIT: u16 = 0b0011_0000_0000_0000; Clone, Debug, PartialEq, - PartialOrd, Eq, Hash, + PartialOrd, + Ord, Serialize_repr, Deserialize_repr, TryFromRepr, EnumCount, - EnumIter, )] #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] #[repr(u16)]