diff --git a/circuits/src/memory/memory_stark.rs b/circuits/src/memory/memory_stark.rs
index 653d69f0..c748c467 100644
--- a/circuits/src/memory/memory_stark.rs
+++ b/circuits/src/memory/memory_stark.rs
@@ -241,7 +241,7 @@ impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F, D>
                     "Must set a proof file generated by OlaVM prover")])
                 .arg_required_else_help(true),
         )
diff --git a/plonky2/plonky2/src/gates/low_degree_interpolation.rs b/plonky2/plonky2/src/gates/low_degree_interpolation.rs
index b8cf70b1..a961df3d 100644
--- a/plonky2/plonky2/src/gates/low_degree_interpolation.rs
+++ b/plonky2/plonky2/src/gates/low_degree_interpolation.rs
@@ -55,8 +55,8 @@ impl<F: RichField + Extendable<D>, const D: usize> LowDegreeInterpolationGate<F, D>
     pub fn powers_shift(&self, i: usize) -> Range<usize> {
         debug_assert!(0 < i && i < self.num_points());
         if i == 1 {
diff --git a/plonky2/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs b/plonky2/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs
index 18466d90..1d90e6e0 100644
--- a/plonky2/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs
+++ b/plonky2/plonky2/src/hash/arch/aarch64/poseidon_goldilocks_neon.rs
@@ -92,7 +92,7 @@ unsafe fn add_with_wraparound(a: u64, b: u64) -> u64 {
         adj = lateout(reg) adj,
         options(pure, nomem, nostack),
     );
-    res + adj // adj is EPSILON if wraparound occured and 0 otherwise
+    res + adj // adj is EPSILON if wraparound occurred and 0 otherwise
 }
 
 /// Subtraction of a and (b >> 32) modulo ORDER accounting for wraparound.
diff --git a/plonky2/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs b/plonky2/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs
index b40b4277..8332fb1e 100644
--- a/plonky2/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs
+++ b/plonky2/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs
@@ -18,7 +18,7 @@ use crate::hash::poseidon::{
 
 const WIDTH: usize = 12;
 
-// These tranformed round constants are used where the constant layer is fused with the preceeding
+// These transformed round constants are used where the constant layer is fused with the preceeding
 // MDS layer. The FUSED_ROUND_CONSTANTS for round i are the ALL_ROUND_CONSTANTS for round i + 1.
 // The FUSED_ROUND_CONSTANTS for the very last round are 0, as it is not followed by a constant
 // layer. On top of that, all FUSED_ROUND_CONSTANTS are shifted by 2 ** 63 to save a few XORs per
@@ -183,10 +183,10 @@ unsafe fn const_layer(
     // occur if all round constants are < 0xffffffff00000001 = ORDER: if the high bits are
     // 0xffffffff, then the low bits are 0, so the carry bit cannot occur. So this trick is valid
     // as long as all the round constants are in canonical form.
-    // The mask contains 0xffffffff in the high doubleword if wraparound occured and 0 otherwise.
+    // The mask contains 0xffffffff in the high doubleword if wraparound occurred and 0 otherwise.
     // We will ignore the low doubleword.
     let wraparound_mask = map3!(_mm256_cmpgt_epi32, state_s, res_maybe_wrapped_s);
-    // wraparound_adjustment contains 0xffffffff = EPSILON if wraparound occured and 0 otherwise.
+    // wraparound_adjustment contains 0xffffffff = EPSILON if wraparound occurred and 0 otherwise.
     let wraparound_adjustment = map3!(_mm256_srli_epi64::<32>, wraparound_mask);
     // XOR commutes with the addition below. Placing it here helps mask latency.
     let res_maybe_wrapped = map3!(_mm256_xor_si256, res_maybe_wrapped_s, rep sign_bit);
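
Reviewer note, not part of the patch: the corrected comments in both Poseidon files describe the same Goldilocks wraparound trick. A minimal portable sketch of what the NEON asm and AVX2 intrinsics compute, assuming plonky2's Goldilocks constants ORDER = 2^64 - 2^32 + 1 and EPSILON = 2^32 - 1 (so 2^64 mod ORDER = EPSILON):

// Goldilocks field order: p = 2^64 - 2^32 + 1.
const ORDER: u64 = 0xffff_ffff_0000_0001;
// EPSILON = 2^64 mod ORDER = 2^32 - 1.
const EPSILON: u64 = 0xffff_ffff;

// Sketch only: if a + b wraps around 2^64, the true sum is res + 2^64,
// and 2^64 mod ORDER = EPSILON, so adding EPSILON back compensates.
// Like the asm helper, the result is correct mod ORDER but not fully reduced.
fn add_with_wraparound_sketch(a: u64, b: u64) -> u64 {
    debug_assert!(a < ORDER && b < ORDER); // ensures res + adj cannot overflow
    let (res, wrapped) = a.overflowing_add(b);
    let adj = if wrapped { EPSILON } else { 0 };
    res + adj // adj is EPSILON if wraparound occurred and 0 otherwise
}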