Skip to content

Commit e793e27

Browse files
authored
Merge pull request #3274 from o1-labs/al/update-commitment
Saffron: add update commitment logic
2 parents 43cf0ea + 7761a3a commit e793e27

File tree

4 files changed

+104
-44
lines changed

4 files changed

+104
-44
lines changed

saffron/src/blob.rs

Lines changed: 5 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ impl FieldBlob {
4545
domain: &Radix2EvaluationDomain<ScalarField>,
4646
diff: &Diff<ScalarField>,
4747
) {
48-
assert!(diff.addresses.len() == diff.new_values.len());
48+
assert_eq!(diff.addresses.len(), diff.diff_values.len());
4949

5050
let lagrange_basis = srs
5151
.get_lagrange_basis(*domain)
@@ -60,28 +60,13 @@ impl FieldBlob {
6060
.map(|idx| basis[*idx as usize])
6161
.collect();
6262

63-
// Old values at `addresses`
64-
let old_values_at_addr: Vec<_> = diff
65-
.addresses
66-
.iter()
67-
.map(|idx| self.data[diff.region as usize * SRS_SIZE + *idx as usize])
68-
.collect();
69-
70-
for (idx, value) in diff.addresses.iter().zip(diff.new_values.iter()) {
71-
self.data[SRS_SIZE * diff.region as usize + *idx as usize] = *value;
63+
for (idx, value) in diff.addresses.iter().zip(diff.diff_values.iter()) {
64+
self.data[SRS_SIZE * diff.region as usize + *idx as usize] += *value;
7265
}
7366

7467
// Lagrange commitment to the (new values-old values) at `addresses`
75-
let delta_data_commitment_at_addr = ProjectiveCurve::msm(
76-
address_basis.as_slice(),
77-
old_values_at_addr
78-
.iter()
79-
.zip(diff.new_values.iter())
80-
.map(|(old, new)| new - old)
81-
.collect::<Vec<_>>()
82-
.as_slice(),
83-
)
84-
.unwrap();
68+
let delta_data_commitment_at_addr =
69+
ProjectiveCurve::msm(address_basis.as_slice(), diff.diff_values.as_slice()).unwrap();
8570

8671
let new_commitment =
8772
(self.commitments[diff.region as usize] + delta_data_commitment_at_addr).into();

saffron/src/commitment.rs

Lines changed: 46 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,13 @@
1+
use crate::{diff::Diff, utils};
12
use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM};
23
use kimchi::curve::KimchiCurve;
34
use mina_poseidon::FqSponge;
45
use poly_commitment::{ipa::SRS, SRS as _};
56
use rayon::prelude::*;
67
use tracing::instrument;
78

8-
use crate::utils;
9-
9+
/// Compute the commitment to `data` ; if the length of `data` is greater than
10+
/// `SRS_SIZE`, the data is split into chunks of at most `SRS_SIZE` length.
1011
#[instrument(skip_all, level = "debug")]
1112
pub fn commit_to_field_elems<G: KimchiCurve>(srs: &SRS<G>, data: &[G::ScalarField]) -> Vec<G>
1213
where
@@ -48,3 +49,46 @@ pub fn combine_commitments<G: AffineRepr, EFqSponge: FqSponge<G::BaseField, G, G
4849
let combined_data_commitment = utils::aggregate_commitments(alpha, commitments);
4950
(combined_data_commitment, alpha)
5051
}
52+
53+
/// A commitment that represents a whole piece of data
54+
/// TODO: for now, we consider 1 commitment = 1 contract = 1 data
55+
/// This type may be redundant with other types in Proof-systems
56+
#[derive(Debug, Clone, PartialEq, Eq)]
57+
pub struct Commitment<G: KimchiCurve> {
58+
pub cm: G,
59+
}
60+
61+
impl<G: KimchiCurve> From<G> for Commitment<G> {
62+
fn from(cm: G) -> Self {
63+
Self { cm }
64+
}
65+
}
66+
67+
impl<G: KimchiCurve> Commitment<G> {
68+
/// Commit a `data` of length smaller than `SRS_SIZE`
69+
/// If longer data is provided, anything beyond `SRS_SIZE` is ignored
70+
pub fn from_data(srs: &SRS<G>, data: &[G::ScalarField]) -> Commitment<G> {
71+
Commitment {
72+
cm: commit_to_field_elems::<G>(srs, data)[0],
73+
}
74+
}
75+
76+
/// TODO: This only handles the single-commitment version for now
77+
/// This function updates the given commitment based on the given diff. The
78+
/// returned commitment corresponds to the data of the given commitment updated
79+
/// according to the diff.
80+
/// This function is tested in storage.rs
81+
pub fn update(&self, srs: &SRS<G>, diff: Diff<G::ScalarField>) -> Commitment<G> {
82+
// TODO: precompute this, or cache it and compute it lazily; it may already be cached upstream — verify
83+
let basis: Vec<G> = srs
84+
.get_lagrange_basis_from_domain_size(crate::SRS_SIZE)
85+
.iter()
86+
.map(|x| x.chunks[0])
87+
.collect();
88+
let basis: Vec<G> = diff.addresses.iter().map(|&i| basis[i as usize]).collect();
89+
let cm_diff = G::Group::msm(&basis, &diff.diff_values).unwrap();
90+
Commitment {
91+
cm: self.cm.add(cm_diff).into(),
92+
}
93+
}
94+
}

saffron/src/diff.rs

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,8 @@ pub struct Diff<F: PrimeField> {
1313
pub region: u64,
1414
/// A list of unique addresses, each ∈ [0, SRS_SIZE]
1515
pub addresses: Vec<u64>,
16-
/// A list of new values, each corresponding to address in `addresses`
17-
pub new_values: Vec<F>,
16+
/// A list of `new_value - old_value` differences, each corresponding to an address in `addresses`
17+
pub diff_values: Vec<F>,
1818
}
1919

2020
#[derive(Debug, Error, Clone, PartialEq)]
@@ -44,19 +44,19 @@ impl<F: PrimeField> Diff<F> {
4444
.enumerate()
4545
.filter_map(|(region, (o, n))| {
4646
let mut addresses: Vec<u64> = vec![];
47-
let mut new_values: Vec<F> = vec![];
47+
let mut diff_values: Vec<F> = vec![];
4848
for (index, (o_elem, n_elem)) in o.iter().zip(n.iter()).enumerate() {
4949
if o_elem != n_elem {
5050
addresses.push(index as u64);
51-
new_values.push(*n_elem);
51+
diff_values.push(*n_elem - *o_elem);
5252
}
5353
}
5454

5555
if !addresses.is_empty() {
5656
Some(Diff {
5757
region: region as u64,
5858
addresses,
59-
new_values,
59+
diff_values,
6060
})
6161
} else {
6262
// do not record a diff with empty changes
@@ -82,8 +82,8 @@ impl<F: PrimeField> Diff<F> {
8282
/// Updates the data with the provided diff, adding each diff value to the
8383
/// old value at the corresponding address
8484
pub fn apply_inplace(data: &mut [Vec<F>], diff: &Diff<F>) {
85-
for (addr, new_value) in diff.addresses.iter().zip(diff.new_values.iter()) {
86-
data[diff.region as usize][*addr as usize] = *new_value;
85+
for (addr, diff_value) in diff.addresses.iter().zip(diff.diff_values.iter()) {
86+
data[diff.region as usize][*addr as usize] += *diff_value;
8787
}
8888
}
8989

saffron/src/storage.rs

Lines changed: 46 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -60,8 +60,8 @@ impl<F: PrimeField> Data<F> {
6060

6161
/// Commit a `data` of length smaller than `SRS_SIZE`
6262
/// If longer data is provided, anything beyond `SRS_SIZE` is ignored
63-
pub fn to_commitment<G: KimchiCurve<ScalarField = F>>(&self, srs: &SRS<G>) -> G {
64-
commit_to_field_elems::<G>(srs, &self.data)[0]
63+
pub fn to_commitment<G: KimchiCurve<ScalarField = F>>(&self, srs: &SRS<G>) -> Commitment<G> {
64+
Commitment::from_data(srs, &self.data)
6565
}
6666

6767
/// Modifies inplace the provided data with `diff`
@@ -111,23 +111,45 @@ pub fn read<F: PrimeField>(path: &str) -> std::io::Result<Data<F>> {
111111
/// new scalar value expected for the new data.
112112
/// Note that this only updates the file, not the commitment
113113
pub fn update<F: PrimeField>(path: &str, diff: &Diff<F>) -> std::io::Result<()> {
114-
let mut file = OpenOptions::new().write(true).open(path)?;
114+
// Open the file in read mode to get the old value & write mode to write the new value
115+
let mut file = OpenOptions::new().read(true).write(true).open(path)?;
115116
let region_offset = diff.region * (SRS_SIZE as u64);
116117
let scalar_size = encoding::encoding_size_full::<F>() as u64;
117-
for (index, new_value) in diff.addresses.iter().zip(diff.new_values.iter()) {
118+
for (index, diff_value) in diff.addresses.iter().zip(diff.diff_values.iter()) {
118119
let corresponding_bytes_index = (region_offset + index) * scalar_size;
119120
file.seek(SeekFrom::Start(corresponding_bytes_index))?;
120-
let new_value_bytes = encoding::decode_full(*new_value);
121+
let new_value: F = {
122+
// The old value is taken directly from the file
123+
let old_value: F = {
124+
// Save the current cursor position to be able to reset the
125+
// cursor after the read later
126+
let pos = file.stream_position()?;
127+
let mut old_value_bytes = vec![0u8; encoding::encoding_size_full::<F>()];
128+
file.read_exact(&mut old_value_bytes)?;
129+
// Go back to the previous position in the file, so the read value
130+
// will be overwritten by the new one
131+
file.seek(SeekFrom::Start(pos))?;
132+
encoding::encode(&old_value_bytes)
133+
};
134+
old_value + diff_value
135+
};
136+
let new_value_bytes = encoding::decode_full(new_value);
121137
file.write_all(&new_value_bytes)?;
122138
}
123139
Ok(())
124140
}
125141

126142
#[cfg(test)]
127143
mod tests {
128-
use crate::{diff::Diff, encoding, storage, storage::Data, Curve, ScalarField, SRS_SIZE};
144+
use crate::{
145+
diff::Diff,
146+
encoding, storage,
147+
storage::{Commitment, Data},
148+
Curve, ScalarField, SRS_SIZE,
149+
};
129150
use ark_ff::{One, UniformRand, Zero};
130151
use mina_curves::pasta::Fp;
152+
use poly_commitment::ipa::SRS;
131153
use rand::Rng;
132154
use std::fs;
133155
use tempfile::NamedTempFile;
@@ -139,7 +161,7 @@ mod tests {
139161
fn test_data_consistency() {
140162
let mut rng = o1_utils::tests::make_test_rng(None);
141163

142-
let srs = poly_commitment::precomputed_srs::get_srs_test();
164+
let srs: SRS<Curve> = poly_commitment::precomputed_srs::get_srs_test();
143165

144166
// Path of the file that will contain the test data
145167
let file = NamedTempFile::new().unwrap();
@@ -148,7 +170,10 @@ mod tests {
148170
let data_bytes: Vec<u8> = (0..(SRS_SIZE * (encoding::encoding_size_full::<ScalarField>())))
149171
.map(|_| rng.gen())
150172
.collect();
151-
let data = Data::of_bytes(&data_bytes);
173+
let mut data = Data::of_bytes(&data_bytes);
174+
// Setting the first value of data to zero makes the updated value
175+
// overflow 31 bytes with the well-chosen diff (its first diff value is -1)
176+
data.data[0] = Fp::zero();
152177
let data_comm = data.to_commitment(&srs);
153178

154179
let read_consistency = {
@@ -157,10 +182,10 @@ mod tests {
157182
let read_data_comm = read_data.to_commitment(&srs);
158183

159184
// True if read data are the same as initial data
160-
Curve::eq(&data_comm, &read_data_comm)
185+
Commitment::eq(&data_comm, &read_data_comm)
161186
};
162187

163-
let (data_updated, update_consistency) = {
188+
let (data_updated, update_consistency, diff_comm_consistency) = {
164189
let diff = {
165190
// The number of updates is proportional to the data length,
166191
// but we make sure to have at least one update if the data is
@@ -170,16 +195,16 @@ mod tests {
170195
let addresses: Vec<u64> = (0..nb_updates)
171196
.map(|_| (rng.gen_range(0..data.len() as u64)))
172197
.collect();
173-
let mut new_values: Vec<ScalarField> =
198+
let mut diff_values: Vec<ScalarField> =
174199
addresses.iter().map(|_| Fp::rand(&mut rng)).collect();
175200
// The first value is replaced by a scalar that would
176201
// overflow 31 bytes, so the update is not consistent and the
177202
// test fails if this case is not handled
178-
new_values[0] = Fp::zero() - Fp::one();
203+
diff_values[0] = Fp::zero() - Fp::one();
179204
Diff {
180205
region,
181206
addresses,
182-
new_values,
207+
diff_values,
183208
}
184209
};
185210

@@ -191,10 +216,15 @@ mod tests {
191216
let updated_read_data = storage::read(path).unwrap();
192217
let updated_read_data_comm = updated_read_data.to_commitment(&srs);
193218

219+
let updated_diff_data_comm = data_comm.update(&srs, diff);
220+
194221
(
195-
Curve::ne(&updated_data_comm, &data_comm),
222+
// True if the data have changed because of the update
223+
Commitment::ne(&updated_data_comm, &data_comm),
196224
// True if read data from updated file are the same as updated data
197-
Curve::eq(&updated_data_comm, &updated_read_data_comm),
225+
Commitment::eq(&updated_data_comm, &updated_read_data_comm),
226+
// True if the commitments are the same as the commitment obtained by direct diff application
227+
Commitment::eq(&updated_diff_data_comm, &updated_data_comm),
198228
)
199229
};
200230

@@ -203,5 +233,6 @@ mod tests {
203233
assert!(read_consistency);
204234
assert!(data_updated);
205235
assert!(update_consistency);
236+
assert!(diff_comm_consistency);
206237
}
207238
}

0 commit comments

Comments
 (0)