From d1e8ec3811538d1d6cff784dff8471a7285d65b2 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 25 Jun 2025 13:09:43 +0300 Subject: [PATCH 01/17] Add data column custody info db record and logic --- beacon_node/beacon_chain/src/builder.rs | 4 ++ .../beacon_chain/src/validator_custody.rs | 2 + beacon_node/http_api/src/lib.rs | 1 + beacon_node/network/src/service.rs | 6 ++ beacon_node/network/src/status.rs | 13 +++- beacon_node/store/src/hot_cold_store.rs | 72 +++++++++++++++++-- beacon_node/store/src/lib.rs | 3 + beacon_node/store/src/metadata.rs | 11 +++ 8 files changed, 106 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ce4264d5508..38016edbd24 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -825,6 +825,10 @@ where )); } + store + .get_data_column_custody_info(true) + .map_err(|e| format!("Unable to fetch data column custody info {:?}", e))?; + let validator_pubkey_cache = self .validator_pubkey_cache .map(|mut validator_pubkey_cache| { diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/validator_custody.rs index 1169b64537d..f3783d39991 100644 --- a/beacon_node/beacon_chain/src/validator_custody.rs +++ b/beacon_node/beacon_chain/src/validator_custody.rs @@ -210,6 +210,7 @@ impl CustodyContext { return Some(CustodyCountChanged { new_custody_group_count: updated_cgc, sampling_count: self.sampling_size(Some(effective_epoch), spec), + slot: current_slot, }); } } @@ -258,6 +259,7 @@ impl CustodyContext { pub struct CustodyCountChanged { pub new_custody_group_count: u64, pub sampling_count: u64, + pub slot: Slot, } /// The custody information that gets persisted across runs. diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index a627fb0353d..4488effb35c 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3790,6 +3790,7 @@ pub fn serve( network_tx.send(NetworkMessage::CustodyCountChanged { new_custody_group_count: cgc_change.new_custody_group_count, sampling_count: cgc_change.sampling_count, + slot: cgc_change.slot, }).unwrap_or_else(|e| { debug!(error = %e, "Could not send message to the network service. \ Likely shutdown") diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 0a6d5152322..2b97d4f03d3 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -111,6 +111,7 @@ pub enum NetworkMessage { CustodyCountChanged { new_custody_group_count: u64, sampling_count: u64, + slot: Slot, }, } @@ -743,6 +744,7 @@ impl NetworkService { NetworkMessage::CustodyCountChanged { new_custody_group_count, sampling_count, + slot, } => { // subscribe to `sampling_count` subnets self.libp2p @@ -753,6 +755,10 @@ impl NetworkService { .advertise_false_custody_group_count .is_none() { + // Update data column custody info with the slot at which cgc was changed. 
+ self.beacon_chain.store.put_data_column_custody_info(slot).unwrap_or_else(|e| { + tracing::error!(error = ?e, "Failed to update data column custody info") + }); self.libp2p.update_enr_cgc(new_custody_group_count); } } diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index be0d7c063be..7e55a82b41f 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -29,7 +29,18 @@ pub(crate) fn status_message(beacon_chain: &BeaconChain) finalized_checkpoint.root = Hash256::zero(); } - let earliest_available_slot = beacon_chain.store.get_anchor_info().oldest_block_slot; + // If there is no data column custody info in the db, that indicates that + // no recent cgc changes have occurred and no cgc backfill is in progress. + let earliest_available_slot = if let Ok(Some(data_column_custody_info)) = + beacon_chain.store.get_data_column_custody_info(false) + { + std::cmp::max( + beacon_chain.store.get_anchor_info().oldest_block_slot, + data_column_custody_info.earliest_data_column_slot, + ) + } else { + beacon_chain.store.get_anchor_info().oldest_block_slot + }; StatusMessage::V2(StatusMessageV2 { fork_digest, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 4d94042b5b0..374ce4fe5b3 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -6,10 +6,10 @@ use crate::historic_state_cache::HistoricStateCache; use crate::iter::{BlockRootsIterator, ParentRootBlockIterator, RootsIterator}; use crate::memory_store::MemoryStore; use crate::metadata::{ - AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnInfo, SchemaVersion, ANCHOR_INFO_KEY, - ANCHOR_UNINITIALIZED, BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, - CURRENT_SCHEMA_VERSION, DATA_COLUMN_INFO_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, - STATE_UPPER_LIMIT_NO_RETAIN, + AnchorInfo, BlobInfo, CompactionTimestamp, DataColumnCustodyInfo, DataColumnInfo, + SchemaVersion, ANCHOR_INFO_KEY, ANCHOR_UNINITIALIZED, BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, + CONFIG_KEY, CURRENT_SCHEMA_VERSION, DATA_COLUMN_CUSTODY_INFO_KEY, DATA_COLUMN_INFO_KEY, + SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN, }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ @@ -91,6 +91,7 @@ struct BlockCache { block_cache: LruCache>, blob_cache: LruCache>, data_column_cache: LruCache>>>, + data_column_custody_info_cache: Option, } impl BlockCache { @@ -99,6 +100,7 @@ impl BlockCache { block_cache: LruCache::new(size), blob_cache: LruCache::new(size), data_column_cache: LruCache::new(size), + data_column_custody_info_cache: None, } } pub fn put_block(&mut self, block_root: Hash256, block: SignedBeaconBlock) { @@ -112,6 +114,12 @@ impl BlockCache { .get_or_insert_mut(block_root, Default::default) .insert(data_column.index, data_column); } + pub fn put_data_column_custody_info( + &mut self, + data_column_custody_info: Option, + ) { + self.data_column_custody_info_cache = data_column_custody_info; + } pub fn get_block<'a>(&'a mut self, block_root: &Hash256) -> Option<&'a SignedBeaconBlock> { self.block_cache.get(block_root) } @@ -129,6 +137,9 @@ impl BlockCache { .get(block_root) .and_then(|map| map.get(column_index).cloned()) } + pub fn get_data_column_custody_info(&mut self) -> Option { + self.data_column_custody_info_cache.clone() + } pub fn delete_block(&mut self, block_root: &Hash256) { let _ = self.block_cache.pop(block_root); } @@ -922,6 +933,27 @@ impl, Cold: ItemStore> HotColdDB )); } + pub fn 
put_data_column_custody_info( + &self, + earliest_data_column_slot: Slot, + ) -> Result<(), Error> { + let data_column_custody_info = DataColumnCustodyInfo { + earliest_data_column_slot, + }; + + self.blobs_db.put_bytes( + DBColumn::BeaconDataColumnCustodyInfo, + DATA_COLUMN_CUSTODY_INFO_KEY.as_slice(), + &data_column_custody_info.as_ssz_bytes(), + )?; + + self.block_cache + .lock() + .put_data_column_custody_info(Some(data_column_custody_info)); + + Ok(()) + } + pub fn put_data_columns( &self, block_root: &Hash256, @@ -2389,6 +2421,36 @@ impl, Cold: ItemStore> HotColdDB }) } + /// Fetch custody info from the cache. + /// A `None` value indicates that we have fulfilled our custody + /// requirements up to the DA window. + pub fn get_data_column_custody_info( + &self, + query_store: bool, + ) -> Result, Error> { + // We only query on startup, when the cache isn't initialized + if query_store { + let bytes_opt = self.blobs_db.get_bytes( + DBColumn::BeaconDataColumnCustodyInfo, + DATA_COLUMN_CUSTODY_INFO_KEY.as_slice(), + )?; + + let Some(bytes) = bytes_opt else { + return Ok(None); + }; + + let data_column_custody_info = Some(DataColumnCustodyInfo::from_ssz_bytes(&bytes)?); + // Update the cache + self.block_cache + .lock() + .put_data_column_custody_info(data_column_custody_info.clone()); + + Ok(data_column_custody_info) + } else { + Ok(self.block_cache.lock().get_data_column_custody_info()) + } + } + /// Fetch all columns for a given block from the store. pub fn get_data_columns( &self, @@ -3538,7 +3600,7 @@ pub fn get_ancestor_state_root<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStor .get_cold_state_root(target_slot) .map_err(Box::new) .map_err(StateSummaryIteratorError::LoadStateRootError)? - .ok_or_else(|| StateSummaryIteratorError::MissingStateRoot { + .ok_or(StateSummaryIteratorError::MissingStateRoot { target_slot, state_upper_limit, }); diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index ede4b4435e3..953b5395581 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -264,6 +264,8 @@ pub enum DBColumn { BeaconBlob, #[strum(serialize = "bdc")] BeaconDataColumn, + #[strum(serialize = "bdi")] + BeaconDataColumnCustodyInfo, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). /// /// DEPRECATED. @@ -424,6 +426,7 @@ impl DBColumn { | Self::CustodyContext | Self::OptimisticTransitionBlock => 32, Self::BeaconBlockRoots + | Self::BeaconDataColumnCustodyInfo | Self::BeaconBlockRootsChunked | Self::BeaconStateRoots | Self::BeaconStateRootsChunked diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index bc9d708e14a..97b5fb358ed 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -18,6 +18,7 @@ pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4); pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5); pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6); pub const DATA_COLUMN_INFO_KEY: Hash256 = Hash256::repeat_byte(7); +pub const DATA_COLUMN_CUSTODY_INFO_KEY: Hash256 = Hash256::repeat_byte(8); /// State upper limit value used to indicate that a node is not storing historic states. pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX); @@ -204,6 +205,16 @@ impl StoreItem for BlobInfo { } } +/// Database parameter relevant to data column custody sync. There is only at most a single +/// `DataColumnCustodyInfo` stored in the db. 
This record is added to the db when cgc +/// count changes and is updated incrementally during data column custody backfill. Once custody backfill +/// is complete the record is removed from the db. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] +pub struct DataColumnCustodyInfo { + /// The earliest slot for which data columns are available. + pub earliest_data_column_slot: Slot, +} + /// Database parameters relevant to data column sync. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] pub struct DataColumnInfo { From 678e8828c1629c0782b3fbbd71c83a9eaedd9014 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 2 Jul 2025 17:08:44 +0300 Subject: [PATCH 02/17] update comments --- beacon_node/beacon_chain/src/beacon_chain.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 88c87346e3b..bc6663e1987 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6818,7 +6818,6 @@ impl BeaconChain { } /// Update data column custody info with the slot at which cgc was changed. - /// Note: This won't update custody info if `advertise_false_custody_group_count` is set. pub fn update_data_column_custody_info(&self, slot: Slot) { self.store .put_data_column_custody_info(slot) From ce1685541859aae8a0b2a85ada81742c3a16f27a Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 3 Jul 2025 07:55:44 +0300 Subject: [PATCH 03/17] Add migration and make custody info earliest slot an Option --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/builder.rs | 4 -- beacon_node/beacon_chain/src/schema_change.rs | 9 ++++ .../src/schema_change/migration_schema_v27.rs | 49 +++++++++++++++++++ beacon_node/http_api/src/lib.rs | 2 +- beacon_node/network/src/status.rs | 8 +-- beacon_node/store/src/hot_cold_store.rs | 23 ++++----- beacon_node/store/src/metadata.rs | 2 +- 8 files changed, 76 insertions(+), 23 deletions(-) create mode 100644 beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c82b99ee651..5cc7bf1a67e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6826,7 +6826,7 @@ impl BeaconChain { } /// Update data column custody info with the slot at which cgc was changed. 
- pub fn update_data_column_custody_info(&self, slot: Slot) { + pub fn update_data_column_custody_info(&self, slot: Option) { self.store .put_data_column_custody_info(slot) .unwrap_or_else( diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ba52f71586a..c46cc015c9c 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -828,10 +828,6 @@ where )); } - store - .get_data_column_custody_info(true) - .map_err(|e| format!("Unable to fetch data column custody info {:?}", e))?; - let validator_pubkey_cache = self .validator_pubkey_cache .map(|mut validator_pubkey_cache| { diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 317b89cbdd4..09f3a2a9685 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -3,6 +3,7 @@ mod migration_schema_v23; mod migration_schema_v24; mod migration_schema_v25; mod migration_schema_v26; +mod migration_schema_v27; use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; @@ -67,6 +68,14 @@ pub fn migrate_schema( let ops = migration_schema_v26::downgrade_from_v26::(db.clone())?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(26), SchemaVersion(27)) => { + let ops = migration_schema_v27::upgrade_to_v27::(db.clone())?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(27), SchemaVersion(26)) => { + let ops = migration_schema_v27::downgrade_from_v27::(db.clone())?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs new file mode 100644 index 00000000000..a566d0abae6 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs @@ -0,0 +1,49 @@ +use crate::persisted_custody::{PersistedCustody, CUSTODY_DB_KEY}; +use crate::validator_custody::CustodyContextSsz; +use crate::BeaconChainTypes; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::sync::Arc; +use store::metadata::DATA_COLUMN_CUSTODY_INFO_KEY; +use store::{DBColumn, Error, HotColdDB, KeyValueStoreOp, StoreItem}; +use tracing::info; + +/// Add `DataColumnCustodyInfo` entry to v27. 
+pub fn upgrade_to_v27( + db: Arc>, +) -> Result, Error> { + let ops = if db.spec.is_peer_das_scheduled() { + info!("Adding `DataColumnCustodyInfo` to the db"); + let data_column_custody_info = DataColumnCustodyInfo { + earliest_available_slot: None, + }; + vec![KeyValueStoreOp::PutKeyValue( + DBColumn::DataColumnCustodyInfo, + DATA_COLUMN_CUSTODY_INFO_KEY.as_slice().to_vec(), + data_column_custody_info.as_ssz_bytes(), + )] + } else { + // Delete it from the db if PeerDAS hasn't been scheduled + vec![KeyValueStoreOp::DeleteKey( + DBColumn::DataColumnCustodyInfo, + data_column_custody_info.as_ssz_bytes(), + )] + }; + + Ok(ops) +} + +pub fn downgrade_from_v27( + db: Arc>, +) -> Result, Error> { + if db.spec.is_peer_das_scheduled() { + return Err(Error::MigrationError( + "Cannot downgrade from v27 if peerDAS is scheduled".to_string(), + )); + } + let ops = vec![KeyValueStoreOp::DeleteKey( + DBColumn::DataColumnCustodyInfo, + data_column_custody_info.as_ssz_bytes(), + )]; + Ok(ops) +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 918123e1ebd..1bb44dcd51a 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3849,7 +3849,7 @@ pub fn serve( .advertise_false_custody_group_count .is_none() { - chain.update_data_column_custody_info(cgc_change.slot) + chain.update_data_column_custody_info(Some(cgc_change.slot)) } network_tx.send(NetworkMessage::CustodyCountChanged { new_custody_group_count: cgc_change.new_custody_group_count, diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index 7e55a82b41f..d9965870988 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,5 +1,5 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; -use types::{EthSpec, FixedBytesExtended, Hash256}; +use types::{EthSpec, FixedBytesExtended, Hash256, Slot}; use lighthouse_network::rpc::{methods::StatusMessageV2, StatusMessage}; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. @@ -32,11 +32,13 @@ pub(crate) fn status_message(beacon_chain: &BeaconChain) // If there is no data column custody info in the db, that indicates that // no recent cgc changes have occurred and no cgc backfill is in progress. let earliest_available_slot = if let Ok(Some(data_column_custody_info)) = - beacon_chain.store.get_data_column_custody_info(false) + beacon_chain.store.get_data_column_custody_info() { std::cmp::max( beacon_chain.store.get_anchor_info().oldest_block_slot, - data_column_custody_info.earliest_data_column_slot, + data_column_custody_info + .earliest_data_column_slot + .unwrap_or(Slot::new(0)), ) } else { beacon_chain.store.get_anchor_info().oldest_block_slot diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 7e17064c7c6..28b11b935ad 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -935,7 +935,7 @@ impl, Cold: ItemStore> HotColdDB pub fn put_data_column_custody_info( &self, - earliest_data_column_slot: Slot, + earliest_data_column_slot: Option, ) -> Result<(), Error> { let data_column_custody_info = DataColumnCustodyInfo { earliest_data_column_slot, @@ -2422,14 +2422,11 @@ impl, Cold: ItemStore> HotColdDB } /// Fetch custody info from the cache. - /// A `None` value indicates that we have fulfilled our custody - /// requirements up to the DA window. 
- pub fn get_data_column_custody_info( - &self, - query_store: bool, - ) -> Result, Error> { - // We only query on startup, when the cache isn't initialized - if query_store { + /// If custody info doesn't exist in the cache, + /// try to fetch from the DB and prime the cache. + pub fn get_data_column_custody_info(&self) -> Result, Error> { + let Some(data_column_custody_info) = self.block_cache.lock().get_data_column_custody_info() + else { let bytes_opt = self.blobs_db.get_bytes( DBColumn::BeaconDataColumnCustodyInfo, DATA_COLUMN_CUSTODY_INFO_KEY.as_slice(), @@ -2445,10 +2442,10 @@ impl, Cold: ItemStore> HotColdDB .lock() .put_data_column_custody_info(data_column_custody_info.clone()); - Ok(data_column_custody_info) - } else { - Ok(self.block_cache.lock().get_data_column_custody_info()) - } + return Ok(data_column_custody_info); + }; + + Ok(Some(data_column_custody_info)) } /// Fetch all columns for a given block from the store. diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index ecec5a8c620..1d7e4200706 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -212,7 +212,7 @@ impl StoreItem for BlobInfo { #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] pub struct DataColumnCustodyInfo { /// The earliest slot for which data columns are available. - pub earliest_data_column_slot: Slot, + pub earliest_data_column_slot: Option, } /// Database parameters relevant to data column sync. From 445c80f4e37782017ff455ea591e42965c754443 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 3 Jul 2025 08:45:34 +0300 Subject: [PATCH 04/17] fix test' --- .../src/schema_change/migration_schema_v27.rs | 20 +++++++++---------- .../beacon_chain/tests/schema_stability.rs | 6 +++--- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs index a566d0abae6..a9b3d266a77 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs @@ -1,11 +1,9 @@ -use crate::persisted_custody::{PersistedCustody, CUSTODY_DB_KEY}; -use crate::validator_custody::CustodyContextSsz; use crate::BeaconChainTypes; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; +use ssz::Encode; use std::sync::Arc; +use store::metadata::DataColumnCustodyInfo; use store::metadata::DATA_COLUMN_CUSTODY_INFO_KEY; -use store::{DBColumn, Error, HotColdDB, KeyValueStoreOp, StoreItem}; +use store::{DBColumn, Error, HotColdDB, KeyValueStoreOp}; use tracing::info; /// Add `DataColumnCustodyInfo` entry to v27. 
@@ -15,18 +13,18 @@ pub fn upgrade_to_v27( let ops = if db.spec.is_peer_das_scheduled() { info!("Adding `DataColumnCustodyInfo` to the db"); let data_column_custody_info = DataColumnCustodyInfo { - earliest_available_slot: None, + earliest_data_column_slot: None, }; vec![KeyValueStoreOp::PutKeyValue( - DBColumn::DataColumnCustodyInfo, + DBColumn::BeaconDataColumnCustodyInfo, DATA_COLUMN_CUSTODY_INFO_KEY.as_slice().to_vec(), data_column_custody_info.as_ssz_bytes(), )] } else { // Delete it from the db if PeerDAS hasn't been scheduled vec![KeyValueStoreOp::DeleteKey( - DBColumn::DataColumnCustodyInfo, - data_column_custody_info.as_ssz_bytes(), + DBColumn::BeaconDataColumnCustodyInfo, + DATA_COLUMN_CUSTODY_INFO_KEY.as_slice().to_vec(), )] }; @@ -42,8 +40,8 @@ pub fn downgrade_from_v27( )); } let ops = vec![KeyValueStoreOp::DeleteKey( - DBColumn::DataColumnCustodyInfo, - data_column_custody_info.as_ssz_bytes(), + DBColumn::BeaconDataColumnCustodyInfo, + DATA_COLUMN_CUSTODY_INFO_KEY.as_slice().to_vec(), )]; Ok(ops) } diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index fc37a1159bc..4ca7cde4f35 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -100,9 +100,9 @@ async fn schema_stability() { fn check_db_columns() { let current_columns: Vec<&'static str> = DBColumn::iter().map(|c| c.as_str()).collect(); let expected_columns = vec![ - "bma", "blk", "blb", "bdc", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs", "bst", - "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr", "brm", - "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy", + "bma", "blk", "blb", "bdc", "bdi", "ste", "hsd", "hsn", "bsn", "bsd", "bss", "bs3", "bcs", + "bst", "exp", "bch", "opo", "etc", "frk", "pkc", "brp", "bsx", "bsr", "bbx", "bbr", "bhr", + "brm", "dht", "cus", "otb", "bhs", "olc", "lcu", "scb", "scm", "dmy", ]; assert_eq!(expected_columns, current_columns); } From 78e33e010e022bfe98c49a65c920ad6444dee4ae Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 3 Jul 2025 09:04:39 +0300 Subject: [PATCH 05/17] Add metadata assert --- beacon_node/beacon_chain/tests/schema_stability.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index 4ca7cde4f35..2d80f9b732f 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -9,7 +9,7 @@ use operation_pool::PersistedOperationPool; use ssz::Encode; use std::sync::{Arc, LazyLock}; use store::{ - database::interface::BeaconNodeBackend, hot_cold_store::Split, metadata::DataColumnInfo, + database::interface::BeaconNodeBackend, hot_cold_store::Split, metadata::{DataColumnCustodyInfo, DataColumnInfo}, DBColumn, HotColdDB, StoreConfig, StoreItem, }; use strum::IntoEnumIterator; @@ -122,6 +122,7 @@ fn check_metadata_sizes(store: &Store) { } ); assert_eq!(DataColumnInfo::default().ssz_bytes_len(), 5); + assert_eq!(DataColumnCustodyInfo::default().ssz_bytes_len(), 5) } fn check_op_pool(store: &Store) { From 59b1520a78cbd3ebc1e7a168cb63b29e4b33548e Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 3 Jul 2025 09:04:50 +0300 Subject: [PATCH 06/17] Add metadata assert --- beacon_node/beacon_chain/tests/schema_stability.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git 
a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index 2d80f9b732f..dced442d332 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -9,7 +9,9 @@ use operation_pool::PersistedOperationPool; use ssz::Encode; use std::sync::{Arc, LazyLock}; use store::{ - database::interface::BeaconNodeBackend, hot_cold_store::Split, metadata::{DataColumnCustodyInfo, DataColumnInfo}, + database::interface::BeaconNodeBackend, + hot_cold_store::Split, + metadata::{DataColumnCustodyInfo, DataColumnInfo}, DBColumn, HotColdDB, StoreConfig, StoreItem, }; use strum::IntoEnumIterator; @@ -122,7 +124,7 @@ fn check_metadata_sizes(store: &Store) { } ); assert_eq!(DataColumnInfo::default().ssz_bytes_len(), 5); - assert_eq!(DataColumnCustodyInfo::default().ssz_bytes_len(), 5) + assert_eq!(DataColumnCustodyInfo::default().ssz_bytes_len(), 5); } fn check_op_pool(store: &Store) { From 7f936e30d3f4af302622b9cb101c4a9a220f7895 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 7 Jul 2025 11:15:15 +0300 Subject: [PATCH 07/17] add additional test, make sure we're adding datacolumn custody info to the blobs db --- beacon_node/beacon_chain/src/schema_change.rs | 11 ++++-- .../src/schema_change/migration_schema_v27.rs | 39 +++++-------------- .../beacon_chain/tests/schema_stability.rs | 10 +++++ beacon_node/store/src/metadata.rs | 2 +- 4 files changed, 27 insertions(+), 35 deletions(-) diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 09f3a2a9685..15c9498e1c1 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -69,12 +69,15 @@ pub fn migrate_schema( db.store_schema_version_atomically(to, ops) } (SchemaVersion(26), SchemaVersion(27)) => { - let ops = migration_schema_v27::upgrade_to_v27::(db.clone())?; - db.store_schema_version_atomically(to, ops) + // This migration updates the blobs db. The schema version + // is bumped inside upgrade_to_v27. + migration_schema_v27::upgrade_to_v27::(db.clone()) } (SchemaVersion(27), SchemaVersion(26)) => { - let ops = migration_schema_v27::downgrade_from_v27::(db.clone())?; - db.store_schema_version_atomically(to, ops) + // Downgrading is essentially a no-op and is only possible + // if peer das isn't scheduled. + migration_schema_v27::downgrade_from_v27::(db.clone())?; + db.store_schema_version_atomically(to, vec![]) } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs index a9b3d266a77..6275b1c5bea 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v27.rs @@ -1,47 +1,26 @@ use crate::BeaconChainTypes; -use ssz::Encode; use std::sync::Arc; -use store::metadata::DataColumnCustodyInfo; -use store::metadata::DATA_COLUMN_CUSTODY_INFO_KEY; -use store::{DBColumn, Error, HotColdDB, KeyValueStoreOp}; -use tracing::info; +use store::{metadata::SchemaVersion, Error, HotColdDB}; /// Add `DataColumnCustodyInfo` entry to v27. 
pub fn upgrade_to_v27( db: Arc>, -) -> Result, Error> { - let ops = if db.spec.is_peer_das_scheduled() { - info!("Adding `DataColumnCustodyInfo` to the db"); - let data_column_custody_info = DataColumnCustodyInfo { - earliest_data_column_slot: None, - }; - vec![KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconDataColumnCustodyInfo, - DATA_COLUMN_CUSTODY_INFO_KEY.as_slice().to_vec(), - data_column_custody_info.as_ssz_bytes(), - )] - } else { - // Delete it from the db if PeerDAS hasn't been scheduled - vec![KeyValueStoreOp::DeleteKey( - DBColumn::BeaconDataColumnCustodyInfo, - DATA_COLUMN_CUSTODY_INFO_KEY.as_slice().to_vec(), - )] - }; +) -> Result<(), Error> { + if db.spec.is_peer_das_scheduled() { + db.put_data_column_custody_info(None)?; + db.store_schema_version_atomically(SchemaVersion(27), vec![])?; + } - Ok(ops) + Ok(()) } pub fn downgrade_from_v27( db: Arc>, -) -> Result, Error> { +) -> Result<(), Error> { if db.spec.is_peer_das_scheduled() { return Err(Error::MigrationError( "Cannot downgrade from v27 if peerDAS is scheduled".to_string(), )); } - let ops = vec![KeyValueStoreOp::DeleteKey( - DBColumn::BeaconDataColumnCustodyInfo, - DATA_COLUMN_CUSTODY_INFO_KEY.as_slice().to_vec(), - )]; - Ok(ops) + Ok(()) } diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index dced442d332..b90a3f7b39f 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -91,6 +91,7 @@ async fn schema_stability() { check_metadata_sizes(&store); check_op_pool(&store); check_custody_context(&store, &harness.spec); + check_custody_info(&store, &harness.spec); check_persisted_chain(&store); // Not covered here: @@ -146,6 +147,15 @@ fn check_custody_context(store: &Store, spec: &ChainSpec) { } } +fn check_custody_info(store: &Store, spec: &ChainSpec) { + let data_column_custody_info = store.get_data_column_custody_info().unwrap(); + if spec.is_peer_das_scheduled() { + assert_eq!(data_column_custody_info.unwrap().as_ssz_bytes().len(), 13); + } else { + assert!(data_column_custody_info.is_none()); + } +} + fn check_persisted_chain(store: &Store) { let chain = store .get_item::(&Hash256::ZERO) diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 1d7e4200706..dacf3620ef7 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(26); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(27); // All the keys that get stored under the `BeaconMeta` column. 
// From 725d98eb9f1632ead7714e764c80e059db61041f Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 7 Jul 2025 23:24:58 +0300 Subject: [PATCH 08/17] effective epoch --- beacon_node/beacon_chain/src/validator_custody.rs | 4 ++-- beacon_node/http_api/src/lib.rs | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/validator_custody.rs index 9507ca73315..e796f2a3e86 100644 --- a/beacon_node/beacon_chain/src/validator_custody.rs +++ b/beacon_node/beacon_chain/src/validator_custody.rs @@ -217,7 +217,7 @@ impl CustodyContext { new_custody_group_count: updated_cgc, sampling_count: self .num_of_custody_groups_to_sample(Some(effective_epoch), spec), - slot: current_slot, + effective_epoch: current_slot.epoch(E::slots_per_epoch()), }); } } @@ -288,7 +288,7 @@ impl CustodyContext { pub struct CustodyCountChanged { pub new_custody_group_count: u64, pub sampling_count: u64, - pub slot: Slot, + pub effective_epoch: Epoch, } /// The custody information that gets persisted across runs. diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 1bb44dcd51a..08115785f38 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3849,7 +3849,11 @@ pub fn serve( .advertise_false_custody_group_count .is_none() { - chain.update_data_column_custody_info(Some(cgc_change.slot)) + chain.update_data_column_custody_info(Some( + cgc_change + .effective_epoch + .start_slot(T::EthSpec::slots_per_epoch()), + )) } network_tx.send(NetworkMessage::CustodyCountChanged { new_custody_group_count: cgc_change.new_custody_group_count, From ce7431b519ef44b03558e08212604db4e406272d Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 7 Jul 2025 23:26:24 +0300 Subject: [PATCH 09/17] remove mut --- beacon_node/store/src/hot_cold_store.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 28b11b935ad..f28140ee010 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -137,7 +137,7 @@ impl BlockCache { .get(block_root) .and_then(|map| map.get(column_index).cloned()) } - pub fn get_data_column_custody_info(&mut self) -> Option { + pub fn get_data_column_custody_info(&self) -> Option { self.data_column_custody_info_cache.clone() } pub fn delete_block(&mut self, block_root: &Hash256) { From ddb70621c848247e76e5a0878e806640f8bb16c6 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 7 Jul 2025 23:40:16 +0300 Subject: [PATCH 10/17] clean-up earliest avail slot logic --- beacon_node/network/src/status.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index d9965870988..3df7b50db8c 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,5 +1,5 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; -use types::{EthSpec, FixedBytesExtended, Hash256, Slot}; +use types::{EthSpec, FixedBytesExtended, Hash256}; use lighthouse_network::rpc::{methods::StatusMessageV2, StatusMessage}; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. 
@@ -29,21 +29,21 @@ pub(crate) fn status_message(beacon_chain: &BeaconChain) finalized_checkpoint.root = Hash256::zero(); } + let earliest_available_data_column_slot = beacon_chain + .store + .get_data_column_custody_info() + .ok() + .flatten() + .and_then(|info| info.earliest_data_column_slot); + // If there is no data column custody info in the db, that indicates that // no recent cgc changes have occurred and no cgc backfill is in progress. - let earliest_available_slot = if let Ok(Some(data_column_custody_info)) = - beacon_chain.store.get_data_column_custody_info() - { - std::cmp::max( - beacon_chain.store.get_anchor_info().oldest_block_slot, - data_column_custody_info - .earliest_data_column_slot - .unwrap_or(Slot::new(0)), - ) - } else { - beacon_chain.store.get_anchor_info().oldest_block_slot - }; - + let earliest_available_slot = + if let Some(earliest_available_data_column_slot) = earliest_available_data_column_slot { + earliest_available_data_column_slot + } else { + beacon_chain.store.get_anchor_info().oldest_block_slot + }; StatusMessage::V2(StatusMessageV2 { fork_digest, finalized_root: finalized_checkpoint.root, From 71a5b0f476e40ac77bebfc31ec11385530b9cc06 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 7 Jul 2025 23:53:06 +0300 Subject: [PATCH 11/17] update comment --- beacon_node/network/src/status.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index 3df7b50db8c..89a573d9c6d 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -36,7 +36,7 @@ pub(crate) fn status_message(beacon_chain: &BeaconChain) .flatten() .and_then(|info| info.earliest_data_column_slot); - // If there is no data column custody info in the db, that indicates that + // If data_column_custody_info.earliest_data_column_slot is `None`, // no recent cgc changes have occurred and no cgc backfill is in progress. let earliest_available_slot = if let Some(earliest_available_data_column_slot) = earliest_available_data_column_slot { From 0f2fc7d842fce127ce487225e8f7da5be030149b Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sat, 12 Jul 2025 12:14:24 +0300 Subject: [PATCH 12/17] small refactor --- beacon_node/network/src/status.rs | 1 + beacon_node/store/src/hot_cold_store.rs | 19 +++++-------------- beacon_node/store/src/metadata.rs | 18 ++++++++++++++++-- 3 files changed, 22 insertions(+), 16 deletions(-) diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index 89a573d9c6d..6c2ada447d2 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -29,6 +29,7 @@ pub(crate) fn status_message(beacon_chain: &BeaconChain) finalized_checkpoint.root = Hash256::zero(); } + // NOTE: We are making an assumption that `get_data_column_custody_info` wont fail. 
let earliest_available_data_column_slot = beacon_chain .store .get_data_column_custody_info() diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index f28140ee010..9c9374e7fe5 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -941,11 +941,8 @@ impl, Cold: ItemStore> HotColdDB earliest_data_column_slot, }; - self.blobs_db.put_bytes( - DBColumn::BeaconDataColumnCustodyInfo, - DATA_COLUMN_CUSTODY_INFO_KEY.as_slice(), - &data_column_custody_info.as_ssz_bytes(), - )?; + self.blobs_db + .put(&DATA_COLUMN_CUSTODY_INFO_KEY, &data_column_custody_info)?; self.block_cache .lock() @@ -2427,16 +2424,10 @@ impl, Cold: ItemStore> HotColdDB pub fn get_data_column_custody_info(&self) -> Result, Error> { let Some(data_column_custody_info) = self.block_cache.lock().get_data_column_custody_info() else { - let bytes_opt = self.blobs_db.get_bytes( - DBColumn::BeaconDataColumnCustodyInfo, - DATA_COLUMN_CUSTODY_INFO_KEY.as_slice(), - )?; - - let Some(bytes) = bytes_opt else { - return Ok(None); - }; + let data_column_custody_info = self + .blobs_db + .get::(&DATA_COLUMN_CUSTODY_INFO_KEY)?; - let data_column_custody_info = Some(DataColumnCustodyInfo::from_ssz_bytes(&bytes)?); // Update the cache self.block_cache .lock() diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index dacf3620ef7..b6091087efc 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -206,15 +206,29 @@ impl StoreItem for BlobInfo { } /// Database parameter relevant to data column custody sync. There is only at most a single -/// `DataColumnCustodyInfo` stored in the db. This record is added to the db when cgc +/// `DataColumnCustodyInfo` stored in the db. `earliest_data_column_slot` is updated when cgc /// count changes and is updated incrementally during data column custody backfill. Once custody backfill -/// is complete the record is removed from the db. +/// is complete `earliest_data_column_slot` is set to `None`. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] pub struct DataColumnCustodyInfo { /// The earliest slot for which data columns are available. pub earliest_data_column_slot: Option, } +impl StoreItem for DataColumnCustodyInfo { + fn db_column() -> DBColumn { + DBColumn::BeaconDataColumnCustodyInfo + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(DataColumnCustodyInfo::from_ssz_bytes(bytes)?) + } +} + /// Database parameters relevant to data column sync. 
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Default)] pub struct DataColumnInfo { From dc043083375a855b4fe8ec5aeaa05a5539539860 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Sat, 12 Jul 2025 12:19:04 +0300 Subject: [PATCH 13/17] effective epoch --- beacon_node/beacon_chain/src/validator_custody.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/validator_custody.rs index e796f2a3e86..4224125a2ac 100644 --- a/beacon_node/beacon_chain/src/validator_custody.rs +++ b/beacon_node/beacon_chain/src/validator_custody.rs @@ -217,7 +217,7 @@ impl CustodyContext { new_custody_group_count: updated_cgc, sampling_count: self .num_of_custody_groups_to_sample(Some(effective_epoch), spec), - effective_epoch: current_slot.epoch(E::slots_per_epoch()), + effective_epoch, }); } } From 53b1be377bd153f1d0af214cbf5c3820a573d8dd Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 14 Jul 2025 01:57:53 +0200 Subject: [PATCH 14/17] fix test --- beacon_node/beacon_chain/tests/schema_stability.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index b90a3f7b39f..e620bf85053 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -86,6 +86,7 @@ async fn schema_stability() { chain.persist_op_pool().unwrap(); chain.persist_custody_context().unwrap(); + insert_data_column_custody_info(&store, &harness.spec); check_db_columns(); check_metadata_sizes(&store); @@ -110,6 +111,12 @@ fn check_db_columns() { assert_eq!(expected_columns, current_columns); } +fn insert_data_column_custody_info(store: &Store, spec: &ChainSpec) { + if spec.is_peer_das_scheduled() { + store.put_data_column_custody_info(None).unwrap(); + } +} + /// Check the SSZ sizes of known on-disk metadata. /// /// New types can be added here as the schema evolves. 
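
The `check_custody_info` assertion above expects the stored record to serialize to 13 bytes, which only holds once `earliest_data_column_slot` is `Some(..)`; the next patch therefore switches the test insertion from `None` to `Some(Slot::new(0))`. The arithmetic behind the 5- and 13-byte figures, assuming ethereum_ssz's 4-byte offset for the variable-length field and a 1-byte union selector for `Option<T>`, is sketched below as a standalone illustration (the function name is hypothetical, not Lighthouse code):

    // Size breakdown for a container whose only field is an Option<Slot>,
    // assuming a 4-byte offset for the variable-length field and a 1-byte
    // union selector (0 = None, 1 = Some) followed by the 8-byte u64 slot.
    fn custody_info_ssz_len(has_slot: bool) -> usize {
        let offset = 4;
        let selector = 1;
        let slot = if has_slot { 8 } else { 0 };
        offset + selector + slot
    }

    fn main() {
        // Matches `DataColumnCustodyInfo::default().ssz_bytes_len()` in the test.
        assert_eq!(custody_info_ssz_len(false), 5);
        // Matches the 13-byte `check_custody_info` assertion once a slot is stored.
        assert_eq!(custody_info_ssz_len(true), 13);
    }
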
From 996c140b280212d3ec50d5523f905c24f29fb519 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 14 Jul 2025 07:49:33 +0200 Subject: [PATCH 15/17] fix --- beacon_node/beacon_chain/tests/schema_stability.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index e620bf85053..1d12fc878e7 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -16,7 +16,7 @@ use store::{ }; use strum::IntoEnumIterator; use tempfile::{tempdir, TempDir}; -use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec}; +use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec, Slot}; type E = MainnetEthSpec; type Store = Arc, BeaconNodeBackend>>; @@ -113,7 +113,9 @@ fn check_db_columns() { fn insert_data_column_custody_info(store: &Store, spec: &ChainSpec) { if spec.is_peer_das_scheduled() { - store.put_data_column_custody_info(None).unwrap(); + store + .put_data_column_custody_info(Some(Slot::new(0))) + .unwrap(); } } From a13bb6522e4f84a6c84d310595382ff9b4cb0be9 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 17 Jul 2025 16:37:09 +1000 Subject: [PATCH 16/17] Drop update data custody info condition --- beacon_node/http_api/src/lib.rs | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 08115785f38..7838c292568 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -3774,14 +3774,12 @@ pub fn serve( .and(network_tx_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .and(network_globals.clone()) .and(warp_utils::json::json()) .then( |not_synced_filter: Result<(), Rejection>, network_tx: UnboundedSender>, task_spawner: TaskSpawner, chain: Arc>, - network_globals: Arc>, preparation_data: Vec| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { not_synced_filter?; @@ -3842,19 +3840,12 @@ pub fn serve( current_slot, &chain.spec, ) { - // Don't update custody info if we're advertising a - // false custody group count. - if network_globals - .config - .advertise_false_custody_group_count - .is_none() - { - chain.update_data_column_custody_info(Some( - cgc_change - .effective_epoch - .start_slot(T::EthSpec::slots_per_epoch()), - )) - } + chain.update_data_column_custody_info(Some( + cgc_change + .effective_epoch + .start_slot(T::EthSpec::slots_per_epoch()), + )); + network_tx.send(NetworkMessage::CustodyCountChanged { new_custody_group_count: cgc_change.new_custody_group_count, sampling_count: cgc_change.sampling_count, From 5725bd48756ef9525fdbd344ecd74eb69c65891d Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 17 Jul 2025 16:43:02 +1000 Subject: [PATCH 17/17] Fix beacon chain tests. --- beacon_node/beacon_chain/tests/store_tests.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index e9b19ee6e0f..691ec003179 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3157,7 +3157,11 @@ async fn schema_downgrade_to_min_version( ) .await; - let min_version = SchemaVersion(22); + let min_version = if spec.is_fulu_scheduled() { + SchemaVersion(27) + } else { + SchemaVersion(22) + }; // Save the slot clock so that the new harness doesn't revert in time. 
let slot_clock = harness.chain.slot_clock.clone();
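
Taken together, the series gives `DataColumnCustodyInfo` a simple lifecycle: a CGC increase writes the effective epoch's start slot, custody backfill updates `earliest_data_column_slot` incrementally and clears it to `None` once complete, and the V2 status message prefers that slot over the anchor's `oldest_block_slot` whenever it is present. A minimal sketch of that precedence rule, with `Slot` reduced to a local newtype rather than the real type from the `types` crate:

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct Slot(u64);

    /// Mirrors `DataColumnCustodyInfo`: at most one record exists, and a `None`
    /// slot means no CGC change is pending and no custody backfill is in progress.
    #[derive(Clone, Copy, Debug, Default)]
    struct DataColumnCustodyInfo {
        earliest_data_column_slot: Option<Slot>,
    }

    /// Precedence rule used by the status message after the status.rs clean-up:
    /// use the custody-info slot if present, otherwise the anchor's oldest block slot.
    fn earliest_available_slot(
        custody_info: Option<DataColumnCustodyInfo>,
        oldest_block_slot: Slot,
    ) -> Slot {
        custody_info
            .and_then(|info| info.earliest_data_column_slot)
            .unwrap_or(oldest_block_slot)
    }

    fn main() {
        // No record at all: fall back to the anchor slot.
        assert_eq!(earliest_available_slot(None, Slot(100)), Slot(100));

        // A CGC change wrote the effective epoch's start slot; advertise that instead.
        let backfilling = DataColumnCustodyInfo {
            earliest_data_column_slot: Some(Slot(256)),
        };
        assert_eq!(earliest_available_slot(Some(backfilling), Slot(100)), Slot(256));

        // Backfill complete: the slot is cleared and the anchor slot wins again.
        let done = DataColumnCustodyInfo::default();
        assert_eq!(earliest_available_slot(Some(done), Slot(100)), Slot(100));
    }

As the status.rs comment notes, a missing record and a record whose slot has been cleared behave identically: both fall back to `oldest_block_slot`.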