diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index 01075ae4a4c..68cb285643e 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -434,6 +434,9 @@ pub struct BeaconChain<T: BeaconChainTypes> {
         Mutex<ObservedOperations<SignedBlsToExecutionChange, T::EthSpec>>,
     /// Interfaces with the execution client.
     pub execution_layer: Option<ExecutionLayer<T::EthSpec>>,
+    /// Storage for execution payload proofs used in stateless validation.
+    pub execution_payload_proof_store:
+        Arc<ExecutionPayloadProofStore>,
     /// Stores information about the canonical head and finalized/justified checkpoints of the
     /// chain. Also contains the fork choice struct, for computing the canonical head.
     pub canonical_head: CanonicalHead<T>,
@@ -3907,6 +3910,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .map_err(|e| BlockError::BeaconChainError(Box::new(e.into())))?;
         }
 
+        // Register optimistic blocks for proof validation in stateless validation mode.
+        if payload_verification_status.is_optimistic() {
+            if let Ok(execution_payload) = block.execution_payload() {
+                let execution_block_hash = execution_payload.block_hash().into();
+                self.register_optimistic_block_for_proof(block_root, execution_block_hash);
+            }
+        }
+
         // If the block is recent enough and it was not optimistically imported, check to see if it
         // becomes the head block. If so, apply it to the early attester cache. This will allow
         // attestations to the block without waiting for the block and state to be inserted to the
diff --git a/beacon_node/beacon_chain/src/beacon_chain_execution_proof.rs b/beacon_node/beacon_chain/src/beacon_chain_execution_proof.rs
new file mode 100644
index 00000000000..527126085e9
--- /dev/null
+++ b/beacon_node/beacon_chain/src/beacon_chain_execution_proof.rs
@@ -0,0 +1,229 @@
+use crate::errors::BeaconChainError as Error;
+use crate::{BeaconChain, BeaconChainTypes};
+use tracing::{debug, info};
+use types::{EthSpec, ExecPayload, ExecutionBlockHash, Hash256, Slot};
+
+// Execution Proof Management for BeaconChain
+//
+// This module contains all execution proof-related functionality for the
+// BeaconChain. Under the current code structure this would belong in
+// beacon_chain.rs; it has been pulled into this separate file to make the
+// diff easier to manage.
+//
+impl<T: BeaconChainTypes> BeaconChain<T> {
+    // ========================================================================
+    // Subnet Management
+    // ========================================================================
+
+    /// Determine which execution proof subnets this node should subscribe to.
+    ///
+    /// Currently uses a simple sequential allocation: if max_execution_proof_subnets is N,
+    /// this node will subscribe to subnets [0, 1, 2, ..., N-1].
+    ///
+    /// Examples:
+    /// - max_execution_proof_subnets = 8: subscribes to subnets [0, 1, 2, 3, 4, 5, 6, 7]
+    /// - max_execution_proof_subnets = 4: subscribes to subnets [0, 1, 2, 3]
+    /// - max_execution_proof_subnets = 1: subscribes to subnet [0] only
+    ///
+    /// In the future, this could be made more sophisticated to support:
+    /// - Random assignment for better distribution
+    pub fn execution_proof_subnets(&self) -> Vec<u64> {
+        (0..self.config.max_execution_proof_subnets).collect()
+    }
+
+    /// Get the maximum number of execution proof subnets for this configuration.
+    pub fn max_execution_proof_subnets(&self) -> u64 {
+        self.config.max_execution_proof_subnets
+    }
+
+    /// Check if this node should generate execution proofs for the given subnet.
+    ///
+    /// Returns true if the subnet is within our configured range.
+    pub fn should_generate_execution_proof_for_subnet(&self, subnet_id: u64) -> bool {
+        // We generate proofs for all subnets we're subscribed to.
+        subnet_id < self.max_execution_proof_subnets() && self.config.generate_execution_proofs
+    }
+
+    // ========================================================================
+    // Proof Validation and Chain Updates
+    // ========================================================================
+
+    /// Re-evaluate optimistic blocks that can now be validated with received proofs.
+    /// This method is called when new execution proofs arrive via gossip.
+    /// In the dual-view architecture, this updates the proven chain but does NOT
+    /// modify fork choice weights.
+    pub fn re_evaluate_optimistic_blocks_with_proofs(
+        &self,
+        execution_block_hash: ExecutionBlockHash,
+    ) -> Result<bool, Error> {
+        // Only perform re-evaluation if stateless validation is enabled.
+        if !self.config.stateless_validation {
+            return Ok(false);
+        }
+
+        // Get the proofs we have for this execution block hash.
+        let available_proofs = self
+            .execution_payload_proof_store
+            .get_proofs(&execution_block_hash);
+        let proof_count = available_proofs.len();
+
+        // Check if we have enough valid proofs.
+        if proof_count < self.config.stateless_min_proofs_required {
+            // Only log if we're close to having enough proofs.
+            if proof_count > 0 {
+                debug!(
+                    execution_block_hash = %execution_block_hash,
+                    proof_count,
+                    required_proofs = self.config.stateless_min_proofs_required,
+                    "Insufficient proofs for execution block"
+                );
+            }
+            return Ok(false);
+        }
+
+        debug!(
+            execution_block_hash = %execution_block_hash,
+            proof_count,
+            required_proofs = self.config.stateless_min_proofs_required,
+            "Minimum proofs reached, updating proven chain"
+        );
+
+        // Get the current chain state.
+        let head = self.canonical_head.cached_head();
+        let head_block_root = head.head_block_root();
+        let head_slot = head.head_slot();
+        let current_slot = self.slot().unwrap_or(Slot::new(0));
+        let slots_per_epoch = T::EthSpec::slots_per_epoch();
+
+        // Update the proven canonical chain based on available proofs.
+        // This does NOT modify fork choice - validators continue with the optimistic view.
+        let proven_status = self
+            .execution_payload_proof_store
+            .update_proven_chain(
+                |block_root| {
+                    self.get_blinded_block(block_root).map(|result| {
+                        result.map(|block| {
+                            let slot = block.slot();
+                            let parent_root = block.parent_root();
+                            let exec_hash_opt = block
+                                .message()
+                                .execution_payload()
+                                .ok()
+                                .map(|payload| payload.block_hash());
+                            (slot, parent_root, exec_hash_opt)
+                        })
+                    })
+                },
+                head_block_root,
+                current_slot,
+                slots_per_epoch,
+                self.config.stateless_min_proofs_required,
+            )
+            .map_err(Error::ExecutionProofError)?;
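+
+        // `proven_status` reflects a walk backwards from the current head that stops
+        // at the first block lacking `stateless_min_proofs_required` proofs.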
+
+        // Log the proven chain status if it changed.
+        if let Some((_proven_root, proven_slot)) = proven_status.proven_head {
+            if proven_status.head_changed {
+                let lag_slots = head_slot.saturating_sub(proven_slot);
+                info!(
+                    proven_slot = %proven_slot,
+                    head_slot = %head_slot,
+                    lag_slots = %lag_slots,
+                    "Proven chain updated"
+                );
+            }
+        }
+
+        // Remove pending blocks that now have sufficient proofs.
+        let proven_blocks = self
+            .execution_payload_proof_store
+            .take_pending_blocks(&execution_block_hash);
+        // Note: if we were to modify fork choice, it would likely happen here, where we
+        // know which set of beacon blocks have valid execution payloads.
+
+        if !proven_blocks.is_empty() {
+            debug!(
+                %execution_block_hash,
+                proven_count = proven_blocks.len(),
+                "Removed pending blocks that now have sufficient proofs"
+            );
+        }
+
+        // Perform periodic cleanup of finalized pending blocks.
+        if proven_status.head_changed {
+            // TODO: Revisit whether this is still needed.
+            let _cleaned_count = self.cleanup_finalized_pending_blocks();
+        }
+
+        // Return false - we never trigger head recomputation in dual-view mode.
+        // Fork choice remains permanently optimistic.
+        Ok(false)
+    }
+
+    /// Register a beacon block as pending execution proof validation.
+    /// This is called when a block is imported optimistically in stateless validation mode.
+    pub fn register_optimistic_block_for_proof(
+        &self,
+        beacon_block_root: Hash256,
+        execution_block_hash: ExecutionBlockHash,
+    ) {
+        if self.config.stateless_validation {
+            self.execution_payload_proof_store
+                .register_pending_block(execution_block_hash, beacon_block_root);
+
+            debug!(
+                beacon_block_root = %beacon_block_root,
+                execution_block_hash = %execution_block_hash,
+                "Registered optimistic block awaiting proofs"
+            );
+        }
+    }
+
+    // ========================================================================
+    // Cleanup Operations
+    // ========================================================================
+
+    /// Clean up pending blocks that have been finalized or are too old.
+    /// This should be called periodically to prevent memory leaks in the proof store.
+    ///
+    /// This method is mainly here for the case where a block has been finalized
+    /// without a sufficient number of proofs. This can happen while proofs are not
+    /// on the critical path and, for some reason (e.g. prover-killer blocks), take
+    /// more than two epochs to generate.
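+    ///
+    /// A rough sketch of how a caller might drive this periodically (the timer
+    /// wiring here is hypothetical, not part of this change):
+    ///
+    /// ```ignore
+    /// let mut interval = tokio::time::interval(Duration::from_secs(384)); // ~1 epoch
+    /// loop {
+    ///     interval.tick().await;
+    ///     let _removed = chain.cleanup_finalized_pending_blocks();
+    /// }
+    /// ```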
+    pub fn cleanup_finalized_pending_blocks(&self) -> usize {
+        if !self.config.stateless_validation {
+            return 0;
+        }
+
+        let finalized_slot = self
+            .canonical_head
+            .cached_head()
+            .finalized_checkpoint()
+            .epoch
+            .start_slot(T::EthSpec::slots_per_epoch());
+
+        // Remove pending blocks that are older than the finalized slot.
+        let removed_count =
+            self.execution_payload_proof_store
+                .cleanup_pending_blocks(|block_root| {
+                    // Check if this block is older than the finalized slot.
+                    // We need to look up the block to get its slot.
+                    if let Ok(Some(block)) = self.get_blinded_block(&block_root) {
+                        block.slot() <= finalized_slot
+                    } else {
+                        // If we can't find the block, it's likely been pruned, so remove it.
+                        true
+                    }
+                });
+
+        if removed_count > 0 {
+            debug!(
+                finalized_slot = %finalized_slot,
+                removed_count,
+                "Cleaned up finalized pending blocks from proof store"
+            );
+        }
+
+        removed_count
+    }
+}
diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs
index c46cc015c9c..198b95344bd 100644
--- a/beacon_node/beacon_chain/src/builder.rs
+++ b/beacon_node/beacon_chain/src/builder.rs
@@ -3,6 +3,7 @@ use crate::beacon_chain::{
 };
 use crate::beacon_proposer_cache::BeaconProposerCache;
 use crate::data_availability_checker::DataAvailabilityChecker;
+use crate::execution_proof_store::ExecutionPayloadProofStore;
 use crate::fork_choice_signal::ForkChoiceSignalTx;
 use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary};
 use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin};
@@ -898,6 +899,7 @@ where
         let genesis_time = head_snapshot.beacon_state.genesis_time();
         let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot));
         let shuffling_cache_size = self.chain_config.shuffling_cache_size;
+        let max_execution_payload_proofs = self.chain_config.max_execution_payload_proofs;
 
         // Calculate the weak subjectivity point in which to backfill blocks to.
         let genesis_backfill_slot = if self.chain_config.genesis_backfill {
@@ -978,6 +980,10 @@ where
             observed_attester_slashings: <_>::default(),
             observed_bls_to_execution_changes: <_>::default(),
             execution_layer: self.execution_layer.clone(),
+            // TODO: allow for persisting and loading from disk (when a block has been confirmed)
+            execution_payload_proof_store: Arc::new(ExecutionPayloadProofStore::new(
+                max_execution_payload_proofs,
+            )),
             genesis_validators_root,
             genesis_time,
             canonical_head,
diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs
index 808c96d9650..9f94bb59ec2 100644
--- a/beacon_node/beacon_chain/src/chain_config.rs
+++ b/beacon_node/beacon_chain/src/chain_config.rs
@@ -81,6 +81,36 @@ pub struct ChainConfig {
     pub prepare_payload_lookahead: Duration,
     /// Use EL-free optimistic sync for the finalized part of the chain.
     pub optimistic_finalized_sync: bool,
+    /// Enable stateless validation mode for new payloads.
+    ///
+    /// Currently this means that the node will accept blocks optimistically
+    /// and maintain metadata about which blocks have been proven and which ones have not.
+    pub stateless_validation: bool,
+    /// Generate execution proofs for all blocks received.
+    ///
+    /// Nodes that have this enabled will be used to bootstrap proofs into the subnets,
+    /// whether they are a proposer or not.
+    pub generate_execution_proofs: bool,
+    /// Maximum number of execution payload proofs to store in memory.
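+    ///
+    /// Once this cap is reached, the oldest stored proof is evicted first
+    /// (insertion-order LRU; see `ExecutionPayloadProofStore::store_proof`).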
+    pub max_execution_payload_proofs: usize,
+    /// Maximum number of execution proof subnets this node will participate in.
+    ///
+    /// This is a per-node configuration that must not exceed the protocol maximum
+    /// (MAX_EXECUTION_PROOF_SUBNETS). Nodes may choose to participate in fewer
+    /// subnets to reduce resource usage, but this limits the number of proofs they
+    /// can generate or validate.
+    ///
+    /// TODO: We can replace the sequential allocation with a random allocation, so
+    /// TODO: that lower-numbered subnets are not privileged. The current strategy is
+    /// TODO: mostly a POC.
+    ///
+    /// Note: stateless_min_proofs_required must not exceed this value, as a node
+    /// cannot require more proofs than the number of subnets it participates in.
+    pub max_execution_proof_subnets: u64,
+    /// Minimum number of proofs required to consider a block valid in stateless mode.
+    ///
+    /// Must be between 1 and max_execution_proof_subnets. Higher values provide
+    /// more security but may increase block validation latency.
+    pub stateless_min_proofs_required: usize,
     /// The size of the shuffling cache,
     pub shuffling_cache_size: usize,
     /// If using a weak-subjectivity sync, whether we should download blocks all the way back to
@@ -142,6 +172,11 @@ impl Default for ChainConfig {
             prepare_payload_lookahead: Duration::from_secs(4),
             // This value isn't actually read except in tests.
             optimistic_finalized_sync: true,
+            stateless_validation: false,
+            generate_execution_proofs: false,
+            max_execution_payload_proofs: 10_000,
+            max_execution_proof_subnets: 8,
+            stateless_min_proofs_required: 1,
             shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE,
             genesis_backfill: false,
             always_prepare_payload: false,
diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs
index b6db3fa84f2..9372a915b9a 100644
--- a/beacon_node/beacon_chain/src/errors.rs
+++ b/beacon_node/beacon_chain/src/errors.rs
@@ -230,6 +230,7 @@ pub enum BeaconChainError {
         columns_found: usize,
     },
     FailedToReconstructBlobs(String),
+    ExecutionProofError(String),
 }
 
 easy_from_to!(SlotProcessingError, BeaconChainError);
diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs
index aa98310c121..70f03719240 100644
--- a/beacon_node/beacon_chain/src/execution_payload.rs
+++ b/beacon_node/beacon_chain/src/execution_payload.rs
@@ -24,7 +24,7 @@ use state_processing::per_block_processing::{
 };
 use std::sync::Arc;
 use tokio::task::JoinHandle;
-use tracing::{debug, warn};
+use tracing::{debug, info, warn};
 use tree_hash::TreeHash;
 use types::payload::BlockProductionVersion;
 use types::*;
@@ -136,6 +136,45 @@ async fn notify_new_payload(
         .ok_or(ExecutionPayloadError::NoExecutionConnection)?;
 
     let execution_block_hash = block.execution_payload()?.block_hash();
+
+    // Generate a proof for this payload (even though it may not be our own proposed block).
+    if chain.config.generate_execution_proofs {
+        spawn_proof_generation_task_with_block(chain, block);
+    }
+
+    // Check if stateless validation is enabled.
+    if chain.config.stateless_validation {
+        let proof_count = chain
+            .execution_payload_proof_store
+            .proof_count_for_payload(&execution_block_hash);
+
+        // Eagerly check if we have enough proofs for this execution payload.
+        if proof_count >= chain.config.stateless_min_proofs_required {
+            info!(
+                execution_block_hash = ?execution_block_hash,
+                proof_count,
+                required_proofs = chain.config.stateless_min_proofs_required,
+                "Execution payload verified with proofs"
+            );
+            return Ok(PayloadVerificationStatus::Verified);
+        } else {
+            // We don't have enough proofs, so we mark the block as optimistic,
+            // save it in the proof store, and wait for proofs.
+            //
+            // In production, we would have some form of delayed execution
+            // instead of piggy-backing off of optimistic sync.
+            let beacon_block_root = block.tree_hash_root();
+            debug!(
+                beacon_block_root = ?beacon_block_root,
+                execution_block_hash = ?execution_block_hash,
+                proof_count,
+                required_proofs = chain.config.stateless_min_proofs_required,
+                "Insufficient proofs for block, marking as optimistic"
+            );
+            return Ok(PayloadVerificationStatus::Optimistic);
+        }
+    }
+
     let new_payload_response = execution_layer.notify_new_payload(block.try_into()?).await;
 
     match new_payload_response {
@@ -451,6 +490,16 @@ where
         .as_ref()
         .ok_or(BlockProductionError::ExecutionLayerMissing)?;
 
+    // Check for stateless validation mode.
+    if chain.config.stateless_validation {
+        // TODO: We could return an empty payload here until we hook up mev-boost.
+        eprintln!(
+            "ERROR: Cannot produce blocks in stateless validation mode - no execution layer attached. \
+             TODO: Use MEV-boost for block production in stateless validation mode."
+        );
+        return Err(BlockProductionError::ExecutionLayerMissing);
+    }
+
     let parent_hash = if !is_merge_transition_complete {
         let is_terminal_block_hash_set = spec.terminal_block_hash != ExecutionBlockHash::zero();
         let is_activation_epoch_reached =
@@ -540,3 +589,132 @@ where
 
     Ok(block_contents)
 }
+
+/// Spawn a background task to generate and store execution proofs using a block reference.
+/// This converts the BeaconBlockRef to an ExecutionPayload before spawning the proof
+/// generation task.
+fn spawn_proof_generation_task_with_block<T: BeaconChainTypes>(
+    chain: &Arc<BeaconChain<T>>,
+    block: BeaconBlockRef<'_, T::EthSpec, FullPayload<T::EthSpec>>,
+) {
+    // Clone the chain for the async task.
+    let chain_clone = chain.clone();
+
+    // Extract the concrete ExecutionPayload from the BeaconBlock.
+    let payload = match extract_execution_payload(block) {
+        Ok(payload) => payload,
+        Err(e) => {
+            warn!(
+                "Failed to extract execution payload for proof generation: {:?}",
+                e
+            );
+            return;
+        }
+    };
+
+    // Spawn the proof generation task in the background.
+    // WARNING: No resource limits or task counting is performed here.
+    // TODO: Implement a task queue with concurrency limits and resource monitoring.
+    chain.task_executor.spawn(
+        async move {
+            if let Err(e) =
+                generate_and_store_execution_proofs_from_block(&chain_clone, &payload).await
+            {
+                warn!("Failed to generate execution proofs: {:?}", e);
+            }
+        },
+        "execution_proof_generation",
+    );
+}
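+
+// One possible shape for the bounded spawning hinted at by the TODO above. This is
+// an illustrative sketch only (the `PROOF_TASK_PERMITS` semaphore and the limit of
+// 8 are assumptions, not part of this change):
+//
+//     use std::sync::OnceLock;
+//     use tokio::sync::Semaphore;
+//
+//     static PROOF_TASK_PERMITS: OnceLock<Arc<Semaphore>> = OnceLock::new();
+//
+//     let permits = PROOF_TASK_PERMITS
+//         .get_or_init(|| Arc::new(Semaphore::new(8)))
+//         .clone();
+//     chain.task_executor.spawn(
+//         async move {
+//             // Holding the permit for the task's lifetime caps concurrency at 8.
+//             let _permit = permits.acquire_owned().await;
+//             // ... proof generation as above ...
+//         },
+//         "execution_proof_generation",
+//     );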
+
+/// Generate and store dummy execution proofs from a block.
+/// This simulates receiving proofs that would normally come from zkVMs or other proof generators.
+///
+/// TODO: This implementation lacks a circuit breaker for proof generation.
+async fn generate_and_store_execution_proofs_from_block<T: BeaconChainTypes>(
+    chain: &Arc<BeaconChain<T>>,
+    payload: &ExecutionPayload<T::EthSpec>,
+) -> Result<(), BlockProductionError> {
+    let execution_block_hash = payload.block_hash();
+
+    info!(
+        execution_block_hash = ?execution_block_hash,
+        "Starting execution proof generation"
+    );
+
+    // For real proof generation, we would:
+    // 1. Send the ExecutionPayload to the EL to fetch witness data (via debug_executionWitness or similar)
+    // 2. Generate actual cryptographic proofs using the payload + witness
+    // For now, we generate dummy proofs with a simulated witness.
+
+    // let witness = execution_layer.get_execution_witness(execution_block_hash).await?;
+    let witness = format!("dummy_witness_for_block_{:?}", execution_block_hash).into_bytes();
+
+    // Get the subnets this node wants to generate proofs for.
+    let proof_subnets = chain.execution_proof_subnets();
+
+    debug!(
+        execution_block_hash = ?execution_block_hash,
+        subnet_count = proof_subnets.len(),
+        subnets = ?proof_subnets,
+        "Generating proofs for configured subnets"
+    );
+
+    // Generate and store a proof for each subnet.
+    for subnet_id in proof_subnets.iter() {
+        if chain.should_generate_execution_proof_for_subnet(*subnet_id) {
+            // Create an ExecutionProofSubnetId from the u64 subnet_id.
+            let proof_id = match ExecutionProofSubnetId::new(*subnet_id) {
+                Ok(id) => id,
+                Err(e) => {
+                    debug!("Invalid subnet ID {}: {}", subnet_id, e);
+                    continue;
+                }
+            };
+
+            // Use the proof store method to get or generate the proof.
+            match chain
+                .execution_payload_proof_store
+                .get_or_generate_proof(&payload, &witness, proof_id)
+                .await
+            {
+                Ok(_proof) => {
+                    debug!(
+                        execution_block_hash = ?execution_block_hash,
+                        subnet_id,
+                        "Generated execution proof"
+                    );
+                }
+                Err(e) => {
+                    warn!(
+                        subnet_id,
+                        error = %e,
+                        "Failed to generate execution proof"
+                    );
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+fn extract_execution_payload<E: EthSpec>(
+    block: BeaconBlockRef<'_, E, FullPayload<E>>,
+) -> Result<ExecutionPayload<E>, BeaconStateError> {
+    let payload_ref = block.body().execution_payload()?;
+    Ok(match payload_ref {
+        FullPayloadRef::Bellatrix(payload) => {
+            ExecutionPayload::Bellatrix(payload.execution_payload.clone())
+        }
+        FullPayloadRef::Capella(payload) => {
+            ExecutionPayload::Capella(payload.execution_payload.clone())
+        }
+        FullPayloadRef::Deneb(payload) => {
+            ExecutionPayload::Deneb(payload.execution_payload.clone())
+        }
+        FullPayloadRef::Electra(payload) => {
+            ExecutionPayload::Electra(payload.execution_payload.clone())
+        }
+        FullPayloadRef::Fulu(payload) => ExecutionPayload::Fulu(payload.execution_payload.clone()),
+    })
+}
diff --git a/beacon_node/beacon_chain/src/execution_proof_generation.rs b/beacon_node/beacon_chain/src/execution_proof_generation.rs
new file mode 100644
index 00000000000..29d5a8c7444
--- /dev/null
+++ b/beacon_node/beacon_chain/src/execution_proof_generation.rs
@@ -0,0 +1,286 @@
+//! Execution proof generation and verification
+//!
+//! This module handles the generation and verification of execution proofs.
+//! Currently implements dummy proof generation, but will be replaced with
+//! actual proof generation from zkVMs or other proof systems.
+
+use tracing::debug;
+use types::{
+    execution_proof_subnet_id::ExecutionProofSubnetId, EthSpec, ExecutionPayload, ExecutionProof,
+};
+
+/// Generate a proof for an execution payload
+///
+/// TODO: Currently generates dummy proofs. Will be replaced with actual proof generation
+/// from zkVMs or other proof systems.
+///
+/// This accepts the concrete ExecutionPayload type, which is what the EL expects
+/// and can be easily serialized for sending to external systems.
+/// The execution_state_witness would be obtained from the EL (e.g., via debug_executionWitness).
+pub async fn generate_proof<E: EthSpec>(
+    payload: &ExecutionPayload<E>,
+    execution_state_witness: &[u8],
+    proof_id: ExecutionProofSubnetId,
+) -> ExecutionProof {
+    let execution_block_hash = payload.block_hash();
+    let block_number = payload.block_number();
+
+    // Simulate (some) proof computation delay.
+    // In a real implementation, this would be the time needed for zkVM local proof generation
+    // or communication with external proof generation services.
+    use rand::{thread_rng, Rng};
+    let delay_ms = thread_rng().gen_range(1000..=3000);
+
+    debug!(
+        execution_block_hash = ?execution_block_hash,
+        subnet_id = *proof_id,
+        delay_ms,
+        "Simulating proof generation delay"
+    );
+
+    tokio::time::sleep(tokio::time::Duration::from_millis(delay_ms)).await;
+
+    // Create dummy proof data that includes the subnet information and payload details.
+    // In a real implementation, this would use the execution_state_witness to generate
+    // a cryptographic proof of the payload's validity.
+    let dummy_data = format!(
+        "dummy_proof_subnet_{}_block_{:?}_number_{}_witness_len_{}",
+        *proof_id,
+        execution_block_hash,
+        block_number,
+        execution_state_witness.len()
+    )
+    .into_bytes();
+
+    ExecutionProof::new(execution_block_hash, proof_id, 1, dummy_data)
+}
+
+/// Validate a proof (placeholder implementation)
+///
+/// TODO: Implement actual cryptographic proof validation based on version and type
+pub fn validate_proof(proof: &ExecutionProof) -> bool {
+    // Placeholder validation - in reality this would verify cryptographic proofs
+    // based on both proof_id and version.
+    match proof.version {
+        1 => {
+            // Version 1: basic validation - non-empty proof data.
+            !proof.proof_data.is_empty()
+        }
+        _ => {
+            // Unknown versions are considered invalid.
+            false
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use types::{
+        ExecutionBlockHash, ExecutionPayloadBellatrix, FixedBytesExtended, FullPayloadBellatrix,
+        Hash256, MainnetEthSpec, Uint256,
+    };
+
+    #[tokio::test]
+    async fn test_generate_proof() {
+        let execution_block_hash = ExecutionBlockHash::from(Hash256::random());
+        let proof_id = ExecutionProofSubnetId::new(5).unwrap();
+
+        // Create a dummy payload for testing.
+        let payload = FullPayloadBellatrix::<MainnetEthSpec> {
+            execution_payload: ExecutionPayloadBellatrix::<MainnetEthSpec> {
+                parent_hash: ExecutionBlockHash::zero(),
+                fee_recipient: Default::default(),
+                state_root: Hash256::zero(),
+                receipts_root: Hash256::zero(),
+                logs_bloom: Default::default(),
+                prev_randao: Hash256::zero(),
+                block_number: 12345,
+                gas_limit: 30_000_000,
+                gas_used: 0,
+                timestamp: 0,
+                extra_data: Default::default(),
+                base_fee_per_gas: Uint256::from(1u64),
+                block_hash: execution_block_hash,
+                transactions: Default::default(),
+            },
+        };
+
+        let exec_payload = ExecutionPayload::Bellatrix(payload.execution_payload);
+        let dummy_witness = b"test_witness_data";
+        let proof = generate_proof(&exec_payload, dummy_witness, proof_id).await;
+
+        assert_eq!(proof.block_hash, execution_block_hash);
+        assert_eq!(proof.subnet_id, proof_id);
+        assert_eq!(proof.version, 1);
+        assert!(!proof.proof_data.is_empty());
+        assert!(validate_proof(&proof));
+
+        // Verify the proof data contains the expected information.
+        let proof_data_str = String::from_utf8_lossy(&proof.proof_data);
+        assert!(proof_data_str.contains("subnet_5"));
+        assert!(proof_data_str.contains("number_12345"));
+        assert!(proof_data_str.contains("witness_len_17")); // 17 is the length of "test_witness_data"
+    }
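+
+    // Illustrative extra check (not in the original changeset): any version other
+    // than 1, including 0, falls through to the "unknown version" arm.
+    #[test]
+    fn test_validate_proof_rejects_version_zero() {
+        let hash = ExecutionBlockHash::from(Hash256::random());
+        let v0_proof = ExecutionProof::new(
+            hash,
+            ExecutionProofSubnetId::new(0).unwrap(),
+            0,
+            vec![1, 2, 3],
+        );
+        assert!(!validate_proof(&v0_proof));
+    }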
+
+    #[test]
+    fn test_validate_proof() {
+        let hash = ExecutionBlockHash::from(Hash256::random());
+
+        // Test version 1 proof (supported)
+        let v1_proof = ExecutionProof::new(
+            hash,
+            ExecutionProofSubnetId::new(0).unwrap(),
+            1,
+            vec![1, 2, 3],
+        );
+        assert!(validate_proof(&v1_proof));
+
+        // Test unsupported version
+        let v2_proof = ExecutionProof::new(
+            hash,
+            ExecutionProofSubnetId::new(0).unwrap(),
+            2,
+            vec![7, 8, 9],
+        );
+        assert!(!validate_proof(&v2_proof)); // Should fail validation for unknown version
+
+        // Test empty data with version 1 (should be invalid)
+        let empty_v1 =
+            ExecutionProof::new(hash, ExecutionProofSubnetId::new(0).unwrap(), 1, vec![]);
+        assert!(!validate_proof(&empty_v1));
+    }
+
+    #[tokio::test]
+    async fn test_generate_proof_different_subnets() {
+        let execution_block_hash = ExecutionBlockHash::from(Hash256::random());
+
+        // Create a dummy payload for testing.
+        let payload = FullPayloadBellatrix::<MainnetEthSpec> {
+            execution_payload: ExecutionPayloadBellatrix::<MainnetEthSpec> {
+                parent_hash: ExecutionBlockHash::zero(),
+                fee_recipient: Default::default(),
+                state_root: Hash256::zero(),
+                receipts_root: Hash256::zero(),
+                logs_bloom: Default::default(),
+                prev_randao: Hash256::zero(),
+                block_number: 42,
+                gas_limit: 0,
+                gas_used: 0,
+                timestamp: 0,
+                extra_data: Default::default(),
+                base_fee_per_gas: Uint256::from(0u64),
+                block_hash: execution_block_hash,
+                transactions: Default::default(),
+            },
+        };
+
+        let exec_payload = ExecutionPayload::Bellatrix(payload.execution_payload);
+        let dummy_witness = b"test_witness_data";
+
+        let proof_0 = generate_proof(
+            &exec_payload,
+            dummy_witness,
+            ExecutionProofSubnetId::new(0).unwrap(),
+        )
+        .await;
+        let proof_1 = generate_proof(
+            &exec_payload,
+            dummy_witness,
+            ExecutionProofSubnetId::new(1).unwrap(),
+        )
+        .await;
+        let proof_2 = generate_proof(
+            &exec_payload,
+            dummy_witness,
+            ExecutionProofSubnetId::new(2).unwrap(),
+        )
+        .await;
+
+        // All proofs should be for the same block hash.
+        assert_eq!(proof_0.block_hash, execution_block_hash);
+        assert_eq!(proof_1.block_hash, execution_block_hash);
+        assert_eq!(proof_2.block_hash, execution_block_hash);
+
+        // But they should have different proof IDs and data.
+        assert_eq!(*proof_0.subnet_id, 0);
+        assert_eq!(*proof_1.subnet_id, 1);
+        assert_eq!(*proof_2.subnet_id, 2);
+
+        // Proof data should be different for different subnets.
+        assert_ne!(proof_0.proof_data, proof_1.proof_data);
+        assert_ne!(proof_1.proof_data, proof_2.proof_data);
+
+        let data_0 = String::from_utf8_lossy(&proof_0.proof_data);
+        let data_1 = String::from_utf8_lossy(&proof_1.proof_data);
+        let data_2 = String::from_utf8_lossy(&proof_2.proof_data);
+
+        assert!(data_0.contains("subnet_0"));
+        assert!(data_1.contains("subnet_1"));
+        assert!(data_2.contains("subnet_2"));
+    }
+
+    #[tokio::test]
+    async fn test_generate_proof_deterministic() {
+        // Test that proof generation is deterministic - same input always produces same output.
+        let execution_block_hash = ExecutionBlockHash::from(Hash256::from_low_u64_be(12345));
+        let proof_id = ExecutionProofSubnetId::new(3).unwrap();
+
+        // Create a specific payload with fixed values.
+        let payload = FullPayloadBellatrix::<MainnetEthSpec> {
+            execution_payload: ExecutionPayloadBellatrix::<MainnetEthSpec> {
+                parent_hash: ExecutionBlockHash::from(Hash256::from_low_u64_be(111)),
+                fee_recipient: Default::default(),
+                state_root: Hash256::from_low_u64_be(222),
+                receipts_root: Hash256::from_low_u64_be(333),
+                logs_bloom: Default::default(),
+                prev_randao: Hash256::from_low_u64_be(444),
+                block_number: 555,
+                gas_limit: 30_000_000,
+                gas_used: 15_000_000,
+                timestamp: 1234567890,
+                extra_data: b"test_extra_data".to_vec().into(),
+                base_fee_per_gas: Uint256::from(7u64),
+                block_hash: execution_block_hash,
+                transactions: vec![b"tx1".to_vec().into(), b"tx2".to_vec().into()].into(),
+            },
+        };
+
+        let exec_payload = ExecutionPayload::Bellatrix(payload.execution_payload);
+        let witness_data = b"deterministic_witness_data";
+
+        // Generate proof multiple times with same input
+        let proof1 = generate_proof(&exec_payload, witness_data, proof_id).await;
+        let proof2 = generate_proof(&exec_payload, witness_data, proof_id).await;
+        let proof3 = generate_proof(&exec_payload, witness_data, proof_id).await;
+
+        // All proofs should be identical
+        assert_eq!(proof1.block_hash, proof2.block_hash);
+        assert_eq!(proof1.block_hash, proof3.block_hash);
+
+        assert_eq!(proof1.subnet_id, proof2.subnet_id);
+        assert_eq!(proof1.subnet_id, proof3.subnet_id);
+
+        assert_eq!(proof1.version, proof2.version);
+        assert_eq!(proof1.version, proof3.version);
+
+        // Most importantly, proof data should be identical
+        assert_eq!(proof1.proof_data, proof2.proof_data);
+        assert_eq!(proof1.proof_data, proof3.proof_data);
+
+        // Verify the content is as expected
+        let proof_str = String::from_utf8_lossy(&proof1.proof_data);
+        assert!(proof_str.contains("subnet_3"));
+        assert!(proof_str.contains("number_555"));
+        assert!(proof_str.contains("witness_len_26"));
+
+        // Now test that different inputs produce different proofs
+        let different_witness = b"different_witness_data";
+        let proof_different = generate_proof(&exec_payload, different_witness, proof_id).await;
+
+        // Same block hash and subnet, but different proof data
+        assert_eq!(proof_different.block_hash, proof1.block_hash);
+        assert_eq!(proof_different.subnet_id, proof1.subnet_id);
+        assert_ne!(proof_different.proof_data, proof1.proof_data);
+    }
+}
diff --git a/beacon_node/beacon_chain/src/execution_proof_store.rs b/beacon_node/beacon_chain/src/execution_proof_store.rs
new file mode 100644
index 00000000000..34c344a9b6a
--- /dev/null
+++ b/beacon_node/beacon_chain/src/execution_proof_store.rs
@@ -0,0 +1,1816 @@
+use parking_lot::RwLock;
+use std::collections::{HashMap, VecDeque};
+use std::fmt;
+use std::sync::Arc;
+use tracing::debug;
+use types::{
+    execution_proof_subnet_id::ExecutionProofSubnetId, EthSpec, ExecutionBlockHash,
+    ExecutionPayload, ExecutionProof, Hash256, Slot,
+};
+
+/// Error types for execution proof operations
+#[derive(Debug, Clone, PartialEq)]
+pub enum ExecutionProofError {
+    /// Validation errors indicate the proof is invalid
+    ValidationError {
+        proof_id: u64,
+        block_hash: ExecutionBlockHash,
+        reason: String,
+    },
+    /// Storage errors indicate internal issues and should not result in peer penalties
+    StorageError { reason: String },
+}
+
+impl fmt::Display for ExecutionProofError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ExecutionProofError::ValidationError {
+                proof_id,
+                block_hash,
+                reason,
+            } => write!(
+                f,
+                "Invalid proof for block hash {:?}, proof ID {}: {}",
+                block_hash, proof_id, reason
+            ),
+            ExecutionProofError::StorageError { reason } => {
+                write!(f, "Storage error: {}", reason)
+            }
+        }
+    }
+}
+
+impl std::error::Error for ExecutionProofError {}
+
+impl ExecutionProofError {
+    /// Check if this error should result in peer penalties
+    ///
+    /// TODO: maybe remove this and put this in `process_gossip_execution_proof`
+    pub fn should_penalize_peer(&self) -> bool {
+        matches!(self, ExecutionProofError::ValidationError { .. })
+    }
+
+    /// Create a validation error
+    pub fn validation_error(
+        proof_id: u64,
+        block_hash: ExecutionBlockHash,
+        reason: impl Into<String>,
+    ) -> Self {
+        Self::ValidationError {
+            proof_id,
+            block_hash,
+            reason: reason.into(),
+        }
+    }
+
+    /// Create a storage error
+    pub fn storage_error(reason: impl Into<String>) -> Self {
+        Self::StorageError {
+            reason: reason.into(),
+        }
+    }
+}
+
+/// Type alias for ProofId using ExecutionProofSubnetId
+pub type ProofId = ExecutionProofSubnetId;
+
+/// Information about a block that has been proven using execution proofs
+#[derive(Debug, Clone)]
+#[allow(dead_code)]
+struct ProvenBlockInfo {
+    /// The beacon block root
+    beacon_block_root: Hash256,
+    /// The execution block hash
+    execution_block_hash: ExecutionBlockHash,
+    /// The slot of the block
+    slot: Slot,
+    /// The parent beacon block root
+    parent_root: Hash256,
+    /// Number of proofs available for this block
+    proof_count: usize,
+}
+
+/// Information about the current proven chain status
+#[derive(Debug, Clone)]
+pub struct ProvenChainStatus {
+    /// The proven head (block root and slot), if any
+    pub proven_head: Option<(Hash256, Slot)>,
+    /// The proven finalized checkpoint (block root and slot), if any
+    pub proven_finalized: Option<(Hash256, Slot)>,
+    /// The depth of the proven chain
+    pub proven_chain_depth: usize,
+    /// Whether the proven head changed in the last update
+    pub head_changed: bool,
+}
+
+/// Default maximum number of proofs to store
+const DEFAULT_MAX_PROOFS: usize = 10_000;
+
+impl Default for ExecutionPayloadProofStore {
+    fn default() -> Self {
+        Self::new(DEFAULT_MAX_PROOFS)
+    }
+}
+
+/// Manages storage and tracking of execution payload proofs for stateless validation.
+///
+/// It maintains proofs submitted by network participants and tracks
+/// which beacon blocks have transitioned from `optimistic` to `proven` based on
+/// receiving sufficient independent proofs for that beacon block's execution payload.
+///
+/// # Key Responsibilities
+///
+/// 1. **Proof Storage**: Stores execution proofs indexed by (ExecutionBlockHash, ProofId)
+///    with LRU eviction to manage memory usage.
+///
+/// 2. **Pending Block Tracking**: Maintains a mapping of execution block hashes to
+///    beacon blocks awaiting proofs.
+///
+/// 3. **Proven Chain Tracking**: Maintains the longest chain of blocks that have received
+///    sufficient proofs, including tracking the proven head and finalized checkpoints.
+///    Note: Once proofs are a part of consensus, this tracking will not be needed.
+///
+/// 4. **Broadcast Queue**: Manages a queue of newly received proofs that need to be
+///    broadcast to the network.
+///
+/// # Stateless Validation Flow (Roughly)
+///
+/// 1. When a beacon block is imported optimistically, it is registered as pending
+/// 2. Network participants generate and submit proofs for the execution payload
+/// 3. Once sufficient proofs are received (e.g., 2 out of 3), the block transitions to proven
+/// 4. The proven chain is updated, potentially advancing the proven head
+/// 5. Old or abandoned blocks are periodically cleaned up
+///
+/// # Thread Safety
+///
+/// All state is protected by read-write locks, making this store safe to access
+/// across multiple threads.
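+///
+/// # Example (illustrative sketch, not a doctest)
+///
+/// ```ignore
+/// let store = ExecutionPayloadProofStore::new(10_000);
+/// // Block imported optimistically: remember that it is waiting on proofs.
+/// store.register_pending_block(execution_block_hash, beacon_block_root);
+/// // Proofs arrive via gossip and are validated on insertion.
+/// store.store_proof(proof)?;
+/// if store.proof_count_for_payload(&execution_block_hash) >= min_proofs_required {
+///     // The block(s) referencing this payload are now proven.
+///     let proven = store.take_pending_blocks(&execution_block_hash);
+/// }
+/// ```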
+#[derive(Debug)]
+pub struct ExecutionPayloadProofStore {
+    /// Map from (execution block hash, proof ID) to proof.
+    /// This allows multiple proof types for the same execution payload.
+    /// TODO: Handle orphaned proofs - proofs that arrive for blocks we never imported
+    proofs: Arc<RwLock<HashMap<(ExecutionBlockHash, ProofId), ExecutionProof>>>,
+    /// Tracks insertion order for LRU eviction
+    insertion_order: Arc<RwLock<VecDeque<(ExecutionBlockHash, ProofId)>>>,
+    /// Queue of proofs waiting to be broadcast
+    broadcast_queue: Arc<RwLock<Vec<(ExecutionBlockHash, ProofId)>>>,
+    /// Reverse mapping: execution block hash -> beacon block roots waiting for proofs.
+    /// This allows efficient lookup of which beacon blocks to re-evaluate when proofs arrive.
+    ///
+    /// TODO: Verify the note below about one execution payload mapping to multiple
+    /// TODO: beacon blocks; if that is not the case, this becomes a 1-1 mapping.
+    ///
+    /// Note: Multiple beacon block roots can share the same execution block hash in fork scenarios.
+    /// For example, during consensus layer forks, competing beacon blocks may contain the same
+    /// execution payload, resulting in multiple beacon block roots waiting for the same execution proof.
+    ///
+    /// TODO: The most common case is 1 ExecutionBlockHash to 1 BeaconRoot, so SmallVec<[Hash256; 1]> might make more sense
+    pending_blocks: Arc<RwLock<HashMap<ExecutionBlockHash, Vec<Hash256>>>>,
+    /// Maximum number of proofs to store (LRU eviction)
+    max_proofs: usize,
+    /// Tracks the proven canonical chain - blocks that have sufficient proofs.
+    /// Maps beacon block root to proven block information.
+    proven_canonical_chain: Arc<RwLock<HashMap<Hash256, ProvenBlockInfo>>>,
+    /// The current proven head (beacon block root, slot).
+    /// This is the deepest block in the canonical chain that has sufficient proofs.
+    proven_head: Arc<RwLock<Option<(Hash256, Slot)>>>,
+    /// The proven finalized checkpoint (beacon block root, slot).
+    /// This is updated when the proven chain reaches finalization depth.
+    proven_finalized: Arc<RwLock<Option<(Hash256, Slot)>>>,
+}
+
+impl ExecutionPayloadProofStore {
+    /// Create a new proof store with the given capacity
+    pub fn new(max_proofs: usize) -> Self {
+        Self {
+            proofs: Arc::new(RwLock::new(HashMap::new())),
+            insertion_order: Arc::new(RwLock::new(VecDeque::new())),
+            broadcast_queue: Arc::new(RwLock::new(Vec::new())),
+            pending_blocks: Arc::new(RwLock::new(HashMap::new())),
+            max_proofs,
+            proven_canonical_chain: Arc::new(RwLock::new(HashMap::new())),
+            proven_head: Arc::new(RwLock::new(None)),
+            proven_finalized: Arc::new(RwLock::new(None)),
+        }
+    }
+}
+
+// ============================================================================
+// Proof Management
+// ============================================================================
+
+impl ExecutionPayloadProofStore {
+    /// Get all proofs for the given execution block hash
+    pub fn get_proofs(&self, block_hash: &ExecutionBlockHash) -> Vec<ExecutionProof> {
+        let proofs = self.proofs.read();
+        proofs
+            .iter()
+            .filter_map(|((hash, _proof_id), proof)| {
+                if hash == block_hash {
+                    Some(proof.clone())
+                } else {
+                    None
+                }
+            })
+            .collect()
+    }
+
+    /// Get a specific proof for the given execution block hash and proof ID
+    pub fn get_proof(
+        &self,
+        block_hash: &ExecutionBlockHash,
+        proof_id: ProofId,
+    ) -> Option<ExecutionProof> {
+        let proofs = self.proofs.read();
+        proofs.get(&(*block_hash, proof_id)).cloned()
+    }
+
+    /// Store a proof for an execution payload
+    ///
+    /// Note: This method validates the proof before storing it
+    pub fn store_proof(&self, proof: ExecutionProof) -> Result<(), ExecutionProofError> {
+        // Validate the proof before storing.
+        if !Self::validate_proof(&proof) {
+            return Err(ExecutionProofError::validation_error(
+                *proof.subnet_id,
+                proof.block_hash,
+                "validation failed",
+            ));
+        }
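+
+        // Proofs are keyed by (block hash, subnet id); a duplicate submission for
+        // the same key simply overwrites the stored proof and is re-queued for
+        // broadcast.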
+        let key = (proof.block_hash, proof.subnet_id);
+
+        // Acquire both locks to maintain consistency.
+        let mut proofs = self.proofs.write();
+        let mut insertion_order = self.insertion_order.write();
+
+        // Simple LRU eviction if we're at capacity.
+        if proofs.len() >= self.max_proofs {
+            // Remove the oldest proof (front of the queue).
+            if let Some(oldest_key) = insertion_order.pop_front() {
+                proofs.remove(&oldest_key);
+            }
+        }
+
+        // Insert the new proof.
+        proofs.insert(key, proof);
+        insertion_order.push_back(key);
+
+        // Add to the broadcast queue.
+        drop(proofs); // Release the proofs lock
+        drop(insertion_order); // Release the insertion_order lock
+        let mut queue = self.broadcast_queue.write();
+        queue.push(key);
+
+        Ok(())
+    }
+
+    /// Get the number of proofs for a specific execution block hash.
+    ///
+    /// This method is essential for stateless validation to determine:
+    /// - Whether a block has received the minimum required number of proofs (e.g., 2 out of 3)
+    /// - If we should transition from optimistic to proven state for a block
+    /// - The exact proof count for logging and chain tracking purposes
+    ///
+    /// Multiple proof types can exist for a single execution payload (one per subnet),
+    /// and we need to count them to ensure sufficient independent validation before
+    /// considering the payload as proven.
+    pub fn proof_count_for_payload(&self, block_hash: &ExecutionBlockHash) -> usize {
+        let proofs = self.proofs.read();
+        proofs
+            .keys()
+            .filter(|(hash, _proof_id)| hash == block_hash)
+            .count()
+    }
+
+    /// Generate a proof for an execution payload.
+    /// TODO: can remove
+    async fn generate_proof<E: EthSpec>(
+        payload: &ExecutionPayload<E>,
+        execution_state_witness: &[u8],
+        proof_id: ProofId,
+    ) -> ExecutionProof {
+        crate::execution_proof_generation::generate_proof(
+            payload,
+            execution_state_witness,
+            proof_id,
+        )
+        .await
+    }
+
+    /// Validate a proof.
+    /// TODO: can remove
+    fn validate_proof(proof: &ExecutionProof) -> bool {
+        crate::execution_proof_generation::validate_proof(proof)
+    }
+
+    /// Get an existing proof or generate a new one for the given execution payload and proof ID.
+    ///
+    /// This method first checks if a proof already exists. If it does, it returns the existing
+    /// proof. Otherwise, it generates a new proof and stores it before returning.
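+    ///
+    /// A minimal usage sketch (the payload and witness are assumed to come from
+    /// the caller, as in `generate_and_store_execution_proofs_from_block`):
+    ///
+    /// ```ignore
+    /// let proof = store
+    ///     .get_or_generate_proof(&payload, &witness, proof_id)
+    ///     .await?;
+    /// ```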
+    pub async fn get_or_generate_proof<E: EthSpec>(
+        &self,
+        payload: &ExecutionPayload<E>,
+        execution_state_witness: &[u8],
+        proof_id: ProofId,
+    ) -> Result<ExecutionProof, ExecutionProofError> {
+        let block_hash = payload.block_hash();
+
+        // Check if we already have this proof.
+        if let Some(existing_proof) = self.get_proof(&block_hash, proof_id) {
+            debug!(
+                execution_block_hash = ?block_hash,
+                subnet_id = *proof_id,
+                "Proof already exists, skipping generation"
+            );
+            return Ok(existing_proof);
+        }
+
+        let proof = Self::generate_proof(payload, execution_state_witness, proof_id).await;
+        self.store_proof(proof.clone())?;
+        Ok(proof)
+    }
+}
+
+// ============================================================================
+// Broadcasting
+// ============================================================================
+
+impl ExecutionPayloadProofStore {
+    /// Take all proofs from the broadcast queue.
+    /// This drains the queue and returns all pending proofs.
+    ///
+    /// Note: This is used for the BroadcastManager.
+    pub fn take_unqueued_proofs(&self) -> Vec<(ExecutionBlockHash, ProofId)> {
+        let mut queue = self.broadcast_queue.write();
+        std::mem::take(&mut *queue)
+    }
+}
+
+// ============================================================================
+// Pending Block Management
+// ============================================================================
+
+impl ExecutionPayloadProofStore {
+    /// Adds a beacon block to the list of blocks awaiting proofs for their execution payloads.
+    ///
+    /// This is called when a block is imported optimistically.
+    ///
+    /// Note: Prevents duplicate registration of the same block.
+    pub fn register_pending_block(
+        &self,
+        execution_block_hash: ExecutionBlockHash,
+        beacon_block_root: Hash256,
+    ) {
+        let mut pending = self.pending_blocks.write();
+        let blocks = pending.entry(execution_block_hash).or_insert_with(Vec::new);
+
+        // Only add if not already present (prevents duplicate registration of the same beacon block).
+        // Note: Multiple different beacon blocks can reference the same execution payload hash (e.g., during reorgs).
+        if !blocks.contains(&beacon_block_root) {
+            blocks.push(beacon_block_root);
+        }
+    }
+
+    /// Remove and return pending blocks for the given execution block hash.
+    ///
+    /// This is called after we've verified that sufficient proofs exist for the payload.
+    /// The returned blocks have transitioned from optimistic to proven state.
+    pub fn take_pending_blocks(&self, execution_block_hash: &ExecutionBlockHash) -> Vec<Hash256> {
+        self.pending_blocks
+            .write()
+            .remove(execution_block_hash)
+            .unwrap_or_default()
+    }
+
+    /// Clean up pending blocks based on a provided predicate.
+    ///
+    /// Note: This should be called periodically to prevent memory leaks.
+    ///
+    /// Note: `take_pending_blocks` removes blocks which have received enough proofs,
+    /// keyed by execution payload hash. This method, by contrast, is periodically
+    /// triggered to remove blocks that will no longer receive proofs, keyed by
+    /// beacon block root.
+    /// Removal can be due to:
+    /// - Beacon blocks being on abandoned forks
+    /// - Beacon blocks that are too old (past the finalization slot). TODO: This shouldn't happen once proofs are a part of consensus.
+    ///
+    /// Uses a two-phase approach to avoid holding locks during callback execution.
+    ///
+    /// TODO: Test the edge case where we receive a lot of pending blocks and cannot finalize.
+    /// TODO: Perhaps we can move blocks to storage when doing LRU evictions.
+    pub fn cleanup_pending_blocks<F>(&self, should_remove: F) -> usize
+    where
+        F: Fn(Hash256) -> bool,
+    {
+        use std::collections::HashSet;
+
+        // 1) Collect all unique beacon block roots to check.
+        let blocks_to_check: HashSet<Hash256> = self
+            .pending_blocks
+            .read()
+            .values()
+            .flatten()
+            .copied()
+            .collect();
+
+        // 2) Evaluate the predicate (without holding locks).
+        let blocks_to_remove: HashSet<Hash256> = blocks_to_check
+            .into_iter()
+            .filter(|&block_root| should_remove(block_root))
+            .collect();
+
+        if blocks_to_remove.is_empty() {
+            return 0;
+        }
+
+        // 3) Remove the identified blocks.
+        let mut pending = self.pending_blocks.write();
+        let mut removed_count = 0;
+
+        pending.retain(|_, blocks| {
+            let original_len = blocks.len();
+            blocks.retain(|block_root| !blocks_to_remove.contains(block_root));
+            removed_count += original_len - blocks.len();
+            !blocks.is_empty()
+        });
+
+        removed_count
+    }
+}
+
+// ============================================================================
+// Chain Updates
+// ============================================================================
+
+impl ExecutionPayloadProofStore {
+    /// Check if an execution payload has sufficient proofs to be considered proven.
+    /// This uses the `stateless_min_proofs_required` from the chain config.
+    fn has_sufficient_proofs(
+        &self,
+        execution_block_hash: &ExecutionBlockHash,
+        min_proofs_required: usize,
+    ) -> bool {
+        let proof_count = self.proof_count_for_payload(execution_block_hash);
+        proof_count >= min_proofs_required
+    }
+
+    /// Collect proven blocks by walking backwards from the given head.
+    ///
+    /// # Parameters
+    /// - `get_block`: A function that takes a block root and returns block information:
+    ///   - Input: `&Hash256` - The block root to look up
+    ///   - Output: `Result<Option<(Slot, Hash256, Option<ExecutionBlockHash>)>, E>`
+    ///     - `slot`: The slot number of the block
+    ///     - `parent_root`: The parent block's root hash
+    ///     - `exec_hash_opt`: The execution payload hash (None for pre-merge blocks)
+    ///   - Returns `Ok(None)` if the block doesn't exist
+    ///   - Returns `Err` on storage/retrieval errors
+    /// - `head_block_root`: The block root to start walking backwards from
+    /// - `min_proofs_required`: Minimum number of proofs needed to consider a block proven
+    ///
+    /// # Returns
+    /// - `proven_chain`: Vector of proven blocks (from newest to oldest)
+    /// - `proven_head`: The deepest proven block (highest slot with proofs), if any
+    fn collect_proven_blocks<F, E>(
+        &self,
+        get_block: F,
+        head_block_root: Hash256,
+        min_proofs_required: usize,
+    ) -> (Vec<ProvenBlockInfo>, Option<(Hash256, Slot)>)
+    where
+        F: Fn(&Hash256) -> Result<Option<(Slot, Hash256, Option<ExecutionBlockHash>)>, E>,
+        E: std::fmt::Debug,
+    {
+        let mut current = head_block_root;
+        let mut proven_chain = Vec::new();
+        let mut proven_head_candidate = None;
+
+        // Walk backwards through the chain using the provided getter.
+        while let Ok(Some((slot, parent_root, exec_hash_opt))) = get_block(&current) {
+            // Check if this is a pre-merge block.
+            let exec_hash = match exec_hash_opt {
+                Some(hash) => hash,
+                None => {
+                    // Pre-merge block, stop here.
+                    // TODO: should we just panic here?
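+                    // (Stateless validation assumes a post-merge chain, so in practice
+                    // this arm should only be reachable near the merge transition.)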
+                    break;
+                }
+            };
+
+            // Check if this block has sufficient proofs.
+            if self.has_sufficient_proofs(&exec_hash, min_proofs_required) {
+                let proof_count = self.proof_count_for_payload(&exec_hash);
+
+                let proven_info = ProvenBlockInfo {
+                    beacon_block_root: current, // Use the current block root we're examining
+                    execution_block_hash: exec_hash,
+                    slot,
+                    parent_root,
+                    proof_count,
+                };
+
+                proven_chain.push(proven_info);
+
+                // Track the deepest proven block as the head candidate.
+                if proven_head_candidate.is_none() {
+                    proven_head_candidate = Some((current, slot));
+                }
+
+                // Continue walking backwards.
+                current = parent_root;
+            } else {
+                // Stop walking when we find a block without sufficient proofs.
+                break;
+            }
+        }
+
+        (proven_chain, proven_head_candidate)
+    }
+
+    /// Update the proven chain storage with new proven blocks.
+    /// Returns true if the proven head changed.
+    fn update_proven_storage(
+        &self,
+        proven_chain: &[ProvenBlockInfo],
+        proven_head_candidate: Option<(Hash256, Slot)>,
+    ) -> bool {
+        let mut proven_canonical_chain = self.proven_canonical_chain.write();
+        let mut proven_head = self.proven_head.write();
+
+        // Clear the old proven chain.
+        proven_canonical_chain.clear();
+
+        // Add the new proven blocks.
+        for block_info in proven_chain.iter() {
+            proven_canonical_chain.insert(block_info.beacon_block_root, block_info.clone());
+        }
+
+        // Update the proven head.
+        let head_changed = *proven_head != proven_head_candidate;
+        *proven_head = proven_head_candidate;
+
+        head_changed
+    }
+
+    /// Update the proven canonical chain based on available proofs.
+    /// This method walks backwards from the optimistic head to find the longest proven chain.
+    ///
+    /// Example:
+    ///
+    /// ```text
+    /// Genesis ← Block 1 ← Block 2 ← Block 3 ← Block 4     ← Block 5 (optimistic head)
+    ///           [proven]  [proven]  [proven]  [no proofs]   [no proofs]
+    ///                                  ↑
+    ///                              proven_head
+    /// ```
+    /// TODO: Walking back each time is expensive; we can probably make this faster by
+    /// TODO: having the proof store save intermediate information, but we don't want
+    /// TODO: to make it complex (i.e. keeping track of different forks).
+    pub fn update_proven_chain<F, E>(
+        &self,
+        get_block: F,
+        head_block_root: Hash256,
+        current_slot: Slot,
+        slots_per_epoch: u64,
+        min_proofs_required: usize,
+    ) -> Result<ProvenChainStatus, String>
+    where
+        F: Fn(&Hash256) -> Result<Option<(Slot, Hash256, Option<ExecutionBlockHash>)>, E>,
+        E: std::fmt::Debug,
+    {
+        // Keep track of the current proven head before the update to detect actual changes.
+        let _previous_proven_head = self.proven_head.read().clone();
+
+        // Step 1: Collect proven blocks by walking backwards from the head.
+        let (proven_chain, proven_head_candidate) =
+            self.collect_proven_blocks(get_block, head_block_root, min_proofs_required);
+
+        // Step 2: Update storage with the new proven chain.
+        let head_changed = self.update_proven_storage(&proven_chain, proven_head_candidate);
+
+        // Step 3: Update the proven finalized checkpoint.
+        self.update_proven_finalized(&proven_chain, current_slot, slots_per_epoch);
+
+        // Step 4: Get the current proven finalized checkpoint for the status.
+        let proven_finalized = self.proven_finalized.read().clone();
+
+        Ok(ProvenChainStatus {
+            proven_head: proven_head_candidate,
+            proven_finalized,
+            proven_chain_depth: proven_chain.len(),
+            head_changed,
+        })
+    }
+
+    /// Update the proven finalized checkpoint based on the proven chain
+    fn update_proven_finalized(
+        &self,
+        proven_chain: &[ProvenBlockInfo],
+        current_slot: Slot,
+        slots_per_epoch: u64,
+    ) {
+        if proven_chain.is_empty() {
+            return;
+        }
+
+        let current_epoch = current_slot.epoch(slots_per_epoch);
+
+        // Find the latest proven block that is at least 2 epochs old (similar to
+        // the finalization distance).
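+        // For example, with 32-slot epochs and current_slot = 100 (epoch 3), any
+        // proven block at epoch <= 1 satisfies the distance-2 rule.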
+        let finalization_distance = 2u64;
+        let mut proven_finalized_candidate = None;
+
+        for block_info in proven_chain.iter().rev() {
+            let block_epoch = block_info.slot.epoch(slots_per_epoch);
+            if current_epoch.saturating_sub(block_epoch).as_u64() >= finalization_distance {
+                proven_finalized_candidate = Some((block_info.beacon_block_root, block_info.slot));
+                break;
+            }
+        }
+
+        // Update the proven finalized checkpoint if we found a candidate.
+        let mut proven_finalized = self.proven_finalized.write();
+        if proven_finalized_candidate != *proven_finalized {
+            *proven_finalized = proven_finalized_candidate;
+        }
+    }
+}
+
+#[cfg(test)]
+impl ExecutionPayloadProofStore {
+    /// Get the total number of stored proofs (across all proof types and payloads)
+    fn len(&self) -> usize {
+        self.proofs.read().len()
+    }
+
+    /// Check if we have any proof for the given execution block hash.
+    /// Returns true if at least one proof type exists.
+    ///
+    /// Note: all stored proofs are validated. We assume that proofs are added via `store_proof`.
+    fn has_valid_proof(&self, block_hash: &ExecutionBlockHash) -> bool {
+        let proofs = self.proofs.read();
+        proofs.keys().any(|(hash, _proof_id)| hash == block_hash)
+    }
+
+    /// Check if we have a proof for a specific proof ID and execution block hash.
+    /// Returns true if the proof exists.
+    ///
+    /// Note: all stored proofs are validated. We assume that proofs are added via `store_proof`.
+    fn has_valid_proof_for_id(&self, block_hash: &ExecutionBlockHash, proof_id: ProofId) -> bool {
+        let proofs = self.proofs.read();
+        proofs.contains_key(&(*block_hash, proof_id))
+    }
+
+    /// Get the number of unique payloads that have at least one proof
+    fn unique_payload_count(&self) -> usize {
+        let proofs = self.proofs.read();
+        let unique_hashes: std::collections::HashSet<ExecutionBlockHash> =
+            proofs.keys().map(|(hash, _proof_id)| *hash).collect();
+        unique_hashes.len()
+    }
+
+    /// Get the number of execution block hashes that have pending blocks
+    fn pending_execution_hashes_count(&self) -> usize {
+        self.pending_blocks.read().len()
+    }
+
+    /// Get the total number of pending beacon blocks across all execution hashes
+    fn total_pending_blocks_count(&self) -> usize {
+        self.pending_blocks
+            .read()
+            .values()
+            .map(|blocks| blocks.len())
+            .sum()
+    }
+
+    /// Get the current proven head (beacon block root and slot).
+    /// Returns None if no proven head has been established yet.
+    fn get_proven_head(&self) -> Option<(Hash256, Slot)> {
+        *self.proven_head.read()
+    }
+
+    /// Get the proven finalized checkpoint (beacon block root and slot).
+    /// Returns None if no proven finalized checkpoint has been established yet.
+    fn get_proven_finalized(&self) -> Option<(Hash256, Slot)> {
+        *self.proven_finalized.read()
+    }
+
+    /// Check if a beacon block is part of the proven canonical chain
+    fn is_block_proven(&self, beacon_block_root: &Hash256) -> bool {
+        self.proven_canonical_chain
+            .read()
+            .contains_key(beacon_block_root)
+    }
+
+    /// Get beacon block roots that are pending proofs for the given execution block hash
+    /// (Test-only helper method)
+    fn get_pending_blocks(&self, execution_block_hash: &ExecutionBlockHash) -> Vec<Hash256> {
+        self.pending_blocks
+            .read()
+            .get(execution_block_hash)
+            .cloned()
+            .unwrap_or_default()
+    }
+
+    /// Get information about a proven block
+    fn get_proven_block_info(&self, beacon_block_root: &Hash256) -> Option<ProvenBlockInfo> {
+        self.proven_canonical_chain
+            .read()
+            .get(beacon_block_root)
+            .cloned()
+    }
+
+    /// Get the entire proven canonical chain from finalized to head.
+    /// Returns a vector of proven blocks ordered from oldest to newest.
+    fn get_proven_canonical_chain(&self) -> Vec<ProvenBlockInfo> {
+        let chain = self.proven_canonical_chain.read();
+        let mut blocks: Vec<ProvenBlockInfo> = chain.values().cloned().collect();
+        // Sort by slot, oldest first.
+        blocks.sort_by_key(|b| b.slot);
+        blocks
+    }
+
+    /// Get the depth of the proven chain (number of proven blocks)
+    fn get_proven_chain_depth(&self) -> usize {
+        self.proven_canonical_chain.read().len()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use types::{
+        execution_proof_subnet_id::MAX_EXECUTION_PROOF_SUBNETS, FixedBytesExtended, Hash256,
+    };
+
+    #[test]
+    fn test_proof_store_basic_operations() {
+        let store = ExecutionPayloadProofStore::new(4);
+        let hash1 = ExecutionBlockHash::from(Hash256::random());
+        let hash2 = ExecutionBlockHash::from(Hash256::random());
+
+        // Initially empty
+        assert!(!store.has_valid_proof(&hash1));
+        assert_eq!(store.len(), 0);
+        assert_eq!(store.unique_payload_count(), 0);
+
+        // Store an execution witness proof for hash1
+        let proof1 = ExecutionProof::new(
+            hash1,
+            ExecutionProofSubnetId::new(0).unwrap(),
+            1,
+            vec![1, 2, 3],
+        );
+        store
+            .store_proof(proof1.clone())
+            .expect("valid proof should store successfully");
+
+        assert!(store.has_valid_proof(&hash1));
+        assert!(store.has_valid_proof_for_id(&hash1, ExecutionProofSubnetId::new(0).unwrap()));
+        assert!(!store.has_valid_proof_for_id(&hash1, ExecutionProofSubnetId::new(1).unwrap()));
+        assert_eq!(store.len(), 1);
+        assert_eq!(store.unique_payload_count(), 1);
+        assert_eq!(store.proof_count_for_payload(&hash1), 1);
+
+        // Store a custom zkVM proof for the same hash1
+        let proof1_custom = ExecutionProof::new(
+            hash1,
+            ExecutionProofSubnetId::new(1).unwrap(),
+            1,
+            vec![7, 8, 9],
+        );
+        store
+            .store_proof(proof1_custom)
+            .expect("valid proof should store successfully");
+
+        assert!(store.has_valid_proof(&hash1));
+        assert!(store.has_valid_proof_for_id(&hash1, ExecutionProofSubnetId::new(0).unwrap()));
+        assert!(store.has_valid_proof_for_id(&hash1, ExecutionProofSubnetId::new(1).unwrap()));
+        assert_eq!(store.len(), 2);
+        assert_eq!(store.unique_payload_count(), 1); // Still 1 unique payload
+        assert_eq!(store.proof_count_for_payload(&hash1), 2);
+
+        // Store a proof for hash2
+        let proof2 = ExecutionProof::new(
+            hash2,
+            ExecutionProofSubnetId::new(2).unwrap(),
+            1,
+            vec![4, 5, 6],
+        );
+        store
+            .store_proof(proof2)
+            .expect("valid proof should store successfully");
+
+        assert!(store.has_valid_proof(&hash2));
+        assert!(store.has_valid_proof_for_id(&hash2, ExecutionProofSubnetId::new(2).unwrap()));
+        assert_eq!(store.len(), 3);
+        assert_eq!(store.unique_payload_count(), 2);
+        assert_eq!(store.proof_count_for_payload(&hash2), 1);
+
+        // Get all proofs for hash1
+        let hash1_proofs = store.get_proofs(&hash1);
+        assert_eq!(hash1_proofs.len(), 2);
+    }
+
+    #[test]
+    fn test_proof_validation() {
+        let store = ExecutionPayloadProofStore::new(10);
+        let hash = ExecutionBlockHash::from(Hash256::random());
+
+        // A valid proof (non-empty data) should store successfully
+        let valid_proof = ExecutionProof::new(
+            hash,
+            ExecutionProofSubnetId::new(0).unwrap(),
+            1,
+            vec![1, 2, 3],
+        );
+        assert!(ExecutionPayloadProofStore::validate_proof(&valid_proof));
+        assert!(store.store_proof(valid_proof).is_ok());
+        assert!(store.has_valid_proof(&hash));
+
+        // An invalid proof (empty data) should fail to store
+        let invalid_proof =
ExecutionProof::new(hash, ExecutionProofSubnetId::new(1).unwrap(), 1, vec![]); + assert!(!ExecutionPayloadProofStore::validate_proof(&invalid_proof)); + assert!(store.store_proof(invalid_proof).is_err()); + // Should still only have the first proof + assert_eq!(store.proof_count_for_payload(&hash), 1); + assert!(!store.has_valid_proof_for_id(&hash, ExecutionProofSubnetId::new(1).unwrap())); + } + + #[test] + fn test_lru_eviction() { + let store = ExecutionPayloadProofStore::new(2); + let hash1 = ExecutionBlockHash::from(Hash256::random()); + let hash2 = ExecutionBlockHash::from(Hash256::random()); + let hash3 = ExecutionBlockHash::from(Hash256::random()); + + // Create proofs - insertion order will determine LRU eviction + let proof1 = + ExecutionProof::new(hash1, ExecutionProofSubnetId::new(0).unwrap(), 1, vec![1]); + let proof2 = + ExecutionProof::new(hash2, ExecutionProofSubnetId::new(1).unwrap(), 1, vec![2]); + let proof3 = + ExecutionProof::new(hash3, ExecutionProofSubnetId::new(2).unwrap(), 1, vec![3]); + + // Store first proof + store + .store_proof(proof1) + .expect("valid proof should store successfully"); + assert_eq!(store.len(), 1); + assert!(store.has_valid_proof(&hash1)); + + // Store second proof + store + .store_proof(proof2) + .expect("valid proof should store successfully"); + assert_eq!(store.len(), 2); + assert!(store.has_valid_proof(&hash1)); + assert!(store.has_valid_proof(&hash2)); + + // Store a third proof, should evict the oldest (hash1) + store + .store_proof(proof3) + .expect("valid proof should store successfully"); + + assert_eq!(store.len(), 2); + assert!(!store.has_valid_proof(&hash1)); // Evicted (oldest) + assert!(store.has_valid_proof(&hash2)); // Kept (middle) + assert!(store.has_valid_proof(&hash3)); // Kept (newest) + } + + #[test] + fn test_multiple_proof_types_per_payload() { + let store = ExecutionPayloadProofStore::new(10); + let hash = ExecutionBlockHash::from(Hash256::random()); + + // Store different proof types for the same payload + store + .store_proof(ExecutionProof::new( + hash, + ExecutionProofSubnetId::new(0).unwrap(), + 1, + vec![1, 2, 3], + )) + .expect("valid proof should store successfully"); + store + .store_proof(ExecutionProof::new( + hash, + ExecutionProofSubnetId::new(1).unwrap(), + 1, + vec![4, 5, 6], + )) + .expect("valid proof should store successfully"); + store + .store_proof(ExecutionProof::new( + hash, + ExecutionProofSubnetId::new(2).unwrap(), + 1, + vec![7, 8, 9], + )) + .expect("valid proof should store successfully"); + + // Should have all three proof types + assert!(store.has_valid_proof_for_id(&hash, ExecutionProofSubnetId::new(0).unwrap())); + assert!(store.has_valid_proof_for_id(&hash, ExecutionProofSubnetId::new(1).unwrap())); + assert!(store.has_valid_proof_for_id(&hash, ExecutionProofSubnetId::new(2).unwrap())); + assert!(!store.has_valid_proof_for_id(&hash, ExecutionProofSubnetId::new(3).unwrap())); + + // Should have valid proof overall + assert!(store.has_valid_proof(&hash)); + + // Get all proofs for this payload + let proofs = store.get_proofs(&hash); + assert_eq!(proofs.len(), 3); + + // Verify counts + assert_eq!(store.len(), 3); + assert_eq!(store.unique_payload_count(), 1); + assert_eq!(store.proof_count_for_payload(&hash), 3); + } + + #[test] + fn test_proof_versions() { + let hash = ExecutionBlockHash::from(Hash256::random()); + + // Test version 1 proof (supported) + let v1_proof = ExecutionProof::new( + hash, + ExecutionProofSubnetId::new(0).unwrap(), + 1, + vec![1, 2, 3], + ); + 
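+        // Note: `ExecutionProof::new` records whatever version it is given; unsupported
+        // versions are only rejected by `validate_proof` (see the v2 case below).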
assert_eq!(v1_proof.version, 1);
+        assert!(ExecutionPayloadProofStore::validate_proof(&v1_proof));
+
+        // Test explicit version constructor with custom proof ID
+        let v1_explicit = ExecutionProof::new(
+            hash,
+            ExecutionProofSubnetId::new(1).unwrap(),
+            1,
+            vec![4, 5, 6],
+        );
+        assert_eq!(v1_explicit.version, 1);
+        assert!(ExecutionPayloadProofStore::validate_proof(&v1_explicit));
+
+        // Test unsupported version
+        let v2_proof = ExecutionProof::new(
+            hash,
+            ExecutionProofSubnetId::new(0).unwrap(),
+            2,
+            vec![7, 8, 9],
+        );
+        assert_eq!(v2_proof.version, 2);
+        assert!(!ExecutionPayloadProofStore::validate_proof(&v2_proof)); // Should fail validation for unknown version
+
+        // Test empty data with version 1 (should be invalid)
+        let empty_v1 =
+            ExecutionProof::new(hash, ExecutionProofSubnetId::new(0).unwrap(), 1, vec![]);
+        assert!(!ExecutionPayloadProofStore::validate_proof(&empty_v1));
+
+        // Test custom proof ID (within valid range)
+        let custom_proof = ExecutionProof::new(
+            hash,
+            ExecutionProofSubnetId::new(7).unwrap(),
+            1,
+            vec![1, 2, 3],
+        );
+        assert!(ExecutionPayloadProofStore::validate_proof(&custom_proof));
+    }
+
+    #[test]
+    fn test_proof_id_validation() {
+        // Test valid subnet IDs
+        for id in 0..MAX_EXECUTION_PROOF_SUBNETS {
+            assert!(ExecutionProofSubnetId::new(id).is_ok());
+        }
+
+        // Test that high subnet IDs are rejected
+        assert!(ExecutionProofSubnetId::new(MAX_EXECUTION_PROOF_SUBNETS).is_err());
+        assert!(ExecutionProofSubnetId::new(100).is_err());
+    }
+
+    #[tokio::test]
+    async fn test_get_or_generate_proof_method() {
+        use types::{ExecutionPayloadBellatrix, FullPayloadBellatrix, MainnetEthSpec};
+
+        let store = ExecutionPayloadProofStore::new(10);
+        let execution_block_hash = ExecutionBlockHash::from(Hash256::random());
+        let proof_id = ExecutionProofSubnetId::new(3).unwrap();
+
+        // Create a dummy payload for testing
+        let payload = FullPayloadBellatrix::<MainnetEthSpec> {
+            execution_payload: ExecutionPayloadBellatrix::<MainnetEthSpec> {
+                parent_hash: ExecutionBlockHash::zero(),
+                fee_recipient: Default::default(),
+                state_root: Hash256::default(),
+                receipts_root: Hash256::default(),
+                logs_bloom: Default::default(),
+                prev_randao: Hash256::default(),
+                block_number: 333,
+                gas_limit: 0,
+                gas_used: 0,
+                timestamp: 0,
+                extra_data: Default::default(),
+                base_fee_per_gas: types::Uint256::from(0u64),
+                block_hash: execution_block_hash,
+                transactions: Default::default(),
+            },
+        };
+
+        // Initially no proofs
+        assert!(!store.has_valid_proof(&execution_block_hash));
+        assert_eq!(store.len(), 0);
+
+        // Generate and store a proof
+        let exec_payload = ExecutionPayload::Bellatrix(payload.execution_payload.clone());
+        let dummy_witness = b"test_witness_data";
+        let result = store
+            .get_or_generate_proof(&exec_payload, dummy_witness, proof_id)
+            .await;
+        assert!(result.is_ok());
+
+        let proof = result.unwrap();
+        assert_eq!(proof.block_hash, execution_block_hash);
+        assert_eq!(proof.subnet_id, proof_id);
+
+        // Verify it's stored in the store
+        assert!(store.has_valid_proof(&execution_block_hash));
+        assert!(store.has_valid_proof_for_id(&execution_block_hash, proof_id));
+        assert_eq!(store.len(), 1);
+        assert_eq!(store.proof_count_for_payload(&execution_block_hash), 1);
+
+        // Generate another proof for the same payload with different proof ID
+        let proof_id_2 = ExecutionProofSubnetId::new(7).unwrap();
+        let exec_payload2 = ExecutionPayload::Bellatrix(payload.execution_payload);
+        let result_2 = store
+            .get_or_generate_proof(&exec_payload2, dummy_witness, proof_id_2)
+            .await;
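+        // Proofs are keyed by (block_hash, subnet id), so a second subnet for the same
+        // payload is stored alongside the first rather than replacing it.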
assert!(result_2.is_ok()); + + // Should have 2 proofs now + assert_eq!(store.len(), 2); + assert_eq!(store.proof_count_for_payload(&execution_block_hash), 2); + assert!(store.has_valid_proof_for_id(&execution_block_hash, proof_id)); + assert!(store.has_valid_proof_for_id(&execution_block_hash, proof_id_2)); + + // Test that get_or_generate_proof returns existing proof without regenerating + let result_3 = store + .get_or_generate_proof(&exec_payload, dummy_witness, proof_id) + .await; + assert!(result_3.is_ok()); + let proof_3 = result_3.unwrap(); + + // Should be the same proof as before + assert_eq!(proof_3.block_hash, execution_block_hash); + assert_eq!(proof_3.subnet_id, proof_id); + + // Store size should not change (still 2 proofs) + assert_eq!(store.len(), 2); + assert_eq!(store.proof_count_for_payload(&execution_block_hash), 2); + } + + #[test] + fn test_proven_chain_tracking_basic() { + let store = ExecutionPayloadProofStore::new(100); + + // Initially no proven head + assert!(store.get_proven_head().is_none()); + assert!(store.get_proven_finalized().is_none()); + + // The proven head and finalized are set internally by update_proven_chain + // We can't set them directly, so this test focuses on checking initial state + } + + #[test] + fn test_is_execution_payload_proven() { + let store = ExecutionPayloadProofStore::new(100); + let min_proofs = 2; + + let block_hash = ExecutionBlockHash::from(Hash256::random()); + + // No proofs = not proven + assert!(store.proof_count_for_payload(&block_hash) < min_proofs); + + // Add one proof - still not enough + let proof1 = ExecutionProof::new( + block_hash, + ExecutionProofSubnetId::new(0).unwrap(), + 1, + vec![1, 2, 3], + ); + assert!(store.store_proof(proof1).is_ok()); + assert!(store.proof_count_for_payload(&block_hash) < min_proofs); + + // Add second proof - now it's proven + let proof2 = ExecutionProof::new( + block_hash, + ExecutionProofSubnetId::new(1).unwrap(), + 1, + vec![4, 5, 6], + ); + assert!(store.store_proof(proof2).is_ok()); + assert!(store.proof_count_for_payload(&block_hash) >= min_proofs); + + // With min_proofs = 1, it should have been proven with just one proof + assert!(store.proof_count_for_payload(&block_hash) >= 1); + } + + #[test] + fn test_is_beacon_block_proven() { + let store = ExecutionPayloadProofStore::new(100); + + let beacon_root = Hash256::from_low_u64_be(1); + let exec_hash = ExecutionBlockHash::from(Hash256::from_low_u64_be(101)); + + // Initially not proven + assert!(!store.is_block_proven(&beacon_root)); + + // Store proofs for the execution payload + for i in 0..2 { + let proof = ExecutionProof::new( + exec_hash, + ExecutionProofSubnetId::new(i).unwrap(), + 1, + vec![i as u8], + ); + store.store_proof(proof).unwrap(); + } + + // Mock getter that returns our block + let mock_getter = |block_root: &Hash256| -> Result< + Option<(Slot, Hash256, Option)>, + &'static str, + > { + if *block_root == beacon_root { + Ok(Some((Slot::new(1), Hash256::zero(), Some(exec_hash)))) + } else { + Ok(None) + } + }; + + // Update proven chain + let status = store + .update_proven_chain(mock_getter, beacon_root, Slot::new(10), 32, 2) + .unwrap(); + + // Now the block should be proven + assert!(store.is_block_proven(&beacon_root)); + assert_eq!(status.proven_head, Some((beacon_root, Slot::new(1)))); + + // Test with a different block + assert!(!store.is_block_proven(&Hash256::from_low_u64_be(999))); + } + + #[test] + fn test_get_proven_chain_empty() { + let store = ExecutionPayloadProofStore::new(100); + + // Initially the 
proven canonical chain should be empty + let proven_blocks = store.get_proven_canonical_chain(); + assert_eq!(proven_blocks.len(), 0); + assert_eq!(store.get_proven_chain_depth(), 0); + } + + #[test] + fn test_pending_blocks() { + let store = ExecutionPayloadProofStore::new(100); + + let exec_hash = ExecutionBlockHash::from(Hash256::random()); + let beacon_root1 = Hash256::random(); + let beacon_root2 = Hash256::random(); + + // Register pending blocks + store.register_pending_block(exec_hash, beacon_root1); + + // Check pending blocks + let pending = store.get_pending_blocks(&exec_hash); + assert_eq!(pending.len(), 1); + assert!(pending.contains(&beacon_root1)); + + // Register another + store.register_pending_block(exec_hash, beacon_root2); + let pending = store.get_pending_blocks(&exec_hash); + assert_eq!(pending.len(), 2); + assert!(pending.contains(&beacon_root1)); + assert!(pending.contains(&beacon_root2)); + } + + #[test] + fn test_take_pending_blocks() { + let store = ExecutionPayloadProofStore::new(100); + + let exec_hash = ExecutionBlockHash::from(Hash256::random()); + let beacon_root1 = Hash256::random(); + let beacon_root2 = Hash256::random(); + + // Register pending blocks + store.register_pending_block(exec_hash, beacon_root1); + store.register_pending_block(exec_hash, beacon_root2); + + // Take pending blocks (removes them) + let taken = store.take_pending_blocks(&exec_hash); + assert_eq!(taken.len(), 2); + assert!(taken.contains(&beacon_root1)); + assert!(taken.contains(&beacon_root2)); + + // Should be empty now + let pending = store.get_pending_blocks(&exec_hash); + assert_eq!(pending.len(), 0); + } + + #[test] + fn test_pending_blocks_counts() { + let store = ExecutionPayloadProofStore::new(100); + + let exec_hash1 = ExecutionBlockHash::from(Hash256::random()); + let exec_hash2 = ExecutionBlockHash::from(Hash256::random()); + let beacon_root1 = Hash256::random(); + let beacon_root2 = Hash256::random(); + let beacon_root3 = Hash256::random(); + + // Initially no pending blocks + assert_eq!(store.pending_execution_hashes_count(), 0); + assert_eq!(store.total_pending_blocks_count(), 0); + + // Add pending blocks + store.register_pending_block(exec_hash1, beacon_root1); + store.register_pending_block(exec_hash1, beacon_root2); + store.register_pending_block(exec_hash2, beacon_root3); + + // Check counts + assert_eq!(store.pending_execution_hashes_count(), 2); // 2 unique execution hashes + assert_eq!(store.total_pending_blocks_count(), 3); // 3 total pending blocks + } + + #[test] + fn test_is_block_proven() { + let store = ExecutionPayloadProofStore::new(100); + + let beacon_root = Hash256::random(); + let exec_hash = ExecutionBlockHash::from(Hash256::random()); + + // Initially not proven + assert!(!store.is_block_proven(&beacon_root)); + + // Store proofs + for i in 0..3 { + let proof = ExecutionProof::new( + exec_hash, + ExecutionProofSubnetId::new(i).unwrap(), + 1, + vec![i as u8], + ); + store.store_proof(proof).unwrap(); + } + + // Create a simple chain: genesis <- block1 + let mock_getter = |block_root: &Hash256| -> Result< + Option<(Slot, Hash256, Option)>, + &'static str, + > { + if *block_root == beacon_root { + Ok(Some((Slot::new(1), Hash256::zero(), Some(exec_hash)))) + } else if block_root == &Hash256::zero() { + Ok(None) // Genesis + } else { + Ok(None) + } + }; + + // Update proven chain with min_proofs = 3 + store + .update_proven_chain(mock_getter, beacon_root, Slot::new(100), 32, 3) + .unwrap(); + + // Now it should be proven + 
assert!(store.is_block_proven(&beacon_root)); + + // Random block should not be proven + let random_root = Hash256::random(); + assert!(!store.is_block_proven(&random_root)); + } + + #[test] + fn test_get_proven_block_info() { + let store = ExecutionPayloadProofStore::new(100); + + let beacon_root = Hash256::random(); + let exec_hash = ExecutionBlockHash::from(Hash256::random()); + let parent_root = Hash256::random(); + + // Should return None for non-existent blocks + assert!(store.get_proven_block_info(&beacon_root).is_none()); + + // Store proofs + for i in 0..2 { + let proof = ExecutionProof::new( + exec_hash, + ExecutionProofSubnetId::new(i).unwrap(), + 1, + vec![i as u8], + ); + store.store_proof(proof).unwrap(); + } + + // Mock getter + let mock_getter = |block_root: &Hash256| -> Result< + Option<(Slot, Hash256, Option)>, + &'static str, + > { + if *block_root == beacon_root { + Ok(Some((Slot::new(42), parent_root, Some(exec_hash)))) + } else if *block_root == parent_root { + Ok(Some((Slot::new(41), Hash256::zero(), None))) // Parent is pre-merge + } else { + Ok(None) + } + }; + + // Update proven chain + store + .update_proven_chain(mock_getter, beacon_root, Slot::new(100), 32, 2) + .unwrap(); + + // Now we should be able to get the proven block info + let info = store.get_proven_block_info(&beacon_root); + assert!(info.is_some()); + + let info = info.unwrap(); + assert_eq!(info.beacon_block_root, beacon_root); + assert_eq!(info.execution_block_hash, exec_hash); + assert_eq!(info.slot, Slot::new(42)); + assert_eq!(info.parent_root, parent_root); + assert_eq!(info.proof_count, 2); + } + + #[test] + fn test_cleanup_pending_blocks() { + let store = ExecutionPayloadProofStore::new(100); + + let exec_hash1 = ExecutionBlockHash::from(Hash256::random()); + let exec_hash2 = ExecutionBlockHash::from(Hash256::random()); + let beacon_root1 = Hash256::random(); + let beacon_root2 = Hash256::random(); + let beacon_root3 = Hash256::random(); + + // Register pending blocks + store.register_pending_block(exec_hash1, beacon_root1); + store.register_pending_block(exec_hash1, beacon_root2); + store.register_pending_block(exec_hash2, beacon_root3); + + // Cleanup with a predicate that removes beacon_root1 and beacon_root2 + let removed = + store.cleanup_pending_blocks(|root| root == beacon_root1 || root == beacon_root2); + + assert_eq!(removed, 2); + + // Check remaining blocks + let pending1 = store.get_pending_blocks(&exec_hash1); + assert_eq!(pending1.len(), 0); // All blocks for exec_hash1 were removed + + let pending2 = store.get_pending_blocks(&exec_hash2); + assert_eq!(pending2.len(), 1); + assert!(pending2.contains(&beacon_root3)); + } + + #[test] + fn test_cleanup_pending_blocks_by_slot() { + let store = ExecutionPayloadProofStore::new(100); + + let exec_hash1 = ExecutionBlockHash::from(Hash256::random()); + let exec_hash2 = ExecutionBlockHash::from(Hash256::random()); + let old_block = Hash256::random(); + let new_block = Hash256::random(); + + // Register blocks + store.register_pending_block(exec_hash1, old_block); + store.register_pending_block(exec_hash2, new_block); + + // Cleanup old blocks + let removed = store.cleanup_pending_blocks(|root| root == old_block); + + assert_eq!(removed, 1); + + // Verify old block is gone + let pending1 = store.get_pending_blocks(&exec_hash1); + assert_eq!(pending1.len(), 0); + + // Verify new block remains + let pending2 = store.get_pending_blocks(&exec_hash2); + assert_eq!(pending2.len(), 1); + assert!(pending2.contains(&new_block)); + } + + #[test] + 
fn test_has_sufficient_proofs() { + let store = ExecutionPayloadProofStore::new(100); + + let exec_hash = ExecutionBlockHash::from(Hash256::random()); + + // No proofs = insufficient + assert!(!store.has_sufficient_proofs(&exec_hash, 1)); + assert!(!store.has_sufficient_proofs(&exec_hash, 2)); + + // Add one proof + let proof1 = ExecutionProof::new( + exec_hash, + ExecutionProofSubnetId::new(0).unwrap(), + 1, + vec![1, 2, 3], + ); + assert!(store.store_proof(proof1).is_ok()); + + // Sufficient for min=1, insufficient for min=2 + assert!(store.has_sufficient_proofs(&exec_hash, 1)); + assert!(!store.has_sufficient_proofs(&exec_hash, 2)); + + // Add second proof + let proof2 = ExecutionProof::new( + exec_hash, + ExecutionProofSubnetId::new(1).unwrap(), + 1, + vec![4, 5, 6], + ); + assert!(store.store_proof(proof2).is_ok()); + + // Now sufficient for min=2 + assert!(store.has_sufficient_proofs(&exec_hash, 2)); + } + + #[test] + fn test_update_proven_storage() { + let store = ExecutionPayloadProofStore::new(100); + + // Initially no proven head + assert!(store.get_proven_head().is_none()); + + // Create some test proven blocks + let block1_root = Hash256::from_low_u64_be(1); + let block2_root = Hash256::from_low_u64_be(2); + let exec_hash1 = ExecutionBlockHash::from(Hash256::from_low_u64_be(101)); + let exec_hash2 = ExecutionBlockHash::from(Hash256::from_low_u64_be(102)); + + let proven_chain = vec![ + ProvenBlockInfo { + beacon_block_root: block1_root, + execution_block_hash: exec_hash1, + slot: Slot::new(1), + parent_root: Hash256::zero(), + proof_count: 2, + }, + ProvenBlockInfo { + beacon_block_root: block2_root, + execution_block_hash: exec_hash2, + slot: Slot::new(2), + parent_root: block1_root, + proof_count: 3, + }, + ]; + + let proven_head_candidate = Some((block2_root, Slot::new(2))); + + // Update storage + let head_changed = store.update_proven_storage(&proven_chain, proven_head_candidate); + assert!(head_changed); + + // Verify proven head was updated + assert_eq!(store.get_proven_head(), proven_head_candidate); + + // Verify blocks are in proven chain + assert!(store.is_block_proven(&block1_root)); + assert!(store.is_block_proven(&block2_root)); + + // Verify block info is stored correctly + let block1_info = store.get_proven_block_info(&block1_root).unwrap(); + assert_eq!(block1_info.execution_block_hash, exec_hash1); + assert_eq!(block1_info.slot, Slot::new(1)); + + // Test updating with same head (no change) + let head_changed2 = store.update_proven_storage(&proven_chain, proven_head_candidate); + assert!(!head_changed2); + + // Test clearing proven chain + let head_changed3 = store.update_proven_storage(&[], None); + assert!(head_changed3); + assert!(store.get_proven_head().is_none()); + assert!(!store.is_block_proven(&block1_root)); + } + + #[test] + fn test_proven_chain_depth_tracking() { + let store = ExecutionPayloadProofStore::new(100); + + // Initially depth is 0 + assert_eq!(store.get_proven_chain_depth(), 0); + + // Create a chain of 5 blocks + let mut proven_chain = Vec::new(); + for i in 1..=5 { + proven_chain.push(ProvenBlockInfo { + beacon_block_root: Hash256::from_low_u64_be(i), + execution_block_hash: ExecutionBlockHash::from(Hash256::from_low_u64_be(100 + i)), + slot: Slot::new(i), + parent_root: if i == 1 { + Hash256::zero() + } else { + Hash256::from_low_u64_be(i - 1) + }, + proof_count: 2, + }); + } + + let proven_head = Some((Hash256::from_low_u64_be(5), Slot::new(5))); + store.update_proven_storage(&proven_chain, proven_head); + + // Verify depth + 
assert_eq!(store.get_proven_chain_depth(), 5); + + // Verify chain is ordered correctly + let chain = store.get_proven_canonical_chain(); + assert_eq!(chain.len(), 5); + for (i, block) in chain.iter().enumerate() { + assert_eq!(block.slot, Slot::new((i + 1) as u64)); + } + } + + #[test] + fn test_cleanup_pending_blocks_integration() { + let store = ExecutionPayloadProofStore::new(100); + + // Set up some execution hashes and beacon blocks + let exec_hash1 = ExecutionBlockHash::from(Hash256::random()); + let exec_hash2 = ExecutionBlockHash::from(Hash256::random()); + let beacon_root1 = Hash256::from_low_u64_be(1); + let beacon_root2 = Hash256::from_low_u64_be(2); + let beacon_root3 = Hash256::from_low_u64_be(3); + + // Register pending blocks + store.register_pending_block(exec_hash1, beacon_root1); + store.register_pending_block(exec_hash1, beacon_root2); + store.register_pending_block(exec_hash2, beacon_root3); + + // Initial counts + assert_eq!(store.pending_execution_hashes_count(), 2); + assert_eq!(store.total_pending_blocks_count(), 3); + + // Clean up blocks 1 and 3 (simulating they're finalized) + let removed = + store.cleanup_pending_blocks(|root| root == beacon_root1 || root == beacon_root3); + assert_eq!(removed, 2); + + // Verify remaining state + assert_eq!(store.pending_execution_hashes_count(), 1); + assert_eq!(store.total_pending_blocks_count(), 1); + + let remaining = store.get_pending_blocks(&exec_hash1); + assert_eq!(remaining.len(), 1); + assert!(remaining.contains(&beacon_root2)); + } + + #[test] + fn test_collect_proven_blocks() { + let store = ExecutionPayloadProofStore::new(100); + + // Set up test data + let block1_root = Hash256::from_low_u64_be(1); + let block2_root = Hash256::from_low_u64_be(2); + let block3_root = Hash256::from_low_u64_be(3); + let block4_root = Hash256::from_low_u64_be(4); + let block5_root = Hash256::from_low_u64_be(5); + + let exec_hash1 = ExecutionBlockHash::from(Hash256::from_low_u64_be(101)); + let exec_hash2 = ExecutionBlockHash::from(Hash256::from_low_u64_be(102)); + let exec_hash3 = ExecutionBlockHash::from(Hash256::from_low_u64_be(103)); + let exec_hash4 = ExecutionBlockHash::from(Hash256::from_low_u64_be(104)); + let exec_hash5 = ExecutionBlockHash::from(Hash256::from_low_u64_be(105)); + + // Add proofs for blocks 1, 2, and 3 (but not 4 and 5) + for exec_hash in &[exec_hash1, exec_hash2, exec_hash3] { + for subnet in 0..2 { + let proof = ExecutionProof::new( + *exec_hash, + ExecutionProofSubnetId::new(subnet).unwrap(), + 1, + vec![1, 2, 3], + ); + store.store_proof(proof).unwrap(); + } + } + + // Create a mock getter that simulates the chain + let mock_getter = |block_root: &Hash256| -> Result< + Option<(Slot, Hash256, Option)>, + String, + > { + if *block_root == block5_root { + Ok(Some((Slot::new(5), block4_root, Some(exec_hash5)))) + } else if *block_root == block4_root { + Ok(Some((Slot::new(4), block3_root, Some(exec_hash4)))) + } else if *block_root == block3_root { + Ok(Some((Slot::new(3), block2_root, Some(exec_hash3)))) + } else if *block_root == block2_root { + Ok(Some((Slot::new(2), block1_root, Some(exec_hash2)))) + } else if *block_root == block1_root { + Ok(Some((Slot::new(1), Hash256::zero(), Some(exec_hash1)))) + } else if *block_root == Hash256::zero() { + Ok(None) // Genesis has no parent + } else { + Ok(None) + } + }; + + // Test collecting from block 5 (head) with min_proofs = 2 + let (proven_chain, proven_head) = store.collect_proven_blocks( + mock_getter, + block5_root, + 2, // min_proofs_required + ); + + // 
Should find no proven blocks because block 5 has no proofs (stops immediately) + assert_eq!(proven_chain.len(), 0); + assert_eq!(proven_head, None); + + // Now test from block 3 which has proofs + let (proven_chain2, proven_head2) = + store.collect_proven_blocks(mock_getter, block3_root, 2); + + // Should find blocks 3, 2, 1 as proven + assert_eq!(proven_chain2.len(), 3); + assert_eq!(proven_chain2[0].beacon_block_root, block3_root); + assert_eq!(proven_chain2[1].beacon_block_root, block2_root); + assert_eq!(proven_chain2[2].beacon_block_root, block1_root); + assert_eq!(proven_head2, Some((block3_root, Slot::new(3)))); + + // Test with pre-merge block + let pre_merge_getter = |block_root: &Hash256| -> Result< + Option<(Slot, Hash256, Option)>, + String, + > { + if *block_root == block3_root { + Ok(Some((Slot::new(3), block2_root, Some(exec_hash3)))) + } else if *block_root == block2_root { + Ok(Some((Slot::new(2), block1_root, None))) // Pre-merge + } else { + Ok(None) + } + }; + + let (proven_chain_pre_merge, proven_head_pre_merge) = + store.collect_proven_blocks(pre_merge_getter, block3_root, 2); + + // Should only find block 3 (stops at pre-merge block 2) + assert_eq!(proven_chain_pre_merge.len(), 1); + assert_eq!(proven_chain_pre_merge[0].beacon_block_root, block3_root); + assert_eq!(proven_head_pre_merge, Some((block3_root, Slot::new(3)))); + } + + #[test] + fn test_proof_broadcast_queue() { + let store = ExecutionPayloadProofStore::new(100); + + // Initially queue is empty + let proofs = store.take_unqueued_proofs(); + assert!(proofs.is_empty()); + + // Store a proof (should add to broadcast queue) + let exec_hash = ExecutionBlockHash::from(Hash256::random()); + let proof_id = ExecutionProofSubnetId::new(0).unwrap(); + let proof = ExecutionProof::new(exec_hash, proof_id, 1, vec![1, 2, 3]); + store.store_proof(proof).unwrap(); + + // Take from queue + let proofs = store.take_unqueued_proofs(); + assert_eq!(proofs.len(), 1); + assert_eq!(proofs[0], (exec_hash, proof_id)); + + // Queue should be empty after taking + let proofs = store.take_unqueued_proofs(); + assert!(proofs.is_empty()); + } + + #[test] + fn test_update_proven_chain_returns_status() { + let store = ExecutionPayloadProofStore::new(100); + + // Setup mock blocks + let exec_hash1 = ExecutionBlockHash::from(Hash256::from_low_u64_be(101)); + let exec_hash2 = ExecutionBlockHash::from(Hash256::from_low_u64_be(102)); + let block1_root = Hash256::from_low_u64_be(1); + let block2_root = Hash256::from_low_u64_be(2); + + // Store proofs for both blocks + for i in 0..2 { + let proof1 = ExecutionProof::new( + exec_hash1, + ExecutionProofSubnetId::new(i).unwrap(), + 1, + vec![i as u8], + ); + store.store_proof(proof1).unwrap(); + + let proof2 = ExecutionProof::new( + exec_hash2, + ExecutionProofSubnetId::new(i).unwrap(), + 1, + vec![i as u8, 2], + ); + store.store_proof(proof2).unwrap(); + } + + // Mock getter + let mock_getter = |block_root: &Hash256| -> Result< + Option<(Slot, Hash256, Option)>, + &'static str, + > { + if *block_root == block2_root { + Ok(Some((Slot::new(2), block1_root, Some(exec_hash2)))) + } else if *block_root == block1_root { + Ok(Some((Slot::new(1), Hash256::zero(), Some(exec_hash1)))) + } else { + Ok(None) + } + }; + + // Test update_proven_chain + let status = store + .update_proven_chain( + mock_getter, + block2_root, + Slot::new(10), // current slot + 32, // slots per epoch + 2, // min proofs required + ) + .unwrap(); + + // Verify status - both blocks have proofs, so proven head should be block2 + 
assert_eq!(status.proven_head, Some((block2_root, Slot::new(2)))); + assert_eq!(status.proven_chain_depth, 2); + assert!(status.head_changed); + assert_eq!(status.proven_finalized, None); // Too recent to be finalized + + // Update again with same state - head_changed should be false + let status2 = store + .update_proven_chain(mock_getter, block2_root, Slot::new(10), 32, 2) + .unwrap(); + + assert!(!status2.head_changed); + assert_eq!(status2.proven_head, status.proven_head); + } + + #[test] + fn test_update_proven_finalized() { + let store = ExecutionPayloadProofStore::new(100); + + // Create a proven chain with blocks at different epochs + let mut proven_chain = Vec::new(); + let slots_per_epoch = 32; + + // Block at slot 32 (epoch 1) + proven_chain.push(ProvenBlockInfo { + beacon_block_root: Hash256::from_low_u64_be(1), + execution_block_hash: ExecutionBlockHash::from(Hash256::from_low_u64_be(101)), + slot: Slot::new(32), + parent_root: Hash256::zero(), + proof_count: 2, + }); + + // Block at slot 64 (epoch 2) + proven_chain.push(ProvenBlockInfo { + beacon_block_root: Hash256::from_low_u64_be(2), + execution_block_hash: ExecutionBlockHash::from(Hash256::from_low_u64_be(102)), + slot: Slot::new(64), + parent_root: Hash256::from_low_u64_be(1), + proof_count: 2, + }); + + // Block at slot 96 (epoch 3) + proven_chain.push(ProvenBlockInfo { + beacon_block_root: Hash256::from_low_u64_be(3), + execution_block_hash: ExecutionBlockHash::from(Hash256::from_low_u64_be(103)), + slot: Slot::new(96), + parent_root: Hash256::from_low_u64_be(2), + proof_count: 2, + }); + + // Test at current slot 160 (epoch 5) - blocks at epochs 1,2,3 should be finalizable + store.update_proven_finalized(&proven_chain, Slot::new(160), slots_per_epoch); + + let finalized = store.proven_finalized.read(); + assert_eq!( + *finalized, + Some((Hash256::from_low_u64_be(3), Slot::new(96))) + ); + drop(finalized); + + // Test with current slot 96 (epoch 3) - only block at epoch 1 should be finalizable + store.update_proven_finalized(&proven_chain, Slot::new(96), slots_per_epoch); + + let finalized2 = store.proven_finalized.read(); + assert_eq!( + *finalized2, + Some((Hash256::from_low_u64_be(1), Slot::new(32))) + ); + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index df253bf72c0..1080b232453 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -5,6 +5,7 @@ mod attester_cache; pub mod beacon_block_reward; mod beacon_block_streamer; mod beacon_chain; +mod beacon_chain_execution_proof; mod beacon_fork_choice_store; pub mod beacon_proposer_cache; mod beacon_snapshot; @@ -26,6 +27,8 @@ pub mod electra_readiness; mod errors; pub mod events; pub mod execution_payload; +pub mod execution_proof_generation; +pub mod execution_proof_store; pub mod fetch_blobs; pub mod fork_choice_signal; pub mod fork_revert; diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 0f324071a1e..919cee969b5 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -124,6 +124,7 @@ pub struct BeaconProcessorQueueLengths { gossip_block_queue: usize, gossip_blob_queue: usize, gossip_data_column_queue: usize, + gossip_execution_proof_queue: usize, delayed_block_queue: usize, status_queue: usize, bbrange_queue: usize, @@ -192,6 +193,7 @@ impl BeaconProcessorQueueLengths { gossip_block_queue: 1024, gossip_blob_queue: 1024, gossip_data_column_queue: 1024, + gossip_execution_proof_queue: 
1024, delayed_block_queue: 1024, status_queue: 1024, bbrange_queue: 1024, @@ -592,6 +594,7 @@ pub enum Work { GossipBlock(AsyncFn), GossipBlobSidecar(AsyncFn), GossipDataColumnSidecar(AsyncFn), + GossipExecutionProof(AsyncFn), DelayedImportBlock { beacon_block_slot: Slot, beacon_block_root: Hash256, @@ -657,6 +660,7 @@ pub enum WorkType { GossipBlock, GossipBlobSidecar, GossipDataColumnSidecar, + GossipExecutionProof, DelayedImportBlock, GossipVoluntaryExit, GossipProposerSlashing, @@ -706,6 +710,7 @@ impl Work { Work::GossipBlock(_) => WorkType::GossipBlock, Work::GossipBlobSidecar(_) => WorkType::GossipBlobSidecar, Work::GossipDataColumnSidecar(_) => WorkType::GossipDataColumnSidecar, + Work::GossipExecutionProof(_) => WorkType::GossipExecutionProof, Work::DelayedImportBlock { .. } => WorkType::DelayedImportBlock, Work::GossipVoluntaryExit(_) => WorkType::GossipVoluntaryExit, Work::GossipProposerSlashing(_) => WorkType::GossipProposerSlashing, @@ -897,6 +902,8 @@ impl BeaconProcessor { let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); let mut gossip_blob_queue = FifoQueue::new(queue_lengths.gossip_blob_queue); let mut gossip_data_column_queue = FifoQueue::new(queue_lengths.gossip_data_column_queue); + let mut gossip_execution_proof_queue = + FifoQueue::new(queue_lengths.gossip_execution_proof_queue); let mut delayed_block_queue = FifoQueue::new(queue_lengths.delayed_block_queue); let mut status_queue = FifoQueue::new(queue_lengths.status_queue); @@ -1350,6 +1357,9 @@ impl BeaconProcessor { Work::GossipDataColumnSidecar { .. } => { gossip_data_column_queue.push(work, work_id) } + Work::GossipExecutionProof { .. } => { + gossip_execution_proof_queue.push(work, work_id) + } Work::DelayedImportBlock { .. } => { delayed_block_queue.push(work, work_id) } @@ -1458,6 +1468,7 @@ impl BeaconProcessor { WorkType::GossipBlock => gossip_block_queue.len(), WorkType::GossipBlobSidecar => gossip_blob_queue.len(), WorkType::GossipDataColumnSidecar => gossip_data_column_queue.len(), + WorkType::GossipExecutionProof => gossip_execution_proof_queue.len(), WorkType::DelayedImportBlock => delayed_block_queue.len(), WorkType::GossipVoluntaryExit => gossip_voluntary_exit_queue.len(), WorkType::GossipProposerSlashing => gossip_proposer_slashing_queue.len(), @@ -1618,7 +1629,8 @@ impl BeaconProcessor { Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) | Work::GossipBlobSidecar(work) - | Work::GossipDataColumnSidecar(work) => task_spawner.spawn_async(async move { + | Work::GossipDataColumnSidecar(work) + | Work::GossipExecutionProof(work) => task_spawner.spawn_async(async move { work.await; }), Work::BlobsByRangeRequest(process_fn) diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 479b4b3192a..001c1d2542f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -2,6 +2,7 @@ use crate::compute_light_client_updates::{ compute_light_client_updates, LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY, }; use crate::config::{ClientGenesis, Config as ClientConfig}; +use crate::execution_proof_broadcaster::start_execution_proof_broadcaster_service; use crate::notifier::spawn_notifier; use crate::Client; use beacon_chain::attestation_simulator::start_attestation_simulator_service; @@ -776,6 +777,20 @@ where beacon_chain.task_executor.clone(), beacon_chain.clone(), ); + + // Start the execution proof broadcaster service if we have network senders + // and we're in stateless validation 
mode
+        if let Some(network_senders) = &self.network_senders {
+            if beacon_chain.config.stateless_validation
+                || beacon_chain.config.generate_execution_proofs
+            {
+                start_execution_proof_broadcaster_service(
+                    runtime_context.executor.clone(),
+                    beacon_chain.clone(),
+                    network_senders.network_send(),
+                );
+            }
+        }
     }
 
     Ok(Client {
diff --git a/beacon_node/client/src/execution_proof_broadcaster.rs b/beacon_node/client/src/execution_proof_broadcaster.rs
new file mode 100644
index 00000000000..fb5a4690f94
--- /dev/null
+++ b/beacon_node/client/src/execution_proof_broadcaster.rs
@@ -0,0 +1,541 @@
+//! Background task for broadcasting execution proofs when they become available.
+//!
+//! This module implements the background proof broadcaster that periodically checks for
+//! unbroadcast proofs and broadcasts them to the gossip network. This ensures that
+//! proofs generated asynchronously are eventually broadcast, even if they weren't
+//! ready during initial block production.
+
+use beacon_chain::execution_proof_store::ProofId;
+use beacon_chain::{parking_lot::RwLock, BeaconChain, BeaconChainTypes};
+use lighthouse_network::PubsubMessage;
+use network::NetworkMessage;
+use std::collections::{HashMap, HashSet};
+use std::sync::Arc;
+use std::time::{Duration, Instant};
+use task_executor::TaskExecutor;
+use tokio::sync::mpsc::UnboundedSender;
+use tracing::{debug, info, warn};
+use types::{EthSpec, ExecutionBlockHash, ExecutionProof};
+
+/// Information about failed broadcast attempts
+#[derive(Debug, Clone)]
+struct FailedAttempt {
+    /// Number of broadcast attempts made
+    attempts: u32,
+    /// Timestamp of the last broadcast attempt
+    last_attempt: Instant,
+}
+
+impl FailedAttempt {
+    /// Create a new failed attempt record
+    fn new() -> Self {
+        Self {
+            attempts: 1,
+            last_attempt: Instant::now(),
+        }
+    }
+
+    /// Check if this can be retried based on delay and max attempts
+    fn can_retry(&self, max_attempts: u32, retry_delay: Duration) -> bool {
+        self.attempts < max_attempts && self.last_attempt.elapsed() >= retry_delay
+    }
+
+    /// Increment attempt count and update timestamp
+    fn increment(&mut self) {
+        self.attempts += 1;
+        self.last_attempt = Instant::now();
+    }
+}
+
+/// Manages broadcast state for execution proofs separately from proof storage
+#[derive(Debug)]
+struct ProofBroadcastManager {
+    /// Proofs queued for broadcast (including retries)
+    queued: RwLock<HashSet<(ExecutionBlockHash, ProofId)>>,
+    /// Currently broadcasting
+    broadcasting: RwLock<HashSet<(ExecutionBlockHash, ProofId)>>,
+    /// Failed attempts for retry logic
+    failed: RwLock<HashMap<(ExecutionBlockHash, ProofId), FailedAttempt>>,
+}
+
+impl ProofBroadcastManager {
+    /// Create a new broadcast manager
+    fn new() -> Self {
+        Self {
+            queued: RwLock::new(HashSet::new()),
+            broadcasting: RwLock::new(HashSet::new()),
+            failed: RwLock::new(HashMap::new()),
+        }
+    }
+
+    /// Add new proofs to broadcast queue
+    fn queue_proofs(&self, proofs: Vec<(ExecutionBlockHash, ProofId)>) {
+        let mut queued = self.queued.write();
+
+        for proof in proofs {
+            queued.insert(proof);
+        }
+    }
+
+    /// Get proofs ready to broadcast
+    fn get_ready_proofs(
+        &self,
+        max_attempts: u32,
+        retry_delay: Duration,
+    ) -> Vec<(ExecutionBlockHash, ProofId)> {
+        let queued = self.queued.read();
+        let broadcasting = self.broadcasting.read();
+        let failed = self.failed.read();
+
+        queued
+            .iter()
+            .filter(|p| !broadcasting.contains(p))
+            .filter(|p| {
+                // Check retry logic for failed proofs
+                if let Some(attempt) = failed.get(p) {
+                    attempt.can_retry(max_attempts, retry_delay)
+                } else {
+                    true // Not failed, ready to broadcast
+                }
+            })
+            .cloned()
+            .collect()
+    }
+
+    /// Mark proof as currently broadcasting
+    fn start_broadcast(&self, block_hash: ExecutionBlockHash, proof_id: ProofId) {
+        let key = (block_hash, proof_id);
+
+        // Move from queued to broadcasting
+        let mut queued = self.queued.write();
+        let mut broadcasting = self.broadcasting.write();
+
+        queued.remove(&key);
+        broadcasting.insert(key);
+    }
+
+    /// Mark proof as successfully broadcast
+    fn mark_success(&self, block_hash: ExecutionBlockHash, proof_id: ProofId) {
+        let key = (block_hash, proof_id);
+
+        let mut broadcasting = self.broadcasting.write();
+        broadcasting.remove(&key);
+
+        // Also remove from failed in case this was a retry
+        let mut failed = self.failed.write();
+        failed.remove(&key);
+    }
+
+    /// Mark proof broadcast as failed
+    fn mark_failed(&self, block_hash: ExecutionBlockHash, proof_id: ProofId, max_attempts: u32) {
+        let key = (block_hash, proof_id);
+
+        let mut broadcasting = self.broadcasting.write();
+        broadcasting.remove(&key);
+
+        // Update or create failed attempt record
+        let mut failed = self.failed.write();
+        let attempt = failed
+            .entry(key)
+            .and_modify(|attempt| attempt.increment())
+            .or_insert_with(FailedAttempt::new);
+
+        // Check if we've exceeded retry limit
+        if attempt.attempts >= max_attempts {
+            // Remove from failed tracking
+            failed.remove(&key);
+            warn!(
+                "Proof for block {:?} subnet {} exceeded retry limit ({} attempts), abandoning",
+                key.0, *key.1, max_attempts
+            );
+        } else {
+            // Still have retries left, re-add to queued
+            let mut queued = self.queued.write();
+            queued.insert(key);
+        }
+    }
+}
+
+/// Configuration for the execution proof broadcaster
+#[derive(Debug)]
+struct ExecutionProofBroadcasterConfig {
+    /// How often to check for unbroadcast proofs
+    broadcast_interval: Duration,
+    /// Maximum number of broadcast attempts per proof
+    max_broadcast_attempts: u32,
+    /// Delay between retries for failed broadcasts
+    retry_delay: Duration,
+}
+
+impl Default for ExecutionProofBroadcasterConfig {
+    fn default() -> Self {
+        Self {
+            broadcast_interval: Duration::from_secs(1), // Check every second
+            max_broadcast_attempts: 3,                  // Try up to 3 times
+            retry_delay: Duration::from_secs(3),        // Wait 3 seconds between retries
+        }
+    }
+}
+
+/// Start the execution proof broadcaster service
+/// This spawns the background task that periodically broadcasts unbroadcast execution proofs
+pub fn start_execution_proof_broadcaster_service<T: BeaconChainTypes>(
+    executor: TaskExecutor,
+    chain: Arc<BeaconChain<T>>,
+    network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+) {
+    // TODO: We use default config, but we could get it from cli
+    let config = ExecutionProofBroadcasterConfig::default();
+    let broadcast_manager = Arc::new(ProofBroadcastManager::new());
+
+    info!("Starting execution proof broadcaster service");
+
+    executor.spawn(
+        execution_proof_broadcaster_task(chain, network_tx, config, broadcast_manager),
+        "execution_proof_broadcaster",
+    );
+}
+
+/// Background task that periodically broadcasts unbroadcast execution proofs
+async fn execution_proof_broadcaster_task<T: BeaconChainTypes>(
+    chain: Arc<BeaconChain<T>>,
+    network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+    config: ExecutionProofBroadcasterConfig,
+    broadcast_manager: Arc<ProofBroadcastManager>,
+) {
+    let mut interval = tokio::time::interval(config.broadcast_interval);
+
+    info!("Starting execution proof broadcaster task");
+
+    loop {
+        interval.tick().await;
+
+        // Get new unqueued proofs from the proof store
+        let new_proofs = chain.execution_payload_proof_store.take_unqueued_proofs();
+        if !new_proofs.is_empty() {
+            debug!(
+                proof_count = new_proofs.len(),
+                "Queueing execution proofs for broadcast"
+            );
+            // Hand off to the broadcast manager: from here on, retry and
+            // in-flight state are tracked here rather than in the proof store.
+            broadcast_manager.queue_proofs(new_proofs);
+        }
+
+        // Get proofs ready to broadcast (new and retries)
+        let ready_proofs =
+            broadcast_manager.get_ready_proofs(config.max_broadcast_attempts, config.retry_delay);
+
+        // Broadcast each ready proof
+        for (execution_block_hash, proof_id) in ready_proofs {
+            if let Some(proof) = chain
+                .execution_payload_proof_store
+                .get_proof(&execution_block_hash, proof_id)
+            {
+                broadcast_single_proof::<T::EthSpec>(
+                    &network_tx,
+                    &broadcast_manager,
+                    execution_block_hash,
+                    proof_id,
+                    &proof,
+                    config.max_broadcast_attempts,
+                )
+                .await;
+            } else {
+                // Proof was removed from store, remove from tracking
+                broadcast_manager.mark_success(execution_block_hash, proof_id);
+            }
+        }
+    }
+}
+
+/// Broadcast a single execution proof to the gossip network
+async fn broadcast_single_proof<E: EthSpec>(
+    network_tx: &UnboundedSender<NetworkMessage<E>>,
+    broadcast_manager: &ProofBroadcastManager,
+    execution_block_hash: ExecutionBlockHash,
+    proof_id: ProofId,
+    stored_proof: &ExecutionProof,
+    max_attempts: u32,
+) {
+    // Mark as currently broadcasting
+    broadcast_manager.start_broadcast(execution_block_hash, proof_id);
+
+    // Use the stored proof directly (already in ExecutionProof format)
+    let gossip_proof = stored_proof.clone();
+
+    // Create the gossip message
+    let pubsub_message =
+        PubsubMessage::ExecutionProofMessage(Box::new((proof_id, Arc::new(gossip_proof))));
+
+    // Broadcast the proof
+    match network_tx.send(NetworkMessage::Publish {
+        messages: vec![pubsub_message],
+    }) {
+        Ok(()) => {
+            // Mark as successfully broadcast
+            broadcast_manager.mark_success(execution_block_hash, proof_id);
+            debug!(
+                execution_block_hash = ?execution_block_hash,
+                subnet_id = *proof_id,
+                "Broadcast execution proof"
+            );
+        }
+        Err(e) => {
+            // Mark as failed
+            broadcast_manager.mark_failed(execution_block_hash, proof_id, max_attempts);
+            warn!(
+                execution_block_hash = ?execution_block_hash,
+                subnet_id = *proof_id,
+                error = %e,
+                "Failed to broadcast execution proof"
+            );
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use tokio::sync::mpsc;
+    use types::execution_proof_subnet_id::ExecutionProofSubnetId;
+    use types::{ExecutionBlockHash, Hash256, MainnetEthSpec};
+
+    type E = MainnetEthSpec;
+
+    #[test]
+    fn test_failed_attempt_new() {
+        let attempt = FailedAttempt::new();
+        assert_eq!(attempt.attempts, 1);
+        assert!(attempt.last_attempt.elapsed() < Duration::from_secs(1));
+    }
+
+    #[test]
+    fn test_failed_attempt_can_retry() {
+        let mut attempt = FailedAttempt::new();
+
+        // Should be able to retry with attempts under limit
+        assert!(attempt.can_retry(3, Duration::from_secs(0)));
+
+        // Increment attempts
+        attempt.increment();
+        assert_eq!(attempt.attempts, 2);
+        assert!(attempt.can_retry(3, Duration::from_secs(0)));
+
+        // At limit
+        attempt.increment();
+        assert_eq!(attempt.attempts, 3);
+        assert!(!attempt.can_retry(3, Duration::from_secs(0)));
+    }
+
+    #[test]
+    fn test_proof_broadcast_manager_queue_proofs() {
+        let manager = ProofBroadcastManager::new();
+        let block_hash1 = ExecutionBlockHash::from(Hash256::random());
+        let block_hash2 = ExecutionBlockHash::from(Hash256::random());
+        let proof_id1 = ExecutionProofSubnetId::new(1).unwrap();
+        let proof_id2 = ExecutionProofSubnetId::new(2).unwrap();
+
+        // Queue some proofs
+        manager.queue_proofs(vec![(block_hash1, proof_id1), (block_hash2, proof_id2)]);
+
+        // Verify they're queued
+        {
+            let queued = manager.queued.read();
+            assert_eq!(queued.len(), 2);
+            assert!(queued.contains(&(block_hash1, proof_id1)));
+            assert!(queued.contains(&(block_hash2, proof_id2)));
+        }
+
+        // Start broadcast for one (moves from queue to broadcasting)
+        manager.start_broadcast(block_hash1, proof_id1);
+        // Mark one as successful (removes from broadcasting)
+        manager.mark_success(block_hash1, proof_id1);
+
+        // Verify it was removed from queue
+        {
+            let queued = manager.queued.read();
+            assert_eq!(queued.len(), 1);
+            assert!(!queued.contains(&(block_hash1, proof_id1)));
+            assert!(queued.contains(&(block_hash2, proof_id2)));
+        }
+    }
+
+    #[test]
+    fn test_proof_broadcast_manager_get_ready_proofs() {
+        let manager = ProofBroadcastManager::new();
+        let block_hash1 = ExecutionBlockHash::from(Hash256::random());
+        let block_hash2 = ExecutionBlockHash::from(Hash256::random());
+        let block_hash3 = ExecutionBlockHash::from(Hash256::random());
+        let proof_id = ExecutionProofSubnetId::new(1).unwrap();
+
+        // Queue some proofs
+        manager.queue_proofs(vec![
+            (block_hash1, proof_id),
+            (block_hash2, proof_id),
+            (block_hash3, proof_id),
+        ]);
+
+        // Mark one as broadcasting
+        manager.start_broadcast(block_hash2, proof_id);
+
+        // Mark one as failed but can retry
+        manager.mark_failed(block_hash3, proof_id, 3);
+
+        // Get ready proofs
+        let ready = manager.get_ready_proofs(3, Duration::from_secs(0));
+
+        // Should get the non-broadcasting one and the failed one (retry)
+        assert_eq!(ready.len(), 2);
+        assert!(ready.contains(&(block_hash1, proof_id)));
+        assert!(ready.contains(&(block_hash3, proof_id)));
+        assert!(!ready.contains(&(block_hash2, proof_id)));
+    }
+
+    #[test]
+    fn test_proof_broadcast_manager_mark_methods() {
+        let manager = ProofBroadcastManager::new();
+        let block_hash = ExecutionBlockHash::from(Hash256::random());
+        let proof_id = ExecutionProofSubnetId::new(1).unwrap();
+
+        // Queue a proof
+        manager.queue_proofs(vec![(block_hash, proof_id)]);
+
+        // Start broadcast
+        manager.start_broadcast(block_hash, proof_id);
+        {
+            let broadcasting = manager.broadcasting.read();
+            assert!(broadcasting.contains(&(block_hash, proof_id)));
+        }
+
+        // Mark as success
+        manager.mark_success(block_hash, proof_id);
+        {
+            let queued = manager.queued.read();
+            let broadcasting = manager.broadcasting.read();
+            let failed = manager.failed.read();
+
+            assert!(!queued.contains(&(block_hash, proof_id)));
+            assert!(!broadcasting.contains(&(block_hash, proof_id)));
+            assert!(!failed.contains_key(&(block_hash, proof_id)));
+        }
+    }
+
+    #[test]
+    fn test_proof_broadcast_manager_retry_logic() {
+        let manager = ProofBroadcastManager::new();
+        let block_hash = ExecutionBlockHash::from(Hash256::random());
+        let proof_id = ExecutionProofSubnetId::new(1).unwrap();
+
+        // Queue and fail multiple times
+        manager.queue_proofs(vec![(block_hash, proof_id)]);
+
+        // First failure - start broadcast, then fail
+        manager.start_broadcast(block_hash, proof_id);
+        manager.mark_failed(block_hash, proof_id, 3);
+        {
+            let failed = manager.failed.read();
+            let attempt = failed.get(&(block_hash, proof_id)).unwrap();
+            assert_eq!(attempt.attempts, 1);
+        }
+
+        // Second failure - start broadcast, then fail
+        manager.start_broadcast(block_hash, proof_id);
+        manager.mark_failed(block_hash, proof_id, 3);
+        {
+            let failed = manager.failed.read();
+            let attempt = failed.get(&(block_hash, proof_id)).unwrap();
+            assert_eq!(attempt.attempts, 2);
+        }
+
+        // At max attempts (3) - start broadcast, then fail. The record is
+        // dropped entirely (neither queued nor failed), so it is no longer
+        // reported as ready.
+        manager.start_broadcast(block_hash, proof_id);
+        manager.mark_failed(block_hash, proof_id, 3);
+        let ready = manager.get_ready_proofs(3, Duration::from_secs(0));
+        assert!(!ready.contains(&(block_hash, proof_id)));
+    }
+
+    #[test]
+    fn test_execution_proof_broadcaster_config_default() {
+        let config = ExecutionProofBroadcasterConfig::default();
+        assert_eq!(config.broadcast_interval, Duration::from_secs(1));
+        assert_eq!(config.max_broadcast_attempts, 3);
+        assert_eq!(config.retry_delay, Duration::from_secs(3));
+    }
+
+    #[tokio::test]
+    async fn test_broadcast_single_proof_success() {
+        let (network_tx, mut network_rx) = mpsc::unbounded_channel();
+        let broadcast_manager = ProofBroadcastManager::new();
+
+        let block_hash = ExecutionBlockHash::from(Hash256::random());
+        let proof_id = ExecutionProofSubnetId::new(1).unwrap();
+        let stored_proof = ExecutionProof::new(block_hash, proof_id, 1, vec![1, 2, 3, 4, 5]);
+
+        // Broadcast the proof
+        broadcast_single_proof::<E>(
+            &network_tx,
+            &broadcast_manager,
+            block_hash,
+            proof_id,
+            &stored_proof,
+            3, // max_attempts
+        )
+        .await;
+
+        // Verify the network message was sent
+        let msg = network_rx.recv().await;
+        assert!(msg.is_some());
+
+        if let Some(NetworkMessage::Publish { messages }) = msg {
+            assert_eq!(messages.len(), 1);
+            if let PubsubMessage::ExecutionProofMessage(proof_box) = &messages[0] {
+                let (subnet_id, proof) = proof_box.as_ref();
+                assert_eq!(u64::from(subnet_id), 1);
+                assert_eq!(proof.block_hash, block_hash);
+                assert_eq!(proof.proof_data, vec![1, 2, 3, 4, 5]);
+            } else {
+                panic!("Expected ExecutionProofMessage");
+            }
+        } else {
+            panic!("Expected Publish message");
+        }
+
+        // Verify the proof was removed from tracking
+        {
+            let queued = broadcast_manager.queued.read();
+            let broadcasting = broadcast_manager.broadcasting.read();
+            assert!(!queued.contains(&(block_hash, proof_id)));
+            assert!(!broadcasting.contains(&(block_hash, proof_id)));
+        }
+    }
+
+    #[tokio::test]
+    async fn test_broadcast_single_proof_network_error() {
+        // Create a closed channel to simulate a network error: binding the
+        // receiver to `_` drops it immediately, closing the channel.
+        let (network_tx_closed, _) = mpsc::unbounded_channel::<NetworkMessage<E>>();
+
+        let broadcast_manager = ProofBroadcastManager::new();
+
+        let block_hash = ExecutionBlockHash::from(Hash256::random());
+        let proof_id = ExecutionProofSubnetId::new(1).unwrap();
+        let stored_proof = ExecutionProof::new(block_hash, proof_id, 1, vec![1, 2, 3]);
+
+        // Broadcast should handle the error gracefully
+        broadcast_single_proof::<E>(
+            &network_tx_closed,
+            &broadcast_manager,
+            block_hash,
+            proof_id,
+            &stored_proof,
+            3, // max_attempts
+        )
+        .await;
+
+        // Verify the proof was marked as failed
+        {
+            let failed = broadcast_manager.failed.read();
+            assert!(failed.contains_key(&(block_hash, proof_id)));
+        }
+    }
+}
diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs
index 916dae6db06..aac3cfd662c 100644
--- a/beacon_node/client/src/lib.rs
+++ b/beacon_node/client/src/lib.rs
@@ -1,5 +1,6 @@
 mod compute_light_client_updates;
 pub mod config;
+mod execution_proof_broadcaster;
 mod metrics;
 mod notifier;
diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs
index aee53a469c4..b0c0356867f 100644
--- a/beacon_node/lighthouse_network/src/config.rs
+++ b/beacon_node/lighthouse_network/src/config.rs
@@ -142,6 +142,9 @@ pub struct Config {
     /// Flag for advertising a fake CGC to peers for testing ONLY.
     pub advertise_false_custody_group_count: Option<u64>,
+
+    /// Whether stateless validation is enabled.
+ pub stateless_validation: bool, } impl Config { @@ -367,6 +370,7 @@ impl Default for Config { inbound_rate_limiter_config: None, idontwant_message_size_threshold: DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD, advertise_false_custody_group_count: None, + stateless_validation: false, } } } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index df866dfc646..b1b047e90f3 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -560,6 +560,8 @@ impl Discovery { } // Data column subnets are computed from node ID. No subnet bitfield in the ENR. Subnet::DataColumn(_) => return Ok(()), + // Execution proof subnets don't use ENR bitfields + Subnet::ExecutionProof(_) => return Ok(()), } // replace the global version @@ -898,6 +900,7 @@ impl Discovery { Subnet::Attestation(_) => "attestation", Subnet::SyncCommittee(_) => "sync_committee", Subnet::DataColumn(_) => "data_column", + Subnet::ExecutionProof(_) => "execution_proof", }; if let Some(v) = metrics::get_int_counter( diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 735ef5b0f28..4ef043a99ae 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -41,6 +41,7 @@ where false } } + Subnet::ExecutionProof(_) => false, // Not used for peer discovery predicates }); if !predicate { diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 01cc1611058..9eb350b9719 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1127,6 +1127,9 @@ impl PeerManager { // because data column topics are subscribed as core topics until we // implement recomputing data column subnets. 
Subnet::DataColumn(_) => {} + // Execution proof subnets don't need peer pruning logic yet + // TODO: need to check p2p logic to confirm + Subnet::ExecutionProof(_) => {} } } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index 4c47df63437..789dc410646 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -105,6 +105,10 @@ impl PeerInfo { Subnet::DataColumn(subnet_id) => { return self.is_assigned_to_custody_subnet(subnet_id) } + Subnet::ExecutionProof(_) => { + // Execution proof subnets don't use metadata bitfields + return false; + } } } false diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index e46c69dc716..b06faaee30b 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -211,6 +211,8 @@ impl GossipCache { GossipKind::BlsToExecutionChange => self.bls_to_execution_change, GossipKind::LightClientFinalityUpdate => self.light_client_finality_update, GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update, + // TODO: maybe configure this better; proofs can be quite large + GossipKind::ExecutionProof(_) => self.data_column_sidecar, // Use same timeout as data columns }; let Some(expire_timeout) = expire_timeout else { return; diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 9a939368743..b875766d7f9 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -17,7 +17,8 @@ use std::sync::Arc; use std::time::Duration; use tracing::{debug, warn}; use types::{ - ChainSpec, DataColumnSubnetId, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId, + execution_proof_subnet_id::MAX_EXECUTION_PROOF_SUBNETS, ChainSpec, DataColumnSubnetId, + EnrForkId, EthSpec, ExecutionProofSubnetId, ForkContext, SubnetId, SyncSubnetId, }; pub const NETWORK_KEY_FILENAME: &str = "key"; @@ -259,6 +260,13 @@ pub(crate) fn create_whitelist_filter( for id in 0..spec.data_column_sidecar_subnet_count { add(DataColumnSidecar(DataColumnSubnetId::new(id))); } + // Add execution proof subnets + for id in 0..MAX_EXECUTION_PROOF_SUBNETS { + add(ExecutionProof( + ExecutionProofSubnetId::new(id) + .expect("id is less than MAX_EXECUTION_PROOF_SUBNETS"), + )); + } } gossipsub::WhitelistSubscriptionFilter(possible_hashes) } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index cc4d758b4ae..a93a8daa644 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -245,6 +245,7 @@ impl NetworkGlobals { subscribe_all_subnets: self.config.subscribe_all_subnets, subscribe_all_data_column_subnets: self.config.subscribe_all_data_column_subnets, sampling_subnets: self.sampling_subnets.read().clone(), + stateless_validation: self.config.stateless_validation, } } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 601c59a9c84..3138c341f77 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -8,13 +8,14 @@ use std::io::{Error, 
ErrorKind};
 use std::sync::Arc;
 use types::{
     AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, BlobSidecar,
-    DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkContext, ForkName,
-    LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing,
-    SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra,
-    SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix,
-    SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra,
-    SignedBeaconBlockFulu, SignedBlsToExecutionChange, SignedContributionAndProof,
-    SignedVoluntaryExit, SingleAttestation, SubnetId, SyncCommitteeMessage, SyncSubnetId,
+    DataColumnSidecar, DataColumnSubnetId, EthSpec, ExecutionProof, ExecutionProofSubnetId,
+    ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate,
+    ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase,
+    SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair,
+    SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella,
+    SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu,
+    SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation,
+    SubnetId, SyncCommitteeMessage, SyncSubnetId,
 };

 #[derive(Debug, Clone, PartialEq)]
@@ -25,6 +26,8 @@ pub enum PubsubMessage<E: EthSpec> {
     BlobSidecar(Box<(u64, Arc<BlobSidecar<E>>)>),
     /// Gossipsub message providing notification of a [`DataColumnSidecar`] along with the subnet id where it was received.
     DataColumnSidecar(Box<(DataColumnSubnetId, Arc<DataColumnSidecar<E>>)>),
+    /// Gossipsub message providing notification of an [`ExecutionProof`] along with the subnet id where it was received.
+    ExecutionProofMessage(Box<(ExecutionProofSubnetId, Arc<ExecutionProof>)>),
     /// Gossipsub message providing notification of a Aggregate attestation and associated proof.
     AggregateAndProofAttestation(Box<SignedAggregateAndProof<E>>),
     /// Gossipsub message providing notification of a `SingleAttestation` with its subnet id.
@@ -134,6 +137,9 @@ impl PubsubMessage { PubsubMessage::DataColumnSidecar(column_sidecar_data) => { GossipKind::DataColumnSidecar(column_sidecar_data.0) } + PubsubMessage::ExecutionProofMessage(execution_proof_data) => { + GossipKind::ExecutionProof(execution_proof_data.0) + } PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) @@ -286,6 +292,15 @@ impl PubsubMessage { )), } } + GossipKind::ExecutionProof(subnet_id) => { + let execution_proof = Arc::new( + ExecutionProof::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?, + ); + Ok(PubsubMessage::ExecutionProofMessage(Box::new(( + *subnet_id, + execution_proof, + )))) + } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?; @@ -391,6 +406,7 @@ impl PubsubMessage { PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), PubsubMessage::BlobSidecar(data) => data.1.as_ssz_bytes(), PubsubMessage::DataColumnSidecar(data) => data.1.as_ssz_bytes(), + PubsubMessage::ExecutionProofMessage(data) => data.1.as_ssz_bytes(), PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), @@ -426,6 +442,13 @@ impl std::fmt::Display for PubsubMessage { data.1.slot(), data.1.index, ), + PubsubMessage::ExecutionProofMessage(data) => write!( + f, + "ExecutionProof: subnet: {}, block_hash: {:?}, description: {}", + *data.0, + data.1.block_hash, + data.1.description(), + ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, "Aggregate and Proof: slot: {}, index: {:?}, aggregator_index: {}", diff --git a/beacon_node/lighthouse_network/src/types/subnet.rs b/beacon_node/lighthouse_network/src/types/subnet.rs index 1892dcc83af..9a9aff33050 100644 --- a/beacon_node/lighthouse_network/src/types/subnet.rs +++ b/beacon_node/lighthouse_network/src/types/subnet.rs @@ -1,6 +1,6 @@ use serde::Serialize; use std::time::Instant; -use types::{DataColumnSubnetId, SubnetId, SyncSubnetId}; +use types::{DataColumnSubnetId, ExecutionProofSubnetId, SubnetId, SyncSubnetId}; /// Represents a subnet on an attestation or sync committee `SubnetId`. /// @@ -14,6 +14,8 @@ pub enum Subnet { SyncCommittee(SyncSubnetId), /// Represents a gossipsub data column subnet. DataColumn(DataColumnSubnetId), + /// Represents a gossipsub execution proof subnet. + ExecutionProof(ExecutionProofSubnetId), } /// A subnet to discover peers on along with the instant after which it's no longer useful. 
diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 349bfe66a3d..280538c282d 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -2,7 +2,10 @@ use gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use strum::AsRefStr; -use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; +use types::{ + execution_proof_subnet_id::MAX_EXECUTION_PROOF_SUBNETS, ChainSpec, DataColumnSubnetId, EthSpec, + ExecutionProofSubnetId, ForkName, SubnetId, SyncSubnetId, Unsigned, +}; use crate::Subnet; @@ -16,6 +19,7 @@ pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const BLOB_SIDECAR_PREFIX: &str = "blob_sidecar_"; pub const DATA_COLUMN_SIDECAR_PREFIX: &str = "data_column_sidecar_"; +pub const EXECUTION_PROOF_PREFIX: &str = "execution_proof_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; @@ -31,6 +35,7 @@ pub struct TopicConfig { pub subscribe_all_subnets: bool, pub subscribe_all_data_column_subnets: bool, pub sampling_subnets: HashSet, + pub stateless_validation: bool, } /// Returns all the topics the node should subscribe at `fork_name` @@ -91,6 +96,16 @@ pub fn core_topics_to_subscribe( } } + // Subscribe to all execution proof subnets when stateless validation is enabled + if opts.stateless_validation { + for subnet_id in 0..MAX_EXECUTION_PROOF_SUBNETS { + topics.push(GossipKind::ExecutionProof( + ExecutionProofSubnetId::new(subnet_id) + .expect("subnet_id is less than MAX_EXECUTION_PROOF_SUBNETS"), + )); + } + } + topics } @@ -115,7 +130,8 @@ pub fn is_fork_non_core_topic(topic: &GossipTopic, _fork_name: ForkName) -> bool | GossipKind::SignedContributionAndProof | GossipKind::BlsToExecutionChange | GossipKind::LightClientFinalityUpdate - | GossipKind::LightClientOptimisticUpdate => false, + | GossipKind::LightClientOptimisticUpdate + | GossipKind::ExecutionProof(_) => false, } } @@ -127,6 +143,7 @@ pub fn all_topics_at_fork(fork: ForkName, spec: &ChainSpec) -> Vec(fork, &opts, spec) } @@ -156,6 +173,9 @@ pub enum GossipKind { BlobSidecar(u64), /// Topic for publishing DataColumnSidecars. DataColumnSidecar(DataColumnSubnetId), + /// Topic for publishing execution payload proofs on a particular subnet. + #[strum(serialize = "execution_proof")] + ExecutionProof(ExecutionProofSubnetId), /// Topic for publishing raw attestations on a particular subnet. 
    #[strum(serialize = "beacon_attestation")]
    Attestation(SubnetId),
@@ -191,6 +211,9 @@ impl std::fmt::Display for GossipKind {
            GossipKind::DataColumnSidecar(column_index) => {
                write!(f, "{}{}", DATA_COLUMN_SIDECAR_PREFIX, **column_index)
            }
+            GossipKind::ExecutionProof(subnet_id) => {
+                write!(f, "{}{}", EXECUTION_PROOF_PREFIX, **subnet_id)
+            }
            x => f.write_str(x.as_ref()),
        }
    }
@@ -279,6 +302,7 @@ impl GossipTopic {
            GossipKind::Attestation(subnet_id) => Some(Subnet::Attestation(*subnet_id)),
            GossipKind::SyncCommitteeMessage(subnet_id) => Some(Subnet::SyncCommittee(*subnet_id)),
            GossipKind::DataColumnSidecar(subnet_id) => Some(Subnet::DataColumn(*subnet_id)),
+            GossipKind::ExecutionProof(subnet_id) => Some(Subnet::ExecutionProof(*subnet_id)),
            _ => None,
        }
    }
@@ -323,6 +347,9 @@ impl std::fmt::Display for GossipTopic {
            GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(),
            GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(),
            GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(),
+            GossipKind::ExecutionProof(index) => {
+                format!("{}{}", EXECUTION_PROOF_PREFIX, *index)
+            }
        };
        write!(
            f,
@@ -341,6 +368,7 @@ impl From<Subnet> for GossipKind {
            Subnet::Attestation(s) => GossipKind::Attestation(s),
            Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s),
            Subnet::DataColumn(s) => GossipKind::DataColumnSidecar(s),
+            Subnet::ExecutionProof(s) => GossipKind::ExecutionProof(s),
        }
    }
}
@@ -368,6 +396,11 @@ fn subnet_topic_index(topic: &str) -> Option<GossipKind> {
        return Some(GossipKind::DataColumnSidecar(DataColumnSubnetId::new(
            index.parse::<u64>().ok()?,
        )));
+    } else if let Some(index) = topic.strip_prefix(EXECUTION_PROOF_PREFIX) {
+        let subnet_id = index.parse::<u64>().ok()?;
+        return ExecutionProofSubnetId::new(subnet_id)
+            .ok()
+            .map(GossipKind::ExecutionProof);
    }
    None
}
@@ -522,6 +555,7 @@ mod tests {
            subscribe_all_subnets: false,
            subscribe_all_data_column_subnets: false,
            sampling_subnets: sampling_subnets.clone(),
+            stateless_validation: false,
        }
    }
diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs
index 0b17965f3cb..2fe79643667 100644
--- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs
+++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs
@@ -35,11 +35,11 @@ use tokio::sync::mpsc::error::TrySendError;
 use tracing::{debug, error, info, trace, warn};
 use types::{
     beacon_block::BlockImportSource, Attestation, AttestationData, AttestationRef,
-    AttesterSlashing, BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256,
-    IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing,
-    SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange,
-    SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId,
-    SyncCommitteeMessage, SyncSubnetId,
+    AttesterSlashing, BlobSidecar, DataColumnSidecar, DataColumnSubnetId, EthSpec, ExecutionProof,
+    ExecutionProofSubnetId, Hash256, IndexedAttestation, LightClientFinalityUpdate,
+    LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
+    SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation,
+    Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId,
 };

 use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction;
@@ -3167,4 +3167,122 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
            write_file(error_path, error.to_string().as_bytes());
        }
    }
+
+    /// Process a gossip execution proof message.
+    pub async fn process_gossip_execution_proof(
+        self: &Arc<Self>,
+        message_id: MessageId,
+        peer_id: PeerId,
+        _peer_client: Client,
+        subnet_id: ExecutionProofSubnetId,
+        execution_proof: Arc<ExecutionProof>,
+        _seen_duration: Duration,
+    ) {
+        let block_hash = execution_proof.block_hash;
+        let proof_description = execution_proof.description();
+        let subnet_id_u64 = *subnet_id;
+
+        debug!(
+            %block_hash,
+            subnet_id = %subnet_id_u64,
+            description = %proof_description,
+            "Processing gossip execution proof"
+        );
+
+        // Basic structural validation
+        if !execution_proof.is_structurally_valid() {
+            warn!(
+                %block_hash,
+                subnet_id = %subnet_id_u64,
+                "Rejecting structurally invalid execution proof"
+            );
+            self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+            self.gossip_penalize_peer(
+                peer_id,
+                PeerAction::LowToleranceError,
+                "invalid_execution_proof_structure",
+            );
+            return;
+        }
+
+        // Validate that the subnet ID matches the proof's own "proof ID".
+        //
+        // Note: `subnet_id_u64` is the subnet that the message was received on,
+        // while `execution_proof.subnet_id` is the subnet ID embedded in the proof.
+        if subnet_id_u64 != *execution_proof.subnet_id {
+            warn!(
+                %block_hash,
+                expected_subnet = %subnet_id_u64,
+                proof_subnet = %execution_proof.subnet_id,
+                "Rejecting execution proof with mismatched subnet ID"
+            );
+            self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+            self.gossip_penalize_peer(
+                peer_id,
+                PeerAction::LowToleranceError,
+                "execution_proof_subnet_mismatch",
+            );
+            return;
+        }
+
+        // Store the proof in the execution payload proof store
+        if let Err(e) = self
+            .chain
+            .execution_payload_proof_store
+            .store_proof(execution_proof.as_ref().clone())
+        {
+            warn!(
+                %block_hash,
+                subnet_id = %subnet_id_u64,
+                error = %e,
+                "Failed to store execution proof"
+            );
+
+            // Handle different error types appropriately
+            if e.should_penalize_peer() {
+                // Validation errors should penalize the peer
+                self.gossip_penalize_peer(
+                    peer_id,
+                    PeerAction::LowToleranceError,
+                    "execution_proof_validation_failed",
+                );
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+            } else {
+                // Storage errors should not penalize peers
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
+            }
+            return;
+        }
+
+        // Proof stored successfully
+        debug!(
+            execution_block_hash = %block_hash,
+            subnet_id = subnet_id_u64,
+            "Execution proof received via gossip"
+        );
+        self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
+
+        // Re-evaluate optimistic blocks now that we have this proof
+        self.handle_proof_chain_update(block_hash, subnet_id_u64);
+    }
+
+    /// Handle proven chain updates after successfully storing a proof
+    fn handle_proof_chain_update(&self, block_hash: types::ExecutionBlockHash, subnet_id: u64) {
+        match self
+            .chain
+            .re_evaluate_optimistic_blocks_with_proofs(block_hash)
+        {
+            Ok(_) => {
+                // Success - any important updates are logged by re_evaluate_optimistic_blocks_with_proofs
+            }
+            Err(e) => {
+                warn!(
+                    %block_hash,
+                    subnet_id = %subnet_id,
+                    error = ?e,
+                    "Failed to update proven chain after proof reception"
+                );
+            }
+        }
+    }
+}
diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs
index f7c3a1bf8db..61f4cfc6d68 100644
--- a/beacon_node/network/src/network_beacon_processor/mod.rs
+++ b/beacon_node/network/src/network_beacon_processor/mod.rs
@@ -254,6 +254,36 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
        })
    }

+    /// Create a new `Work` event for some execution proof.
+    pub fn send_gossip_execution_proof(
+        self: &Arc<Self>,
+        message_id: MessageId,
+        peer_id: PeerId,
+        peer_client: Client,
+        subnet_id: ExecutionProofSubnetId,
+        execution_proof: Arc<ExecutionProof>,
+        seen_timestamp: Duration,
+    ) -> Result<(), Error> {
+        let processor = self.clone();
+        let process_fn = async move {
+            processor
+                .process_gossip_execution_proof(
+                    message_id,
+                    peer_id,
+                    peer_client,
+                    subnet_id,
+                    execution_proof,
+                    seen_timestamp,
+                )
+                .await
+        };
+
+        self.try_send(BeaconWorkEvent {
+            drop_during_sync: false,
+            work: Work::GossipExecutionProof(Box::pin(process_fn)),
+        })
+    }
+
     /// Create a new `Work` event for some sync committee signature.
     pub fn send_gossip_sync_signature(
         self: &Arc<Self>,
diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs
index 5d5daae4aee..f8e79e5a53c 100644
--- a/beacon_node/network/src/router.rs
+++ b/beacon_node/network/src/router.rs
@@ -386,6 +386,19 @@ impl Router {
                ),
            )
        }
+            PubsubMessage::ExecutionProofMessage(data) => {
+                let (subnet_id, execution_proof) = *data;
+                self.handle_beacon_processor_send_result(
+                    self.network_beacon_processor.send_gossip_execution_proof(
+                        message_id,
+                        peer_id,
+                        self.network_globals.client(&peer_id),
+                        subnet_id,
+                        execution_proof,
+                        timestamp_now(),
+                    ),
+                )
+            }
            PubsubMessage::VoluntaryExit(exit) => {
                debug!(%peer_id, "Received a voluntary exit");
                self.handle_beacon_processor_send_result(
diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs
index eb27a03552b..94c04cbe26b 100644
--- a/beacon_node/src/cli.rs
+++ b/beacon_node/src/cli.rs
@@ -1509,6 +1509,36 @@ pub fn cli_app() -> Command {
                Lighthouse and only passed to the EL if initial verification fails.")
                .display_order(0)
        )
+        .arg(
+            Arg::new("stateless-validation")
+                .long("stateless-validation")
+                .action(ArgAction::SetTrue)
+                .help_heading(FLAG_HEADER)
+                .help("Enable stateless validation mode where all new payloads are marked as \
+                    optimistically valid without verification from the execution layer. This \
+                    bypasses normal payload validation and should only be used for testing.")
+                .display_order(0)
+        )
+        .arg(
+            Arg::new("generate-execution-proofs")
+                .long("generate-execution-proofs")
+                .action(ArgAction::SetTrue)
+                .help_heading(FLAG_HEADER)
+                .help("Generate execution proofs for all blocks (both produced and received). \
+                    This makes the node act as a proof generator for the network. \
+                    Cannot be used with --stateless-validation.")
+                .display_order(0)
+        )
+        .arg(
+            Arg::new("stateless-min-proofs-required")
+                .long("stateless-min-proofs-required")
+                .value_name("COUNT")
+                .help("Minimum number of execution proofs required to consider a block valid in \
+                    stateless validation mode. Only applies when --stateless-validation is enabled. \
+                    Must be between 1 and max_execution_proof_subnets.")
+                .action(ArgAction::Set)
+                .display_order(0)
+        )
        .arg(
            Arg::new("light-client-server")
                .long("light-client-server")
diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs
index f55b91d58c3..af62694f15c 100644
--- a/beacon_node/src/config.rs
+++ b/beacon_node/src/config.rs
@@ -833,6 +833,57 @@ pub fn get_config(
     client_config.chain.optimistic_finalized_sync =
         !cli_args.get_flag("disable-optimistic-finalized-sync");

+    // Stateless validation.
+    client_config.chain.stateless_validation = cli_args.get_flag("stateless-validation");
+
+    // Copy stateless validation configuration to network config
+    client_config.network.stateless_validation = client_config.chain.stateless_validation;
+
+    // Stateless minimum proofs required
+    if let Some(min_proofs) =
+        clap_utils::parse_optional::<usize>(cli_args, "stateless-min-proofs-required")?
+    {
+        // This flag is only meaningful when --stateless-validation is enabled, so
+        // reject it up front before applying any of its values.
+        if !client_config.chain.stateless_validation {
+            return Err(
+                "--stateless-min-proofs-required requires --stateless-validation to be enabled"
+                    .to_string(),
+            );
+        }
+        if min_proofs == 0 {
+            return Err("--stateless-min-proofs-required must be at least 1".to_string());
+        }
+        if min_proofs as u64 > client_config.chain.max_execution_proof_subnets {
+            return Err(format!(
+                "--stateless-min-proofs-required ({}) cannot exceed max_execution_proof_subnets ({})",
+                min_proofs,
+                client_config.chain.max_execution_proof_subnets
+            ));
+        }
+        client_config.chain.stateless_min_proofs_required = min_proofs;
+    }
+
+    // Execution proof generation.
+    client_config.chain.generate_execution_proofs = cli_args.get_flag("generate-execution-proofs");
+
+    // Validate that stateless nodes cannot generate proofs
+    if client_config.chain.generate_execution_proofs && client_config.chain.stateless_validation {
+        return Err("The --generate-execution-proofs flag cannot be used with --stateless-validation. Stateless nodes cannot generate proofs.".to_string());
+    }
+
+    // Validate that the node's max_execution_proof_subnets doesn't exceed the protocol maximum.
+    // The protocol defines a hard limit of 8 subnets, but individual nodes can choose
+    // to participate in fewer subnets to reduce resource usage.
+    if client_config.chain.max_execution_proof_subnets
+        > types::execution_proof_subnet_id::MAX_EXECUTION_PROOF_SUBNETS
+    {
+        return Err(format!(
+            "Node's max_execution_proof_subnets ({}) cannot exceed protocol MAX_EXECUTION_PROOF_SUBNETS ({})",
+            client_config.chain.max_execution_proof_subnets,
+            types::execution_proof_subnet_id::MAX_EXECUTION_PROOF_SUBNETS
+        ));
+    }
+
     if cli_args.get_flag("genesis-backfill") {
         client_config.chain.genesis_backfill = true;
     }
diff --git a/book/src/advanced_stateless_integration.md b/book/src/advanced_stateless_integration.md
new file mode 100644
index 00000000000..3a582e53a62
--- /dev/null
+++ b/book/src/advanced_stateless_integration.md
@@ -0,0 +1,455 @@
+# ZK Stateless Node Integration
+
+This document explains the architecture and implementation of zero-knowledge (ZK) stateless node integration in Lighthouse, including the interaction between the Consensus Layer (CL) and Execution Layer (EL).
+
+## Table of Contents
+1. [Primer: CL/EL Architecture](#primer-clel-architecture)
+2. [Execution Proofs and ZK Integration](#execution-proofs-and-zk-integration)
+3. [Stateless Validation Mode](#stateless-validation-mode)
+4. [Implementation Changes](#implementation-changes)
+5. [Configuration](#configuration)
+6. [Network Architecture](#network-architecture)
+
+## Primer: CL/EL Architecture
+
+### Overview
+
+Ethereum operates with a modular architecture consisting of two main layers:
+
+1. **Consensus Layer (CL)**: Responsible for block consensus, validator management, and the beacon chain
+2. **Execution Layer (EL)**: Handles transaction execution, execution layer state management, and the EVM
+
+In some ways, you can view it as being two chains, where the EL chain can only progress when the Consensus Layer explicitly tells it to.
+
+> Both chains store state; however, when this document mentions state, it refers to EL state unless explicitly noted.
+
+### Why Stateless Validation?
+
+The traditional Ethereum architecture faces several challenges:
+
+1. **State Growth**: The Ethereum state grows continuously (currently ~200GB+) as new accounts, contracts, and storage are added
+2. **Node Accessibility**: Running a full node requires significant disk space, making it inaccessible to many users
+3. **Centralization Risk**: As hardware requirements increase, fewer entities can afford to run nodes
+4. **Sync Times**: New nodes must either sync from genesis (taking weeks) or use checkpoint sync (still requiring state download)
+
+Stateless validation addresses these issues by:
+
+- Allowing nodes to validate blocks without storing the full state
+  - This is because, in order to verify a block, you only need a small subset of the state. This subset can arrive with the block; this is the essence of "stateless validation". Of course, the state attached to the block needs to be correct, so a state proof is attached to prove correctness. This state proof is a Merkle Patricia Proof, because the state trie is a Merkle Patricia Trie (MPT). Changing the state trie changes the state proof; Verkle is an example of one such proposed change.
+- Reducing disk requirements from hundreds of GBs to just MBs of data
+  - This is because state proofs using the MPT are quite large: in the *worst case* they are ~300MB at 30 million gas, and they scale linearly with gas. This is a problem for bandwidth, and three ways to address it are to change the trie so that state proofs are more efficient, to mitigate the worst cases, or to go ZK stateless.
+- Enabling instant sync for new nodes
+  - This is because (in the final design) a proof for a block is also a proof for all previous blocks
+
+### What the CL Needs from the EL
+
+The Consensus Layer relies on the Execution Layer for a few functions, including:
+
+#### 1. **Payload Execution** (Block Production)
+
+- When **producing** blocks, the CL asks the EL to build an execution payload
+- The EL assembles transactions from its mempool, executes them, and returns the payload
+- This happens during block proposal, when a validator (proposer) needs to create a new block
+- This interaction happens through the Engine API (`engine_getPayload`)
+
+#### 2. **EL State Validation** (Block Validation)
+
+- When **validating** blocks (from other validators), the CL needs to verify the execution payload is correct
+- This is completely separate from block production - it's about verifying someone else's work
+- Traditionally, this requires the EL to maintain the full Ethereum state and re-execute all transactions
+- This interaction happens through the Engine API (`engine_newPayload`)
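+In stateless validation mode this is precisely the step that gets skipped. A minimal sketch of the decision, using hypothetical helper names rather than the actual Lighthouse code:
+
+```rust
+/// Simplified stand-in for the Engine API payload status.
+enum PayloadStatus {
+    Valid,
+    Invalid,
+    Optimistic,
+}
+
+/// In stateless mode every payload is imported optimistically and the
+/// `engine_newPayload` round-trip is skipped; execution proofs received
+/// later via gossip decide whether the block ever becomes "proven".
+fn verify_payload<F>(stateless_validation: bool, engine_new_payload: F) -> PayloadStatus
+where
+    F: FnOnce() -> PayloadStatus,
+{
+    if stateless_validation {
+        PayloadStatus::Optimistic
+    } else {
+        // Normal path: ask the EL to re-execute the payload.
+        engine_new_payload()
+    }
+}
+```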
+#### 3. **Fork Choice Updates**
+
+- The CL informs the EL about the canonical chain head
+- The EL uses this information to organize its own state and handle execution payload reorgs
+- This interaction happens through the Engine API (`engine_forkchoiceUpdated`)
+
+### Traditional vs Stateless Architecture
+
+#### Traditional Setup
+
+- **State Storage**: The EL maintains the complete Ethereum state (200GB+ and growing)
+- **State Access**: Direct database lookups - "I trust this data because I put it there"
+- **Validation Process**:
+  1. Receive block with transactions
+  2. Load affected state from local database
+  3. Execute transactions against that state
+  4. Compute new state root
+  5. Verify it matches the block's claimed state root
+- **Trust Model**: Self-verifying through re-execution
+- **Requirements**: Full/Snap state sync before validation can begin
+
+#### Stateless Architecture (For validators)
+
+- **State Storage**: No state storage required
+- **State Access**: Every state access must be accompanied by a cryptographic proof shipped with the execution payload
+- **Validation Process**:
+  1. Receive block with transactions AND state proof + state
+  2. Use state data to re-execute transactions (no full state database needed)
+  3. Verify the computed state root matches the block's claimed state root
+- **Trust Model**: "I trust this state root because I re-executed using proven state data"
+
+#### Executionless Architecture (For validators)
+
+- **State Storage**: No state storage required
+- **State Access**: Not needed - no execution performed
+- **Validation Process**:
+  1. Receive beacon block with execution payload AND execution proofs (may arrive later via gossip)
+  2. Verify the execution proofs (no execution needed)
+  3. Accept the state root if proofs are valid
+- **Trust Model**: "I trust this state root because the cryptographic proof guarantees it"
+
+> TODO: One confusing thing is that we may have one subnet for state proofs, and the other subnets for execution proofs.
+
+### Key Implementation Considerations
+
+When implementing either stateless or executionless validation, several architectural decisions must be made:
+
+#### 1. **Proof Distribution Strategy**
+- **Push model (unbundled)**: Proofs distributed via dedicated subnets (similar to attestations)
+- **Pull model (unbundled)**: Nodes request proofs when needed
+- **Bundled**: Proofs included with blocks (increases block size)
+
+#### 2. **Validation Timing**
+- **Optimistic Import**: Accept blocks immediately, validate when proofs arrive (this is similar to the pipelining for delayed execution)
+- **Pessimistic Import**: Wait for proofs before accepting blocks
+
+#### 3. **Fork Choice Integration**
+- **Separate Views**: Keep proven and optimistic chains separate
+- **Integrated**: Modify fork choice weights based on proof availability
+
+#### 4. **Resource Management**
+- **Number of proofs**: How many proofs to generate per payload
+- **Proof Storage**: How long to keep proofs, storage limits
+- **GPU Usage**: Proof generation can be intensive
+- **Network Bandwidth**: Proof propagation overhead (~300KB per proof)
+
+## Execution Proofs and ZK Integration
+
+Lighthouse implements a sophisticated execution proof system to enable stateless validation.
+The key components include:
+
+### Execution Proof Messages
+
+Located in `consensus/types/src/execution_proof.rs`, these messages contain:
+
+```rust
+pub struct ExecutionProof {
+    /// The execution block hash this proof attests to
+    pub block_hash: ExecutionBlockHash,
+    /// The subnet ID where this proof was received/should be sent
+    pub subnet_id: ExecutionProofSubnetId,
+    /// Version of the proof format
+    pub version: u32,
+    /// Opaque proof data - structure depends on subnet_id and version
+    /// This contains cryptographic proofs from zkVMs or other proof systems
+    pub proof_data: Vec<u8>,
+}
+```
+
+The proof system supports multiple proofs per execution payload; each kind of proof is referred to as a proof type.
+
+### Proof Distribution
+
+Execution proofs are distributed via gossip subnets to ensure efficient propagation:
+
+1. Proofs are published to specific subnets based on the proof type. For example:
+   - Subnet 0: Execution witness proofs
+   - Subnet 1: SP1 zkVM proofs
+   - Subnet 2: RISC-V zkVM proofs
+   - Subnet 3: zkEVM proofs
+   - etc. (up to 8 subnets by default)
+2. Nodes subscribe to relevant subnets based on their validation needs
+3. The broadcaster service manages proof distribution and retries
+
+## Stateless Validation Mode
+
+When `stateless_validation` is enabled in the chain configuration:
+
+### 1. **Proof Reception and Optimistic Import**
+
+- The node subscribes to execution proof subnets automatically
+- Incoming proofs are validated and stored in a proof pool
+- Optimistic block handling:
+  - When a beacon block arrives, it's marked as "optimistic" (since the execution payload cannot be verified without a proof)
+  - The block's execution payload hash is registered for proof tracking
+  - The node continues processing the block optimistically, assuming it will be valid
+  - When matching proofs arrive via gossip, they trigger re-evaluation of pending blocks
+  - Beacon blocks transition from optimistic to verified status once enough valid proofs are received
+  - The system never rejects blocks due to missing proofs - they remain optimistic until proven valid
+
+> None of this changes the fork choice rules. A beacon block becoming proven is additional metadata that we store but do not use to modify fork choice.
+
+### 2. **Block Validation**
+
+- Instead of executing payloads locally, the node waits for execution proofs
+- Execution proofs provide cryptographic guarantees of correct execution of the payload
+- The validator can therefore validate blocks without maintaining state
+
+### 3. **Dual-View Architecture**
+
+We implement a dual-view architecture for stateless validation that separates consensus from proof validation:
+
+#### Optimistic View (Fork Choice)
+- Used by validators for all consensus duties (attestations, proposals, etc.)
+- Remains permanently optimistic when `stateless_validation` is enabled (this is akin to the EL always returning SYNCING)
+- Fork choice weights are therefore NOT modified by proof availability/correctness
+- Allows beacon nodes to participate in consensus without waiting for proofs (or more importantly, it allows us to implement this change without changing fork choice)
+
+#### Proven View (Proof Store)
+- Tracks which blocks have received sufficient execution proofs
+- Maintains a "proven canonical chain" from finalized checkpoint to proven head
+- Updates independently of fork choice
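+The split can be pictured with a minimal sketch (hypothetical, heavily simplified types; the real proof store also validates proofs and walks the block tree from the finalized checkpoint):
+
+```rust
+use std::collections::HashMap;
+
+type ExecutionBlockHash = [u8; 32];
+
+/// Tracks proof counts and a proven head, entirely outside fork choice.
+struct ProofStore {
+    proofs_per_block: HashMap<ExecutionBlockHash, usize>,
+    min_proofs_required: usize,
+    proven_head: Option<ExecutionBlockHash>,
+}
+
+impl ProofStore {
+    /// Record one proof; promote the block once the threshold is met.
+    /// Fork choice is never consulted or modified here.
+    fn on_proof_received(&mut self, block_hash: ExecutionBlockHash) {
+        let count = self.proofs_per_block.entry(block_hash).or_insert(0);
+        *count += 1;
+        if *count >= self.min_proofs_required {
+            self.proven_head = Some(block_hash);
+        }
+    }
+}
+```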
+This separation provides several benefits:
+
+1. **Simplicity**: No complex fork choice modifications needed
+2. **Validator Safety**: Validators continue normal operations regardless of proof correctness
+3. **Clear Monitoring**: Easy to see the gap between optimistic head and proven head
+4. **Future Flexibility**: Can later integrate proven status into fork choice
+
+### Dual-View Architecture Diagram
+
+```
+┌─────────────────────────────────────────────────┐
+│                  Fork Choice                    │
+│        (Always Optimistic in Stateless)         │
+│  - Used for attestations, proposals             │
+│  - Never modified by proof availability         │
+│  - Beacon chain always sees optimistic head     │
+└─────────────────────────────────────────────────┘
+                        │
+                        │ Reads blocks
+                        │
+┌─────────────────────────────────────────────────┐
+│                  Proof Store                    │
+│          (Tracks Proven Chain Status)           │
+│  - Maintains proven head                        │
+│  - Updates when sufficient proofs received      │
+│  - For monitoring/metrics only                  │
+└─────────────────────────────────────────────────┘
+```
+
+## Implementation Changes
+
+### Core Components Modified
+
+The Lighthouse implementation adds the following:
+
+1. **Chain Configuration**: New flags for `stateless_validation`, `stateless_min_proofs_required`, and `max_execution_proof_subnets`
+2. **Execution Payload Proof Store**: Manages proof storage, validation, and proven chain tracking
+3. **Execution Proof Broadcaster**: Background service for broadcasting proofs to gossip subnets
+4. **Network Layer**: New gossip topics and subnet management for proof distribution
+
+### Integration Points
+
+1. **Block Import Process**: Blocks are imported optimistically while awaiting proofs
+2. **Proof Reception Process**: Incoming proofs trigger re-evaluation of pending beacon blocks
+3. **Dual-View Separation**: Fork choice remains optimistic while the proven chain tracks validation status with respect to proofs
+
+### Implementation Notes
+
+Main files to focus on:
+
+1. **Core Proof Infrastructure**:
+   - `consensus/types/src/execution_proof.rs` - Proof message types
+   - `consensus/types/src/execution_proof_subnet_id.rs` - Subnet ID type (0-7)
+   - `beacon_node/beacon_chain/src/execution_payload_proofs.rs` - Proof storage and management
+   - `beacon_node/beacon_chain/src/execution_proof_generation.rs` - Proof generation logic
+
+2. **Integration Points**:
+   - `beacon_node/beacon_chain/src/execution_payload.rs` - Modified `notify_new_payload` for proof generation
+   - `beacon_node/beacon_chain/src/beacon_chain_execution_proof.rs` - Beacon chain proof methods
+   - `beacon_node/network/src/network_beacon_processor/gossip_methods.rs` - Proof gossip handling
+
+3. **Background Services**:
+   - `beacon_node/client/src/execution_proof_broadcaster.rs` - Proof broadcasting service
+
+4. **Configuration**:
+   - `beacon_node/beacon_chain/src/chain_config.rs` - New configuration parameters
+
+## Configuration
+
+### Node Types
+
+Different node configurations are possible:
+
+1. **Regular Stateful Node** (default):
+
+   ```bash
+   lighthouse bn
+   ```
+
+   - Maintains full state and validates through execution
+
+2. **Stateless Validator**:
+
+   ```bash
+   lighthouse bn --stateless-validation
+   ```
+
+   - Validates using execution proofs, no state storage required
+
+3. **Proof Generator Node**:
+
+   ```bash
+   lighthouse bn --generate-execution-proofs
+   ```
+
+   - Maintains full state and generates proofs for the network
+
+### Configuration Parameters
+
+Key settings include:
+
+- `stateless_validation`: Enable proof-based validation
+- `stateless_min_proofs_required`: Minimum proofs needed (default: 1)
+- `max_execution_proof_subnets`: Number of proof subnets (default: 8)
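+Putting these together, a stateless node that waits for three matching proofs per payload (the same setup used in the local testnet configuration later in this diff) would be started as:
+
+```bash
+lighthouse bn --stateless-validation --stateless-min-proofs-required 3
+```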
+## Important Considerations
+
+### Proof Storage Philosophy
+
+- **Store All Valid Proofs**: All valid proofs are stored regardless of whether the block is canonical
+- **No Temporal Storage**: Proofs are not stored temporarily; they follow the same lifecycle as blocks
+- **LRU Eviction**: Simple LRU eviction prevents unbounded growth (default: 10,000 proofs)
+- **Finalization-Based Cleanup**: Proofs are pruned based on finalization, similar to block pruning
+
+### Reorg Handling
+
+During chain reorganizations:
+
+- **Proofs Already Available**: Since we store valid proofs for all blocks (not just canonical ones), proofs are already available when blocks switch from non-canonical to canonical
+- **No Re-propagation**: Blocks are not re-gossiped during reorgs, and neither are proofs
+- **Automatic Proven Chain Update**: The proven chain automatically adjusts based on the new canonical chain
+
+## A few common questions
+
+**Q: Why doesn't proof availability/verification affect fork choice?**
+A: This is currently a deliberate design choice, for both simplicity and safety. It will allow us to run modified beacon nodes alongside testnets like Hoodi without anything at stake.
+
+**Q: What happens if proofs never arrive?**
+A: Blocks remain permanently optimistic. They can still be finalized through normal consensus, though cleanup removes them from the pending set after finalization. This is acceptable for now, since proof generation benchmarks are currently around 3-4 minutes per proof.
+
+**Q: Why store proofs for non-canonical blocks?**
+A: During reorgs, previously non-canonical blocks may become canonical. Having proofs already available avoids re-generation/re-propagation -- I think this is how the logic for beacon blocks works, so I wanted to be as close to that as possible.
+
+**Q: What's the memory impact?**
+A: The default LRU cache stores 10,000 proofs. Each proof is ~300KB, so a maximum of ~3GB. Pending blocks are cleaned up after finalization, so these are worst cases where the chain is not finalizing and/or we are, for some reason, receiving many proofs for valid execution payloads.
+
+## Network Architecture
+
+### Proof Propagation Flow
+
+```mermaid
+sequenceDiagram
+    participant CL as Consensus Layer (Proposer)
+    participant EL as Execution Layer
+    participant ZK as ZK Prover
+    participant BG as Beacon Block Gossip
+    participant PG as Proof Gossip (Subnet 0-7)
+    participant SV as Stateless Validator
+
+    Note over CL,EL: Block Production Flow
+    CL->>EL: Request execution payload
+    EL->>CL: Return execution payload
+    CL->>BG: Broadcast beacon block
+
+    Note over CL,ZK: Proof Generation Flow (CL-driven)
+    CL->>EL: debug_executionWitness or similar endpoint
+    EL->>CL: Return execution witness
+    CL->>ZK: Send witness to ZK prover
+    ZK->>ZK: Generate N different ZK proofs (one per proof type/subnet)
+    ZK->>CL: Return all proofs
+    loop For each proof type (0 to N-1)
+        CL->>PG: Broadcast proof on corresponding subnet
+    end
+
+    Note over BG,SV: Stateless Validation Flow
+    BG->>SV: Receive beacon block
+    SV->>SV: Store block as "pending proof"
+    PG->>SV: Receive execution proof (from subnet N)
+    SV->>SV: Match proof with pending block
+    SV->>SV: Verify ZK proof cryptographically
+    SV->>SV: Accept/reject block based on proof
+```
+
+#### Detailed Workflow
+
+**Block Producer/Receiver with Proof Generation:**
+
+1. Maintains full state and can execute transactions
+2. When processing any block (produced or received):
+   - If `--generate-execution-proofs` is enabled, triggers proof generation
+   - Proof generation happens asynchronously in the background
+   - Generated proofs are stored in the execution payload proof store
+3. The execution proof broadcaster service:
+   - Periodically checks for unbroadcast proofs
+   - Broadcasts proofs to the appropriate gossip subnets
+   - Manages retry logic for failed broadcasts
+4. Note: Proof generation is triggered in `notify_new_payload` for all blocks when the flag is set
+
+**Stateless Validator:**
+
+1. Subscribes to relevant proof subnets (based on supported proof types)
+2. Receives blocks but cannot validate execution without state
+3. Waits for matching execution proofs
+4. Validates blocks using cryptographic proofs instead of re-execution
+5. Participates in consensus without storing state
+
+### Subnet Distribution
+
+Execution proofs are distributed across multiple subnets to:
+
+1. Prevent network congestion
+2. Enable selective subscription (nodes can subscribe only to proof types they support)
+3. Improve censorship resistance
+4. Separate different proof systems (witness proofs vs various zkVM proofs)
+
+The subnet for a proof is determined by the proof type itself:
+
+- Each proof system (witness, SP1, RISC-V, zkEVM, etc.) has a dedicated subnet
+- This allows nodes to subscribe only to proof types they can validate
+- The ProofId directly maps to the subnet ID (1:1 mapping, illustrated in the sketch below)
+
+**Automatic Subnet Subscription:**
+
+- When `--stateless-validation` is enabled, nodes automatically subscribe to ALL execution proof subnets (0-7 by default)
+- This ensures stateless nodes can receive proofs from any proof system
+- No manual subnet configuration is required for stateless nodes
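+To illustrate the 1:1 mapping, a node could associate subnet IDs with the proof systems it can verify along these lines (a sketch with hypothetical names; in the current implementation a proof is identified only by its numeric subnet ID and opaque proof bytes):
+
+```rust
+/// Hypothetical labels for the proof system behind each subnet.
+enum ProofSystem {
+    ExecutionWitness, // subnet 0
+    Sp1,              // subnet 1
+    RiscV,            // subnet 2
+    ZkEvm,            // subnet 3
+}
+
+fn proof_system_for_subnet(subnet_id: u64) -> Option<ProofSystem> {
+    match subnet_id {
+        0 => Some(ProofSystem::ExecutionWitness),
+        1 => Some(ProofSystem::Sp1),
+        2 => Some(ProofSystem::RiscV),
+        3 => Some(ProofSystem::ZkEvm),
+        _ => None, // remaining subnets (up to 7) are not yet assigned
+    }
+}
+```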
+### Scratch pad
+
+> Rough notes follow
+
+1. **Real Proof Integration**:
+   - Implement actual EL witness fetching
+   - Integrate with real zkVM proof systems
+   - Support for multiple proof formats and versions
+
+2. **Resource Controls**:
+   - Add circuit breaker for proof generation with configurable limits
+   - Implement task queue with concurrency controls
+   - Add metrics for proof generation performance
+   - Rate limiting on proof acceptance (this is essentially where a bad actor floods the network with invalid proofs, or just proofs that do not belong to payloads we will ever care about)
+
+3. **Block Production Support**:
+   - Enable stateless nodes to produce blocks via MEV-boost
+   - Generate proofs for self-produced blocks
+
+4. **Network Optimizations**:
+   - Support request-based proof sharing as fallback
+
+5. **Dummy Proof Generation**:
+   - Currently generates simulated proofs instead of real cryptographic proofs
+   - Lacks integration with actual EL witness data (`debug_executionWitness`)
+   - Proof data is placeholder content for testing
+
+**Slashing Conditions**: Invalid proofs could result in slashing in the future (depends on whether we choose to enshrine the proofs / incentivise them from issuance)
+- **Only stateful nodes can generate proofs** - the `--generate-execution-proofs` flag cannot be used with `--stateless-validation`
+- Proof generation requires access to the full execution layer state
+- Proof generator nodes help the network by creating proofs for all blocks they process (both produced and received)
+- Generated proofs are automatically broadcast to the appropriate gossip subnets
+
+**Proof Archival**: Add pluggable archival system for finalized proofs
+**Selective Fork Choice Integration**: Optionally allow proven status to influence fork choice
\ No newline at end of file
diff --git a/book/src/help_bn.md b/book/src/help_bn.md
index 642add152e0..bbb7b8b615e 100644
--- a/book/src/help_bn.md
+++ b/book/src/help_bn.md
@@ -382,6 +382,10 @@ Options:
        full [default: 1]
  --state-cache-size <STATE_CACHE_SIZE>
        Specifies the size of the state cache [default: 128]
+  --stateless-min-proofs-required <COUNT>
+        Minimum number of execution proofs required to consider a block valid
+        in stateless validation mode. Only applies when --stateless-validation
+        is enabled. Must be between 1 and max_execution_proof_subnets.
  --suggested-fee-recipient <SUGGESTED_FEE_RECIPIENT>
        Emergency fallback fee recipient for use in case the validator client
        does not have one configured. You should set this flag on the
@@ -486,6 +490,10 @@ Flags:
     --enable-private-discovery
         Lighthouse by default does not discover private IP addresses. Set
         this flag to enable connection attempts to local addresses.
+     --generate-execution-proofs
+         Generate execution proofs for all blocks (both produced and received).
+         This makes the node act as a proof generator for the network. Cannot
+         be used with --stateless-validation.
     --genesis-backfill
         Attempts to download blocks all the way back to genesis when
         checkpoint syncing.
@@ -556,6 +564,11 @@ Flags:
         Standard option for a staking beacon node. This will enable the HTTP
         server on localhost:5052 and import deposit logs from the execution
         node.
+     --stateless-validation
+         Enable stateless validation mode where all new payloads are marked as
+         optimistically valid without verification from the execution layer.
+         This bypasses normal payload validation and should only be used for
+         testing.
     --stdin-inputs
         If present, read all user inputs from stdin instead of tty.
     --subscribe-all-subnets
diff --git a/consensus/types/src/execution_proof.rs b/consensus/types/src/execution_proof.rs
new file mode 100644
index 00000000000..b675f15313d
--- /dev/null
+++ b/consensus/types/src/execution_proof.rs
@@ -0,0 +1,132 @@
+//! Execution payload proof message for gossip.
+
+use crate::execution_proof_subnet_id::ExecutionProofSubnetId;
+use crate::ExecutionBlockHash;
+use serde::{Deserialize, Serialize};
+use ssz_derive::{Decode, Encode};
+
+/// Represents a proof for an execution payload.
+/// If this proof verifies as true, it is equivalent to the ExecutionLayer
+/// specifying that the payload is valid.
+/// Multiple proof types can exist for a single execution payload
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Encode, Decode)]
+pub struct ExecutionProof {
+    /// The execution block hash this proof attests to
+    pub block_hash: ExecutionBlockHash,
+    /// The subnet ID where this proof was received/should be sent (maps to gossip subnet)
+    pub subnet_id: ExecutionProofSubnetId,
+    /// Version of the proof format - allows one subnet to upgrade its proof format without all needing to
+    pub version: u32,
+    /// Opaque proof data - structure depends on subnet_id and version
+    /// This will contain cryptographic proofs received via gossip
+    pub proof_data: Vec<u8>,
+}
+
+impl ExecutionProof {
+    /// Create a new execution proof for gossip
+    pub fn new(
+        block_hash: ExecutionBlockHash,
+        subnet_id: ExecutionProofSubnetId,
+        version: u32,
+        proof_data: Vec<u8>,
+    ) -> Self {
+        Self {
+            block_hash,
+            subnet_id,
+            version,
+            proof_data,
+        }
+    }
+
+    /// Get a description of the proof type based on subnet_id
+    pub fn description(&self) -> String {
+        format!("proof id {}", *self.subnet_id)
+    }
+
+    /// Check if this proof version is supported
+    pub fn is_version_supported(&self) -> bool {
+        // TODO: We want each subnet to be able to update
+        // TODO: their version independently, for now it just supports 1
+        // TODO: Think of the best structure to use here, noting that there
+        // TODO: could be quite a lot of subnets, if we consider the different
+        // TODO: zkVM and EL combos. So maybe the versioning comes from the
+        // TODO: middleware that verifies proofs.
+        matches!(self.version, 1)
+    }
+
+    /// Validate basic structure of the proof
+    pub fn is_structurally_valid(&self) -> bool {
+        // Basic validation: non-empty proof data and supported version
+        !self.proof_data.is_empty() && self.is_version_supported()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::Hash256;
+    use ssz::{Decode, Encode};
+
+    #[test]
+    fn test_execution_proof_creation() {
+        let block_hash = ExecutionBlockHash::from(Hash256::random());
+        let subnet_id = ExecutionProofSubnetId::new(0).unwrap();
+        let proof_data = vec![1, 2, 3, 4];
+
+        let proof = ExecutionProof::new(block_hash, subnet_id, 1, proof_data.clone());
+
+        assert_eq!(proof.block_hash, block_hash);
+        assert_eq!(proof.subnet_id, subnet_id);
+        assert_eq!(proof.version, 1);
+        assert_eq!(proof.proof_data, proof_data);
+    }
+
+    #[test]
+    fn test_execution_proof_validation() {
+        let block_hash = ExecutionBlockHash::from(Hash256::random());
+        let subnet_id = ExecutionProofSubnetId::new(0).unwrap();
+
+        // Valid proof
+        let valid_proof = ExecutionProof::new(block_hash, subnet_id, 1, vec![1, 2, 3]);
+        assert!(valid_proof.is_version_supported());
+        assert!(valid_proof.is_structurally_valid());
+
+        // Invalid version
+        let invalid_version = ExecutionProof::new(block_hash, subnet_id, 99, vec![1, 2, 3]);
+        assert!(!invalid_version.is_version_supported());
+        assert!(!invalid_version.is_structurally_valid());
+
+        // Empty proof data
+        let empty_proof = ExecutionProof::new(block_hash, subnet_id, 1, vec![]);
+        assert!(empty_proof.is_version_supported());
+        assert!(!empty_proof.is_structurally_valid());
+    }
+
+    #[test]
+    fn test_execution_proof_description() {
+        let block_hash = ExecutionBlockHash::from(Hash256::random());
+
+        let witness_proof = ExecutionProof::new(
+            block_hash,
+            ExecutionProofSubnetId::new(0).unwrap(),
+            1,
+            vec![1, 2, 3],
+        );
+        assert_eq!(witness_proof.description(), "proof id 0");
+    }
+
+    #[test]
+    fn test_execution_proof_ssz_encoding() {
+        let block_hash = ExecutionBlockHash::from(Hash256::random());
+        let subnet_id = ExecutionProofSubnetId::new(2).unwrap();
+        let proof_data = vec![10, 20, 30, 40, 50];
+
+        let original = ExecutionProof::new(block_hash, subnet_id, 1, proof_data);
+
+        // Test SSZ encoding and decoding
+        let encoded = original.as_ssz_bytes();
+        let decoded = ExecutionProof::from_ssz_bytes(&encoded).expect("should decode successfully");
+
+        assert_eq!(original, decoded);
+    }
+}
diff --git a/consensus/types/src/execution_proof_subnet_id.rs b/consensus/types/src/execution_proof_subnet_id.rs
new file mode 100644
index 00000000000..3fe405c316f
--- /dev/null
+++ b/consensus/types/src/execution_proof_subnet_id.rs
@@ -0,0 +1,138 @@
+//! Identifies each execution proof subnet by an integer identifier.
+use serde::{Deserialize, Serialize};
+use ssz::{Decode, DecodeError, Encode};
+use std::fmt::{self, Display};
+use std::ops::{Deref, DerefMut};
+
+/// Maximum number of execution proof subnets allowed by the protocol.
+///
+/// This is a hard protocol limit that defines the total number of proof subnets
+/// that can exist in the network. Individual nodes may choose to participate in
+/// fewer subnets (configured via max_execution_proof_subnets in ChainConfig),
+/// but no node can exceed this protocol maximum.
+///
+/// The value of 8 subnets provides a good balance between:
+/// - Proof diversity (multiple independent proofs per block)
+/// - Network overhead (not too many gossip topics)
+/// - Resource requirements (reasonable for most nodes(?))
+///
+/// In reality, I do not think we will have 8; more likely closer to 3, though this is still
+/// being explored. This number could be larger if we consider combining different zkVMs with
+/// different guests.
+pub const MAX_EXECUTION_PROOF_SUBNETS: u64 = 8;
+
+/// ExecutionProofSubnetId is both the id for the subnet that a particular proof will be on
+/// and the proof ID to identify the proof, i.e. we have one type of proof per subnet.
+#[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct ExecutionProofSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64);
+
+impl ExecutionProofSubnetId {
+    /// Create an ExecutionProofSubnetId from a u64, validating it's within bounds
+    ///
+    /// Note: the bound here relates to the fact that there is a maximum number of subnets
+    /// that we can have; it is also the maximum number of proofs that we will accept.
+    pub fn new(id: u64) -> Result<Self, InvalidSubnetId> {
+        if id >= MAX_EXECUTION_PROOF_SUBNETS {
+            return Err(InvalidSubnetId(id));
+        }
+        Ok(Self(id))
+    }
+}
+
+impl Display for ExecutionProofSubnetId {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl Deref for ExecutionProofSubnetId {
+    type Target = u64;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl DerefMut for ExecutionProofSubnetId {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+impl From<ExecutionProofSubnetId> for u64 {
+    fn from(val: ExecutionProofSubnetId) -> Self {
+        val.0
+    }
+}
+
+impl From<&ExecutionProofSubnetId> for u64 {
+    fn from(val: &ExecutionProofSubnetId) -> Self {
+        val.0
+    }
+}
+
+#[derive(Debug)]
+pub struct InvalidSubnetId(pub u64);
+
+impl std::fmt::Display for InvalidSubnetId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "Invalid execution proof subnet ID: {}, must be < {}",
+            self.0, MAX_EXECUTION_PROOF_SUBNETS
+        )
+    }
+}
+
+impl std::error::Error for InvalidSubnetId {}
+
+// Manual SSZ implementations for ExecutionProofSubnetId
+impl Encode for ExecutionProofSubnetId {
+    fn is_ssz_fixed_len() -> bool {
+        <u64 as Encode>::is_ssz_fixed_len()
+    }
+
+    fn ssz_fixed_len() -> usize {
+        <u64 as Encode>::ssz_fixed_len()
+    }
+
+    fn ssz_bytes_len(&self) -> usize {
+        self.0.ssz_bytes_len()
+    }
+
+    fn ssz_append(&self, buf: &mut Vec<u8>) {
+        self.0.ssz_append(buf)
+    }
+}
+
+impl Decode for ExecutionProofSubnetId {
+    fn is_ssz_fixed_len() -> bool {
+        <u64 as Decode>::is_ssz_fixed_len()
+    }
+
+    fn ssz_fixed_len() -> usize {
+        <u64 as Decode>::ssz_fixed_len()
+    }
+
+    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
+        // Enforce the same bound as `new` so that out-of-range subnet IDs
+        // cannot be smuggled in via SSZ decoding (e.g. from gossip).
+        let id = u64::from_ssz_bytes(bytes)?;
+        Self::new(id).map_err(|e| DecodeError::BytesInvalid(e.to_string()))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_execution_proof_subnet_id_creation() {
+        for id in 0..MAX_EXECUTION_PROOF_SUBNETS {
+            let subnet_id = ExecutionProofSubnetId::new(id).unwrap();
+            assert_eq!(*subnet_id, id);
+        }
+
+        assert!(ExecutionProofSubnetId::new(0).is_ok());
+        assert!(ExecutionProofSubnetId::new(7).is_ok());
+        assert!(ExecutionProofSubnetId::new(MAX_EXECUTION_PROOF_SUBNETS).is_err());
+        assert!(ExecutionProofSubnetId::new(u64::MAX).is_err());
+    }
+}
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index f0555a06d6d..40c4b79887c 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -107,6 +107,8 @@ pub mod blob_sidecar;
 pub mod data_column_custody_group;
 pub mod data_column_sidecar;
 pub mod data_column_subnet_id;
+pub mod execution_proof;
+pub mod execution_proof_subnet_id;
 pub mod light_client_header;
 pub mod non_zero_usize;
 pub mod runtime_fixed_vector;
@@ -176,6 +178,8 @@ pub use crate::execution_payload_header::{
     ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu,
     ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut,
 };
+pub use crate::execution_proof::ExecutionProof;
+pub use crate::execution_proof_subnet_id::ExecutionProofSubnetId;
 pub use crate::execution_requests::{ExecutionRequests, RequestType};
 pub use crate::fork::Fork;
 pub use crate::fork_context::ForkContext;
diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml
index 05f1c5a54ce..2fddcab2671 100644
--- a/scripts/local_testnet/network_params.yaml
+++ b/scripts/local_testnet/network_params.yaml
@@ -1,18 +1,36 @@
 # Full configuration reference [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration).
participants: + # Stateful proof generator nodes (generate proofs for the network) - el_type: geth el_image: ethereum/client-go:latest cl_type: lighthouse cl_image: lighthouse:local cl_extra_params: - --target-peers=3 - count: 4 + - --generate-execution-proofs + count: 1 + # Stateless validator nodes (consume proofs but cannot generate them) + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --target-peers=3 + - --stateless-validation + - --stateless-min-proofs-required=3 + count: 2 + # Regular stateful nodes (normal operation, no proof generation) + - el_type: geth + el_image: ethereum/client-go:latest + cl_type: lighthouse + cl_image: lighthouse:local + cl_extra_params: + - --target-peers=3 + count: 1 network_params: electra_fork_epoch: 0 seconds_per_slot: 3 global_log_level: debug snooper_enabled: false additional_services: - - dora - - spamoor - - prometheus_grafana + - dora \ No newline at end of file