diff --git a/.github/wordlist.txt b/.github/wordlist.txt index e84ee1d17..eeebe91ef 100644 --- a/.github/wordlist.txt +++ b/.github/wordlist.txt @@ -1,26 +1,42 @@ +ai +async +Async blockchain +BLS +CamelCase +claude CLI clippy +codebase Config config cryptographic dir ETH Ethereum +Ethereum's +EventBus Exercism github Grafana http HTTPS +invariants io libp linter localhost MacOS +md mdBook middleware +Modularity +Mutex +NodeInfo +NodeMetadata PathBuf performant +pluggable pre PRs QBFT @@ -28,16 +44,22 @@ repo RSA runtime rustfmt +RwLock sigmaprime sigp spec'd SSV +stringly struct structs +subdirectories Styleguide +Subnet +testability Testnet testnet Testnets +Tokio TODOs UI Validator @@ -53,8 +75,6 @@ Responder responder Prepends Secp -NodeMetadata -NodeInfo subnets holesky responder's @@ -70,6 +90,7 @@ keyshare unencrypted Hoodi subcommands +submodules APIs websocket CORS @@ -90,4 +111,4 @@ cli ENR UPnP Golang -stdin +stdin \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..ab07e4849 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "anchor/spec_tests/ssv-spec"] + path = anchor/spec_tests/ssv-spec + url = https://github.com/ssvlabs/ssv-spec.git diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 000000000..240072bbc --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,363 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## About Anchor + +Anchor is an open-source implementation of the Secret Shared Validator (SSV) protocol, written in Rust and maintained by Sigma Prime. It serves as a validator client for Ethereum's proof-of-stake consensus mechanism using secret sharing techniques. + +## Common Commands + +### Build and Install + +```bash +# Build the project in release mode +cargo build --release + +# Install Anchor to your path +make install + +# Build for specific architectures +make build-x86_64 # Build for x86_64 Linux (requires cross) +make build-aarch64 # Build for aarch64 Linux (requires cross) + +# Create release tarballs +make build-release-tarballs +``` + +### Testing + +```bash +# Run all tests in release mode (standard) +make test +# or +cargo test --release --features "$(TEST_FEATURES)" + +# Run all tests in debug mode +make test-debug +# or +cargo test --workspace --features "$(TEST_FEATURES)" + +# Run tests with nextest (faster) +make nextest-release +make nextest-debug + +# Test a specific crate +cd anchor/common/qbft +cargo test + +# Check benchmark code (without running benchmarks) +make check-benches +``` + +### Linting and Formatting + +```bash +# Format code +make cargo-fmt +# or +cargo +nightly fmt --all + +# Check formatting +make cargo-fmt-check + +# Run linter +make lint +# or +cargo clippy --workspace --tests --features "$(TEST_FEATURES)" -- -D warnings + +# Fix linting issues automatically +make lint-fix + +# Check for unused dependencies +make udeps +# or +cargo +nightly udeps --tests --all-targets --release --features "$(TEST_FEATURES)" + +# Check if dependencies are sorted correctly +make sort +``` + +### Other Useful Commands + +```bash +# Run dependency audit for security issues +make audit + +# Update CLI documentation in the book +make cli-local + +# Check for markdown issues +make mdlint +``` + +## Architecture Overview + +Anchor is a multi-threaded client with several core components organized as a modular Rust workspace. 
The architecture follows a service-oriented approach with well-defined boundaries between components.

### Core Design Principles

1. **Modularity**: Components are separated into their own crates with clear boundaries
2. **Error Handling**: Comprehensive error types specific to each module
3. **Asynchronous Design**: Built on Tokio for non-blocking operations
4. **Thread Safety**: Uses `Arc`, `Mutex`, and `RwLock` appropriately for shared state
5. **Message Passing**: Communication between components via channels

### Thread Model

Anchor consists of multiple long-running tasks that are spawned during initialization:

1. **Core Client**: The main control flow
2. **HTTP API**: Endpoint for reading data and modifying components
3. **Metrics**: Prometheus-compatible metrics endpoint
4. **Execution Service**: Syncs SSV information from execution layer nodes
5. **Duties Service**: Watches the beacon chain for duties of known SSV validator shares
6. **Network**: P2P network stack (libp2p) for communication on the SSV network
7. **Processor**: Middleware that handles CPU-intensive tasks and prioritizes client workload
8. **QBFT**: Manages QBFT instances to reach consensus in SSV committees

### Key Components In Detail

#### Consensus (QBFT)

The QBFT module implements the Quorum Byzantine Fault Tolerance consensus algorithm:
- Located in `anchor/common/qbft`
- State machine-based implementation
- Supports pluggable network and validation layers
- Thread-safe for concurrent operation
- Includes comprehensive testing for consensus edge cases

#### Signature Collection

The Signature Collector manages distributed validator signatures:
- Located in `anchor/signature_collector`
- Collects partial signatures from distributed validator operators
- Uses threshold signature schemes with Lagrange interpolation
- Handles timeouts and failure modes
- Reconstructs the full signature once the threshold is reached

#### Network Layer

The network component handles P2P communication:
- Based on libp2p
- Supports encrypted communications
- Handles peer discovery and connection management
- Routes messages to appropriate internal components

### General Event Flow

1. The Duties Service identifies a validator duty
2. The duty is sent to the Processor
3. The Processor creates a QBFT instance
4. The Network sends and receives messages until the QBFT instance completes
5. The required consensus message is signed
6. The message is published on the P2P network

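A minimal sketch of this flow using Tokio channels, assuming hypothetical `ValidatorDuty` and `ConsensusOutcome` types (the real crates define richer types and route work through the Processor's prioritized queues):

```rust
use tokio::sync::mpsc;

// Hypothetical stand-ins for the real duty and outcome types.
#[derive(Debug)]
struct ValidatorDuty {
    slot: u64,
}

#[derive(Debug)]
struct ConsensusOutcome {
    data: Vec<u8>,
}

// Placeholder for consensus; the real implementation lives in `anchor/common/qbft`.
async fn run_qbft(_duty: &ValidatorDuty) -> ConsensusOutcome {
    ConsensusOutcome { data: vec![0xAA] }
}

#[tokio::main]
async fn main() {
    // Duties flow from the Duties Service to the Processor over a bounded channel.
    let (duty_tx, mut duty_rx) = mpsc::channel::<ValidatorDuty>(64);

    // Processor task: drive a QBFT instance per duty, then hand the decided
    // value off for signing and publishing.
    let processor = tokio::spawn(async move {
        while let Some(duty) = duty_rx.recv().await {
            let outcome = run_qbft(&duty).await;
            println!("slot {}: signing and publishing {:?}", duty.slot, outcome);
        }
    });

    duty_tx.send(ValidatorDuty { slot: 42 }).await.unwrap();
    drop(duty_tx); // Close the channel so the processor task exits.
    processor.await.unwrap();
}
```
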
## Code Organization

The codebase is organized as a Rust workspace with multiple crates, each with a specific responsibility:

- `anchor/`: Main crate with several submodules:
  - `client/`: CLI and client interface
  - `common/`: Shared types and utilities
    - `api_types/`: API data structures
    - `bls_lagrange/`: BLS cryptography implementations
    - `global_config/`: Global configuration
    - `operator_key/`: Key management
    - `qbft/`: QBFT consensus implementation
    - `ssv_network_config/`: Network configuration
    - `ssv_types/`: Core SSV data types
    - `version/`: Version information
  - `database/`: Database operations and storage
  - `duties_tracker/`: Validator duty tracking
  - `eth/`: Ethereum connectivity
  - `http_api/`: HTTP API implementation
  - `http_metrics/`: Metrics API
  - `keygen/`: Key generation
  - `keysplit/`: Key splitting for SSV
  - `logging/`: Logging infrastructure
  - `message_receiver/`: Message reception
  - `message_sender/`: Message sending
  - `message_validator/`: Message validation
  - `network/`: P2P networking
  - `processor/`: Task processing
  - `qbft_manager/`: QBFT instance management
  - `signature_collector/`: Signature aggregation
  - `subnet_service/`: Subnet operations
  - `validator_store/`: Validator data storage

## Modular Project Structure and Boundaries

Anchor follows a modular design with clear boundaries between components, emphasizing the following principles:

### Crate Structure

1. **Independent Crates**: Each major component is its own crate with a clearly defined API
2. **Minimal Dependencies**: Crates should only depend on what they need
3. **Public API Surface**: APIs between crates should be well-documented and minimal
4. **Clear Ownership**: Each crate has a clear responsibility and ownership model

### Dependency Flow

- **Common Libraries**: Core types and utilities are in `common/` subdirectories
- **Service Dependencies**: Higher-level services depend on lower-level ones, not vice versa
- **Configuration Flow**: Config flows down from the client to individual components
- **Event Flow**: Events flow up from components to central coordinators

### Inter-Component Communication

1. **Message Passing**: Components communicate via typed message channels
2. **Event Bus**: System-wide events use the EventBus pattern
3. **Trait Boundaries**: Components interact through trait interfaces, not concrete implementations
4. **Error Propagation**: Errors are properly typed and propagated up the stack

## Code Style and Best Practices

When contributing to Anchor, follow these Rust best practices:

### General Principles

1. **Follow Rust Idioms**: Use idiomatic Rust patterns (e.g., `Option`, `Result`, iterators)
2. **Error Handling**: Use proper error types and the `?` operator; avoid `unwrap()`/`expect()` in production code
3. **Memory Safety**: Leverage Rust's ownership system; avoid unsafe code when possible
4. **Documentation**: All public APIs should be documented with examples
5. **Type Safety**: Use the type system to prevent errors; avoid stringly-typed interfaces

### Specific Guidelines

1. **Naming**:
   - Use clear, descriptive names
   - Follow Rust naming conventions (snake_case for functions/variables, CamelCase for types)
   - Prefer explicit names over abbreviations

2. **Code Organization**:
   - Organize code into logical modules
   - Keep functions small and focused
   - Use the module system to control visibility

3. **Error Types** (see the sketch after this list):
   - Create domain-specific error types using `thiserror`
   - Include context in errors
   - Make error messages user-friendly

4. **Comments**:
   - Comment "why", not "what"
   - Use doc comments (`///`) for public API documentation
   - Add `TODO`, `FIXME`, or `NOTE` markers as needed for future work

5. **Async Code**:
   - Use `async`/`.await` properly with Tokio
   - Handle cancellation correctly
   - Avoid blocking the runtime with CPU-intensive work

6. **Dependencies**:
   - Keep dependencies minimal and up to date
   - Prefer well-maintained crates from the ecosystem
   - Pin dependency versions appropriately

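As a concrete illustration of the **Error Types** guideline, a small domain-specific error built with `thiserror` might look like the following (the type and variants are hypothetical, not taken from the Anchor codebase):

```rust
use thiserror::Error;

/// Hypothetical error type for a storage-backed component.
#[derive(Debug, Error)]
pub enum StoreError {
    /// Wrap lower-level errors while preserving their context.
    #[error("i/o failure: {0}")]
    Io(#[from] std::io::Error),

    /// Carry domain context directly in the message.
    #[error("validator {pubkey} not found")]
    UnknownValidator { pubkey: String },
}

/// The `?` operator converts `std::io::Error` into `StoreError::Io`
/// through the `#[from]` conversion generated by `thiserror`.
pub fn read_store(path: &str) -> Result<Vec<u8>, StoreError> {
    Ok(std::fs::read(path)?)
}
```
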
## Testing Guidelines

Anchor aims for high test coverage with different types of tests:

### Test Categories

1. **Unit Tests**: Test individual functions and methods
   - Located in the same file as the code being tested
   - Use `#[cfg(test)]` modules
   - Mock external dependencies

2. **Integration Tests**: Test interactions between components
   - Located in `tests/` directories
   - Test public APIs of crates
   - May use test fixtures or mock services

3. **End-to-End Tests**: Test complete workflows
   - Test the system as a whole
   - May require external services or mocks

4. **Property-Based Tests**: Test invariants and properties
   - Use frameworks like `proptest`
   - Generate random inputs to find edge cases

### Testing Best Practices

1. **Test Coverage**:
   - Aim for high coverage of business logic
   - Test edge cases and error paths
   - Use coverage tools to identify untested code

2. **Test Organization**:
   - Name tests clearly (`test__`)
   - Use test fixtures for complex setup
   - Group related tests with sub-modules

3. **Test Quality**:
   - Tests should be deterministic
   - Avoid sleep/delay-based tests
   - Use proper assertions with helpful messages
   - Clean up test resources properly

4. **Concurrent Testing**:
   - Ensure tests can run concurrently
   - Use unique resources for each test
   - Use `tokio::test` for async tests

5. **Mocking**:
   - Design code for testability with traits
   - Use trait mocking when needed
   - Consider dependency injection for easier testing

## Contribution Workflow

When contributing to Anchor, follow these steps to ensure high-quality code that meets project standards:

### Step 1: Plan Your Changes

- Start with a clear understanding of the problem or feature
- Break down complex tasks into smaller, manageable steps
- Consider how your change affects the overall architecture
- Discuss significant changes with the team before implementing

### Step 2: Development Process

1. **Branch**: Create a feature branch from the `unstable` branch
2. **Implement**: Write code following project style guidelines
3. **Test**: Add tests that cover your changes
4. **Document**: Update documentation as needed
5. **Refactor**: Clean up code before submission

### Step 3: Quality Assurance

Before submitting your code:

1. **Run Tests**: `make test` to run all tests
2. **Lint Code**: `make lint` to check for code style issues
3. **Check Performance**: Consider performance implications
4. **Ensure Backwards Compatibility**: When applicable

### Step 4: Submit Changes

1. **Commit**: Use clear commit messages that explain the change
2. **Push**: Push your branch to your fork
3. **PR**: Open a pull request against the `unstable` branch
4. **Review**: Address review feedback promptly
5. 
**CI**: Ensure all CI checks pass + +### Commit Message Guidelines + +- Use present tense ("Add feature", not "Added feature") +- First line is a summary (50 chars or less) +- Include component prefix (e.g., `network:`, `consensus:`) +- Reference issues or tickets when applicable +- Include context on why the change was made + +## Development Tips + +- This is a Rust project that follows standard Rust development practices +- The project is currently under active development and not ready for production +- Sigma Prime maintains two permanent branches: + - `stable`: Always points to the latest stable release, ideal for most users + - `unstable`: Used for development, contains the latest PRs, base branch for contributions +- When implementing new features, focus on modular design with clear boundaries +- Follow test-driven development principles when possible +- Use debugging tools like `tracing` and metrics to understand system behavior \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 72a62f4b9..6dd658cd5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aead" @@ -94,9 +94,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b4ae82946772d69f868b9ef81fc66acb1b149ef9b4601849bec4bcf5da6552e" +checksum = "48dff4dd98e17de00203f851800bbc8b76eb29a4d4e3e44074614338b7a3308d" dependencies = [ "alloy-consensus", "alloy-contract", @@ -118,26 +118,27 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.1.69" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28e2652684758b0d9b389d248b209ed9fd9989ef489a550265fe4bb8454fe7eb" +checksum = "4195a29a4b87137b2bb02105e746102873bc03561805cf45c0e510c961f160e6" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "num_enum", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] name = "alloy-consensus" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fbf458101ed6c389e9bb70a34ebc56039868ad10472540614816cdedc8f5265" +checksum = "eda689f7287f15bd3582daba6be8d1545bad3740fd1fb778f629a1fe866bb43b" dependencies = [ "alloy-eips", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rlp", "alloy-serde", "alloy-trie", + "alloy-tx-macros", "auto_impl", "c-kzg", "derive_more 2.0.1", @@ -145,6 +146,7 @@ dependencies = [ "k256", "once_cell", "rand 0.8.5", + "secp256k1", "serde", "serde_with", "thiserror 2.0.12", @@ -152,13 +154,13 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc982af629e511292310fe85b433427fd38cb3105147632b574abc997db44c91" +checksum = "2b5659581e41e8fe350ecc3593cb5c9dcffddfd550896390f2b78a07af67b0fa" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rlp", "alloy-serde", "serde", @@ -166,16 +168,16 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "0.12.6" +version = "1.0.24" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0a0c1ddee20ecc14308aae21c2438c994df7b39010c26d70f86e1d8fdb8db0" +checksum = "944085cf3ac8f32d96299aa26c03db7c8ca6cdaafdbc467910b889f0328e6b70" dependencies = [ "alloy-consensus", "alloy-dyn-abi", "alloy-json-abi", "alloy-network", "alloy-network-primitives", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-provider", "alloy-pubsub", "alloy-rpc-types-eth", @@ -183,33 +185,33 @@ dependencies = [ "alloy-transport", "futures", "futures-util", + "serde_json", "thiserror 2.0.12", ] [[package]] name = "alloy-core" -version = "0.8.25" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d8bcce99ad10fe02640cfaec1c6bc809b837c783c1d52906aa5af66e2a196f6" +checksum = "d47400608fc869727ad81dba058d55f97b29ad8b5c5256d9598523df8f356ab6" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rlp", "alloy-sol-types", ] [[package]] name = "alloy-dyn-abi" -version = "0.8.25" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb8e762aefd39a397ff485bc86df673465c4ad3ec8819cc60833a8a3ba5cdc87" +checksum = "d9e8a436f0aad7df8bb47f144095fba61202265d9f5f09a70b0e3227881a668e" dependencies = [ "alloy-json-abi", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-sol-type-parser", "alloy-sol-types", - "const-hex", "itoa", "serde", "serde_json", @@ -218,11 +220,11 @@ dependencies = [ [[package]] name = "alloy-eip2124" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "675264c957689f0fd75f5993a73123c2cc3b5c235a38f5b9037fe6c826bfb2c0" +checksum = "741bdd7499908b3aa0b159bba11e71c8cddd009a2c2eb7a06e825f1ec87900a5" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rlp", "crc", "serde", @@ -231,22 +233,22 @@ dependencies = [ [[package]] name = "alloy-eip2930" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +checksum = "7b82752a889170df67bbb36d42ca63c531eb16274f0d7299ae2a680facba17bd" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rlp", "serde", ] [[package]] name = "alloy-eip7702" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b15b13d38b366d01e818fe8e710d4d702ef7499eacd44926a06171dd9585d0c" +checksum = "9d4769c6ffddca380b0070d71c8b7f30bed375543fe76bb2f74ec0acf4b7cd16" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rlp", "serde", "thiserror 2.0.12", @@ -254,45 +256,45 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e86967eb559920e4b9102e4cb825fe30f2e9467988353ce4809f0d3f2c90cd4" +checksum = "6f35887da30b5fc50267109a3c61cd63e6ca1f45967983641053a40ee83468c1" dependencies = [ "alloy-eip2124", "alloy-eip2930", "alloy-eip7702", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rlp", "alloy-serde", "auto_impl", "c-kzg", "derive_more 2.0.1", "either", - "once_cell", "serde", "sha2 0.10.9", ] [[package]] name = "alloy-genesis" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40de6f5b53ecf5fd7756072942f41335426d9a3704cd961f77d854739933bcf" +checksum = 
"11d4009efea6f403b3a80531f9c6f70fc242399498ff71196a1688cc1c901f44" dependencies = [ "alloy-eips", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-serde", "alloy-trie", "serde", + "serde_with", ] [[package]] name = "alloy-json-abi" -version = "0.8.25" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6beff64ad0aa6ad1019a3db26fef565aefeb011736150ab73ed3366c3cfd1b" +checksum = "459f98c6843f208856f338bfb25e65325467f7aff35dfeb0484d0a76e059134b" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-sol-type-parser", "serde", "serde_json", @@ -300,12 +302,13 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27434beae2514d4a2aa90f53832cbdf6f23e4b5e2656d95eaf15f9276e2418b6" +checksum = "883dee3b4020fcb5667ee627b4f401e899dad82bf37b246620339dd980720ed9" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-sol-types", + "http 1.3.1", "serde", "serde_json", "thiserror 2.0.12", @@ -314,16 +317,16 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26a33a38c7486b1945f8d093ff027add2f3a8f83c7300dbad6165cc49150085e" +checksum = "cd6e5b8ac1654a05c224390008e43634a2bdc74e181e02cf8ed591d8b3d4ad08" dependencies = [ "alloy-consensus", "alloy-consensus-any", "alloy-eips", "alloy-json-rpc", "alloy-network-primitives", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rpc-types-any", "alloy-rpc-types-eth", "alloy-serde", @@ -340,13 +343,13 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db973a7a23cbe96f2958e5687c51ce2d304b5c6d0dc5ccb3de8667ad8476f50b" +checksum = "80d7980333dd9391719756ac28bc2afa9baa705fc70ffd11dc86ab078dd64477" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-serde", "serde", ] @@ -358,22 +361,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c77490fe91a0ce933a1f219029521f20fc28c2c0ca95d53fa4da9c00b8d9d4e" dependencies = [ "alloy-rlp", - "arbitrary", "bytes", "cfg-if", "const-hex", - "derive_arbitrary", "derive_more 2.0.1", "foldhash", - "getrandom 0.2.16", - "hashbrown 0.15.3", - "indexmap 2.9.0", + "hashbrown 0.15.5", + "indexmap 2.10.0", "itoa", "k256", "keccak-asm", "paste", "proptest", - "proptest-derive", "rand 0.8.5", "ruint", "rustc-hash", @@ -382,11 +381,39 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "alloy-primitives" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cfebde8c581a5d37b678d0a48a32decb51efd7a63a08ce2517ddec26db705c8" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more 2.0.1", + "foldhash", + "getrandom 0.3.3", + "hashbrown 0.15.5", + "indexmap 2.10.0", + "itoa", + "k256", + "keccak-asm", + "paste", + "proptest", + "rand 0.9.2", + "ruint", + "rustc-hash", + "serde", + "sha3", + "tiny-keccak", +] + [[package]] name = "alloy-provider" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b03bde77ad73feae14aa593bcabb932c8098c0f0750ead973331cfc0003a4e1" +checksum = "478a42fe167057b7b919cd8b0c2844f0247f667473340dad100eaf969de5754e" dependencies = [ 
"alloy-chains", "alloy-consensus", @@ -394,10 +421,11 @@ dependencies = [ "alloy-json-rpc", "alloy-network", "alloy-network-primitives", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-pubsub", "alloy-rpc-client", "alloy-rpc-types-eth", + "alloy-signer", "alloy-sol-types", "alloy-transport", "alloy-transport-http", @@ -406,12 +434,13 @@ dependencies = [ "async-trait", "auto_impl", "dashmap", + "either", "futures", "futures-utils-wasm", "lru 0.13.0", "parking_lot", "pin-project", - "reqwest 0.12.15", + "reqwest 0.12.22", "serde", "serde_json", "thiserror 2.0.12", @@ -423,21 +452,24 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721aca709a9231815ad5903a2d284042cc77e7d9d382696451b30c9ee0950001" +checksum = "b0a99b17987f40a066b29b6b56d75e84cd193b866cac27cae17b59f40338de95" dependencies = [ "alloy-json-rpc", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-transport", + "auto_impl", "bimap", "futures", + "parking_lot", "serde", "serde_json", "tokio", "tokio-stream", "tower", "tracing", + "wasmtimer", ] [[package]] @@ -459,43 +491,41 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "alloy-rpc-client" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445a3298c14fae7afb5b9f2f735dead989f3dd83020c2ab8e48ed95d7b6d1acb" +checksum = "8a0c6d723fbdf4a87454e2e3a275e161be27edcfbf46e2e3255dd66c138634b6" dependencies = [ "alloy-json-rpc", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-pubsub", "alloy-transport", "alloy-transport-http", "alloy-transport-ws", - "async-stream", "futures", "pin-project", - "reqwest 0.12.15", + "reqwest 0.12.22", "serde", "serde_json", "tokio", "tokio-stream", "tower", "tracing", - "tracing-futures", "url", "wasmtimer", ] [[package]] name = "alloy-rpc-types" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9157deaec6ba2ad7854f16146e4cd60280e76593eed79fdcb06e0fa8b6c60f77" +checksum = "c41492dac39365b86a954de86c47ec23dcc7452cdb2fde591caadc194b3e34c6" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rpc-types-eth", "alloy-serde", "serde", @@ -503,9 +533,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604dea1f00fd646debe8033abe8e767c732868bf8a5ae9df6321909ccbc99c56" +checksum = "8f7eb22670a972ad6c222a6c6dac3eef905579acffe9d63ab42be24c7d158535" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -514,42 +544,43 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e13d71eac04513a71af4b3df580f52f2b4dcbff9d971cc9a52519acf55514cb" +checksum = "b777b98526bbe5b7892ca22a7fd5f18ed624ff664a79f40d0f9f2bf94ba79a84" dependencies = [ "alloy-consensus", "alloy-consensus-any", "alloy-eips", "alloy-network-primitives", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rlp", "alloy-serde", "alloy-sol-types", "itertools 0.14.0", "serde", "serde_json", + "serde_with", "thiserror 2.0.12", ] [[package]] name = "alloy-serde" -version = "0.12.6" +version = "1.0.24" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1cd73fc054de6353c7f22ff9b846b0f0f145cd0112da07d4119e41e9959207" +checksum = "ee8d2c52adebf3e6494976c8542fbdf12f10123b26e11ad56f77274c16a2a039" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "serde", "serde_json", ] [[package]] name = "alloy-signer" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c96fbde54bee943cd94ebacc8a62c50b38c7dfd2552dcd79ff61aea778b1bfcc" +checksum = "7c0494d1e0f802716480aabbe25549c7f6bc2a25ff33b08fd332bbb4b7d06894" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "async-trait", "auto_impl", "either", @@ -560,13 +591,13 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6e72002cc1801d8b41e9892165e3a6551b7bd382bd9d0414b21e90c0c62551" +checksum = "59c2435eb8979a020763ced3fb478932071c56e5f75ea86db41f320915d325ba" dependencies = [ "alloy-consensus", "alloy-network", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-signer", "async-trait", "k256", @@ -576,42 +607,42 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.8.25" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10ae8e9a91d328ae954c22542415303919aabe976fe7a92eb06db1b68fd59f2" +checksum = "aedac07a10d4c2027817a43cc1f038313fc53c7ac866f7363239971fd01f9f18" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.25" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83ad5da86c127751bc607c174d6c9fe9b85ef0889a9ca0c641735d77d4f98f26" +checksum = "24f9a598f010f048d8b8226492b6401104f5a5c1273c2869b72af29b48bb4ba9" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck 0.5.0", - "indexmap 2.9.0", + "indexmap 2.10.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.25" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3d30f0d3f9ba3b7686f3ff1de9ee312647aac705604417a2f40c604f409a9e" +checksum = "f494adf9d60e49aa6ce26dfd42c7417aa6d4343cf2ae621f20e4d92a5ad07d85" dependencies = [ "alloy-json-abi", "const-hex", @@ -621,15 +652,15 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.101", + "syn 2.0.104", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.25" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d162f8524adfdfb0e4bd0505c734c985f3e2474eb022af32eef0d52a4f3935c" +checksum = "52db32fbd35a9c0c0e538b58b81ebbae08a51be029e7ad60e08b60481c2ec6c3" dependencies = [ "serde", "winnow", @@ -637,24 +668,25 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.25" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d43d5e60466a440230c07761aa67671d4719d46f43be8ea6e7ed334d8db4a9ab" +checksum = "a285b46e3e0c177887028278f04cc8262b76fd3b8e0e20e93cea0a58c35f5ac5" dependencies = [ "alloy-json-abi", - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-sol-macro", - "const-hex", "serde", ] [[package]] name = 
"alloy-transport" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec325c2af8562ef355c02aeb527c755a07e9d8cf6a1e65dda8d0bf23e29b2c" +checksum = "3c0107675e10c7f248bf7273c1e7fdb02409a717269cc744012e6f3c39959bfb" dependencies = [ "alloy-json-rpc", + "alloy-primitives 1.3.0", + "auto_impl", "base64 0.22.1", "derive_more 2.0.1", "futures", @@ -672,13 +704,13 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a082c9473c6642cce8b02405a979496126a03b096997888e86229afad05db06c" +checksum = "78e3736701b5433afd06eecff08f0688a71a10e0e1352e0bbf0bed72f0dd4e35" dependencies = [ "alloy-json-rpc", "alloy-transport", - "reqwest 0.12.15", + "reqwest 0.12.22", "serde_json", "tower", "tracing", @@ -687,15 +719,15 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.12.6" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae865917bdabaae21f418010fe7e8837c6daa6611fde25f8d78a1778d6ecb523" +checksum = "77fd607158cb9bc54cbcfcaab4c5f36c5b26994c7dc58b6f095ce27a54f270f3" dependencies = [ "alloy-pubsub", "alloy-transport", "futures", "http 1.3.1", - "rustls 0.23.27", + "rustls 0.23.31", "serde_json", "tokio", "tokio-tungstenite", @@ -705,20 +737,33 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.7.9" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95a94854e420f07e962f7807485856cde359ab99ab6413883e15235ad996e8b" +checksum = "bada1fc392a33665de0dc50d401a3701b62583c655e3522a323490a5da016962" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rlp", "arrayvec", - "derive_more 1.0.0", + "derive_more 2.0.1", "nybbles", "serde", "smallvec", "tracing", ] +[[package]] +name = "alloy-tx-macros" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6acb36318dfa50817154064fea7932adf2eec3f51c86680e2b37d7e8906c66bb" +dependencies = [ + "alloy-primitives 1.3.0", + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "anchor" version = "0.2.0" @@ -727,7 +772,7 @@ dependencies = [ "bls", "clap", "client", - "dirs 6.0.0", + "dirs", "futures", "global_config", "keygen", @@ -736,7 +781,7 @@ dependencies = [ "metrics", "serde", "ssv_network_config", - "strum 0.27.1", + "strum 0.27.2", "task_executor", "tokio", "tracing", @@ -750,11 +795,11 @@ name = "anchor_validator_store" version = "0.1.0" dependencies = [ "beacon_node_fallback", - "dashmap", "database", "eth2", "ethereum_ssz", "hex", + "lru 0.16.0", "metrics", "openssl", "parking_lot", @@ -791,9 +836,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" dependencies = [ "anstyle", "anstyle-parse", @@ -806,37 +851,37 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.8" +version = "3.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6680de5231bd6ee4c6191b8a1325daa282b415391ec9d3a37bd34f2060dc73fa" +checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -1011,9 +1056,9 @@ dependencies = [ [[package]] name = "asn1-rs" -version = "0.6.2" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" +checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" dependencies = [ "asn1-rs-derive", "asn1-rs-impl", @@ -1021,19 +1066,19 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror 1.0.69", + "thiserror 2.0.12", "time", ] [[package]] name = "asn1-rs-derive" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] @@ -1045,7 +1090,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1067,9 +1112,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -1077,51 +1122,11 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "async-executor" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb812ffb58524bdd10860d7d974e2f01cc0950c2438a74ee5ec2e2280c6c4ffa" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand", - "futures-lite", - "pin-project-lite", - "slab", -] - -[[package]] -name = "async-fs" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" -dependencies = [ - "async-lock", - "blocking", - "futures-lite", -] - -[[package]] -name = "async-global-executor" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" -dependencies = [ - "async-channel 2.3.1", - "async-executor", - "async-io", - "async-lock", - "blocking", - "futures-lite", - "once_cell", -] - [[package]] name = "async-io" 
-version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +checksum = "19634d6336019ef220f09fd31168ce5c184b295cbf80345437cc36094ef223ca" dependencies = [ "async-lock", "cfg-if", @@ -1130,108 +1135,22 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.44", + "rustix 1.0.8", "slab", - "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "async-lock" -version = "3.4.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "event-listener-strategy", "pin-project-lite", ] -[[package]] -name = "async-net" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" -dependencies = [ - "async-io", - "blocking", - "futures-lite", -] - -[[package]] -name = "async-process" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" -dependencies = [ - "async-channel 2.3.1", - "async-io", - "async-lock", - "async-signal", - "async-task", - "blocking", - "cfg-if", - "event-listener 5.4.0", - "futures-lite", - "rustix 0.38.44", - "tracing", -] - -[[package]] -name = "async-recursion" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", -] - -[[package]] -name = "async-signal" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" -dependencies = [ - "async-io", - "async-lock", - "atomic-waker", - "cfg-if", - "futures-core", - "futures-io", - "rustix 0.38.44", - "signal-hook-registry", - "slab", - "windows-sys 0.59.0", -] - -[[package]] -name = "async-std" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730294c1c08c2e0f85759590518f6333f0d5a0a766a27d519c1b244c3dfd8a24" -dependencies = [ - "async-channel 1.9.0", - "async-global-executor", - "async-io", - "async-lock", - "crossbeam-utils", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "once_cell", - "pin-project-lite", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - [[package]] name = "async-stream" version = "0.3.6" @@ -1251,15 +1170,9 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] -[[package]] -name = "async-task" -version = "4.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" - [[package]] name = "async-trait" version = "0.1.88" @@ -1268,7 +1181,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1303,11 +1216,12 @@ checksum = 
"1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "attohttpc" -version = "0.24.1" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" +checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" dependencies = [ - "http 0.2.12", + "base64 0.22.1", + "http 1.3.1", "log", "url", ] @@ -1320,14 +1234,14 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" @@ -1424,19 +1338,20 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.7.3" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "beacon_node_fallback" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "clap", "eth2", "futures", "itertools 0.10.5", + "sensitive_url", "serde", "slot_clock", "strum 0.24.1", @@ -1468,6 +1383,22 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" +[[package]] +name = "bitcoin-io" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" + +[[package]] +name = "bitcoin_hashes" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", + "hex-conservative", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -1519,29 +1450,16 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "blocking" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" -dependencies = [ - "async-channel 2.3.1", - "async-task", - "futures-io", - "futures-lite", - "piper", -] - [[package]] name = "bls" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "arbitrary", "blst", "ethereum_hashing", - "ethereum_serde_utils", + "ethereum_serde_utils 0.8.0", "ethereum_ssz", "fixed_bytes", "hex", @@ -1566,9 +1484,9 @@ dependencies = [ [[package]] name = "blst" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47c79a94619fade3c0b887670333513a67ac28a6a7e653eb260bf0d4103db38d" +checksum = 
"4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" dependencies = [ "cc", "glob", @@ -1595,7 +1513,7 @@ dependencies = [ [[package]] name = "blstrs_plus" version = "0.8.18" -source = "git+https://github.com/dknopik/blstrs?branch=pls#e281dca0c6c5c70d5c3b748c915612809e367a0d" +source = "git+https://github.com/dknopik/blstrs?branch=pls#37ee6679b865853509f2ace24f4dfa517d4d64b6" dependencies = [ "arrayref", "blst", @@ -1620,9 +1538,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "byte-slice-cast" @@ -1667,9 +1585,9 @@ dependencies = [ [[package]] name = "c-kzg" -version = "1.0.3" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0307f72feab3300336fb803a57134159f6e20139af1357f36c54cb90d8e8928" +checksum = "7318cfa722931cb5fe0838b98d3ce5621e75f6a6408abc21721d80de9223f2e4" dependencies = [ "blst", "cc", @@ -1682,9 +1600,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.9" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +checksum = "5d07aa9a93b00c76f71bc35d598bed923f6d4f3a9ca5c24b7737ae1a292841c0" dependencies = [ "serde", ] @@ -1714,9 +1632,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.24" +version = "1.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16595d3be041c03b09d08d0858631facccee9221e579704070e6e9e4915d3bc7" +checksum = "2352e5597e9c544d5e6d9c95190d5d27738ade584fa8db0a16e130e5c2b5296e" dependencies = [ "jobserver", "libc", @@ -1725,9 +1643,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" @@ -1796,9 +1714,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.38" +version = "4.5.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" +checksum = "50fd97c9dc2399518aa331917ac6f274280ec5eb34e555dd291899745c48ec6f" dependencies = [ "clap_builder", "clap_derive", @@ -1806,9 +1724,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.38" +version = "4.5.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" +checksum = "c35b5830294e1fa0462034af85cc95225a4cb07092c088c55bda3147cfcd8f65" dependencies = [ "anstream", "anstyle", @@ -1819,38 +1737,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.32" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" - -[[package]] -name = "clap_utils" -version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" -dependencies = [ - "alloy-primitives", - "clap", - "dirs 3.0.2", - "eth2_network_config", - "ethereum_ssz", - "hex", - "serde", - "serde_json", - "serde_yaml", - "types", -] +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "client" @@ -1860,7 +1761,7 @@ dependencies = [ "beacon_node_fallback", "clap", "database", - "dirs 6.0.0", + "dirs", "duties_tracker", "eth", "eth2", @@ -1877,6 +1778,7 @@ dependencies = [ "message_validator", "multiaddr", "network", + "network_utils", "openssl", "operator_key", "parking_lot", @@ -1890,13 +1792,12 @@ dependencies = [ "slot_clock", "ssv_network_config", "ssv_types", - "strum 0.27.1", + "strum 0.27.2", "subnet_service", "task_executor", "tokio", "tracing", "types", - "unused_port", "validator_metrics", "validator_services", "version", @@ -1905,14 +1806,14 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "itertools 0.10.5", ] @@ -1920,7 +1821,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "quote", "syn 1.0.109", @@ -1983,7 +1884,7 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "context_deserialize" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "milhouse", "serde", @@ -1993,7 +1894,7 @@ dependencies = [ [[package]] name = "context_deserialize_derive" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "quote", "syn 1.0.109", @@ -2008,15 +1909,6 @@ dependencies = [ "unicode-segmentation", ] -[[package]] -name = "convert_case" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baaaa0ecca5b51987b9423ccdc971514dd8b0bb7b4060b983d3664dad3f1f89f" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -2120,13 +2012,19 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = 
"9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + [[package]] name = "crossbeam-channel" version = "0.5.15" @@ -2163,9 +2061,9 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-bigint" @@ -2243,7 +2141,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2291,7 +2189,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2313,7 +2211,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2373,7 +2271,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2381,12 +2279,11 @@ name = "database" version = "0.1.0" dependencies = [ "base64 0.22.1", - "multi_index_map", "once_cell", "openssl", "r2d2", "r2d2_sqlite", - "rand 0.9.1", + "rand 0.9.2", "rusqlite", "ssv_types", "tempfile", @@ -2413,15 +2310,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", - "pem-rfc7468", "zeroize", ] [[package]] name = "der-parser" -version = "9.0.0" +version = "10.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" dependencies = [ "asn1-rs", "displaydoc", @@ -2460,7 +2356,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2471,16 +2367,7 @@ checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", -] - -[[package]] -name = "derive_more" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" -dependencies = [ - "derive_more-impl 1.0.0", + "syn 2.0.104", ] [[package]] @@ -2489,31 +2376,20 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ - "derive_more-impl 2.0.1", + "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "1.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ + 
"convert_case", "proc-macro2", "quote", - "syn 2.0.101", -] - -[[package]] -name = "derive_more-impl" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" -dependencies = [ - "convert_case 0.7.1", - "proc-macro2", - "quote", - "syn 2.0.101", - "unicode-xid", + "syn 2.0.104", + "unicode-xid", ] [[package]] @@ -2537,43 +2413,13 @@ dependencies = [ "subtle", ] -[[package]] -name = "directory" -version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" -dependencies = [ - "clap", - "clap_utils", - "eth2_network_config", -] - -[[package]] -name = "dirs" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30baa043103c9d0c2a57cf537cc2f35623889dc0d405e6c3cccfadbc81c71309" -dependencies = [ - "dirs-sys 0.3.7", -] - [[package]] name = "dirs" version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" dependencies = [ - "dirs-sys 0.5.0", -] - -[[package]] -name = "dirs-sys" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" -dependencies = [ - "libc", - "redox_users 0.4.6", - "winapi", + "dirs-sys", ] [[package]] @@ -2584,8 +2430,8 @@ checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" dependencies = [ "libc", "option-ext", - "redox_users 0.5.0", - "windows-sys 0.59.0", + "redox_users", + "windows-sys 0.60.2", ] [[package]] @@ -2614,7 +2460,7 @@ dependencies = [ "parking_lot", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", "uint 0.10.0", @@ -2629,7 +2475,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2664,6 +2510,12 @@ dependencies = [ "types", ] +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + [[package]] name = "ecdsa" version = "0.16.9" @@ -2691,9 +2543,9 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", @@ -2713,7 +2565,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2738,7 +2590,6 @@ dependencies = [ "generic-array 0.14.7", "group", "hkdf", - "pem-rfc7468", "pkcs8", "rand_core 0.6.4", "sec1", @@ -2799,7 +2650,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2819,7 +2670,7 @@ checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -2830,12 +2681,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.12" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -2851,11 +2702,12 @@ dependencies = [ "fastrand", "futures", "hex", - "indexmap 2.9.0", + "indexmap 2.10.0", "metrics", - "reqwest 0.12.15", + "reqwest 0.12.22", "rusqlite", "sensitive_url", + "slashing_protection", "slot_clock", "ssv_network_config", "ssv_types", @@ -2869,13 +2721,13 @@ dependencies = [ [[package]] name = "eth2" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "derivative", "either", "enr", "eth2_keystore", - "ethereum_serde_utils", + "ethereum_serde_utils 0.8.0", "ethereum_ssz", "ethereum_ssz_derive", "futures", @@ -2901,7 +2753,7 @@ dependencies = [ [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "paste", "types", @@ -2910,7 +2762,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "bls", "ethereum_hashing", @@ -2923,7 +2775,7 @@ dependencies = [ [[package]] name = "eth2_key_derivation" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "bls", "num-bigint-dig", @@ -2935,7 +2787,7 @@ dependencies = [ [[package]] name = "eth2_keystore" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "aes 0.7.5", "bls", @@ -2957,7 +2809,7 @@ dependencies = [ [[package]] name = "eth2_network_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "bytes", "discv5", @@ -2991,7 +2843,20 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70cbccfccf81d67bff0ab36e591fa536c8a935b078a7b0e58c1d00d418332fc9" dependencies = [ - "alloy-primitives", + "alloy-primitives 0.8.25", + "hex", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "ethereum_serde_utils" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dc1355dbb41fbbd34ec28d4fb2a57d9a70c67ac3c19f6a5ca4d4a176b9e997a" +dependencies = [ + "alloy-primitives 1.3.0", "hex", "serde", "serde_derive", @@ -3000,13 +2865,12 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.8.3" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86da3096d1304f5f28476ce383005385459afeaf0eea08592b65ddbc9b258d16" +checksum = 
"9ca8ba45b63c389c6e115b095ca16381534fdcc03cf58176a3f8554db2dbe19b" dependencies = [ - "alloy-primitives", - "arbitrary", - "ethereum_serde_utils", + "alloy-primitives 1.3.0", + "ethereum_serde_utils 0.8.0", "itertools 0.13.0", "serde", "serde_derive", @@ -3016,14 +2880,14 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.8.3" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d832a5c38eba0e7ad92592f7a22d693954637fbb332b4f669590d66a5c3183e5" +checksum = "0dd55d08012b4e0dfcc92b8d6081234df65f2986ad34cc76eeed69c5e2ce7506" dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3034,9 +2898,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ "concurrent-queue", "parking", @@ -3049,7 +2913,7 @@ version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "pin-project-lite", ] @@ -3144,7 +3008,7 @@ dependencies = [ [[package]] name = "filesystem" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "winapi", "windows-acl", @@ -3165,17 +3029,17 @@ dependencies = [ [[package]] name = "fixed_bytes" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "safe_arith", ] [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "miniz_oxide", @@ -3284,14 +3148,11 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" dependencies = [ - "fastrand", "futures-core", - "futures-io", - "parking", "pin-project-lite", ] @@ -3303,7 +3164,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -3313,7 +3174,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.27", + "rustls 0.23.31", "rustls-pki-types", ] @@ -3370,7 +3231,7 @@ dependencies = [ "libc", "log", "rustversion", - "windows 0.61.1", + "windows 0.61.3", ] [[package]] @@ -3403,7 
+3264,7 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -3454,41 +3315,30 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "global_config" version = "0.1.0" dependencies = [ "clap", - "dirs 6.0.0", + "dirs", "ssv_network_config", + "thiserror 2.0.12", "tracing", ] -[[package]] -name = "gloo-timers" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - [[package]] name = "graffiti_file" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "bls", "serde", @@ -3505,15 +3355,15 @@ dependencies = [ "ff", "rand 0.8.5", "rand_core 0.6.4", - "rand_xorshift", + "rand_xorshift 0.3.0", "subtle", ] [[package]] name = "h2" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" dependencies = [ "bytes", "fnv", @@ -3521,7 +3371,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.9.0", + "indexmap 2.10.0", "slab", "tokio", "tokio-util", @@ -3530,9 +3380,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ "atomic-waker", "bytes", @@ -3540,7 +3390,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.3.1", - "indexmap 2.9.0", + "indexmap 2.10.0", "slab", "tokio", "tokio-util", @@ -3574,9 +3424,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", @@ -3605,7 +3455,7 @@ dependencies = [ [[package]] name = "health_metrics" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "eth2", "metrics", @@ -3643,9 +3493,9 @@ checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hermit-abi" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" +checksum = 
"fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -3656,6 +3506,15 @@ dependencies = [ "serde", ] +[[package]] +name = "hex-conservative" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +dependencies = [ + "arrayvec", +] + [[package]] name = "hex_fmt" version = "0.3.0" @@ -3664,11 +3523,10 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hickory-proto" -version = "0.25.0-alpha.5" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d00147af6310f4392a31680db52a3ed45a2e0f68eb18e8c3fe5537ecc96d9e2" +checksum = "f8a6fe56c0038198998a6f217ca4e7ef3a5e51f46163bd6dd60b5c71ca6c6502" dependencies = [ - "async-recursion", "async-trait", "cfg-if", "data-encoding", @@ -3679,8 +3537,9 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.1", - "socket2", + "rand 0.9.2", + "ring", + "socket2 0.5.10", "thiserror 2.0.12", "tinyvec", "tokio", @@ -3690,9 +3549,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.25.0-alpha.5" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5762f69ebdbd4ddb2e975cd24690bf21fe6b2604039189c26acddbc427f12887" +checksum = "dc62a9a99b0bfb44d2ab95a7208ac952d31060efc16241c87eaf36406fecf87a" dependencies = [ "cfg-if", "futures-util", @@ -3701,7 +3560,7 @@ dependencies = [ "moka", "once_cell", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "resolv-conf", "smallvec", "thiserror 2.0.12", @@ -3822,9 +3681,11 @@ dependencies = [ "anchor_validator_store", "axum", "health_metrics", - "lighthouse_network", + "libp2p", "metrics", + "network_utils", "parking_lot", + "prometheus-client", "serde", "slot_clock", "tokio", @@ -3857,14 +3718,14 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.5.10", "tokio", "tower-service", "tracing", @@ -3880,7 +3741,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.10", + "h2 0.4.12", "http 1.3.1", "http-body 1.0.1", "httparse", @@ -3908,14 +3769,14 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.6" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a01595e11bdcec50946522c32dde3fc6914743000a68b93000965f2f02406d" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http 1.3.1", "hyper 1.6.0", "hyper-util", - "rustls 0.23.27", + "rustls 0.23.31", "rustls-pki-types", "tokio", "tokio-rustls 0.26.2", @@ -3953,22 +3814,28 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.12" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9f1e950e0d9d1d3c47184416723cf29c0d1f93bd8cccf37e4beb6b44f31710" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", "http 1.3.1", "http-body 1.0.1", "hyper 1.6.0", + "ipnet", "libc", + "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.0", + "system-configuration 0.6.1", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -4136,7 +4003,6 @@ dependencies = [ "netlink-proto", "netlink-sys", 
"rtnetlink", - "smol", "system-configuration 0.6.1", "tokio", "windows 0.53.0", @@ -4144,9 +4010,9 @@ dependencies = [ [[package]] name = "igd-next" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76b0d7d4541def58a37bf8efc559683f21edce7c82f0d866c93ac21f7e098f93" +checksum = "516893339c97f6011282d5825ac94fc1c7aad5cad26bdc2d0cee068c0bf97f97" dependencies = [ "async-trait", "attohttpc", @@ -4157,7 +4023,7 @@ dependencies = [ "hyper 1.6.0", "hyper-util", "log", - "rand 0.8.5", + "rand 0.9.2", "tokio", "url", "xmltree", @@ -4180,7 +4046,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -4196,13 +4062,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ - "arbitrary", "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.15.5", "serde", ] @@ -4218,7 +4083,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "bytes", ] @@ -4234,13 +4099,24 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "io-uring" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + [[package]] name = "ipconfig" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", + "socket2 0.5.10", "widestring 1.2.0", "windows-sys 0.48.0", "winreg", @@ -4252,6 +4128,16 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is_terminal_polyfill" version = "1.70.1" @@ -4351,6 +4237,7 @@ version = "0.2.0" dependencies = [ "base64 0.22.1", "clap", + "global_config", "openssl", "operator_key", "rpassword", @@ -4379,30 +4266,22 @@ dependencies = [ "operator_key", "serde", "serde_json", + "ssv_types", "tokio", "tracing", "types", ] -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "arbitrary", "c-kzg", "derivative", "ethereum_hashing", - "ethereum_serde_utils", + "ethereum_serde_utils 0.8.0", "ethereum_ssz", 
"ethereum_ssz_derive", "hex", @@ -4423,9 +4302,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.172" +version = "0.2.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" [[package]] name = "libm" @@ -4435,9 +4314,9 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libp2p" -version = "0.55.0" +version = "0.56.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b72dc443ddd0254cb49a794ed6b6728400ee446a0f7ab4a07d0209ee98de20e9" +checksum = "ce71348bf5838e46449ae240631117b487073d5f347c06d434caddcb91dceb5a" dependencies = [ "bytes", "either", @@ -4454,7 +4333,6 @@ dependencies = [ "libp2p-metrics", "libp2p-noise", "libp2p-ping", - "libp2p-plaintext", "libp2p-quic", "libp2p-request-response", "libp2p-swarm", @@ -4469,9 +4347,9 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38944b7cb981cc93f2f0fb411ff82d0e983bd226fbcc8d559639a3a73236568b" +checksum = "d16ccf824ee859ca83df301e1c0205270206223fd4b1f2e512a693e1912a8f4a" dependencies = [ "libp2p-core", "libp2p-identity", @@ -4480,9 +4358,9 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efe9323175a17caa8a2ed4feaf8a548eeef5e0b72d03840a0eab4bcb0210ce1c" +checksum = "a18b8b607cf3bfa2f8c57db9c7d8569a315d5cc0a282e6bfd5ebfc0a9840b2a0" dependencies = [ "libp2p-core", "libp2p-identity", @@ -4491,9 +4369,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.43.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "193c75710ba43f7504ad8f58a62ca0615b1d7e572cb0f1780bc607252c39e9ef" +checksum = "4d28e2d2def7c344170f5c6450c0dbe3dfef655610dbfde2f6ac28a527abbe36" dependencies = [ "either", "fnv", @@ -4503,7 +4381,6 @@ dependencies = [ "multiaddr", "multihash", "multistream-select", - "once_cell", "parking_lot", "pin-project", "quick-protobuf", @@ -4517,9 +4394,9 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.43.0" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b780a1150214155b0ed1cdf09fbd2e1b0442604f9146a431d1b21d23eef7bd7" +checksum = "0b770c1c8476736ca98c578cba4b505104ff8e842c2876b528925f9766379f9a" dependencies = [ "async-trait", "futures", @@ -4533,10 +4410,10 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.49.0" -source = "git+https://github.com/sigp/rust-libp2p.git?rev=61b2820#61b2820de7a3fab5ae5e1362c4dfa93bd7c41e98" +version = "0.50.0" +source = "git+https://github.com/sigp/rust-libp2p.git?rev=2a726cd#2a726cdbec1f055854755dc03a632d804e268543" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.5.0", "asynchronous-codec", "base64 0.22.1", "byteorder", @@ -4563,9 +4440,9 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c06862544f02d05d62780ff590cc25a75f5c2b9df38ec7a370dcae8bb873cf" +checksum = "8ab792a8b68fdef443a62155b01970c81c3aadab5e659621b063ef252a8e65e8" dependencies = [ "asynchronous-codec", "either", @@ -4584,9 +4461,9 @@ dependencies = [ 
[[package]] name = "libp2p-identity" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb68ea10844211a59ce46230909fd0ea040e8a192454d4cc2ee0d53e12280eb" +checksum = "3104e13b51e4711ff5738caa1fb54467c8604c2e94d607e27745bcf709068774" dependencies = [ "asn1_der", "bs58", @@ -4594,10 +4471,8 @@ dependencies = [ "hkdf", "k256", "multihash", - "p256", "quick-protobuf", "rand 0.8.5", - "sec1", "sha2 0.10.9", "thiserror 2.0.12", "tracing", @@ -4606,9 +4481,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.47.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d0ba095e1175d797540e16b62e7576846b883cb5046d4159086837b36846cc" +checksum = "c66872d0f1ffcded2788683f76931be1c52e27f343edb93bc6d0bcd8887be443" dependencies = [ "futures", "hickory-proto", @@ -4618,16 +4493,16 @@ dependencies = [ "libp2p-swarm", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", ] [[package]] name = "libp2p-metrics" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ce58c64292e87af624fcb86465e7dd8342e46a388d71e8fec0ab37ee789630a" +checksum = "805a555148522cb3414493a5153451910cb1a146c53ffbf4385708349baf62b7" dependencies = [ "futures", "libp2p-core", @@ -4640,30 +4515,11 @@ dependencies = [ "web-time", ] -[[package]] -name = "libp2p-mplex" -version = "0.43.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aaa6fee3722e355443058472fc4705d78681bc2d8e447a0bdeb3fecf40cd197" -dependencies = [ - "asynchronous-codec", - "bytes", - "futures", - "libp2p-core", - "libp2p-identity", - "nohash-hasher", - "parking_lot", - "rand 0.8.5", - "smallvec", - "tracing", - "unsigned-varint 0.8.0", -] - [[package]] name = "libp2p-noise" -version = "0.46.0" +version = "0.46.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcc133e0f3cea07acde6eb8a9665cb11b600bd61110b010593a0210b8153b16" +checksum = "bc73eacbe6462a0eb92a6527cac6e63f02026e5407f8831bde8293f19217bfbf" dependencies = [ "asynchronous-codec", "bytes", @@ -4672,7 +4528,6 @@ dependencies = [ "libp2p-identity", "multiaddr", "multihash", - "once_cell", "quick-protobuf", "rand 0.8.5", "snow", @@ -4686,7 +4541,7 @@ dependencies = [ [[package]] name = "libp2p-peer-store" version = "0.1.0" -source = "git+https://github.com/libp2p/rust-libp2p.git?rev=082eb16#082eb166f5ca06b4baaaad1b6bb9c4cdb9dcdc14" +source = "git+https://github.com/libp2p/rust-libp2p.git?rev=ad9a1b2#ad9a1b27587f384f644f30f8fccb2b9e6ad47edb" dependencies = [ "libp2p-core", "libp2p-swarm", @@ -4695,9 +4550,9 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2529993ff22deb2504c0130a58b60fb77f036be555053922db1a0490b5798b" +checksum = "74bb7fcdfd9fead4144a3859da0b49576f171a8c8c7c0bfc7c541921d25e60d3" dependencies = [ "futures", "futures-timer", @@ -4727,9 +4582,9 @@ dependencies = [ [[package]] name = "libp2p-quic" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41432a159b00424a0abaa2c80d786cddff81055ac24aa127e0cf375f7858d880" +checksum = "8dc448b2de9f4745784e3751fe8bc6c473d01b8317edd5ababcb0dec803d843f" dependencies = [ "futures", "futures-timer", @@ -4740,8 +4595,8 @@ dependencies = [ "quinn", "rand 0.8.5", "ring", - "rustls 0.23.27", - "socket2", + 
"rustls 0.23.31", + "socket2 0.5.10", "thiserror 2.0.12", "tokio", "tracing", @@ -4749,9 +4604,9 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "548fe44a80ff275d400f1b26b090d441d83ef73efabbeb6415f4ce37e5aed865" +checksum = "a9f1cca83488b90102abac7b67d5c36fc65bc02ed47620228af7ed002e6a1478" dependencies = [ "async-trait", "futures", @@ -4766,11 +4621,10 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.46.0" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "803399b4b6f68adb85e63ab573ac568154b193e9a640f03e0f2890eabbcb37f8" +checksum = "6aa762e5215919a34e31c35d4b18bf2e18566ecab7f8a3d39535f4a3068f8b62" dependencies = [ - "async-std", "either", "fnv", "futures", @@ -4780,7 +4634,6 @@ dependencies = [ "libp2p-swarm-derive", "lru 0.12.5", "multistream-select", - "once_cell", "rand 0.8.5", "smallvec", "tokio", @@ -4790,21 +4643,20 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.35.0" +version = "0.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" +checksum = "dd297cf53f0cb3dee4d2620bb319ae47ef27c702684309f682bdb7e55a18ae9c" dependencies = [ "heck 0.5.0", - "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "libp2p-swarm-test" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb6354e3a50496d750805f6cf33679bd698850d535602f42c61e465e0734d0b" +checksum = "7b149112570d507efe305838c7130835955a0b1147aa8051c1c3867a83175cf6" dependencies = [ "async-trait", "futures", @@ -4820,26 +4672,25 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.43.0" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65346fb4d36035b23fec4e7be4c320436ba53537ce9b6be1d1db1f70c905cad0" +checksum = "65b4e030c52c46c8d01559b2b8ca9b7c4185f10576016853129ca1fe5cd1a644" dependencies = [ - "async-io", "futures", "futures-timer", "if-watch", "libc", "libp2p-core", - "socket2", + "socket2 0.5.10", "tokio", "tracing", ] [[package]] name = "libp2p-tls" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42bbf5084fb44133267ad4caaa72a253d68d709edd2ed1cf9b42431a8ead8fd5" +checksum = "96ff65a82e35375cbc31ebb99cacbbf28cb6c4fefe26bf13756ddcf708d40080" dependencies = [ "futures", "futures-rustls", @@ -4847,8 +4698,8 @@ dependencies = [ "libp2p-identity", "rcgen", "ring", - "rustls 0.23.27", - "rustls-webpki 0.101.7", + "rustls 0.23.31", + "rustls-webpki 0.103.4", "thiserror 2.0.12", "x509-parser", "yasna", @@ -4856,9 +4707,9 @@ dependencies = [ [[package]] name = "libp2p-upnp" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d457b9ecceb66e7199f049926fad447f1f17f040e8d29d690c086b4cab8ed14a" +checksum = "4757e65fe69399c1a243bbb90ec1ae5a2114b907467bf09f3575e899815bb8d3" dependencies = [ "futures", "futures-timer", @@ -4881,14 +4732,14 @@ dependencies = [ "thiserror 2.0.12", "tracing", "yamux 0.12.1", - "yamux 0.13.5", + "yamux 0.13.6", ] [[package]] name = "libredox" -version = "0.1.3" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" 
+checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3" dependencies = [ "bitflags 2.9.1", "libc", @@ -4905,79 +4756,12 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "lighthouse_network" -version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" -dependencies = [ - "alloy-primitives", - "alloy-rlp", - "bytes", - "delay_map", - "directory", - "dirs 3.0.2", - "discv5", - "either", - "eth2", - "ethereum_ssz", - "ethereum_ssz_derive", - "fnv", - "futures", - "hex", - "itertools 0.10.5", - "libp2p", - "libp2p-gossipsub", - "libp2p-mplex", - "lighthouse_version", - "local-ip-address", - "logging 0.2.0", - "lru 0.12.5", - "lru_cache", - "metrics", - "parking_lot", - "prometheus-client", - "rand 0.8.5", - "regex", - "serde", - "sha2 0.9.9", - "smallvec", - "snap", - "ssz_types", - "strum 0.24.1", - "superstruct", - "task_executor", - "tiny-keccak", - "tokio", - "tokio-io-timeout", - "tokio-util", - "tracing", - "tracing-subscriber", - "types", - "unsigned-varint 0.8.0", - "unused_port", -] - -[[package]] -name = "lighthouse_version" -version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" -dependencies = [ - "git-version", - "target_info", -] - [[package]] name = "linux-raw-sys" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - [[package]] name = "linux-raw-sys" version = "0.9.4" @@ -4990,23 +4774,11 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" -[[package]] -name = "local-ip-address" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" -dependencies = [ - "libc", - "neli", - "thiserror 2.0.12", - "windows-sys 0.59.0", -] - [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -5017,9 +4789,6 @@ name = "log" version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" -dependencies = [ - "value-bag", -] [[package]] name = "logging" @@ -5040,7 +4809,7 @@ dependencies = [ [[package]] name = "logging" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "chrono", "logroller", @@ -5058,9 +4827,9 @@ dependencies = [ [[package]] name = "logroller" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90536db32a1cb3672665cdf3269bf030b0f395fabee863895c27b75b9f7a8a7d" +checksum = "83db12bbf439ebe64c0b0e4402f435b6f866db498fc1ae17e1b5d1a01625e2be" dependencies = [ "chrono", "flate2", @@ 
-5087,7 +4856,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.5", ] [[package]] @@ -5096,7 +4865,16 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.5", +] + +[[package]] +name = "lru" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86ea4e65087ff52f3862caff188d489f1fab49a0cb09e01b2e3f1a617b10aaed" +dependencies = [ + "hashbrown 0.15.5", ] [[package]] @@ -5108,16 +4886,16 @@ checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "fnv", ] [[package]] name = "mach2" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" +checksum = "d640282b302c0bb0a2a8e0233ead9035e3bed871f0b7e81fe4a1ec829765db44" dependencies = [ "libc", ] @@ -5130,7 +4908,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5162,9 +4940,9 @@ checksum = "33746aadcb41349ec291e7f2f0a3aa6834d1d7c58066fb4b01f68efc4c4b7631" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "memoffset" @@ -5178,9 +4956,9 @@ dependencies = [ [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "ethereum_hashing", "fixed_bytes", "safe_arith", @@ -5217,6 +4995,7 @@ dependencies = [ "slot_clock", "ssv_types", "subnet_service", + "thiserror 2.0.12", "tokio", "tracing", ] @@ -5274,19 +5053,18 @@ dependencies = [ [[package]] name = "metrics" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "prometheus", ] [[package]] name = "milhouse" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1ada1f56cc1c79f40517fdcbf57e19f60424a3a1ce372c3fe9b22e4fdd83eb" +checksum = "2bdb104e38d3a8c5ffb7e9d2c43c522e6bcc34070edbadba565e722f0dee56c7" dependencies = [ - "alloy-primitives", - "arbitrary", + "alloy-primitives 1.3.0", "educe", "ethereum_hashing", "ethereum_ssz", @@ -5316,22 +5094,22 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", ] [[package]] @@ -5350,7 +5128,7 @@ dependencies = [ "smallvec", "tagptr", "thiserror 1.0.69", - "uuid 1.17.0", + "uuid 1.18.0", ] [[package]] @@ -5359,30 +5137,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" -[[package]] -name = "multi_index_map" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2921dd3396771d28d081e93dbb62dc62e14ee4fe63978c6528e177ad0c6bfe" -dependencies = [ - "multi_index_map_derive", - "rustc-hash", - "slab", -] - -[[package]] -name = "multi_index_map_derive" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaa5e48a85384b125f486e729dbec5bd5e4a255318dd5fe1db772e2224b1234c" -dependencies = [ - "convert_case 0.8.0", - "proc-macro-error2", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "multiaddr" version = "0.18.2" @@ -5467,31 +5221,6 @@ dependencies = [ "tempfile", ] -[[package]] -name = "neli" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" -dependencies = [ - "byteorder", - "libc", - "log", - "neli-proc-macros", -] - -[[package]] -name = "neli-proc-macros" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" -dependencies = [ - "either", - "proc-macro2", - "quote", - "serde", - "syn 1.0.109", -] - [[package]] name = "netlink-packet-core" version = "0.7.0" @@ -5549,7 +5278,6 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" dependencies = [ - "async-io", "bytes", "futures", "libc", @@ -5562,20 +5290,22 @@ name = "network" version = "0.1.0" dependencies = [ "async-trait", - "dirs 6.0.0", + "dirs", "discv5", "ethereum_ssz", "futures", + "global_config", "hex", "libp2p", "libp2p-gossipsub", "libp2p-peer-store", "libp2p-swarm-test", - "lighthouse_network", "message_receiver", + "metrics", + "network_utils", "prometheus-client", "quick-protobuf", - "rand 0.9.1", + "rand 0.9.2", "serde", "serde_json", "ssv_types", @@ -5590,6 +5320,21 @@ dependencies = [ "version", ] +[[package]] +name = "network_utils" +version = "0.1.0" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" +dependencies = [ + "discv5", + "libp2p-identity", + "lru_cache", + "metrics", + "multiaddr", + "parking_lot", + "serde", + "tiny-keccak", +] + [[package]] name = "nix" version = "0.24.3" @@ -5743,43 +5488,45 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.5.2", "libc", ] [[package]] name = "num_enum" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" dependencies = [ "num_enum_derive", + "rustversion", ] [[package]] name = "num_enum_derive" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "nybbles" -version = "0.3.4" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +checksum = "675b3a54e5b12af997abc8b6638b0aee51a28caedab70d4967e0d5db3a3f1d06" dependencies = [ "alloy-rlp", - "const-hex", + "cfg-if", "proptest", + "ruint", "serde", "smallvec", ] @@ -5795,9 +5542,9 @@ dependencies = [ [[package]] name = "oid-registry" -version = "0.7.1" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" +checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7" dependencies = [ "asn1-rs", ] @@ -5807,6 +5554,10 @@ name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "once_cell_polyfill" @@ -5822,9 +5573,9 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.72" +version = "0.10.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ "bitflags 2.9.1", "cfg-if", @@ -5843,7 +5594,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5854,18 +5605,18 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-src" -version = "300.5.0+3.5.0" +version = "300.5.2+3.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ce546f549326b0e6052b649198487d91320875da901e7bd11a06d1ee3f9c2f" +checksum = "d270b79e2926f5150189d475bc7e9d2c69f9c4697b185fa917d5a32b792d21b4" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.108" +version = "0.9.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", @@ -5881,7 +5632,7 @@ dependencies = [ "base64 0.22.1", "eth2_keystore", "openssl", - 
"rand 0.9.1", + "rand 0.9.2", "serde", "serde_json", "thiserror 2.0.12", @@ -5900,18 +5651,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "p256" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" -dependencies = [ - "ecdsa", - "elliptic-curve", - "primeorder", - "sha2 0.10.9", -] - [[package]] name = "pairing" version = "0.23.0" @@ -5946,7 +5685,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -5957,9 +5696,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -5967,9 +5706,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", @@ -6026,15 +5765,6 @@ dependencies = [ "serde", ] -[[package]] -name = "pem-rfc7468" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" -dependencies = [ - "base64ct", -] - [[package]] name = "percent-encoding" version = "2.3.1" @@ -6043,9 +5773,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6" +checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" dependencies = [ "memchr", "thiserror 2.0.12", @@ -6079,7 +5809,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -6094,17 +5824,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "piper" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" -dependencies = [ - "atomic-waker", - "fastrand", - "futures-io", -] - [[package]] name = "pkcs8" version = "0.10.2" @@ -6129,17 +5848,16 @@ checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "polling" -version = "3.7.4" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "b5bd19146350fe804f7cb2669c851c03d69da628803dab0d98018142aaa5d829" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi 0.4.0", + "hermit-abi 0.5.2", "pin-project-lite", - "rustix 0.38.44", - "tracing", - "windows-sys 
0.59.0", + "rustix 1.0.8", + "windows-sys 0.60.2", ] [[package]] @@ -6167,9 +5885,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "potential_utf" @@ -6198,21 +5916,12 @@ dependencies = [ [[package]] name = "pretty_reqwest_error" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "reqwest 0.11.27", "sensitive_url", ] -[[package]] -name = "primeorder" -version = "0.13.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" -dependencies = [ - "elliptic-curve", -] - [[package]] name = "primitive-types" version = "0.12.2" @@ -6252,14 +5961,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "beef09f85ae72cea1ef96ba6870c51e6382ebfa4f0e85b643459331f3daa5be0" dependencies = [ "unicode-ident", ] @@ -6310,9 +6019,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.22.3" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +checksum = "cf41c1a7c32ed72abe5082fb19505b969095c12da9f5732a4bc9878757fd087c" dependencies = [ "dtoa", "itoa", @@ -6328,44 +6037,33 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "proptest" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" dependencies = [ "bit-set", "bit-vec", "bitflags 2.9.1", "lazy_static", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_xorshift", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift 0.4.0", "regex-syntax 0.8.5", "rusty-fork", "tempfile", "unarray", ] -[[package]] -name = "proptest-derive" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", -] - [[package]] name = "proto_array" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", @@ -6404,7 +6102,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "hex", - "indexmap 2.9.0", + "indexmap 2.10.0", "sha2 0.10.9", "ssv_types", "tracing", @@ -6474,8 +6172,8 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.27", - "socket2", + "rustls 
0.23.31", + "socket2 0.5.10", "thiserror 2.0.12", "tokio", "tracing", @@ -6491,10 +6189,10 @@ dependencies = [ "bytes", "getrandom 0.3.3", "lru-slab", - "rand 0.9.1", + "rand 0.9.2", "ring", "rustc-hash", - "rustls 0.23.27", + "rustls 0.23.31", "rustls-pki-types", "slab", "thiserror 2.0.12", @@ -6505,14 +6203,14 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4e529991f949c5e25755532370b8af5d114acae52326361d68d47af64aa842" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.5.10", "tracing", "windows-sys 0.59.0", ] @@ -6528,9 +6226,9 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "r2d2" @@ -6573,12 +6271,13 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", + "serde", ] [[package]] @@ -6617,6 +6316,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ "getrandom 0.3.3", + "serde", ] [[package]] @@ -6628,6 +6328,15 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_xorshift" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a" +dependencies = [ + "rand_core 0.9.3", +] + [[package]] name = "rayon" version = "1.10.0" @@ -6663,33 +6372,42 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.12" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" dependencies = [ "bitflags 2.9.1", ] [[package]] name = "redox_users" -version = "0.4.6" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ "getrandom 0.2.16", "libredox", - "thiserror 1.0.69", + "thiserror 2.0.12", ] [[package]] -name = "redox_users" -version = "0.5.0" +name = "ref-cast" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b" +checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf" dependencies = [ - "getrandom 0.2.16", - "libredox", - "thiserror 2.0.12", + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", ] [[package]] @@ 
-6747,7 +6465,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.32", @@ -6762,7 +6480,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls 0.21.12", - "rustls-pemfile 1.0.4", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", @@ -6784,46 +6502,42 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.15" +version = "0.12.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", "futures-core", - "futures-util", - "h2 0.4.10", + "h2 0.4.12", "http 1.3.1", "http-body 1.0.1", "http-body-util", "hyper 1.6.0", - "hyper-rustls 0.27.6", + "hyper-rustls 0.27.7", "hyper-tls 0.6.0", "hyper-util", - "ipnet", "js-sys", "log", "mime", "native-tls", - "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 2.2.0", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 1.0.2", - "system-configuration 0.6.1", "tokio", "tokio-native-tls", "tower", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "windows-registry", ] [[package]] @@ -6908,7 +6622,6 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" dependencies = [ - "async-global-executor", "futures", "log", "netlink-packet-core", @@ -6933,12 +6646,11 @@ dependencies = [ [[package]] name = "ruint" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11256b5fe8c68f56ac6f39ef0720e592f33d2367a4782740d9c9142e889c7fb4" +checksum = "9ecb38f82477f20c5c3d62ef52d7c4e536e38ea9b73fb570a20c5cae0e14bcf6" dependencies = [ "alloy-rlp", - "arbitrary", "ark-ff 0.3.0", "ark-ff 0.4.2", "bytes", @@ -6951,7 +6663,7 @@ dependencies = [ "primitive-types", "proptest", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "rlp", "ruint-macro", "serde", @@ -6995,9 +6707,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" @@ -7054,28 +6766,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags 2.9.1", - "errno", - "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", -] - -[[package]] -name = "rustix" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" dependencies = [ "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -7092,14 +6791,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.3", + "rustls-webpki 0.103.4", "subtle", "zeroize", ] @@ -7113,15 +6812,6 @@ dependencies = [ "base64 0.21.7", ] -[[package]] -name = "rustls-pemfile" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "rustls-pki-types" version = "1.12.0" @@ -7144,9 +6834,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.3" +version = "0.103.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" dependencies = [ "ring", "rustls-pki-types", @@ -7155,9 +6845,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "rusty-fork" @@ -7191,7 +6881,7 @@ checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" [[package]] name = "salsa20" @@ -7202,6 +6892,15 @@ dependencies = [ "cipher 0.3.0", ] +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "schannel" version = "0.1.27" @@ -7220,6 +6919,30 @@ dependencies = [ "parking_lot", ] +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "scoped-tls" version = "1.0.1" @@ -7269,6 +6992,27 @@ dependencies = [ "zeroize", ] +[[package]] +name = "secp256k1" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252" +dependencies = [ + "bitcoin_hashes", + "rand 0.8.5", + "secp256k1-sys", + "serde", +] + +[[package]] +name = "secp256k1-sys" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" +dependencies = [ + "cc", +] + [[package]] name = "security-framework" version = "2.11.1" @@ -7328,7 +7072,7 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "sensitive_url" 
version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "serde", "url", @@ -7351,14 +7095,14 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.142" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" dependencies = [ "itoa", "memchr", @@ -7384,7 +7128,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -7401,15 +7145,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.12.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.9.0", + "indexmap 2.10.0", + "schemars 0.9.0", + "schemars 1.0.4", "serde", "serde_derive", "serde_json", @@ -7419,14 +7165,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.12.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -7435,7 +7181,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "itoa", "ryu", "serde", @@ -7524,9 +7270,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.5" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] @@ -7560,20 +7306,17 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "slashing_protection" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "arbitrary", - "ethereum_serde_utils", + "ethereum_serde_utils 0.8.0", "filesystem", "r2d2", "r2d2_sqlite", @@ -7588,7 +7331,7 @@ dependencies = [ [[package]] name = "slot_clock" version = "0.2.0" -source = 
"git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "metrics", "parking_lot", @@ -7597,37 +7340,14 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" dependencies = [ "arbitrary", "serde", ] -[[package]] -name = "smol" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" -dependencies = [ - "async-channel 2.3.1", - "async-executor", - "async-fs", - "async-io", - "async-lock", - "async-net", - "async-process", - "blocking", - "futures-lite", -] - -[[package]] -name = "snap" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" - [[package]] name = "snow" version = "0.9.6" @@ -7647,14 +7367,47 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "spec_tests" +version = "0.1.0" +dependencies = [ + "base64 0.22.1", + "ethereum_ssz", + "ethereum_ssz_derive", + "hex", + "openssl", + "operator_key", + "parking_lot", + "qbft", + "serde", + "serde_json", + "serde_with", + "sha2 0.10.9", + "ssv_types", + "tree_hash", + "tree_hash_derive", + "types", + "walkdir", +] + [[package]] name = "spin" version = "0.9.8" @@ -7695,25 +7448,29 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "hex", - "indexmap 2.9.0", + "indexmap 2.10.0", "openssl", "operator_key", "rusqlite", + "serde", + "serde_json", "sha2 0.10.9", + "ssz_types", "thiserror 2.0.12", "tree_hash", "tree_hash_derive", + "typenum", "types", + "zerocopy", ] [[package]] name = "ssz_types" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad0fa7e9a85c06d0a6ba5100d733fff72e231eb6db2d86078225cf716fd2d95" +checksum = "75b55bedc9a18ed2860a46d6beb4f4082416ee1d60be0cc364cebdcdddc7afd4" dependencies = [ - "arbitrary", - "ethereum_serde_utils", + "ethereum_serde_utils 0.8.0", "ethereum_ssz", "itertools 0.13.0", "serde", @@ -7768,11 +7525,11 @@ dependencies = [ [[package]] name = "strum" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" dependencies = [ - "strum_macros 0.27.1", + "strum_macros 0.27.2", ] [[package]] @@ -7790,15 +7547,14 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.27.1" +version = "0.27.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77a8c5abcaf0f9ce05d62342b7d298c346515365c36b673df4ebe3ced01fde8" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "rustversion", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -7807,7 +7563,7 @@ version = "0.1.0" dependencies = [ "alloy", "database", - "ethereum_serde_utils", + "ethereum_serde_utils 0.7.0", "serde", "slot_clock", "ssv_types", @@ -7840,9 +7596,9 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "ethereum_hashing", "fixed_bytes", ] @@ -7860,9 +7616,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -7871,14 +7627,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.25" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4560533fbd6914b94a8fb5cc803ed6801c3455668db3b810702c57612bac9412" +checksum = "a7a985ff4ffd7373e10e0fb048110fb11a162e5a4c47f92ddb8787a6f766b769" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -7904,7 +7660,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -7970,7 +7726,7 @@ checksum = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "async-channel 1.9.0", "futures", @@ -7988,7 +7744,7 @@ dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", - "rustix 1.0.7", + "rustix 1.0.8", "windows-sys 0.59.0", ] @@ -7998,14 +7754,14 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" dependencies = [ - "rustix 1.0.7", + "rustix 1.0.8", "windows-sys 0.59.0", ] [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "quote", "syn 1.0.109", @@ -8037,7 +7793,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -8048,17 +7804,16 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -8137,29 +7892,21 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.0" +version = "1.47.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" dependencies = [ "backtrace", "bytes", + "io-uring", "libc", "mio", "pin-project-lite", "signal-hook-registry", - "socket2", + "slab", + "socket2 0.6.0", "tokio-macros", - "windows-sys 0.52.0", -] - -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", + "windows-sys 0.59.0", ] [[package]] @@ -8170,7 +7917,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -8199,7 +7946,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.27", + "rustls 0.23.31", "tokio", ] @@ -8223,7 +7970,7 @@ checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" dependencies = [ "futures-util", "log", - "rustls 0.23.27", + "rustls 0.23.31", "rustls-pki-types", "tokio", "tokio-rustls 0.26.2", @@ -8233,13 +7980,12 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.15" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", - "futures-io", "futures-sink", "pin-project-lite", "slab", @@ -8248,17 +7994,17 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" [[package]] name = "toml_edit" -version = "0.22.26" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.10.0", "toml_datetime", "winnow", ] @@ -8281,14 +8027,18 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.4" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ "bitflags 2.9.1", "bytes", + "futures-util", "http 1.3.1", + "http-body 1.0.1", + "iri-string", "pin-project-lite", + "tower", "tower-layer", "tower-service", ] @@ -8331,37 +8081,25 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.30" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "futures", - "futures-task", - "pin-project", - "tracing", -] - [[package]] name = "tracing-log" version = "0.2.0" @@ -8406,11 +8144,11 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c58eb0f518840670270d90d97ffee702d8662d9c5494870c9e1e9e0fa00f668" +checksum = "ee44f4cef85f88b4dea21c0b1f58320bdf35715cf56d840969487cff00613321" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "ethereum_hashing", "ethereum_ssz", "smallvec", @@ -8419,14 +8157,14 @@ dependencies = [ [[package]] name = "tree_hash_derive" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "699e7fb6b3fdfe0c809916f251cf5132d64966858601695c3736630a87e7166a" +checksum = "0bee2ea1551f90040ab0e34b6fb7f2fa3bad8acc925837ac654f2c78a13e3089" dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -8456,8 +8194,8 @@ dependencies = [ "http 1.3.1", "httparse", "log", - "rand 0.9.1", - "rustls 0.23.27", + "rand 0.9.2", + "rustls 0.23.31", "rustls-pki-types", "sha1", "thiserror 2.0.12", @@ -8473,11 +8211,10 @@ checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ - "alloy-primitives", + "alloy-primitives 1.3.0", "alloy-rlp", - "arbitrary", "bls", "compare_fields", "compare_fields_derive", @@ -8486,7 +8223,7 @@ dependencies = [ "derivative", "eth2_interop_keypairs", "ethereum_hashing", - "ethereum_serde_utils", + "ethereum_serde_utils 0.8.0", "ethereum_ssz", "ethereum_ssz_derive", "fixed_bytes", @@ -8500,7 +8237,7 @@ dependencies = [ "milhouse", "parking_lot", "rand 0.8.5", - "rand_xorshift", + "rand_xorshift 0.3.0", "rayon", "regex", "rpds", @@ -8616,11 +8353,6 @@ name = "unsigned-varint" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" -dependencies = [ - "asynchronous-codec", - "bytes", - "tokio-util", -] [[package]] name = "untrusted" @@ -8628,15 +8360,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" -[[package]] -name = "unused_port" -version = "0.1.0" -source = 
"git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" -dependencies = [ - "lru_cache", - "parking_lot", -] - [[package]] name = "url" version = "2.5.4" @@ -8678,9 +8401,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" +checksum = "f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be" dependencies = [ "getrandom 0.3.3", "js-sys", @@ -8690,7 +8413,7 @@ dependencies = [ [[package]] name = "validator_metrics" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "metrics", ] @@ -8698,7 +8421,7 @@ dependencies = [ [[package]] name = "validator_services" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "beacon_node_fallback", "bls", @@ -8722,7 +8445,7 @@ dependencies = [ [[package]] name = "validator_store" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "eth2", "slashing_protection", @@ -8735,12 +8458,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" -[[package]] -name = "value-bag" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" - [[package]] name = "vcpkg" version = "0.2.15" @@ -8796,6 +8513,16 @@ dependencies = [ "libc", ] +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -8807,9 +8534,9 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" @@ -8842,7 +8569,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "wasm-bindgen-shared", ] @@ -8877,7 +8604,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8906,9 +8633,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +checksum = "d8d49b5d6c64e8558d9b1b065014426f35c18de636895d24893dbbd329743446" dependencies = [ "futures", "js-sys", @@ -8950,14 +8677,14 @@ version = "0.26.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.0", + "webpki-roots 1.0.2", ] [[package]] name = "webpki-roots" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" dependencies = [ "rustls-pki-types", ] @@ -8990,6 +8717,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" @@ -9008,9 +8744,9 @@ dependencies = [ [[package]] name = "windows" -version = "0.61.1" +version = "0.61.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ "windows-collections", "windows-core 0.61.2", @@ -9060,7 +8796,7 @@ dependencies = [ "windows-interface", "windows-link", "windows-result 0.3.4", - "windows-strings 0.4.2", + "windows-strings", ] [[package]] @@ -9082,7 +8818,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -9093,14 +8829,14 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-numerics" @@ -9114,13 +8850,13 @@ dependencies = [ [[package]] name = "windows-registry" -version = "0.4.0" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" dependencies = [ + "windows-link", "windows-result 0.3.4", - "windows-strings 0.3.1", - "windows-targets 0.53.0", + "windows-strings", ] [[package]] @@ -9141,15 +8877,6 @@ dependencies = [ "windows-link", ] -[[package]] -name = "windows-strings" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-strings" version = "0.4.2" @@ -9195,6 +8922,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -9243,10 +8979,11 @@ dependencies = [ 
[[package]] name = "windows-targets" -version = "0.53.0" +version = "0.53.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" dependencies = [ + "windows-link", "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", "windows_i686_gnu 0.53.0", @@ -9448,9 +9185,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.10" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" dependencies = [ "memchr", ] @@ -9477,7 +9214,7 @@ dependencies = [ [[package]] name = "workspace_members" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?rev=9b84dac#9b84dac6d6cb4f00e8d6f4d0102bee9c7007498f" +source = "git+https://github.com/sigp/lighthouse?rev=0450cfc#0450cfc1afa9da3e33124e262cd617e1de742085" dependencies = [ "cargo_metadata", "quote", @@ -9491,9 +9228,9 @@ checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "ws_stream_wasm" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" +checksum = "6c173014acad22e83f16403ee360115b38846fe754e735c5d9d3803fe70c6abc" dependencies = [ "async_io_stream", "futures", @@ -9502,7 +9239,7 @@ dependencies = [ "pharos", "rustc_version 0.4.1", "send_wrapper", - "thiserror 1.0.69", + "thiserror 2.0.12", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -9531,9 +9268,9 @@ dependencies = [ [[package]] name = "x509-parser" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +checksum = "4569f339c0c402346d4a75a9e39cf8dad310e287eef1ff56d4c68e5067f53460" dependencies = [ "asn1-rs", "data-encoding", @@ -9542,15 +9279,15 @@ dependencies = [ "nom", "oid-registry", "rusticata-macros", - "thiserror 1.0.69", + "thiserror 2.0.12", "time", ] [[package]] name = "xml-rs" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62ce76d9b56901b19a74f19431b0d8b3bc7ca4ad685a746dfd78ca8f4fc6bda" +checksum = "6fd8403733700263c6eb89f192880191f1b83e332f7a20371ddcf421c4a337c7" [[package]] name = "xmltree" @@ -9578,16 +9315,16 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.5" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da1acad1c2dc53f0dde419115a38bd8221d8c3e47ae9aeceaf453266d29307e" +checksum = "2b2dd50a6d6115feb3e5d7d0efd45e8ca364b6c83722c1e9c602f5764e0e9597" dependencies = [ "futures", "log", "nohash-hasher", "parking_lot", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "static_assertions", "web-time", ] @@ -9621,28 +9358,28 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" 
+checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -9662,7 +9399,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", "synstructure", ] @@ -9684,7 +9421,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -9700,9 +9437,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.2" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ "yoke", "zerofrom", @@ -9717,7 +9454,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ba9c9c8d5..72c1e2db9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ members = [ "anchor/processor", "anchor/qbft_manager", "anchor/signature_collector", + "anchor/spec_tests", "anchor/subnet_service", "anchor/validator_store", ] @@ -37,54 +38,8 @@ edition = "2024" # This table has three subsections: first the internal dependencies, then the lighthouse dependencies, then all other. 
[workspace.dependencies] -anchor_validator_store = { path = "anchor/validator_store" } -api_types = { path = "anchor/common/api_types" } -bls_lagrange = { path = "anchor/common/bls_lagrange" } -client = { path = "anchor/client" } -database = { path = "anchor/database" } -duties_tracker = { path = "anchor/duties_tracker" } -eth = { path = "anchor/eth" } -global_config = { path = "anchor/common/global_config" } -http_api = { path = "anchor/http_api" } -http_metrics = { path = "anchor/http_metrics" } -keygen = { path = "anchor/keygen" } -keysplit = { path = "anchor/keysplit" } -logging = { path = "anchor/logging" } -message_receiver = { path = "anchor/message_receiver" } -message_sender = { path = "anchor/message_sender" } -message_validator = { path = "anchor/message_validator" } -network = { path = "anchor/network" } -operator_key = { path = "anchor/common/operator_key" } -processor = { path = "anchor/processor" } -qbft = { path = "anchor/common/qbft" } -qbft_manager = { path = "anchor/qbft_manager" } -signature_collector = { path = "anchor/signature_collector" } -ssv_network_config = { path = "anchor/common/ssv_network_config" } -ssv_types = { path = "anchor/common/ssv_types" } -subnet_service = { path = "anchor/subnet_service" } -version = { path = "anchor/common/version" } - -beacon_node_fallback = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -bls = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -eth2 = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -eth2_keystore = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -eth2_network_config = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -health_metrics = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -lighthouse_network = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -metrics = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -safe_arith = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -sensitive_url = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -slashing_protection = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -slot_clock = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -task_executor = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -types = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -unused_port = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -validator_metrics = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -validator_services = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -validator_store = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -workspace_members = { git = "https://github.com/sigp/lighthouse", rev = "9b84dac" } -alloy = { version = "0.12.0", features = [ +alloy = { version = "1.0.22", features = [ "sol-types", "transports", "json", @@ -94,44 +49,85 @@ alloy = { version = "0.12.0", features = [ "rpc-types", "rlp", ] } +anchor_validator_store = { path = "anchor/validator_store" } +api_types = { path = "anchor/common/api_types" } arbitrary = "1.4.1" async-channel = "1.9" axum = "0.8.1" base64 = "0.22.1" + +beacon_node_fallback = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +bls = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +bls_lagrange = { path = "anchor/common/bls_lagrange" } blst = "0.3.14" # the custom repo is needed because they fix to a specific version of blst, which 
conflicts with the line above blstrs_plus = { git = "https://github.com/dknopik/blstrs", branch = "pls" } clap = { version = "4.5.15", features = ["derive", "wrap_help"] } +client = { path = "anchor/client" } dashmap = "6.1.0" +database = { path = "anchor/database" } derive_more = { version = "2.0.1", features = ["full"] } dirs = "6.0.0" discv5 = "0.9.0" +duties_tracker = { path = "anchor/duties_tracker" } enr = "0.13.0" -ethereum_ssz = "0.8" -ethereum_ssz_derive = "0.8" +eth = { path = "anchor/eth" } +eth2 = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +eth2_keystore = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +eth2_network_config = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +ethereum_ssz = "0.9" +ethereum_ssz_derive = "0.9" futures = "0.3.30" -gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", rev = "61b2820" } +global_config = { path = "anchor/common/global_config" } +gossipsub = { package = "libp2p-gossipsub", git = "https://github.com/sigp/rust-libp2p.git", rev = "2a726cd", features = ["metrics"] } +health_metrics = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } hex = "0.4.3" +http_api = { path = "anchor/http_api" } +http_metrics = { path = "anchor/http_metrics" } hyper = "1.4" indexmap = "2.7.0" -libp2p = { version = "0.55", default-features = false } +keygen = { path = "anchor/keygen" } +keysplit = { path = "anchor/keysplit" } +libp2p = { version = "0.56", default-features = false } +logging = { path = "anchor/logging" } +lru = "0.16.0" +message_receiver = { path = "anchor/message_receiver" } +message_sender = { path = "anchor/message_sender" } +message_validator = { path = "anchor/message_validator" } +metrics = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } multiaddr = "0.18.2" +network = { path = "anchor/network" } +network_utils = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } num_cpus = "1" once_cell = "1.21.3" openssl = "0.10.72" +operator_key = { path = "anchor/common/operator_key" } parking_lot = "0.12" pbkdf2 = "0.12.2" -prometheus-client = "0.22.0" +processor = { path = "anchor/processor" } +prometheus-client = "0.23.0" +qbft = { path = "anchor/common/qbft" } +qbft_manager = { path = "anchor/qbft_manager" } r2d2 = "0.8.10" r2d2_sqlite = "0.21.0" rand = "0.9" reqwest = "0.12.12" rusqlite = "0.28.0" +safe_arith = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +sensitive_url = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } serde = { version = "1.0.208", features = ["derive"] } serde_json = "1.0.140" serde_yaml = "0.9" sha2 = "0.10.8" +signature_collector = { path = "anchor/signature_collector" } +slashing_protection = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +slot_clock = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +ssv_network_config = { path = "anchor/common/ssv_network_config" } +ssv_types = { path = "anchor/common/ssv_types" } +ssz_types = "0.11.0" strum = { version = "0.27.0", features = ["derive"] } +subnet_service = { path = "anchor/subnet_service" } +task_executor = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } thiserror = "2.0.11" tokio = { version = "1.39.2", features = [ "rt", @@ -145,15 +141,22 @@ tower-http = { version = "0.6", features = ["cors"] } tracing = "0.1.40" tracing-appender = "0.2" tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] } -tree_hash = "0.9" -tree_hash_derive = "0.9" 
+tree_hash = "0.10" +tree_hash_derive = "0.10" +typenum = "1.18" +types = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +validator_metrics = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +validator_services = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +validator_store = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } +version = { path = "anchor/common/version" } vsss-rs = "5.1.0" +workspace_members = { git = "https://github.com/sigp/lighthouse", rev = "0450cfc" } zeroize = "1.8.1" # todo: remove when there is a proper release for peer-store. [patch.'https://github.com/libp2p/rust-libp2p.git'] -libp2p-core = "0.43.0" -libp2p-swarm = "0.46.0" +libp2p-core = "0.43.1" +libp2p-swarm = "0.47.0" [profile.maxperf] inherits = "release" diff --git a/anchor/client/Cargo.toml b/anchor/client/Cargo.toml index f90c3e434..528797ca6 100644 --- a/anchor/client/Cargo.toml +++ b/anchor/client/Cargo.toml @@ -30,6 +30,7 @@ message_sender = { workspace = true } message_validator = { workspace = true } multiaddr = { workspace = true } network = { workspace = true } +network_utils = { workspace = true } openssl = { workspace = true } operator_key = { workspace = true } parking_lot = { workspace = true } @@ -49,7 +50,6 @@ task_executor = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } types = { workspace = true } -unused_port = { workspace = true } validator_metrics = { workspace = true } validator_services = { workspace = true } version = { workspace = true } diff --git a/anchor/client/src/cli.rs b/anchor/client/src/cli.rs index e6fcb7e24..fec7a719c 100644 --- a/anchor/client/src/cli.rs +++ b/anchor/client/src/cli.rs @@ -527,13 +527,7 @@ pub struct Node { )] pub disable_gossipsub_peer_scoring: bool, - #[clap( - long, - help = "Disables gossipsub topic scoring.", - action = ArgAction::Set, - default_value = "true", - hide = true - )] + #[clap(long, help = "Disables gossipsub topic scoring.", hide = true)] pub disable_gossipsub_topic_scoring: bool, #[clap(flatten)] diff --git a/anchor/client/src/config.rs b/anchor/client/src/config.rs index 23e6d0082..a909f389f 100644 --- a/anchor/client/src/config.rs +++ b/anchor/client/src/config.rs @@ -6,6 +6,9 @@ use std::{net::IpAddr, path::PathBuf}; use global_config::GlobalConfig; use multiaddr::{Multiaddr, Protocol}; use network::{DEFAULT_DISC_PORT, DEFAULT_TCP_PORT, ListenAddr, ListenAddress}; +use network_utils::unused_port::{ + unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port, +}; use sensitive_url::SensitiveUrl; use ssv_types::OperatorId; use tracing::{error, warn}; @@ -86,6 +89,7 @@ impl Config { ]; let execution_nodes_websocket = SensitiveUrl::parse(DEFAULT_EXECUTION_NODE_WS) .expect("execution_nodes_websocket must always be a valid url."); + let network_config = network::Config::new(global_config.data_dir.network_dir()); Self { global_config, @@ -100,7 +104,7 @@ impl Config { http_api: <_>::default(), http_metrics: <_>::default(), enable_high_validator_count_metrics: false, - network: <_>::default(), + network: network_config, beacon_nodes_tls_certs: None, execution_nodes_tls_certs: None, processor: <_>::default(), @@ -139,7 +143,6 @@ pub fn from_cli(cli_args: &Node, global_config: GlobalConfig) -> Result Result Result Result Result Result Result Result { let ipv4_tcp_port = cli_args .use_zero_ports - .then(unused_port::unused_tcp4_port) + .then(unused_tcp4_port) .transpose()? 
         .or(cli_args.port)
         .unwrap_or(DEFAULT_TCP_PORT);
     let ipv4_disc_port = cli_args
         .use_zero_ports
         .then(unused_udp4_port)
         .transpose()?
         .or(cli_args.discovery_port)
         .or(cli_args.port)
         .unwrap_or(DEFAULT_DISC_PORT);
     let ipv4_quic_port = cli_args
         .use_zero_ports
         .then(unused_udp4_port)
         .transpose()?
         .or(cli_args.quic_port)
         .unwrap_or(if ipv4_tcp_port == 0 {
@@ -436,19 +439,19 @@ pub fn parse_listening_addresses(cli_args: &Node) -> Result<ListenAddress, String> {
diff --git a/anchor/client/src/key.rs b/anchor/client/src/key.rs
--- a/anchor/client/src/key.rs
+++ b/anchor/client/src/key.rs
-    data_dir: &Path,
+    data_dir: &DataDir,
     key_file: Option<&Path>,
     password_file: Option<&Path>,
 ) -> Result<Rsa<Private>, String> {
     // First, we have to read a file and decide what to do.
-    // TODO: do not hardcode paths here: https://github.com/sigp/anchor/issues/403
-    let public_key_file = data_dir.join("public_key.txt");
+    let public_key_file = data_dir.public_key_file();
 
     let key = if let Some(key_file) = key_file {
         try_read(key_file, password_file).unwrap_or_else(|| {
@@ -23,8 +23,8 @@
         })
     } else {
         // Read key from data dir
-        let unencrypted_key_file = data_dir.join("unencrypted_private_key.txt");
-        let encrypted_key_file = data_dir.join("encrypted_private_key.json");
+        let unencrypted_key_file = data_dir.unencrypted_private_key_file();
+        let encrypted_key_file = data_dir.encrypted_private_key_file();
 
         try_read(&unencrypted_key_file, password_file)
             .or_else(|| try_read(&encrypted_key_file, password_file))
@@ -94,7 +94,7 @@ fn parse_encrypted(
         .map_err(|_| "Key decryption failed".to_string())
 }
 
-fn generate_key(dir: &Path, password_file: Option<&Path>) -> Result<Rsa<Private>, String> {
+fn generate_key(dir: &DataDir, password_file: Option<&Path>) -> Result<Rsa<Private>, String> {
     info!("Creating private key");
     let key = Rsa::generate(2048).map_err(|e| format!("Unable to generate key: {e}"))?;
     // Encrypt the fresh key if a password key file was provided. For interactive password
@@ -129,11 +129,10 @@ fn read_password_from_user() -> Result<Zeroizing<String>, String> {
 fn save_key(
     key: &Rsa<Private>,
     password: Option<&Zeroizing<String>>,
-    data_dir: &Path,
+    data_dir: &DataDir,
 ) -> Result<(), String> {
-    // TODO: do not hardcode paths here: https://github.com/sigp/anchor/issues/403
     if let Some(password) = password {
-        let file = data_dir.join("encrypted_private_key.json");
+        let file = data_dir.encrypted_private_key_file();
         info!(file = %file.display(), "Saving encrypted private key");
         let encrypted_key =
             EncryptedKey::encrypt(key, password.as_str()).map_err(|_| "Unable to encrypt key")?;
@@ -146,7 +145,7 @@
         })
         .map_err(|e| format!("Unable to write encrypted private key: {e}"))
     } else {
-        let file = data_dir.join("unencrypted_private_key.txt");
+        let file = data_dir.unencrypted_private_key_file();
         info!(file = %file.display(), "Saving unencrypted private key");
         let serialized_key = operator_key::unencrypted::to_base64(key)
             .map_err(|_| "Unable to serialize unencrypted key".to_string())?;
diff --git a/anchor/client/src/lib.rs b/anchor/client/src/lib.rs
index 2ad7434fb..b5c7fa4e3 100644
--- a/anchor/client/src/lib.rs
+++ b/anchor/client/src/lib.rs
@@ -61,9 +61,6 @@ use validator_services::{
 
 use crate::{key::read_or_generate_private_key, notifier::spawn_notifier};
 
-/// The filename within the `validators` directory that contains the slashing protection DB.
-const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite";
 
 /// Specific timeout constants for HTTP requests involved in different validator duties.
 /// This can help ensure that proper endpoint fallback occurs.
const HTTP_ATTESTATION_TIMEOUT_QUOTIENT: u32 = 4; @@ -188,35 +185,27 @@ impl Client { let database = Arc::new( if let Some(impostor) = &config.impostor { NetworkDatabase::new_as_impostor( - config - .global_config - .data_dir - .join("anchor_db.sqlite") - .as_path(), + &config.global_config.data_dir.database_file(), impostor, + config.global_config.ssv_network.ssv_domain_type, ) } else { NetworkDatabase::new( - config - .global_config - .data_dir - .join("anchor_db.sqlite") - .as_path(), + &config.global_config.data_dir.database_file(), &pubkey, + config.global_config.ssv_network.ssv_domain_type, ) } .map_err(|e| format!("Unable to open Anchor database: {e}"))?, ); // Initialize slashing protection. - let slashing_db_path = config - .global_config - .data_dir - .join(SLASHING_PROTECTION_FILENAME); - let slashing_protection = + let slashing_db_path = config.global_config.data_dir.slashing_database_file(); + let slashing_protection = Arc::new( SlashingDatabase::open_or_create(&slashing_db_path).map_err(|e| { format!("Failed to open or create slashing protection database: {e:?}",) - })?; + })?, + ); let last_beacon_node_index = config .beacon_nodes @@ -381,6 +370,7 @@ impl Client { database.clone(), index_sync_tx, exit_tx, + slashing_protection.clone(), eth::Config { http_urls: config.execution_nodes, ws_url: config.execution_nodes_websocket, @@ -446,7 +436,7 @@ impl Client { let signature_collector = SignatureCollectorManager::new( processor_senders.clone(), operator_id.clone(), - config.global_config.ssv_network.ssv_domain_type.clone(), + config.global_config.ssv_network.ssv_domain_type, message_sender.clone(), slot_clock.clone(), ) @@ -458,7 +448,7 @@ impl Client { operator_id.clone(), slot_clock.clone(), message_sender, - config.global_config.ssv_network.ssv_domain_type.clone(), + config.global_config.ssv_network.ssv_domain_type, ) .map_err(|e| format!("Unable to initialize qbft manager: {e:?}"))?; @@ -506,7 +496,7 @@ impl Client { executor.spawn(network.run::(), "network"); let validator_store = AnchorValidatorStore::<_, E>::new( - database.watch(), + database.clone(), signature_collector, qbft_manager, slashing_protection, @@ -515,7 +505,6 @@ impl Client { spec.clone(), genesis_validators_root, config.impostor.is_none().then_some(key), - executor.clone(), config.gas_limit, config.builder_proposals, config.builder_boost_factor, @@ -533,13 +522,18 @@ impl Client { voluntary_exit_tracker.clone(), ); - let selection_proof_config = SelectionProofConfig { + let attestation_selection_proof_config = SelectionProofConfig { lookahead_slot: 0, computation_offset: Duration::ZERO, selections_endpoint: false, parallel_sign: true, }; + let sync_selection_proof_config = SelectionProofConfig { + lookahead_slot: 1, + ..attestation_selection_proof_config + }; + let duties_service = Arc::new( DutiesServiceBuilder::new() .slot_clock(slot_clock.clone()) @@ -548,8 +542,8 @@ impl Client { .spec(spec.clone()) .executor(executor.clone()) .enable_high_validator_count_metrics(config.enable_high_validator_count_metrics) - .attestation_selection_proof_config(selection_proof_config) - .sync_selection_proof_config(selection_proof_config) + .attestation_selection_proof_config(attestation_selection_proof_config) + .sync_selection_proof_config(sync_selection_proof_config) .build()?, ); diff --git a/anchor/client/src/notifier.rs b/anchor/client/src/notifier.rs index f677d3c17..9183ff790 100644 --- a/anchor/client/src/notifier.rs +++ b/anchor/client/src/notifier.rs @@ -62,6 +62,13 @@ async fn notify( (operator_id, 
 cluster_count)
     };
 
+    let validator_count = duties_service.total_validator_count() as i64;
+    validator_metrics::set_gauge(
+        &validator_metrics::ENABLED_VALIDATORS_COUNT,
+        validator_count,
+    );
+    validator_metrics::set_gauge(&validator_metrics::TOTAL_VALIDATORS_COUNT, validator_count);
+
     let is_synced = *synced.borrow();
 
     match (operator_id, is_synced) {
@@ -70,7 +77,7 @@
         (Some(operator_id), false) => {
             info!(%operator_id, "Operator present on chain, waiting for sync")
         }
-        (Some(operator_id), true) if duties_service.total_validator_count() > 0 => {
+        (Some(operator_id), true) if validator_count > 0 => {
             info!(%operator_id, cluster_count, "Operator active");
             validator_services::notifier_service::notify(duties_service).await;
         }
diff --git a/anchor/common/global_config/Cargo.toml b/anchor/common/global_config/Cargo.toml
index 3710b7272..908b6a83f 100644
--- a/anchor/common/global_config/Cargo.toml
+++ b/anchor/common/global_config/Cargo.toml
@@ -8,4 +8,5 @@ edition = { workspace = true }
 clap = { workspace = true }
 dirs = { workspace = true }
 ssv_network_config = { workspace = true }
+thiserror = { workspace = true }
 tracing = { workspace = true }
diff --git a/anchor/common/global_config/src/data_dir.rs b/anchor/common/global_config/src/data_dir.rs
new file mode 100644
index 000000000..b487028ac
--- /dev/null
+++ b/anchor/common/global_config/src/data_dir.rs
@@ -0,0 +1,92 @@
+use std::{fs::create_dir_all, path::PathBuf};
+
+use ssv_network_config::SsvNetworkConfig;
+use thiserror::Error;
+
+/// The default Data directory, relative to the user's home directory
+const DEFAULT_ROOT_DIR: &str = ".anchor";
+
+#[derive(Debug, Clone)]
+pub struct DataDir {
+    path: PathBuf,
+}
+
+#[derive(Error, Debug)]
+pub enum DataDirError {
+    #[error("Failed to create data directory")]
+    Create(#[from] std::io::Error),
+}
+
+impl DataDir {
+    pub fn new(path: PathBuf) -> Result<Self, DataDirError> {
+        let ret = DataDir { path };
+
+        create_dir_all(&ret.path)?;
+        create_dir_all(&ret.network_dir().path)?;
+
+        // TODO next PR: lock file here
+
+        Ok(ret)
+    }
+
+    pub fn default_for_network(ssv_network: &SsvNetworkConfig) -> Result<Self, DataDirError> {
+        Self::new(
+            dirs::home_dir()
+                .unwrap_or_else(|| PathBuf::from("."))
+                .join(DEFAULT_ROOT_DIR)
+                .join(
+                    ssv_network
+                        .eth2_network
+                        .config
+                        .config_name
+                        .as_deref()
+                        .unwrap_or("custom"),
+                ),
+        )
+    }
+
+    pub fn public_key_file(&self) -> PathBuf {
+        self.path.join("public_key.txt")
+    }
+
+    pub fn unencrypted_private_key_file(&self) -> PathBuf {
+        self.path.join("unencrypted_private_key.txt")
+    }
+
+    pub fn encrypted_private_key_file(&self) -> PathBuf {
+        self.path.join("encrypted_private_key.json")
+    }
+
+    pub fn database_file(&self) -> PathBuf {
+        self.path.join("anchor_db.sqlite")
+    }
+
+    pub fn slashing_database_file(&self) -> PathBuf {
+        self.path.join("slashing_protection.sqlite")
+    }
+
+    pub fn network_dir(&self) -> NetworkDir {
+        NetworkDir {
+            path: self.path.join("network"),
+        }
+    }
+
+    pub fn default_logs_dir(&self) -> PathBuf {
+        self.path.join("logs")
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct NetworkDir {
+    path: PathBuf,
+}
+
+impl NetworkDir {
+    pub fn key_file(&self) -> PathBuf {
+        self.path.join("key")
+    }
+
+    pub fn enr_file(&self) -> PathBuf {
+        self.path.join("enr.dat")
+    }
+}
diff --git a/anchor/common/global_config/src/lib.rs b/anchor/common/global_config/src/lib.rs
index 3dda701c3..07f8c04b0 100644
--- a/anchor/common/global_config/src/lib.rs
+++ b/anchor/common/global_config/src/lib.rs
@@ -1,11 +1,13 @@
+pub mod data_dir;
+
 use std::{path::PathBuf, str::FromStr};
 
 use clap::Parser;
 use ssv_network_config::SsvNetworkConfig;
 use tracing::Level;
 
-/// The default Data directory, relative to the users home directory
-pub const DEFAULT_ROOT_DIR: &str = ".anchor";
+use crate::data_dir::DataDir;
+
 /// Default network, used to partition the data storage
 pub const DEFAULT_HARDCODED_NETWORK: &str = "hoodi";
 
@@ -13,7 +15,7 @@
 /// logic matching the datadir from the actual CLI definition.
 #[derive(Debug, Clone)]
 pub struct GlobalConfig {
-    pub data_dir: PathBuf,
+    pub data_dir: DataDir,
     pub ssv_network: SsvNetworkConfig,
     pub debug_level: Level,
 }
@@ -76,20 +78,11 @@ impl TryFrom<&GlobalFlags> for GlobalConfig {
         }?;
 
         let data_dir = if let Some(data_dir) = &cli.data_dir {
-            data_dir.clone()
+            DataDir::new(data_dir.clone())
         } else {
-            dirs::home_dir()
-                .unwrap_or_else(|| PathBuf::from("."))
-                .join(DEFAULT_ROOT_DIR)
-                .join(
-                    ssv_network
-                        .eth2_network
-                        .config
-                        .config_name
-                        .as_deref()
-                        .unwrap_or("custom"),
-                )
-        };
+            DataDir::default_for_network(&ssv_network)
+        }
+        .map_err(|e| e.to_string())?;
 
         Ok(GlobalConfig {
             data_dir,
diff --git a/anchor/common/qbft/src/lib.rs b/anchor/common/qbft/src/lib.rs
index e8b1fab83..87cac9371 100644
--- a/anchor/common/qbft/src/lib.rs
+++ b/anchor/common/qbft/src/lib.rs
@@ -8,7 +8,7 @@ pub use qbft_types::{
     UnsignedWrappedQbftMessage, WrappedQbftMessage,
 };
 use ssv_types::{
-    OperatorId, Round,
+    OperatorId, Round, VariableList,
     consensus::{QbftData, QbftMessage, QbftMessageType, UnsignedSSVMessage},
     message::{MsgType, SSVMessage, SignedSSVMessage},
     msgid::MessageId,
@@ -491,13 +491,20 @@
     // There was a quorum of round change justifications. We need to go through and verify each
     // one. Each will be a SignedSSVMessage
     for signed_round_change in &msg.qbft_message.round_change_justification {
-        // The qbft message is represented as a Vec in the signed message, deserialize this
-        // into a proper QbftMessage
-        let round_change: QbftMessage =
-            match QbftMessage::from_ssz_bytes(signed_round_change.ssv_message().data()) {
+        // The justification message is represented as a VariableList in the signed message,
+        // deserialize this into a proper QbftMessage
+        let Ok(typed_signed_round_change) =
+            SignedSSVMessage::from_ssz_bytes(signed_round_change)
+        else {
+            warn!("Invalid Signed Round change encoded within a message");
+            return false;
+        };
+        let round_change: QbftMessage = {
+            match QbftMessage::from_ssz_bytes(typed_signed_round_change.ssv_message().data()) {
                 Ok(data) => data,
                 Err(_) => return false,
-            };
+            }
+        };
 
         // Make sure this is actually a round change message
         if !matches!(round_change.qbft_message_type, QbftMessageType::RoundChange) {
@@ -507,7 +514,7 @@
 
         // Convert to a wrapped message and perform verification
         let wrapped = WrappedQbftMessage {
-            signed_message: signed_round_change.clone(),
+            signed_message: typed_signed_round_change.clone(),
             qbft_message: round_change.clone(),
         };
 
@@ -553,13 +560,18 @@
 
         // Validate each prepare message matches highest prepared round/value
         for signed_prepare in &msg.qbft_message.prepare_justification {
-            // The qbft message is represented as Vec in the signed message, deserialize
-            // this into a qbft message
-            let prepare = match QbftMessage::from_ssz_bytes(signed_prepare.ssv_message().data())
-            {
-                Ok(data) => data,
-                Err(_) => return false,
+            // The qbft message is represented as VariableList in the signed message,
+            // deserialize this into a qbft message
+            let Ok(typed_signed_prepare) =
+                SignedSSVMessage::from_ssz_bytes(signed_prepare)
+            else {
+                warn!("Invalid Signed Prepare encoded within a message");
+                return false;
             };
+            let prepare =
+                match QbftMessage::from_ssz_bytes(typed_signed_prepare.ssv_message().data()) {
+                    Ok(data) => data,
+                    Err(_) => return false,
+                };
 
             // Make sure this is a prepare message
             if prepare.qbft_message_type != QbftMessageType::Prepare {
@@ -568,7 +580,7 @@
             }
 
             let wrapped = WrappedQbftMessage {
-                signed_message: signed_prepare.clone(),
+                signed_message: typed_signed_prepare.clone(),
                 qbft_message: prepare.clone(),
             };
 
@@ -755,11 +767,13 @@
         let signed_commits = commit_quorum[1..]
             .iter()
             .map(|msg| msg.signed_message.clone());
-        aggregated_commit.aggregate(signed_commits);
+        aggregated_commit.aggregate(signed_commits).ok()?;
 
         // Set full data
         let hash = first_commit.qbft_message.root;
-        aggregated_commit.set_full_data(self.data.get(&hash)?.as_ssz_bytes());
+        aggregated_commit
+            .set_full_data(self.data.get(&hash)?.as_ssz_bytes())
+            .ok()?;
 
         return Some(aggregated_commit);
     }
@@ -904,14 +918,37 @@
         data_hash: D::Hash,
         round_change_justification: Vec<SignedSSVMessage>,
         prepare_justification: Vec<SignedSSVMessage>,
+        round: Option<Round>,
     ) -> UnsignedWrappedQbftMessage {
         let data = self.get_message_data(&msg_type, data_hash);
 
+        let round = if let Some(round) = round {
+            round
+        } else {
+            data.round.into()
+        };
+
+        // Clear full_data from justifications as these do not store full data.
+        let round_change_justification_vec: Vec> = round_change_justification
+            .into_iter()
+            .map(|msg| msg.without_full_data())
+            .map(|msg| VariableList::from(msg.as_ssz_bytes()))
+            .collect();
+
+        let prepare_justification_vec: Vec> = prepare_justification
+            .into_iter()
+            .map(|msg| msg.without_full_data())
+            .map(|msg| VariableList::from(msg.as_ssz_bytes()))
+            .collect();
+
+        let round_change_justification = VariableList::from(round_change_justification_vec);
+        let prepare_justification = VariableList::from(prepare_justification_vec);
+
         // Create the QBFT message
         let qbft_message = QbftMessage {
             qbft_message_type: msg_type,
             height: *self.instance_height as u64,
-            round: data.round,
+            round: round.into(),
             identifier: (&self.identifier).into(),
             root: data.root,
             data_round: data.data_round,
@@ -924,7 +961,7 @@
             self.identifier.clone(),
             qbft_message.as_ssz_bytes(),
         )
-        .expect("SSVMessage should be valid."); // TODO revisit this
+        .expect("SSVMessage should be valid.");
 
         // Wrap in unsigned SSV message
         UnsignedWrappedQbftMessage {
@@ -1063,6 +1100,7 @@
             value_to_propose,
             round_change_justifications,
             prepare_justifications,
+            None,
         );
 
         self.message_sender.send(unsigned_msg);
@@ -1078,7 +1116,7 @@
 
         // Construct unsigned prepare
         let unsigned_msg =
-            self.new_unsigned_message(QbftMessageType::Prepare, data_hash, vec![], vec![]);
+            self.new_unsigned_message(QbftMessageType::Prepare, data_hash, vec![], vec![], None);
 
         self.message_sender.send(unsigned_msg);
     }
@@ -1087,7 +1125,7 @@
     fn send_commit(&mut self, data_hash: D::Hash) {
         // Construct unsigned commit
         let unsigned_msg =
-            self.new_unsigned_message(QbftMessageType::Commit, data_hash, vec![], vec![]);
+            self.new_unsigned_message(QbftMessageType::Commit, data_hash, vec![], vec![], None);
 
         self.message_sender.send(unsigned_msg);
     }
@@ -1105,6 +1143,7 @@
             data_hash,
             round_change_justifications,
             vec![],
+            None,
         );
 
         // forget that we accepted a proposal
@@ -1135,4 +1174,23 @@
             }
         })
     }
+
+    // Expose the ability to create new unsigned messages for spec testing
+    #[cfg(test)]
+    pub fn new_unsigned_message_spec(
+        &self,
+ // Expose the ability to create new unsigned messages for spec testing + #[cfg(test)] + pub fn new_unsigned_message_spec( + &self, + msg_type: QbftMessageType, + data_hash: D::Hash, + round_change_justification: Vec<SignedSSVMessage>, + prepare_justification: Vec<SignedSSVMessage>, + round: Option<Round>, + ) -> UnsignedWrappedQbftMessage { + self.new_unsigned_message( + msg_type, + data_hash, + round_change_justification, + prepare_justification, + round, + ) + } } diff --git a/anchor/common/qbft/src/qbft_types.rs b/anchor/common/qbft/src/qbft_types.rs index 4f1f73c14..f638f5d81 100644 --- a/anchor/common/qbft/src/qbft_types.rs +++ b/anchor/common/qbft/src/qbft_types.rs @@ -60,7 +60,7 @@ pub struct WrappedQbftMessage { impl Display for WrappedQbftMessage { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let mut f = f.debug_struct("WrappedQbftMessage"); - f.field("operator_ids", self.signed_message.operator_ids()) + f.field("operator_ids", &self.signed_message.operator_ids()) .field("full_data", &!self.signed_message.full_data().is_empty()); self.qbft_message.format_fields(&mut f); f.finish() diff --git a/anchor/common/qbft/src/tests.rs b/anchor/common/qbft/src/tests.rs index 2a091b959..ae805b18f 100644 --- a/anchor/common/qbft/src/tests.rs +++ b/anchor/common/qbft/src/tests.rs @@ -10,10 +10,7 @@ use std::{ use qbft_types::DefaultLeaderFunction; use sha2::{Digest, Sha256}; -use ssv_types::{ - OperatorId, - message::{RSA_SIGNATURE_SIZE, SignedSSVMessage}, -}; +use ssv_types::{OperatorId, RSA_SIGNATURE_SIZE, message::SignedSSVMessage}; use ssz_derive::{Decode, Encode}; use tracing::debug_span; use tracing_subscriber::filter::EnvFilter; @@ -52,7 +49,7 @@ fn convert_unsigned_to_signed( ) -> WrappedQbftMessage { // Create a signed message containing just this operator let signed_message = SignedSSVMessage::new( - vec![vec![0; RSA_SIGNATURE_SIZE]], + vec![[0; RSA_SIGNATURE_SIZE]], vec![OperatorId(*operator_id)], msg.unsigned_message.ssv_message, msg.unsigned_message.full_data, diff --git a/anchor/common/ssv_types/Cargo.toml b/anchor/common/ssv_types/Cargo.toml index 53427afef..4996abc41 100644 --- a/anchor/common/ssv_types/Cargo.toml +++ b/anchor/common/ssv_types/Cargo.toml @@ -18,8 +18,13 @@ indexmap = { workspace = true } openssl = { workspace = true } operator_key = { workspace = true } rusqlite = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } sha2 = { workspace = true } +ssz_types = { workspace = true } thiserror = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } types = { workspace = true } +zerocopy = "0.8.24" diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs index d44bba6dd..8d3c70b3e 100644 --- a/anchor/common/ssv_types/src/cluster.rs +++ b/anchor/common/ssv_types/src/cluster.rs @@ -2,13 +2,14 @@ use std::fmt::Debug; use derive_more::{Deref, From}; use indexmap::IndexSet; +use serde::Deserialize; use ssz_derive::{Decode, Encode}; use types::{Address, Graffiti, PublicKeyBytes}; use crate::{OperatorId, committee::CommitteeId}; /// Unique identifier for a cluster -#[derive(Clone, Copy, Default, Eq, PartialEq, Hash, From, Deref)] +#[derive(Clone, Copy, Default, Eq, PartialEq, Hash, From, Deref, Deserialize)] pub struct ClusterId(pub [u8; 32]); impl Debug for ClusterId { @@ -66,7 +67,9 @@ pub struct ClusterMember { }
/// Index of the validator in the validator registry. -#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref, Encode, Decode)] +#[derive( + Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref, Encode, Decode, Deserialize, +)] #[ssz(struct_behaviour = "transparent")] pub struct ValidatorIndex(pub usize); diff --git a/anchor/common/ssv_types/src/committee.rs b/anchor/common/ssv_types/src/committee.rs index 6d1ee9dff..f99e09a41 100644 --- a/anchor/common/ssv_types/src/committee.rs +++ b/anchor/common/ssv_types/src/committee.rs @@ -2,6 +2,7 @@ use std::fmt::{Debug, Formatter}; use derive_more::{Deref, From}; use indexmap::IndexSet; +use serde::Deserialize; use sha2::{Digest, Sha256}; use crate::{OperatorId, ValidatorIndex}; @@ -16,7 +17,7 @@ pub struct CommitteeInfo { } /// Unique identifier for a committee -#[derive(Clone, Copy, Default, Eq, PartialEq, Hash, From, Deref)] +#[derive(Clone, Copy, Default, Eq, PartialEq, Hash, From, Deref, Deserialize)] pub struct CommitteeId(pub [u8; COMMITTEE_ID_LEN]); impl Debug for CommitteeId { diff --git a/anchor/common/ssv_types/src/consensus.rs b/anchor/common/ssv_types/src/consensus.rs index 7709edde0..38195731f 100644 --- a/anchor/common/ssv_types/src/consensus.rs +++ b/anchor/common/ssv_types/src/consensus.rs @@ -5,6 +5,7 @@ use std::{ }; use derive_more::{From, Into}; +use serde::Deserialize; use sha2::{Digest, Sha256}; use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; @@ -13,7 +14,7 @@ use tree_hash_derive::TreeHash; use types::{ Checkpoint, CommitteeIndex, EthSpec, ForkName, Hash256, PublicKeyBytes, Signature, Slot, SyncCommitteeContribution, VariableList, - typenum::{U13, U56}, + typenum::{Prod, Sum, U3, U5, U8, U13, U56, U388, U608, U700, U852, U1000, U10000, U1000000}, }; use crate::{ValidatorIndex, message::*}; @@ -37,6 +38,11 @@ pub trait QbftData: Debug + Clone + Encode + Decode { fn validate(&self) -> bool; } +/// ValidatorConsensusData.DataSSZ max size: 8388608 bytes (2^23) +/// This is calculated as 2^23 = 8,388,608 +/// We can represent this as 8 * 1000000 + 388 * 1000 + 608 +pub type ValidatorConsensusDataLen = Sum<Prod<U8, U1000000>, Sum<Prod<U388, U1000>, U608>>;
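
`typenum` ships no alias for a number this large, so the bound is assembled from `Sum`/`Prod` over the aliases it does provide. A quick sanity check that the alias really evaluates to 2^23, assuming `typenum` is available as a dependency (as this diff adds it to `ssv_types`):

```rust
use ssv_types::consensus::ValidatorConsensusDataLen;
use typenum::Unsigned;

fn main() {
    // 8 * 1_000_000 + 388 * 1_000 + 608 == 8_388_608 == 1 << 23
    assert_eq!(ValidatorConsensusDataLen::USIZE, 8 * 1_000_000 + 388 * 1_000 + 608);
    assert_eq!(ValidatorConsensusDataLen::USIZE, 1 << 23);
}
```
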
/// A SSV Message that has not been signed yet. #[derive(Clone, Debug, Encode)] pub struct UnsignedSSVMessage { @@ -48,19 +54,21 @@ pub struct UnsignedSSVMessage { pub full_data: Vec<u8>, } +pub type RoundChangeLength = Sum<Prod<U5, U10000>, Sum<U1000, U852>>; // 51852 +pub type JustificationLength = Sum<Prod<U3, U1000>, U700>; // 3700 + /// A QBFT specific message -#[derive(Debug, Clone, Encode, Decode)] +#[derive(Debug, Clone, Encode, Decode, TreeHash)] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct QbftMessage { pub qbft_message_type: QbftMessageType, pub height: u64, pub round: u64, - pub identifier: VariableList<u8, U56>, /* TODO: address redundant typing due to ssz_max * encoding in go-client */ + pub identifier: VariableList<u8, U56>, pub root: Hash256, pub data_round: u64, - pub round_change_justification: Vec<SignedSSVMessage>, // always without full_data - pub prepare_justification: Vec<SignedSSVMessage>, // always without full_data + pub round_change_justification: VariableList<VariableList<u8, RoundChangeLength>, U13>, /* always without full_data */ + pub prepare_justification: VariableList<VariableList<u8, JustificationLength>, U13>, /* always without full_data */ } impl Display for QbftMessage { @@ -163,11 +171,31 @@ impl Decode for QbftMessageType { } } -#[derive(Clone, Debug, PartialEq, Encode, Decode)] +impl TreeHash for QbftMessageType { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + let value = *self as u64; + value.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + u64::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + let value = *self as u64; + value.tree_hash_root() + } +} + +#[derive(Clone, Debug, PartialEq, Encode, Decode, TreeHash)] pub struct ValidatorConsensusData { pub duty: ValidatorDuty, pub version: DataVersion, - pub data_ssz: Vec<u8>, + pub data_ssz: VariableList<u8, ValidatorConsensusDataLen>, } impl QbftData for ValidatorConsensusData { @@ -189,7 +217,7 @@ } } -#[derive(Clone, Debug, TreeHash, PartialEq, Encode, Decode)] +#[derive(Clone, Debug, TreeHash, PartialEq, Encode, Decode, Deserialize)] pub struct ValidatorDuty { pub r#type: BeaconRole, pub pub_key: PublicKeyBytes, @@ -202,7 +230,7 @@ pub struct ValidatorDuty { pub validator_sync_committee_indices: VariableList<u64, U13>, } -#[derive(Clone, Debug, PartialEq, Encode, Decode)] +#[derive(Clone, Debug, PartialEq, Encode, Decode, Deserialize)] #[ssz(struct_behaviour = "transparent")] pub struct BeaconRole(u64); @@ -291,6 +319,42 @@ impl Decode for DataVersion { } } +impl TreeHash for DataVersion { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + let num: u64 = match self.0 { + ForkName::Base => 1, + ForkName::Altair => 2, + ForkName::Bellatrix => 3, + ForkName::Capella => 4, + ForkName::Deneb => 5, + ForkName::Electra => 6, + ForkName::Fulu => 7, + }; + num.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + u64::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + let num: u64 = match self.0 { + ForkName::Base => 1, + ForkName::Altair => 2, + ForkName::Bellatrix => 3, + ForkName::Capella => 4, + ForkName::Deneb => 5, + ForkName::Electra => 6, + ForkName::Fulu => 7, + }; + num.tree_hash_root() + } +} + #[derive(Clone, Debug, TreeHash, Encode, Decode)] pub struct Contribution { pub selection_proof_sig: Signature,
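
`DataVersion` and `QbftMessageType` cannot simply derive `TreeHash`, so each impl hashes the variant as a basic `u64` leaf, matching how the Go spec tree-hashes these enums. The same four-method pattern on a toy enum (illustrative only, not part of the diff):

```rust
use tree_hash::{Hash256, PackedEncoding, TreeHash, TreeHashType};

// Toy enum showing the delegate-to-u64 pattern used above.
#[derive(Clone, Copy)]
enum Phase {
    A = 1,
    B = 2,
}

impl TreeHash for Phase {
    fn tree_hash_type() -> TreeHashType {
        // Hashed exactly like a primitive integer leaf.
        TreeHashType::Basic
    }
    fn tree_hash_packed_encoding(&self) -> PackedEncoding {
        (*self as u64).tree_hash_packed_encoding()
    }
    fn tree_hash_packing_factor() -> usize {
        u64::tree_hash_packing_factor()
    }
    fn tree_hash_root(&self) -> Hash256 {
        (*self as u64).tree_hash_root()
    }
}
```
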
diff --git a/anchor/common/ssv_types/src/domain_type.rs b/anchor/common/ssv_types/src/domain_type.rs index 34641d537..38ba3bede 100644 --- a/anchor/common/ssv_types/src/domain_type.rs +++ b/anchor/common/ssv_types/src/domain_type.rs @@ -1,6 +1,12 @@ use std::str::FromStr; -#[derive(Clone, Debug, Default, PartialEq)] +use rusqlite::{ + ToSql, + types::{FromSql, FromSqlError, FromSqlResult, ToSqlOutput, Value, ValueRef}, +}; +use serde::Deserialize; + +#[derive(Clone, Copy, Debug, Default, PartialEq, Deserialize)] pub struct DomainType(pub [u8; 4]); impl FromStr for DomainType { @@ -28,3 +34,19 @@ impl From<[u8; 4]> for DomainType { Self(bytes) } } + +impl FromSql for DomainType { + fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> { + let value = value.as_i64()?; + let value = u32::try_from(value).map_err(|_| FromSqlError::InvalidType)?; + Ok(value.to_le_bytes().into()) + } +} + +impl ToSql for DomainType { + fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> { + Ok(ToSqlOutput::Owned(Value::Integer( + u32::from_le_bytes(self.0).into(), + ))) + } +} diff --git a/anchor/common/ssv_types/src/lib.rs b/anchor/common/ssv_types/src/lib.rs index e871aed03..291692be6 100644 --- a/anchor/common/ssv_types/src/lib.rs +++ b/anchor/common/ssv_types/src/lib.rs @@ -13,8 +13,32 @@ pub mod partial_sig; mod round; mod share; mod sql_conversions; +pub mod test_utils; pub use indexmap::IndexSet; pub use round::Round; pub use share::ENCRYPTED_KEY_LENGTH; pub use types::{Epoch, Slot, VariableList}; + +// Shared constants used across message types +pub const RSA_SIGNATURE_SIZE: usize = 256; +pub const MAX_SIGNATURES: usize = 13; + +// Helper that converts from OutOfBounds to a custom error variant. +#[macro_export] +macro_rules! vec_to_variable_list { ($v:expr, $error_variant:path) => { ssz_types::VariableList::new($v).map_err(|err| { if let ssz_types::Error::OutOfBounds { i, len } = err { $error_variant { provided: i, max: len, } } else { panic!( "OutOfBounds is the only variant that should be returned by VariableList::new" ) } }) }; }
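
The macro above exists so each call site can surface `VariableList`'s single failure mode (`OutOfBounds { i, len }`) as its own error variant with `provided`/`max` fields. A usage sketch with a stand-in error type; note the expanded macro body names `ssz_types::` paths, so the caller must also depend on `ssz_types` (and here `typenum`):

```rust
use ssv_types::vec_to_variable_list;

// Stand-in error with the `provided`/`max` field shape the macro expects.
#[derive(Debug)]
enum DemoError {
    TooBig { provided: usize, max: usize },
}

// The target list type drives inference: at most 4 bytes here.
fn demo(data: Vec<u8>) -> Result<ssz_types::VariableList<u8, typenum::U4>, DemoError> {
    vec_to_variable_list!(data, DemoError::TooBig)
}

fn main() {
    assert!(demo(vec![0; 4]).is_ok());
    assert!(matches!(
        demo(vec![0; 5]),
        Err(DemoError::TooBig { provided: 5, max: 4 })
    ));
}
```
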
diff --git a/anchor/common/ssv_types/src/message.rs b/anchor/common/ssv_types/src/message.rs index e29a10523..19dfe5a34 100644 --- a/anchor/common/ssv_types/src/message.rs +++ b/anchor/common/ssv_types/src/message.rs @@ -3,31 +3,32 @@ use std::{ fmt::{Debug, Display, Formatter}, }; +use base64::prelude::*; +use serde::{Deserialize, Deserializer, de::Error}; +use serde_json::Value; use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use thiserror::Error; +use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; +use tree_hash_derive::TreeHash; +use typenum::Unsigned; +use types::{ + Hash256, + typenum::{Prod, Sum, U8, U13, U256, U388, U412, U608, U722, U836, U1000, U1000000}, +}; use crate::{ - OperatorId, - message::{ - SSVMessageError::{EmptyData, SSVDataTooBig}, - SignedSSVMessageError::{ - DuplicatedSigner, FullDataTooLong, NoSignatures, NoSigners, - SignersAndSignaturesWithDifferentLength, SignersNotSorted, TooManyOperatorIDs, - TooManySignatures, WrongRSASignatureSize, ZeroSigner, - }, - }, + MAX_SIGNATURES, OperatorId, RSA_SIGNATURE_SIZE, + consensus::{JustificationLength, RoundChangeLength}, msgid::MessageId, }; const QBFT_MSG_TYPE_SIZE: usize = 8; const HEIGHT_SIZE: usize = 8; const ROUND_SIZE: usize = 8; -const MAX_NO_JUSTIFICATION_SIZE: usize = 3616; -const MAX1_JUSTIFICATION_SIZE: usize = 50624; const IDENTIFIER_SIZE: usize = 56; // same as MessageId length const ROOT_SIZE: usize = 32; -const MAX_SIGNATURES: usize = 13; // For partial signatures const PARTIAL_SIGNATURE_SIZE: usize = 96; @@ -36,35 +37,34 @@ const VALIDATOR_INDEX_SIZE: usize = 8; const SLOT_SIZE: usize = 8; const PARTIAL_SIG_MSG_TYPE_SIZE: usize = 8; const MAX_PARTIAL_SIGNATURE_MESSAGES: usize = 1000; -const ENCODING_OVERHEAD_DIVISOR: usize = 20; - -// For RSA-based SignedSSVMessage -pub const RSA_SIGNATURE_SIZE: usize = 256; - -// Additional from the Go code -const MAX_FULL_DATA_SIZE: usize = 4_194_532; // from spectypes.SignedSSVMessage const MAX_CONSENSUS_MSG_SIZE: usize = QBFT_MSG_TYPE_SIZE + HEIGHT_SIZE + ROUND_SIZE - + IDENTIFIER_SIZE + + (IDENTIFIER_SIZE + ssz::BYTES_PER_LENGTH_OFFSET) + ROOT_SIZE + ROUND_SIZE - + MAX_SIGNATURES * (MAX_NO_JUSTIFICATION_SIZE + MAX1_JUSTIFICATION_SIZE); - -const MAX_ENCODED_CONSENSUS_MSG_SIZE: usize = - MAX_CONSENSUS_MSG_SIZE + (MAX_CONSENSUS_MSG_SIZE / ENCODING_OVERHEAD_DIVISOR) + 4; + + (MAX_SIGNATURES * (RoundChangeLength::USIZE + ssz::BYTES_PER_LENGTH_OFFSET) + + ssz::BYTES_PER_LENGTH_OFFSET) + + (MAX_SIGNATURES * (JustificationLength::USIZE + ssz::BYTES_PER_LENGTH_OFFSET) + + ssz::BYTES_PER_LENGTH_OFFSET); const PARTIAL_SIGNATURE_MSG_SIZE: usize = PARTIAL_SIGNATURE_SIZE + ROOT_SIZE + OPERATOR_ID_SIZE + VALIDATOR_INDEX_SIZE; const MAX_PARTIAL_SIGNATURE_MSGS_SIZE: usize = PARTIAL_SIG_MSG_TYPE_SIZE + SLOT_SIZE - + MAX_PARTIAL_SIGNATURE_MESSAGES * PARTIAL_SIGNATURE_MSG_SIZE; + + MAX_PARTIAL_SIGNATURE_MESSAGES * PARTIAL_SIGNATURE_MSG_SIZE + + ssz::BYTES_PER_LENGTH_OFFSET; + +/// SSVMessage.Data max size: 722412 (from Go spec) +/// 722412 = 722 * 1000 + 412 = 722000 + 412 +type SSVMessageDataLen = Sum<Prod<U722, U1000>, U412>; -const MAX_ENCODED_PARTIAL_SIGNATURE_SIZE: usize = MAX_PARTIAL_SIGNATURE_MSGS_SIZE - + (MAX_PARTIAL_SIGNATURE_MSGS_SIZE / ENCODING_OVERHEAD_DIVISOR) - + 4; +/// ValidatorConsensusData.DataSSZ max size: 8388608 bytes (2^23) +/// This is calculated as 2^23 = 8,388,608 +/// We can represent this as 8 * 1000000 + 388 * 1000 + 608 +pub type ValidatorConsensusDataLen = Sum<Prod<U8, U1000000>, Sum<Prod<U388, U1000>, U608>>;
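
The new maxima are derived structurally instead of via the old `ENCODING_OVERHEAD_DIVISOR` estimate: every variable-size SSZ field costs its payload plus a 4-byte offset (`ssz::BYTES_PER_LENGTH_OFFSET`), and each of the up-to-13 justification entries pays its own offset too. The same arithmetic spelled out, with sizes taken from the constants above:

```rust
fn main() {
    let offset = 4usize; // ssz::BYTES_PER_LENGTH_OFFSET
    let fixed = 8 + 8 + 8 + 32 + 8; // msg type, height, round, root, data_round
    let identifier = 56 + offset;
    let round_changes = 13 * (51_852 + offset) + offset;
    let prepares = 13 * (3_700 + offset) + offset;
    // 722_412: exactly the SSVMessage.Data bound (SSVMessageDataLen) above.
    assert_eq!(fixed + identifier + round_changes + prepares, 722_412);
}
```
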
/// Defines the types of messages with explicit discriminant values. #[derive(Debug, Clone, PartialEq, Eq)] @@ -75,6 +75,42 @@ pub enum MsgType { SSVPartialSignatureMsgType = 1, } +impl<'de> Deserialize<'de> for MsgType { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let value = u64::deserialize(deserializer)?; + match value { + 0 => Ok(MsgType::SSVConsensusMsgType), + 1 => Ok(MsgType::SSVPartialSignatureMsgType), + _ => Err(serde::de::Error::custom(format!( + "Invalid MsgType value: {value}" + ))), + } + } +} + +impl TreeHash for MsgType { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + let value = self.clone() as u64; + value.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + u64::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> Hash256 { + let value = self.clone() as u64; + value.tree_hash_root() + } +} + impl TryFrom<u64> for MsgType { type Error = DecodeError; @@ -121,17 +157,7 @@ impl Decode for MsgType { } fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { - if bytes.len() != U64_SIZE { - return Err(DecodeError::InvalidByteLength { - len: bytes.len(), - expected: U64_SIZE, - }); - } - let value = - u64::from_le_bytes(bytes.try_into().map_err(|_| { - DecodeError::BytesInvalid(format!("Invalid length: {}", bytes.len())) - })?); - value.try_into() + u64::from_ssz_bytes(bytes)?.try_into() } } @@ -141,8 +167,8 @@ pub enum SSVMessageError { #[error("SSVMessage data is empty")] EmptyData, - #[error("SSVMessage data too large: got {got}, max {max}")] - SSVDataTooBig { got: usize, max: usize }, + #[error("SSVMessage data too large: got {provided}, max {max}")] + SSVDataTooBig { provided: usize, max: usize }, #[error("Wrong domain: got {got}, expected {want}")] WrongDomain { got: String, want: String }, @@ -152,12 +178,18 @@ } /// Represents a bare SSVMessage with a type, ID, and data. -#[derive(Encode, Decode, Clone, PartialEq, Eq)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, Deserialize, TreeHash)] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct SSVMessage { + #[serde(rename = "MsgType")] msg_type: MsgType, - msg_id: MessageId, // Fixed-size [u8; 56] - data: Vec<u8>, // Variable-length byte array + + #[serde(rename = "MsgID", deserialize_with = "deserialize_hex_message_id")] + msg_id: MessageId, + + #[serde(rename = "Data")] + #[serde(deserialize_with = "crate::message::deserialize_base64_message_data")] + data: VariableList<u8, SSVMessageDataLen>, } impl Debug for SSVMessage { @@ -165,13 +197,13 @@ f.debug_struct("SSVMessage") .field("msg_type", &self.msg_type) .field("msg_id", &self.msg_id) - .field("data", &hex::encode(&self.data)) + .field("data", &hex::encode(self.data.to_vec())) .finish() } } impl SSVMessage { - /// Creates a new `SSVMessage`. + /// Creates a new `SSVMessage` using a vec instead of a `VariableList`.
/// /// # Arguments /// /// # Examples /// /// ``` - /// use ssv_types::message::{MessageId, MsgType, SSVMessage}; + /// use ssv_types::{ + /// message::{MsgType, SSVMessage}, + /// msgid::MessageId, + /// }; /// let message_id = MessageId::from([0u8; 56]); /// let msg = SSVMessage::new(MsgType::SSVConsensusMsgType, message_id, vec![1, 2, 3]); /// ``` pub fn new( msg_type: MsgType, msg_id: MessageId, data: Vec<u8>, ) -> Result<Self, SSVMessageError> { + let data = crate::vec_to_variable_list!(data, SSVMessageError::SSVDataTooBig)?; + let ssv_message = SSVMessage { msg_type, msg_id, @@ -202,22 +239,22 @@ pub fn validate(&self) -> Result<(), SSVMessageError> { if self.data.is_empty() { - return Err(EmptyData); + return Err(SSVMessageError::EmptyData); } match self.msg_type { MsgType::SSVConsensusMsgType => { - if self.data.len() > MAX_ENCODED_CONSENSUS_MSG_SIZE { - return Err(SSVDataTooBig { - got: self.data.len(), - max: MAX_ENCODED_CONSENSUS_MSG_SIZE, + if self.data.len() > MAX_CONSENSUS_MSG_SIZE { + return Err(SSVMessageError::SSVDataTooBig { + provided: self.data.len(), + max: MAX_CONSENSUS_MSG_SIZE, }); } } MsgType::SSVPartialSignatureMsgType => { - if self.data.len() > MAX_ENCODED_PARTIAL_SIGNATURE_SIZE { - return Err(SSVDataTooBig { - got: self.data.len(), - max: MAX_ENCODED_PARTIAL_SIGNATURE_SIZE, + if self.data.len() > MAX_PARTIAL_SIGNATURE_MSGS_SIZE { + return Err(SSVMessageError::SSVDataTooBig { + provided: self.data.len(), + max: MAX_PARTIAL_SIGNATURE_MSGS_SIZE, }); } } @@ -239,6 +276,20 @@ pub fn data(&self) -> &[u8] { &self.data } + + /// A testing helper function to create invalid messages. + #[cfg(test)] + pub fn new_unvalidated( + msg_type: MsgType, + msg_id: MessageId, + data: VariableList<u8, SSVMessageDataLen>, + ) -> Self { + SSVMessage { + msg_type, + msg_id, + data, + } + } } /// Errors that can occur while creating a `SignedSSVMessage`. @@ -259,8 +310,8 @@ pub enum SignedSSVMessageError { #[error("Too many operator IDs: provided {provided}, maximum allowed is {max}.")] TooManyOperatorIDs { provided: usize, max: usize }, - #[error("Full data is too long: {length} bytes, maximum allowed is {max} bytes.")] - FullDataTooLong { length: usize, max: usize }, + #[error("Full data is too long: {provided} bytes, maximum allowed is {max} bytes.")] + FullDataTooLong { provided: usize, max: usize }, #[error("No signers were provided (must have at least one signer).")] NoSigners, @@ -281,52 +332,78 @@ DuplicatedSigner, #[error("Invalid SSVMessage: {0}")] - SSVMessagError(#[from] SSVMessageError), + SSVMessageError(#[from] SSVMessageError), } +/// SignedSSVMessage.FullData max size: 8388836 (from Go spec) +/// 8388836 = 8000000 + 388836 = 8 * 1000000 + 388836 +/// We need to construct 388836 = 388 * 1000 + 836 = 388000 + 836 +type SSVMessageFullDataLen = Sum<Prod<U8, U1000000>, Sum<Prod<U388, U1000>, U836>>; + +/// Maximum of 13 signatures. +pub type SignatureList = VariableList<VariableList<u8, U256>, U13>;
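
Signatures are now a bounded list of bounded byte lists rather than `Vec<Vec<u8>>`, so both the 13-entry cap and the 256-byte cap are enforced by the types. Constructing one entry by hand, assuming the `ssv_types` and `types` crates from this workspace:

```rust
use ssv_types::{RSA_SIGNATURE_SIZE, VariableList, message::SignatureList};
use types::typenum::U256;

fn main() {
    // One 256-byte RSA signature: a bounded inner list (<= 256 bytes) inside
    // the bounded outer list (<= 13 entries).
    let sig: VariableList<u8, U256> =
        VariableList::new(vec![0u8; RSA_SIGNATURE_SIZE]).expect("exactly 256 bytes fits");
    let mut sigs = SignatureList::empty();
    sigs.push(sig).expect("fewer than 13 signatures");
    assert_eq!(sigs.len(), 1);
}
```
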
/// Represents a signed SSV Message with signatures, operator IDs, the message itself, and full /// data. -#[derive(Encode, Decode, Clone, PartialEq, Eq)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, Deserialize, TreeHash)] pub struct SignedSSVMessage { - signatures: Vec<Vec<u8>>, // Vec of Vec<u8>, max 13 elements, each with 256 bytes - operator_ids: Vec<OperatorId>, // Vec of OperatorID (u64), max 13 elements - ssv_message: SSVMessage, // SSVMessage: Required field - full_data: Vec<u8>, // Variable-length byte array, max 4,194,532 bytes -} + #[serde(rename = "Signatures")] + #[serde(deserialize_with = "deserialize_base64_signatures")] + signatures: SignatureList, -#[cfg(feature = "arbitrary-fuzz")] -use arbitrary::{Arbitrary, Result, Unstructured}; - -#[cfg(feature = "arbitrary-fuzz")] -use crate::consensus::{BeaconVote, QbftMessage}; - -#[cfg(feature = "arbitrary-fuzz")] -impl<'a> Arbitrary<'a> for SignedSSVMessage { - fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> { - // Generate arbitrary BeaconVote - let beacon_vote = BeaconVote::arbitrary(u)?; + #[serde(rename = "OperatorIDs")] + operator_ids: VariableList<OperatorId, U13>, - // Generate arbitrary QbftMessage - let qbft_message = QbftMessage::arbitrary(u)?; + #[serde(rename = "SSVMessage")] + ssv_message: SSVMessage, - // Create arbitrary basic fields - let signatures = Vec::<Vec<u8>>::arbitrary(u)?; - let operator_ids = Vec::<OperatorId>::arbitrary(u)?; + #[serde(rename = "FullData")] + #[serde(deserialize_with = "deserialize_base64_or_empty")] + full_data: VariableList<u8, SSVMessageFullDataLen>, +} - // Create SSV message with serialized QbftMessage - let ssv_message = SSVMessage { - msg_type: MsgType::arbitrary(u)?, - msg_id: MessageId::arbitrary(u)?, - data: qbft_message.as_ssz_bytes(), // Serialize QbftMessage to bytes - }; +#[cfg(feature = "arbitrary-fuzz")] +mod arbitrary_impls { + use arbitrary::{Arbitrary, Result, Unstructured}; + use ssz::Encode; - // Create the SignedSSVMessage with serialized BeaconVote - Ok(SignedSSVMessage { - signatures, - operator_ids, - ssv_message, - full_data: beacon_vote.as_ssz_bytes(), // Serialize BeaconVote to bytes - }) + use super::*; + use crate::{ + RSA_SIGNATURE_SIZE, + consensus::{BeaconVote, QbftMessage}, + message::MsgType, + msgid::MessageId, + }; + + impl<'a> Arbitrary<'a> for SignedSSVMessage { + fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> { + // Generate arbitrary BeaconVote + let beacon_vote = BeaconVote::arbitrary(u)?; + + // Generate arbitrary QbftMessage + let qbft_message = QbftMessage::arbitrary(u)?; + + // Create arbitrary basic fields + let signatures = Vec::<[u8; RSA_SIGNATURE_SIZE]>::arbitrary(u)?; + let operator_ids = Vec::<OperatorId>::arbitrary(u)?; + + // Create SSV message with serialized QbftMessage + let ssv_message = SSVMessage::new( + MsgType::arbitrary(u)?, + MessageId::arbitrary(u)?, + qbft_message.as_ssz_bytes(), // Serialize QbftMessage to bytes + ) + .expect("Valid SSVMessage"); + + // Create the SignedSSVMessage with serialized BeaconVote + Ok(SignedSSVMessage::new( + signatures, + operator_ids, + ssv_message, + beacon_vote.as_ssz_bytes(), // Serialize BeaconVote to bytes + ) + .expect("Valid SignedSSVMessage")) + } } } @@ -346,13 +423,17 @@ impl Display for SignedSSVMessage { impl Debug for SignedSSVMessage { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - let signatures = self.signatures.iter().map(hex::encode).collect::<Vec<_>>(); + let signatures = (&self.signatures) + .into_iter() + .map(|v| v.to_vec()) + .map(hex::encode) + .collect::<Vec<_>>(); f.debug_struct("SignedSSVMessage") .field("signatures", &signatures) .field("operator_ids", &self.operator_ids) .field("ssv_message", &self.ssv_message) - .field("full_data", &hex::encode(&self.full_data))
+ .field("full_data", &hex::encode(&*self.full_data)) .finish() } } @@ -376,7 +457,8 @@ /// ``` /// use ssv_types::{ /// OperatorId, - /// message::{MessageId, MsgType, SSVMessage, SignedSSVMessage}, + /// message::{MsgType, SSVMessage, SignedSSVMessage}, + /// msgid::MessageId, /// }; /// let ssv_msg = SSVMessage::new( /// MsgType::SSVConsensusMsgType, @@ -384,25 +466,43 @@ /// vec![1, 2, 3], /// ) /// .unwrap(); - /// let signed_msg = SignedSSVMessage::new( - /// vec![vec![0; 256]], - /// vec![OperatorId(1)], - /// ssv_msg, - /// vec![4, 5, 6], - /// ) - /// .unwrap(); + /// let signed_msg = + /// SignedSSVMessage::new(vec![[0; 256]], vec![OperatorId(1)], ssv_msg, vec![4, 5, 6]).unwrap(); /// ``` pub fn new( - signatures: Vec<Vec<u8>>, + signatures: Vec<[u8; RSA_SIGNATURE_SIZE]>, operator_ids: Vec<OperatorId>, ssv_message: SSVMessage, full_data: Vec<u8>, ) -> Result<Self, SignedSSVMessageError> { + // Convert Vec<[u8; 256]> to VariableList<VariableList<u8, U256>, U13> + let mut signature_list = VariableList::empty(); + for sig in signatures { + let sig_variable_list = VariableList::new(sig.to_vec()).map_err(|_| { + SignedSSVMessageError::TooManySignatures { + provided: 256, + max: 256, + } + })?; + signature_list.push(sig_variable_list).map_err(|_| { + SignedSSVMessageError::TooManySignatures { + provided: signature_list.len() + 1, + max: 13, + } + })?; + } + let signed_ssv_message = SignedSSVMessage { - signatures, - operator_ids, + signatures: signature_list, + operator_ids: crate::vec_to_variable_list!( + operator_ids, + SignedSSVMessageError::TooManyOperatorIDs + )?, ssv_message, - full_data, + full_data: crate::vec_to_variable_list!( + full_data, + SignedSSVMessageError::FullDataTooLong + )?, }; signed_ssv_message.validate()?; @@ -411,12 +511,12 @@ } /// Returns a reference to the signatures. - pub fn signatures(&self) -> &Vec<Vec<u8>> { + pub fn signatures(&self) -> &SignatureList { &self.signatures } /// Returns a reference to the operator IDs. - pub fn operator_ids(&self) -> &Vec<OperatorId> { + pub fn operator_ids(&self) -> &[OperatorId] { &self.operator_ids } @@ -430,19 +530,47 @@ &self.full_data } - pub fn set_full_data(&mut self, data: Vec<u8>) { - self.full_data = data; + pub fn set_full_data(&mut self, data: Vec<u8>) -> Result<(), SignedSSVMessageError> { + self.full_data = + crate::vec_to_variable_list!(data, SignedSSVMessageError::FullDataTooLong)?; + Ok(()) + } + + /// Returns a clone of this SignedSSVMessage with empty full_data. + /// This matches the Go implementation's WithoutFullData() method used for justifications.
+ pub fn without_full_data(&self) -> Self { + let mut cloned = self.clone(); + cloned.full_data = VariableList::empty(); + cloned } /// Aggregate a set of signed ssv messages into Self - pub fn aggregate<I>(&mut self, others: I) + pub fn aggregate<I>(&mut self, others: I) -> Result<(), SignedSSVMessageError> where I: IntoIterator<Item = SignedSSVMessage>, { for signed_msg in others { + if signed_msg.operator_ids.len() != signed_msg.signatures.len() { + return Err(SignedSSVMessageError::SignersAndSignaturesWithDifferentLength); + } + // These will only all have 1 signature/operator, but we call extend for safety - self.signatures.extend(signed_msg.signatures); - self.operator_ids.extend(signed_msg.operator_ids); + for signature in signed_msg.signatures.into_iter() { + self.signatures.push(signature).map_err(|_| { + SignedSSVMessageError::TooManySignatures { + provided: self.signatures.len() + 1, + max: MAX_SIGNATURES, + } + })?; + } + for operator_id in signed_msg.operator_ids.into_iter() { + self.operator_ids.push(operator_id).map_err(|_| { + SignedSSVMessageError::TooManyOperatorIDs { + provided: self.operator_ids.len() + 1, + max: MAX_SIGNATURES, + } + })?; + } } // Maintain id <-> sig pairing during sorting @@ -455,62 +583,38 @@ sig_pairs.sort_by_key(|&(_, op_id)| *op_id); - let (sorted_signatures, sorted_operator_ids) = sig_pairs.into_iter().unzip(); - self.signatures = sorted_signatures; - self.operator_ids = sorted_operator_ids; + let (sorted_signatures, sorted_operator_ids) = sig_pairs.iter().cloned().unzip(); + self.signatures = crate::vec_to_variable_list!( + sorted_signatures, + SignedSSVMessageError::TooManySignatures + )?; + self.operator_ids = crate::vec_to_variable_list!( + sorted_operator_ids, + SignedSSVMessageError::TooManyOperatorIDs + )?; + Ok(()) }
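
With the bounded lists, aggregation can now fail (too many signers, or a malformed input whose signer and signature counts disagree), so `aggregate` returns a `Result` that callers must propagate; the qbft commit-aggregation path above does so with `.ok()?`. A hypothetical caller:

```rust
use ssv_types::message::{SignedSSVMessage, SignedSSVMessageError};

// Merge single-signer commits into `base`, bubbling the new error type up
// instead of silently growing unbounded Vecs as before.
fn merge(
    base: &mut SignedSSVMessage,
    others: Vec<SignedSSVMessage>,
) -> Result<(), SignedSSVMessageError> {
    base.aggregate(others)
}
```
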
// Validate the signed message to ensure that it is well formed for qbft processing pub fn validate(&self) -> Result<(), SignedSSVMessageError> { - if self.signatures.len() > MAX_SIGNATURES { - return Err(TooManySignatures { - provided: self.signatures.len(), - max: MAX_SIGNATURES, - }); - } - - for (i, sig) in self.signatures.iter().enumerate() { - if sig.len() != RSA_SIGNATURE_SIZE { - return Err(WrongRSASignatureSize { - index: i, - length: sig.len(), - sig_length: RSA_SIGNATURE_SIZE, - }); - } - } - - if self.operator_ids.len() > MAX_SIGNATURES { - return Err(TooManyOperatorIDs { - provided: self.operator_ids.len(), - max: MAX_SIGNATURES, - }); - } - - if self.full_data.len() > MAX_FULL_DATA_SIZE { - return Err(FullDataTooLong { - length: self.full_data.len(), - max: MAX_FULL_DATA_SIZE, - }); - } - // Rule: Must have at least one signer if self.operator_ids.is_empty() { - return Err(NoSigners); + return Err(SignedSSVMessageError::NoSigners); } if self.signatures.is_empty() { - return Err(NoSignatures); + return Err(SignedSSVMessageError::NoSignatures); } if !self.operator_ids.is_sorted() { - return Err(SignersNotSorted); + return Err(SignedSSVMessageError::SignersNotSorted); } // Note: Len Signers & Operators will only be > 1 after commit aggregation // Rule: Signer can't be zero if self.operator_ids.iter().any(|&id| *id == 0) { - return Err(ZeroSigner); + return Err(SignedSSVMessageError::ZeroSigner); } // Rule: Signers must be unique @@ -519,13 +623,13 @@ let mut seen_ids = HashSet::with_capacity(self.operator_ids.len()); for &id in &self.operator_ids { if !seen_ids.insert(id) { - return Err(DuplicatedSigner); + return Err(SignedSSVMessageError::DuplicatedSigner); } } // Rule: Len(Signers) must be equal to Len(Signatures) if self.operator_ids.len() != self.signatures.len() { - return Err(SignersAndSignaturesWithDifferentLength); + return Err(SignedSSVMessageError::SignersAndSignaturesWithDifferentLength); } self.ssv_message.validate()?; @@ -534,50 +638,110 @@ } } -#[cfg(test)] -mod tests { - use std::iter; +fn deserialize_base64_or_empty<'de, D, T>(deserializer: D) -> Result<T, D::Error> +where + D: serde::Deserializer<'de>, + T: TryFrom<Vec<u8>>, +{ + let value = Value::deserialize(deserializer)?; + + match value { + Value::Null => Ok(Vec::new()), // Return empty Vec for null values + Value::String(s) => BASE64_STANDARD + .decode(s.as_bytes()) + .map_err(D::Error::custom), + _ => Err(D::Error::custom("Expected null or a base64 string")), + } + .and_then(|vec| { + vec.try_into() + .map_err(|_| D::Error::custom("Failed to convert from Vec to actual type")) + }) +} - use ssz::{Decode, Encode}; +fn deserialize_base64_signatures<'de, D>(deserializer: D) -> Result<SignatureList, D::Error> +where + D: serde::Deserializer<'de>, +{ + let string_vec: Vec<String> = serde::Deserialize::deserialize(deserializer)?; - use super::*; + let mut signatures = VariableList::empty(); - // Helper functions for building valid test data - // + for string in string_vec { + let decoded_bytes = BASE64_STANDARD + .decode(&string) + .map_err(serde::de::Error::custom)?; - /// Returns a default 56-byte ID array with all zeros. - fn default_msg_id() -> MessageId { - [0u8; IDENTIFIER_SIZE].into() - } + let signature_variable_list = VariableList::new(decoded_bytes) + .map_err(|e| D::Error::custom(format!("Signature too long: {e:?}")))?; - /// Returns a small, non-empty payload for SSVMessage data. - fn small_data() -> Vec<u8> { - vec![0x11, 0x22, 0x33] + if let Err(err) = signatures.push(signature_variable_list) { + return Err(D::Error::custom(format!("Too many signatures: {err:?}"))); + } } - /// Returns a valid signature of exactly [`RSA_SIGNATURE_SIZE`] bytes. - fn valid_signature() -> Vec<u8> { - vec![0u8; RSA_SIGNATURE_SIZE] - } + Ok(signatures) +} - /// Creates a valid, non-empty SSVMessage (ensuring it doesn’t exceed the max size). - fn valid_ssv_message() -> SSVMessage { - SSVMessage::new(MsgType::SSVConsensusMsgType, default_msg_id(), small_data()) - .expect("Creating a valid SSVMessage must succeed") +pub fn deserialize_base64_message_data<'de, D>( + deserializer: D, +) -> Result<VariableList<u8, SSVMessageDataLen>, D::Error> +where + D: serde::Deserializer<'de>, +{ + let value = Value::deserialize(deserializer)?; + + match value { + Value::Null => Ok(VariableList::<u8, SSVMessageDataLen>::new(vec![0]).expect("Valid size")), /* Return empty Vec for null values */ + Value::String(s) => Ok(VariableList::<u8, SSVMessageDataLen>::from( + BASE64_STANDARD + .decode(s.as_bytes()) + .map_err(D::Error::custom)?, + )), + _ => Err(D::Error::custom("Expected null or a base64 string")), } +}
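
These helpers accept JSON `null` as well as base64 strings because the spec-test vectors serialize empty byte fields either way. The core pattern, reduced to a self-contained toy (all names here are illustrative, not from the diff):

```rust
use base64::prelude::*;
use serde::{Deserialize, Deserializer, de::Error};

// Toy version of the null-or-base64 pattern above, decoding straight to Vec<u8>.
fn null_or_base64<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
    let v: Option<String> = Option::deserialize(d)?;
    match v {
        None => Ok(Vec::new()), // JSON null -> empty bytes
        Some(s) => BASE64_STANDARD.decode(s.as_bytes()).map_err(D::Error::custom),
    }
}

#[derive(Deserialize)]
struct Demo {
    #[serde(deserialize_with = "null_or_base64")]
    data: Vec<u8>,
}

fn main() {
    let d: Demo = serde_json::from_str(r#"{"data":"AQID"}"#).unwrap();
    assert_eq!(d.data, vec![1, 2, 3]);
    let d: Demo = serde_json::from_str(r#"{"data":null}"#).unwrap();
    assert!(d.data.is_empty());
}
```
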
- /// Creates a single-signer, single-signature valid SignedSSVMessage. - fn valid_signed_ssv_message() -> SignedSSVMessage { - let msg = valid_ssv_message(); - SignedSSVMessage::new( - vec![valid_signature()], - vec![OperatorId(1)], - msg, - vec![0xAB, 0xCD], // "full_data" well under max - ) - .expect("Creating a valid SignedSSVMessage must succeed") +/// Deserialize MessageId from hex string +fn deserialize_hex_message_id<'de, D>(deserializer: D) -> Result<MessageId, D::Error> +where + D: Deserializer<'de>, +{ + let hex_str = String::deserialize(deserializer)?; + let hex_str = hex_str.strip_prefix("0x").unwrap_or(&hex_str); + let bytes = + hex::decode(hex_str).map_err(|e| Error::custom(format!("Failed to decode hex: {e}")))?; + + if bytes.len() != 56 { + return Err(Error::custom(format!( + "Expected 56 bytes for MessageId, got {}", + bytes.len() + ))); } + let array: [u8; 56] = bytes + .try_into() + .map_err(|_| Error::custom("Failed to convert to array"))?; + Ok(MessageId::from(array)) +} + +#[cfg(test)] +mod tests { + use std::iter; + + use ssz::{Decode, Encode}; + use types::{Signature, Unsigned}; + + use super::*; + use crate::{ + consensus::{QbftMessage, QbftMessageType}, + partial_sig::{PartialSignatureKind, PartialSignatureMessage, PartialSignatureMessages}, + test_utils::{ + default_msg_id, valid_signature, valid_signed_ssv_message, valid_ssv_message, + }, + }; + + const MAX_FULL_DATA_SIZE: usize = SSVMessageFullDataLen::USIZE; + // Tests for MessageId // @@ -683,14 +847,14 @@ /// Checks that data exceeding `MAX_CONSENSUS_MSG_SIZE` triggers `SSVDataTooBig`. #[test] fn test_consensus_message_too_big() { - let oversized = vec![0u8; MAX_ENCODED_CONSENSUS_MSG_SIZE + 1]; + let oversized = vec![0u8; MAX_CONSENSUS_MSG_SIZE + 1]; let result = SSVMessage::new(MsgType::SSVConsensusMsgType, default_msg_id(), oversized); match result { - Err(SSVDataTooBig { got, max }) => { - assert_eq!(got, MAX_ENCODED_CONSENSUS_MSG_SIZE + 1); - assert_eq!(max, MAX_ENCODED_CONSENSUS_MSG_SIZE); + Err(SSVMessageError::SSVDataTooBig { provided, max }) => { + assert_eq!(provided, MAX_CONSENSUS_MSG_SIZE + 1); + assert_eq!(max, MAX_CONSENSUS_MSG_SIZE); } other => panic!("Expected SSVDataTooBig, got {other:?}"), } @@ -699,7 +863,7 @@ /// Checks that data exceeding `MAX_PARTIAL_SIGNATURE_MSGS_SIZE` triggers `SSVDataTooBig`. #[test] fn test_partial_signature_message_too_big() { - let oversized = vec![0u8; MAX_ENCODED_PARTIAL_SIGNATURE_SIZE + 1]; + let oversized = vec![0u8; MAX_PARTIAL_SIGNATURE_MSGS_SIZE + 1]; let result = SSVMessage::new( MsgType::SSVPartialSignatureMsgType, @@ -708,9 +872,9 @@ ); match result { - Err(SSVDataTooBig { got, max }) => { - assert_eq!(got, MAX_ENCODED_PARTIAL_SIGNATURE_SIZE + 1); - assert_eq!(max, MAX_ENCODED_PARTIAL_SIGNATURE_SIZE); + Err(SSVMessageError::SSVDataTooBig { provided, max }) => { + assert_eq!(provided, MAX_PARTIAL_SIGNATURE_MSGS_SIZE + 1); + assert_eq!(max, MAX_PARTIAL_SIGNATURE_MSGS_SIZE); } other => panic!("Expected SSVDataTooBig, got {other:?}"), } @@ -777,7 +941,7 @@ let result = SignedSSVMessage::new(sigs, ops, ssv_msg, vec![]); match result { - Err(TooManySignatures { provided, max }) => { + Err(SignedSSVMessageError::TooManySignatures { provided, max }) => { assert_eq!(provided, MAX_SIGNATURES + 1); assert_eq!(max, MAX_SIGNATURES); } @@ -785,32 +949,6 @@ } } - /// Checks that a signature with the wrong size triggers `WrongRSASignatureSize`.
- #[test] - fn test_signed_ssv_message_wrong_signature_size() { - let ssv_msg = valid_ssv_message(); - let good = valid_signature(); - let mut bad = valid_signature(); - bad.pop(); // now it’s 255 bytes - let sigs = vec![good, bad]; - let ops = vec![OperatorId(1), OperatorId(2)]; - - let result = SignedSSVMessage::new(sigs, ops, ssv_msg, vec![]); - - match result { - Err(WrongRSASignatureSize { - index, - length, - sig_length, - }) => { - assert_eq!(index, 1); - assert_eq!(length, 255); - assert_eq!(sig_length, RSA_SIGNATURE_SIZE); - } - other => panic!("Expected WrongRSASignatureSize, got {other:?}"), - } - } - /// Checks that having too many operator IDs triggers `TooManyOperatorIDs`. #[test] fn test_signed_ssv_message_too_many_operator_ids() { @@ -821,7 +959,7 @@ mod tests { let result = SignedSSVMessage::new(sigs, ops, ssv_msg, vec![]); match result { - Err(TooManyOperatorIDs { provided, max }) => { + Err(SignedSSVMessageError::TooManyOperatorIDs { provided, max }) => { assert_eq!(provided, MAX_SIGNATURES + 1); assert_eq!(max, MAX_SIGNATURES); } @@ -859,8 +997,8 @@ mod tests { let result = SignedSSVMessage::new(sigs, ops, ssv_msg, huge_data); match result { - Err(FullDataTooLong { length, max }) => { - assert_eq!(length, MAX_FULL_DATA_SIZE + 1); + Err(SignedSSVMessageError::FullDataTooLong { provided, max }) => { + assert_eq!(provided, MAX_FULL_DATA_SIZE + 1); assert_eq!(max, MAX_FULL_DATA_SIZE); } other => panic!("Expected FullDataTooLong, got {other:?}"), @@ -892,7 +1030,7 @@ mod tests { let result = SignedSSVMessage::new(sigs, ops, ssv_msg, vec![]); match result { - Err(NoSigners) => (), + Err(SignedSSVMessageError::NoSigners) => (), other => panic!("Expected NoSigners, got {other:?}"), } } @@ -907,7 +1045,7 @@ mod tests { let result = SignedSSVMessage::new(sigs, ops, ssv_msg, vec![]); match result { - Err(NoSignatures) => (), + Err(SignedSSVMessageError::NoSignatures) => (), other => panic!("Expected NoSignatures, got {other:?}"), } } @@ -923,7 +1061,7 @@ mod tests { let result = SignedSSVMessage::new(sigs, ops, ssv_msg, vec![]); match result { - Err(SignersNotSorted) => (), + Err(SignedSSVMessageError::SignersNotSorted) => (), other => panic!("Expected SignersNotSorted, got {other:?}"), } } @@ -938,7 +1076,7 @@ mod tests { let result = SignedSSVMessage::new(sigs, ops, ssv_msg, vec![]); match result { - Err(ZeroSigner) => (), + Err(SignedSSVMessageError::ZeroSigner) => (), other => panic!("Expected ZeroSigner, got {other:?}"), } } @@ -954,7 +1092,7 @@ mod tests { let result = SignedSSVMessage::new(sigs, ops, ssv_msg, vec![]); match result { - Err(DuplicatedSigner) => (), + Err(SignedSSVMessageError::DuplicatedSigner) => (), other => panic!("Expected DuplicatedSigner, got {other:?}"), } } @@ -969,7 +1107,7 @@ mod tests { let result = SignedSSVMessage::new(sigs, ops, ssv_msg, vec![]); match result { - Err(SignersAndSignaturesWithDifferentLength) => (), + Err(SignedSSVMessageError::SignersAndSignaturesWithDifferentLength) => (), other => panic!("Expected SignersAndSignaturesWithDifferentLength, got {other:?}"), } } @@ -1008,11 +1146,11 @@ mod tests { // Force the scenario: pretend we got an SSVMessage from somewhere else // that didn't call `new()`, and attempt to use it: - let forcibly_invalid_msg = SSVMessage { - msg_type: MsgType::SSVConsensusMsgType, - msg_id: default_msg_id(), - data: vec![], // still empty - }; + let forcibly_invalid_msg = SSVMessage::new_unvalidated( + MsgType::SSVConsensusMsgType, + default_msg_id(), + VariableList::empty(), // still empty + ); let result = 
SignedSSVMessage::new( vec![valid_signature()], vec![OperatorId(1)], @@ -1021,7 +1159,7 @@ ); match result { - Err(SignedSSVMessageError::SSVMessagError(SSVMessageError::EmptyData)) => (), + Err(SignedSSVMessageError::SSVMessageError(SSVMessageError::EmptyData)) => (), other => panic!("Expected SSVMessagError(EmptyData), got {other:?}"), } } @@ -1041,7 +1179,8 @@ ) .expect("Should be valid"); - base.aggregate(iter::once(extra)); + base.aggregate(iter::once(extra)) + .expect("Aggregation should succeed"); let ops = base.operator_ids(); let sigs = base.signatures(); assert_eq!( @@ -1051,4 +1190,56 @@ ); assert_eq!(sigs.len(), 2, "Expected 2 signatures total"); } + + // Test for message size constants + #[test] + fn ensure_message_sizes_correct() { + let messages_vec = vec![ + PartialSignatureMessage { + partial_signature: Signature::empty(), + signing_root: Default::default(), + signer: Default::default(), + validator_index: Default::default(), + }; + 1000 + ]; + let partial_signature_messages = PartialSignatureMessages { + kind: PartialSignatureKind::PostConsensus, + slot: Default::default(), + messages: ssz_types::VariableList::new(messages_vec).unwrap(), + }; + + assert_eq!( + partial_signature_messages.ssz_bytes_len(), + MAX_PARTIAL_SIGNATURE_MSGS_SIZE, + ); + + let qbft_message = QbftMessage { + qbft_message_type: QbftMessageType::Proposal, + height: 0, + round: 0, + identifier: vec![0; 56].try_into().unwrap(), + root: Default::default(), + data_round: 0, + round_change_justification: vec![ + vec![0; RoundChangeLength::USIZE].try_into().unwrap(); + 13 + ] + .try_into() + .unwrap(), + prepare_justification: vec![ + vec![0; JustificationLength::USIZE].try_into().unwrap(); + 13 + ] + .try_into() + .unwrap(), + }; + + assert_eq!(qbft_message.ssz_bytes_len(), MAX_CONSENSUS_MSG_SIZE); + + assert_eq!( + SSVMessageDataLen::to_usize(), + std::cmp::max(MAX_PARTIAL_SIGNATURE_MSGS_SIZE, MAX_CONSENSUS_MSG_SIZE) + ); + } } diff --git a/anchor/common/ssv_types/src/msgid.rs b/anchor/common/ssv_types/src/msgid.rs index bd594d711..7afa41535 100644 --- a/anchor/common/ssv_types/src/msgid.rs +++ b/anchor/common/ssv_types/src/msgid.rs @@ -1,7 +1,9 @@ use std::fmt::{Debug, Formatter}; use derive_more::{Display, From, Into}; +use serde::{Deserialize, Deserializer}; use ssz::{Decode, DecodeError, Encode}; +use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; use types::{PublicKeyBytes, VariableList, typenum::U56}; use crate::{committee::CommitteeId, domain_type::DomainType}; @@ -68,6 +70,41 @@ pub enum DutyExecutor { #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct MessageId([u8; 56]); +impl TreeHash for MessageId { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Vector + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl<'de> Deserialize<'de> for MessageId { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + // First deserialize as a Vec<u8> + let vec = Vec::<u8>::deserialize(deserializer)?; + + // Then try to convert to [u8; 56] + vec.try_into() + .map(MessageId) + .map_err(|_| serde::de::Error::custom("Expected array of 56 bytes".to_string())) + } +} + +// Implement custom deserialization for MessageId +
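
Unlike `Data`/`FullData`, which are base64 strings, `MessageId` is deserialized from a plain JSON byte array and anything other than exactly 56 bytes is rejected. A quick round-trip under that assumption (`serde_json` used only for illustration; the `Debug` check relies on the hex-encoding `Debug` impl below):

```rust
use ssv_types::msgid::MessageId;

fn main() {
    // 56 zero bytes as a JSON array deserialize into a MessageId.
    let json = format!("[{}]", vec!["0"; 56].join(","));
    let id: MessageId = serde_json::from_str(&json).expect("56-byte array");
    assert_eq!(format!("{id:?}"), "00".repeat(56));
}
```
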
impl Debug for MessageId { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "{}", hex::encode(self.0)) @@ -91,6 +128,15 @@ impl MessageId { MessageId(id) } + pub fn for_spectest() -> Self { + let mut id = [0; 56]; + id[0] = 1; + id[1] = 2; + id[2] = 3; + id[3] = 4; + MessageId(id) + } + pub fn domain(&self) -> DomainType { DomainType( self.0[0..4] diff --git a/anchor/common/ssv_types/src/operator.rs b/anchor/common/ssv_types/src/operator.rs index 5d4ba9ccb..3a7876a15 100644 --- a/anchor/common/ssv_types/src/operator.rs +++ b/anchor/common/ssv_types/src/operator.rs @@ -2,7 +2,9 @@ use std::{cmp::Eq, fmt::Debug, hash::Hash}; use derive_more::{Deref, Display, From}; use openssl::{pkey::Public, rsa::Rsa}; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use tree_hash::{Hash256, PackedEncoding, TreeHash, TreeHashType}; use types::Address; /// Unique identifier for an Operator. @@ -21,10 +23,31 @@ Ord, PartialOrd, Display, + Serialize, + Deserialize, )] #[ssz(struct_behaviour = "transparent")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub struct OperatorId(pub u64); +impl TreeHash for OperatorId { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + let value: u64 = self.0; + value.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + u64::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> Hash256 { + let value: u64 = self.0; + value.tree_hash_root() + } +} /// Client responsible for maintaining the overall health of the network. #[derive(Debug, Clone)] diff --git a/anchor/common/ssv_types/src/partial_sig.rs b/anchor/common/ssv_types/src/partial_sig.rs index 40448cf60..52b665788 100644 --- a/anchor/common/ssv_types/src/partial_sig.rs +++ b/anchor/common/ssv_types/src/partial_sig.rs @@ -1,10 +1,21 @@ +use serde::Deserialize; use ssz::{Decode, DecodeError, Encode}; use ssz_derive::{Decode, Encode}; -use types::{Hash256, Signature, Slot}; +use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; +use tree_hash_derive::TreeHash; +use types::{ + Hash256, Signature, Slot, VariableList, + typenum::{Sum, U512, U1000}, }; use crate::{OperatorId, ValidatorIndex}; -#[derive(Clone, Copy, Debug, PartialEq, Eq)] +/// Maximum number of partial signature messages: 1512 +/// Calculated as 1000 + 512 = 1512 +pub type PartialSignatureMessagesLen = Sum<U1000, U512>; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize)] +#[serde(from = "u64", into = "u64")] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] pub enum PartialSignatureKind { // PostConsensusPartialSig is a partial signature over a decided duty (attestation data, @@ -23,22 +34,26 @@ VoluntaryExit = 5, } -impl TryFrom<u64> for PartialSignatureKind { - type Error = (); - - fn try_from(value: u64) -> Result<Self, Self::Error> { +impl From<u64> for PartialSignatureKind { + fn from(value: u64) -> Self { match value { - 0 => Ok(PartialSignatureKind::PostConsensus), - 1 => Ok(PartialSignatureKind::RandaoPartialSig), - 2 => Ok(PartialSignatureKind::SelectionProofPartialSig), - 3 => Ok(PartialSignatureKind::ContributionProofs), - 4 => Ok(PartialSignatureKind::ValidatorRegistration), - 5 => Ok(PartialSignatureKind::VoluntaryExit), - _ => Err(()), + 0 => PartialSignatureKind::PostConsensus, + 1 => PartialSignatureKind::RandaoPartialSig, + 2 => PartialSignatureKind::SelectionProofPartialSig, + 3 => PartialSignatureKind::ContributionProofs, + 4 => PartialSignatureKind::ValidatorRegistration, + 5
=> PartialSignatureKind::VoluntaryExit, + _ => panic!("Invalid PartialSignatureKind value: {value}"), } } } +impl From<PartialSignatureKind> for u64 { + fn from(kind: PartialSignatureKind) -> Self { + kind as u64 + } +} + const U64_SIZE: usize = 8; // u64 is 8 bytes impl Encode for PartialSignatureKind { @@ -76,22 +91,217 @@ impl Decode for PartialSignatureKind { }); } let value = u64::from_le_bytes(bytes.try_into().unwrap()); - value.try_into().map_err(|_| DecodeError::NoMatchingVariant) + match value { + 0..=5 => Ok(value.into()), + _ => Err(DecodeError::NoMatchingVariant), + } + } +} + +impl TreeHash for PartialSignatureKind { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> PackedEncoding { + let value = *self as u64; + value.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + u64::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + let value = *self as u64; + value.tree_hash_root() } } // A partial signature specific message -#[derive(Clone, Debug, Encode, Decode)] +#[derive(Clone, Debug, PartialEq, Encode, Decode, TreeHash, Deserialize)] pub struct PartialSignatureMessages { + #[serde( + rename = "Type", + deserialize_with = "serde_impl::deserialize_partial_signature_kind" + )] pub kind: PartialSignatureKind, + #[serde(rename = "Slot", deserialize_with = "serde_impl::deserialize_slot")] pub slot: Slot, - pub messages: Vec<PartialSignatureMessage>, + #[serde(rename = "Messages")] + pub messages: VariableList<PartialSignatureMessage, PartialSignatureMessagesLen>, } -#[derive(Clone, Debug, Encode, Decode)] +#[derive(Clone, Debug, PartialEq, Encode, Decode, TreeHash, Deserialize)] pub struct PartialSignatureMessage { + #[serde( + rename = "PartialSignature", + deserialize_with = "serde_impl::deserialize_signature" + )] pub partial_signature: Signature, + #[serde( + rename = "SigningRoot", + deserialize_with = "serde_impl::deserialize_hash256" + )] pub signing_root: Hash256, + #[serde(rename = "Signer")] pub signer: OperatorId, + #[serde( + rename = "ValidatorIndex", + deserialize_with = "serde_impl::deserialize_validator_index" + )] pub validator_index: ValidatorIndex, } + +#[derive(Debug, PartialEq)] +pub enum PartialSignatureError { + NoMessages, + InconsistentSigners, + ZeroSigner, +} + +impl PartialSignatureMessages { + /// Validates the partial signature messages + pub fn validate(&self) -> Result<(), PartialSignatureError> { + // Must have at least one message + if self.messages.is_empty() { + return Err(PartialSignatureError::NoMessages); + } + + // Get the signer from the first message + let signer = self.messages[0].signer; + + // Validate each message and check consistency + for message in &self.messages { + // Check signer consistency + if message.signer != signer { + return Err(PartialSignatureError::InconsistentSigners); + } + + // Validate individual message + message.validate()?; + } + + Ok(()) + } +} + +impl PartialSignatureMessage { + /// Validates an individual partial signature message + pub fn validate(&self) -> Result<(), PartialSignatureError> { + // Signer ID 0 is not allowed + if self.signer.0 == 0 { + return Err(PartialSignatureError::ZeroSigner); + } + + Ok(()) + } +}
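
`validate` enforces three rules: at least one message, one consistent signer across the whole batch, and no zero signer. A hypothetical pre-processing gate that exhausts the error enum:

```rust
use ssv_types::partial_sig::{PartialSignatureError, PartialSignatureMessages};

// Hypothetical check before processing a batch of partial signatures.
fn accept(msgs: &PartialSignatureMessages) -> bool {
    match msgs.validate() {
        Ok(()) => true,
        Err(PartialSignatureError::NoMessages)
        | Err(PartialSignatureError::InconsistentSigners)
        | Err(PartialSignatureError::ZeroSigner) => false,
    }
}
```
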
+mod serde_impl { + use base64::prelude::*; + use serde::{Deserialize, Deserializer, de::Error}; + + use super::*; + + pub fn deserialize_slot<'de, D>(deserializer: D) -> Result<Slot, D::Error> + where + D: Deserializer<'de>, + { + let slot_str = String::deserialize(deserializer)?; + slot_str + .parse::<u64>() + .map(Slot::new) + .map_err(|e| Error::custom(format!("Failed to parse slot: {e}"))) + } + + pub fn deserialize_partial_signature_kind<'de, D>( + deserializer: D, + ) -> Result<PartialSignatureKind, D::Error> + where + D: Deserializer<'de>, + { + let value = u64::deserialize(deserializer)?; + if value > 5 { + return Err(Error::custom(format!( + "Invalid PartialSignatureKind value: {}", + value + ))); + } + Ok(PartialSignatureKind::from(value)) + } + + pub fn deserialize_signature<'de, D>(deserializer: D) -> Result<Signature, D::Error> + where + D: Deserializer<'de>, + { + let sig_opt: Option<String> = Option::deserialize(deserializer)?; + match sig_opt { + Some(sig_str) => { + // Handle empty string as empty signature (for invalid test cases) + if sig_str.is_empty() { + return Ok(types::Signature::empty()); + } + + let sig_bytes = if sig_str.starts_with("0x") { + // Handle hex string with 0x prefix + hex::decode(&sig_str[2..]).map_err(|e| { + Error::custom(format!("Failed to decode hex signature: {e}")) + })? + } else if sig_str.chars().all(|c| c.is_ascii_hexdigit()) && sig_str.len() % 2 == 0 { + // Try hex without prefix if all characters are hex digits and even length + hex::decode(&sig_str).map_err(|e| { + Error::custom(format!("Failed to decode hex signature: {e}")) + })? + } else { + // Fall back to base64 for backward compatibility + BASE64_STANDARD.decode(&sig_str).map_err(|e| { + Error::custom(format!("Failed to decode base64 signature: {e}")) + })? + }; + + if sig_bytes.len() != 96 { + return Err(Error::custom(format!( + "Signature must be 96 bytes, got {}", + sig_bytes.len() + ))); + } + + Ok(types::Signature::deserialize(&sig_bytes) + .map_err(|e| Error::custom(format!("Failed to parse signature: {e:?}")))?) + } + None => { + // Return empty signature for null values + Ok(types::Signature::empty()) + } + } + } + + pub fn deserialize_hash256<'de, D>(deserializer: D) -> Result<Hash256, D::Error> + where + D: Deserializer<'de>, + { + let hash_str = String::deserialize(deserializer)?; + let hash_str = hash_str.strip_prefix("0x").unwrap_or(&hash_str); + let bytes = hex::decode(hash_str) + .map_err(|e| Error::custom(format!("Failed to decode hex: {e}")))?; + if bytes.len() != 32 { + return Err(Error::custom(format!( + "Expected 32 bytes for Hash256, got {}", + bytes.len() + ))); + } + Ok(Hash256::from_slice(&bytes)) + } + + pub fn deserialize_validator_index<'de, D>(deserializer: D) -> Result<ValidatorIndex, D::Error> + where + D: Deserializer<'de>, + { + let index_str = String::deserialize(deserializer)?; + index_str + .parse::<usize>() + .map(ValidatorIndex) + .map_err(|e| Error::custom(format!("Failed to parse validator index: {e}"))) + } +} diff --git a/anchor/common/ssv_types/src/test_utils.rs b/anchor/common/ssv_types/src/test_utils.rs new file mode 100644 index 000000000..777627eda --- /dev/null +++ b/anchor/common/ssv_types/src/test_utils.rs @@ -0,0 +1,42 @@ +//! Test utilities shared across the ssv_types crate + +use crate::{ + OperatorId, RSA_SIGNATURE_SIZE, + message::{MsgType, SSVMessage, SignedSSVMessage}, + msgid::MessageId, +}; + +const IDENTIFIER_SIZE: usize = 56; // same as MessageId length + +/// Returns a default 56-byte ID array with all zeros. +pub fn default_msg_id() -> MessageId { + [0u8; IDENTIFIER_SIZE].into() +} + +/// Returns a small, non-empty payload for SSVMessage data. +pub fn small_data() -> Vec<u8> { + vec![0x11, 0x22, 0x33] +} + +/// Returns a valid signature of exactly [`RSA_SIGNATURE_SIZE`] bytes. +pub fn valid_signature() -> [u8; RSA_SIGNATURE_SIZE] { + [0u8; RSA_SIGNATURE_SIZE] +} + +/// Creates a valid, non-empty SSVMessage (ensuring it doesn't exceed the max size).
+pub fn valid_ssv_message() -> SSVMessage { + SSVMessage::new(MsgType::SSVConsensusMsgType, default_msg_id(), small_data()) + .expect("Creating a valid SSVMessage must succeed") +} + +/// Creates a single-signer, single-signature valid SignedSSVMessage. +pub fn valid_signed_ssv_message() -> SignedSSVMessage { + let msg = valid_ssv_message(); + SignedSSVMessage::new( + vec![valid_signature()], + vec![OperatorId(1)], + msg, + vec![0xAB, 0xCD], // "full_data" well under max + ) + .expect("Creating a valid SignedSSVMessage must succeed") +} diff --git a/anchor/database/Cargo.toml b/anchor/database/Cargo.toml index bf3e2d7e4..e1079db1a 100644 --- a/anchor/database/Cargo.toml +++ b/anchor/database/Cargo.toml @@ -6,7 +6,6 @@ authors = ["Sigma Prime "] [dependencies] base64 = { workspace = true } -multi_index_map = "0.15.0" once_cell = { workspace = true } openssl = { workspace = true } r2d2 = { workspace = true } diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index f59dcfe2b..3aeef95eb 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -2,9 +2,7 @@ use rusqlite::{Transaction, params}; use ssv_types::{Cluster, ClusterId, OperatorId, Share, ValidatorMetadata}; use types::{Address, PublicKeyBytes}; -use super::{ - ClusterIndexed, DatabaseError, MetadataIndexed, NetworkDatabase, ShareIndexed, sql_operations, -}; +use super::{DatabaseError, NetworkDatabase, NonUniqueIndex, UniqueIndex, sql_operations}; /// Implements all cluster related functionality on the database impl NetworkDatabase { @@ -60,43 +58,33 @@ impl NetworkDatabase { // Record that we are a member of this cluster state.single_state.clusters.insert(cluster.cluster_id); - state.multi_state.shares.insert(ShareIndexed { - validator_pubkey: validator.public_key, - cluster_id: cluster.cluster_id, - owner: cluster.owner, - committee_id: cluster.committee_id(), - share: share.to_owned(), - }); + // Save the keyshare + state.multi_state.shares.insert_or_update( + &validator.public_key, // The validator this keyshare belongs to + &cluster.cluster_id, // The id of the cluster + &cluster.owner, // The owner of the cluster + &cluster.committee_id(), // The committee id of the cluster + share.to_owned(), // The keyshare itself + ); } // Save all cluster related information - // Check if we already have this cluster - let existing = state - .multi_state - .clusters - .get_by_cluster_id(&cluster.cluster_id) - .is_some(); - - // Only insert if it doesn't exist yet - if !existing { - state.multi_state.clusters.insert(ClusterIndexed { - cluster_id: cluster.cluster_id, - owner: cluster.owner, - committee_id: cluster.committee_id(), - cluster: cluster.to_owned(), - }); - } - - state - .multi_state - .validator_metadata - .insert(MetadataIndexed { - validator_pubkey: validator.public_key, - cluster_id: cluster.cluster_id, - owner: cluster.owner, - committee_id: cluster.committee_id(), - metadata: validator.to_owned(), - }); + state.multi_state.clusters.insert_or_update( + &cluster.cluster_id, // The id of the cluster + &validator.public_key, // The public key of validator added to the cluster + &cluster.owner, // Owner of the cluster + &cluster.committee_id(), // The committee id of the cluster + cluster.to_owned(), // The Cluster and all containing information + ); + + // Save the metadata for the validators + state.multi_state.validator_metadata.insert_or_update( + &validator.public_key, // The public key of the validator + &cluster.cluster_id, // The 
id of the cluster the validator belongs to
+            &cluster.owner,          // The owner of the cluster
+            &cluster.committee_id(), // The committee id of the cluster
+            validator.to_owned(),    // The metadata of the validator
+        );
         });
 
         Ok(())
@@ -117,12 +105,9 @@ impl NetworkDatabase {
 
         // Update in memory status of cluster
         self.modify_state(|state| {
-            state
-                .multi_state
-                .clusters
-                .modify_by_cluster_id(&cluster_id, |cluster_idx| {
-                    cluster_idx.cluster.liquidated = status;
-                });
+            if let Some(cluster) = state.multi_state.clusters.get_mut_by(&cluster_id) {
+                cluster.liquidated = status;
+            }
         });
 
         Ok(())
@@ -142,15 +127,11 @@ impl NetworkDatabase {
 
         self.modify_state(|state| {
             // Remove from in memory
-            state
-                .multi_state
-                .shares
-                .remove_by_validator_pubkey(validator_pubkey);
-
-            let metadata_idx = state
+            state.multi_state.shares.remove(validator_pubkey);
+            let metadata = state
                 .multi_state
                 .validator_metadata
-                .remove_by_validator_pubkey(validator_pubkey)
+                .remove(validator_pubkey)
                 .expect("Data should have existed");
 
             // If there are no longer any validators for this cluster, remove it from both the
             if state
                 .multi_state
                 .validator_metadata
-                .get_by_cluster_id(&metadata_idx.metadata.cluster_id)
-                .is_empty()
+                .get_all_by(&metadata.cluster_id)
+                .next()
+                .is_none()
             {
-                state
-                    .multi_state
-                    .clusters
-                    .remove_by_cluster_id(&metadata_idx.metadata.cluster_id);
-                state
-                    .single_state
-                    .clusters
-                    .remove(&metadata_idx.metadata.cluster_id);
+                state.multi_state.clusters.remove(&metadata.cluster_id);
+                state.single_state.clusters.remove(&metadata.cluster_id);
             }
         });
 
diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs
index 70a3d082b..cdd8aecc8 100644
--- a/anchor/database/src/lib.rs
+++ b/anchor/database/src/lib.rs
@@ -1,81 +1,35 @@
 use std::{
     collections::{HashMap, HashSet},
-    fs::File,
     path::Path,
     time::Duration,
 };
 
-use multi_index_map::MultiIndexMap;
 use once_cell::sync::OnceCell;
 use openssl::{pkey::Public, rsa::Rsa};
 use r2d2_sqlite::SqliteConnectionManager;
 use rusqlite::{Transaction, params};
-use ssv_types::{Cluster, ClusterId, CommitteeId, Operator, OperatorId, Share, ValidatorMetadata};
+use ssv_types::{
+    Cluster, ClusterId, CommitteeId, Operator, OperatorId, Share, ValidatorMetadata,
+    domain_type::DomainType,
+};
 use tokio::sync::{
     watch,
     watch::{Receiver, Ref},
 };
 use types::{Address, PublicKeyBytes};
 
-pub use crate::{error::DatabaseError, state::NetworkState};
-
-/// All the shares that belong to the current operator.
-/// IMPORTANT: There are parts of the code that assume this only contains shares that belong to the
-/// current operator. If this ever changes, make sure to update the code accordingly.
-#[derive(Debug, Clone, MultiIndexMap)]
-pub struct ShareIndexed {
-    #[multi_index(hashed_unique)]
-    pub validator_pubkey: PublicKeyBytes,
-
-    #[multi_index(hashed_non_unique)]
-    pub cluster_id: ClusterId,
-
-    #[multi_index(hashed_non_unique)]
-    pub owner: Address,
-
-    #[multi_index(hashed_non_unique)]
-    pub committee_id: CommitteeId,
-
-    pub share: Share,
-}
-
-/// Metadata for all validators in the network
-#[derive(Debug, Clone, MultiIndexMap)]
-pub struct MetadataIndexed {
-    #[multi_index(hashed_unique)]
-    pub validator_pubkey: PublicKeyBytes,
-
-    #[multi_index(hashed_non_unique)]
-    pub cluster_id: ClusterId,
-
-    #[multi_index(hashed_non_unique)]
-    pub owner: Address,
-
-    #[multi_index(hashed_non_unique)]
-    pub committee_id: CommitteeId,
-
-    pub metadata: ValidatorMetadata,
-}
-
-/// All the clusters in the network
-#[derive(Debug, Clone, MultiIndexMap)]
-pub struct ClusterIndexed {
-    #[multi_index(hashed_unique)]
-    pub cluster_id: ClusterId,
-
-    #[multi_index(hashed_non_unique)]
-    pub owner: Address,
-
-    #[multi_index(hashed_non_unique)]
-    pub committee_id: CommitteeId,
-
-    pub cluster: Cluster,
-}
+pub use crate::{
+    error::DatabaseError,
+    multi_index::{MultiIndexMap, *},
+    state::NetworkState,
+};
 
 mod cluster_operations;
 mod error;
 mod keysplit_operations;
+mod multi_index;
 mod operator_operations;
+mod schema;
 mod share_operations;
 mod sql_operations;
 mod state;
@@ -90,16 +44,57 @@ const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5);
 
 type Pool = r2d2::Pool<SqliteConnectionManager>;
 type PoolConn = r2d2::PooledConnection<SqliteConnectionManager>;
 
-// The actual map types are generated by the macro and are named:
-// - MultiIndexShareIndexedMap
-// - MultiIndexMetadataIndexedMap
-// - MultiIndexClusterIndexedMap
-//
-// Information that needs to be accessed via multiple different indices
+/// All the shares that belong to the current operator.
+/// IMPORTANT: There are parts of the code that assume this only contains shares that belong to the
+/// current operator. If this ever changes, make sure to update the code accordingly.
+/// Primary: public key of validator, uniquely identifies a share
+/// Secondary: cluster id, corresponds to a list of shares
+/// Tertiary: owner of the cluster, corresponds to a list of shares
+pub type ShareMultiIndexMap = MultiIndexMap<
+    PublicKeyBytes,
+    ClusterId,
+    Address,
+    CommitteeId,
+    Share,
+    NonUniqueTag,
+    NonUniqueTag,
+    NonUniqueTag,
+>;
+/// Metadata for all validators in the network
+/// Primary: public key of the validator, uniquely identifies the metadata
+/// Secondary: cluster id, corresponds to list of metadata for all validators
+/// Tertiary: owner of the cluster, corresponds to list of metadata for all validators
+pub type MetadataMultiIndexMap = MultiIndexMap<
+    PublicKeyBytes,
+    ClusterId,
+    Address,
+    CommitteeId,
+    ValidatorMetadata,
+    NonUniqueTag,
+    NonUniqueTag,
+    NonUniqueTag,
+>;
+/// All of the clusters in the network
+/// Primary: cluster id, uniquely identifies a cluster
+/// Secondary: public key of the validator, uniquely identifies a cluster
+/// Tertiary: owner of the cluster,
does not uniquely identify a cluster
+pub type ClusterMultiIndexMap = MultiIndexMap<
+    ClusterId,
+    PublicKeyBytes,
+    Address,
+    CommitteeId,
+    Cluster,
+    UniqueTag,
+    NonUniqueTag,
+    NonUniqueTag,
+>;
+
+// Information that needs to be accessed via multiple different indices
+#[derive(Debug)]
 struct MultiState {
-    shares: MultiIndexShareIndexedMap,
-    validator_metadata: MultiIndexMetadataIndexedMap,
-    clusters: MultiIndexClusterIndexedMap,
+    shares: ShareMultiIndexMap,
+    validator_metadata: MetadataMultiIndexMap,
+    clusters: ClusterMultiIndexMap,
     // Be careful when adding new maps here. If you really must, it must be updated in the
     // operations files
 }
@@ -129,6 +124,7 @@ enum PubkeyOrId {
 
 /// Top level NetworkDatabase that contains in memory storage for quick access
 /// to relevant information and a connection to the database
+#[derive(Debug)]
 pub struct NetworkDatabase {
     /// The public key or ID of our operator
     operator: PubkeyOrId,
@@ -140,8 +136,12 @@ pub struct NetworkDatabase {
 
 impl NetworkDatabase {
     /// Construct a new NetworkDatabase at the given path and the Public Key of the current operator
-    pub fn new(path: &Path, pubkey: &Rsa<Public>) -> Result<Self, DatabaseError> {
-        let conn_pool = Self::open_or_create(path)?;
+    pub fn new(
+        path: &Path,
+        pubkey: &Rsa<Public>,
+        domain: DomainType,
+    ) -> Result<Self, DatabaseError> {
+        let conn_pool = Self::open_or_create(path, domain)?;
         let operator = PubkeyOrId::Pubkey(pubkey.clone());
         let state = watch::Sender::new(NetworkState::new_with_state(&conn_pool, &operator)?);
         Ok(Self {
@@ -152,8 +152,12 @@ impl NetworkDatabase {
     }
 
     /// Act as if we had the pubkey of a certain operator
-    pub fn new_as_impostor(path: &Path, operator: &OperatorId) -> Result<Self, DatabaseError> {
-        let conn_pool = Self::open_or_create(path)?;
+    pub fn new_as_impostor(
+        path: &Path,
+        operator: &OperatorId,
+        domain: DomainType,
+    ) -> Result<Self, DatabaseError> {
+        let conn_pool = Self::open_or_create(path, domain)?;
        let operator = PubkeyOrId::Id(*operator);
         let state = watch::Sender::new(NetworkState::new_with_state(&conn_pool, &operator)?);
         Ok(Self {
@@ -186,12 +190,9 @@ impl NetworkDatabase {
     }
 
     // Open an existing database at the given `path`, or create one if none exists.
-    fn open_or_create(path: &Path) -> Result<Pool, DatabaseError> {
-        if path.exists() {
-            Self::open_conn_pool(path)
-        } else {
-            Self::create(path)
-        }
+    fn open_or_create(path: &Path, domain: DomainType) -> Result<Pool, DatabaseError> {
+        schema::ensure_up_to_date(path, domain)?;
+        Self::open_conn_pool(path)
     }
 
     // Build a new connection pool
@@ -205,23 +206,6 @@ impl NetworkDatabase {
         Ok(conn_pool)
     }
 
-    // Create a database at the given path.
-    fn create(path: &Path) -> Result<Pool, DatabaseError> {
-        let _file = File::options()
-            .write(true)
-            .read(true)
-            .create_new(true)
-            .open(path)?;
-
-        // restrict file permissions
-        let conn_pool = Self::open_conn_pool(path)?;
-        let conn = conn_pool.get()?;
-
-        // create all of the tables
-        conn.execute_batch(include_str!("table_schema.sql"))?;
-        Ok(conn_pool)
-    }
-
     // Open a new connection
     pub fn connection(&self) -> Result<PoolConn, DatabaseError> {
         Ok(self.conn_pool.get()?)
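
A minimal usage sketch of the rewritten map, illustrative only and not part of this diff. It assumes the `database` crate re-exports shown in `lib.rs` above (`MultiIndexMap`, the uniqueness tags, and the two access traits) and uses placeholder key and value types rather than the real `ssv_types` keys:

```rust
use database::{MultiIndexMap, NonUniqueIndex, NonUniqueTag, UniqueIndex};

// Primary key: numeric id (always unique). Secondary key: a non-unique group
// label. The tertiary and quaternary keys are placeholders that only satisfy
// the four-index shape of the type.
type ExampleMap =
    MultiIndexMap<u64, String, bool, char, &'static str, NonUniqueTag, NonUniqueTag, NonUniqueTag>;

fn main() {
    let mut map: ExampleMap = MultiIndexMap::new();
    map.insert_or_update(&1, &"group-a".to_string(), &true, &'x', "first");
    map.insert_or_update(&2, &"group-a".to_string(), &false, &'y', "second");

    // Unique lookup through the primary key.
    assert_eq!(map.get_by(&1), Some(&"first"));

    // Non-unique lookup through the secondary key yields an iterator.
    assert_eq!(map.get_all_by(&"group-a".to_string()).count(), 2);

    // Removing through the primary key cleans up all other indices too.
    map.remove(&1);
    assert_eq!(map.length(), 1);
}
```

This is the trade the patch makes against the old `multi_index_map` derive macro: the index shape is spelled out in the type aliases, and callers go through the `UniqueIndex`/`NonUniqueIndex` traits instead of macro-generated `get_by_*` methods.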
diff --git a/anchor/database/src/multi_index.rs b/anchor/database/src/multi_index.rs
new file mode 100644
index 000000000..9fb2864f2
--- /dev/null
+++ b/anchor/database/src/multi_index.rs
@@ -0,0 +1,781 @@
+use std::{
+    collections::{HashMap, HashSet},
+    hash::Hash,
+    marker::PhantomData,
+};
+
+/// Marker trait for uniquely identifying indices
+pub trait Unique {}
+
+/// Marker trait for non-uniquely identifying indices
+pub trait NotUnique {}
+
+/// Index type markers
+pub enum Primary {}
+pub enum Secondary {}
+pub enum Tertiary {}
+pub enum Quaternary {}
+
+/// Type tag markers
+#[derive(Debug)]
+pub enum UniqueTag {}
+impl Unique for UniqueTag {}
+
+#[derive(Debug)]
+pub enum NonUniqueTag {}
+impl NotUnique for NonUniqueTag {}
+
+/// Trait for accessing values through a unique index
+pub trait UniqueIndex<K, V, I> {
+    fn get_by(&self, key: &K) -> Option<&V>;
+    fn get_mut_by(&mut self, key: &K) -> Option<&mut V>;
+}
+
+/// Trait for accessing values through a non-unique index
+pub trait NonUniqueIndex<K, V, I> {
+    fn get_all_by<'a>(&'a self, key: &K) -> impl Iterator<Item = &'a V> + 'a
+    where
+        V: 'a;
+    fn modify_all_by<F>(&mut self, key: &K, f: F)
+    where
+        F: FnMut(&mut V);
+}
+
+/// Inner storage maps for the multi-index map, now supporting a quaternary index.
+/// - K1: Primary key type (always unique)
+/// - K2: Secondary key type
+/// - K3: Tertiary key type
+/// - K4: Quaternary key type
+/// - V: Value type
+#[derive(Debug)]
+struct InnerMaps<K1, K2, K3, K4, V>
+where
+    K1: Eq + Hash,
+    K2: Eq + Hash,
+    K3: Eq + Hash,
+    K4: Eq + Hash,
+{
+    primary: HashMap<K1, V>,
+    secondary_unique: HashMap<K2, K1>,
+    secondary_multi: HashMap<K2, HashSet<K1>>,
+    tertiary_unique: HashMap<K3, K1>,
+    tertiary_multi: HashMap<K3, HashSet<K1>>,
+    quaternary_unique: HashMap<K4, K1>,
+    quaternary_multi: HashMap<K4, HashSet<K1>>,
+}
+
+/// An in-memory multi-index map that supports up to four different access patterns.
+/// The core differentiates between unique identification and non-unique identification.
+/// The primary index is forced to always uniquely identify the value. The secondary, tertiary,
+/// and quaternary indices have more flexibility. A key may non-uniquely identify many values,
+/// or uniquely identify a single value.
+///
+/// Example: A share might be uniquely identified by a primary key (like a Validator's public key)
+/// while a secondary or tertiary index (like a ClusterId) does not uniquely identify a share. The
+/// new quaternary index provides an additional access pattern.
+///
+/// - K1: Primary key type (always unique)
+/// - K2: Secondary key type
+/// - K3: Tertiary key type
+/// - K4: Quaternary key type
+/// - V: Value type
+/// - U1: Secondary index uniqueness (Unique or NotUnique)
+/// - U2: Tertiary index uniqueness (Unique or NotUnique)
+/// - U3: Quaternary index uniqueness (Unique or NotUnique)
+#[derive(Debug)]
+pub struct MultiIndexMap<K1, K2, K3, K4, V, U1, U2, U3>
+where
+    K1: Eq + Hash,
+    K2: Eq + Hash,
+    K3: Eq + Hash,
+    K4: Eq + Hash,
+{
+    maps: InnerMaps<K1, K2, K3, K4, V>,
+    _marker: PhantomData<(U1, U2, U3)>,
+}
+
+impl<K1, K2, K3, K4, V, U1, U2, U3> Default for MultiIndexMap<K1, K2, K3, K4, V, U1, U2, U3>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    K4: Eq + Hash + Clone,
+    U1: 'static,
+    U2: 'static,
+    U3: 'static,
+{
+    fn default() -> Self {
+        Self {
+            maps: InnerMaps {
+                primary: HashMap::new(),
+                secondary_unique: HashMap::new(),
+                secondary_multi: HashMap::new(),
+                tertiary_unique: HashMap::new(),
+                tertiary_multi: HashMap::new(),
+                quaternary_unique: HashMap::new(),
+                quaternary_multi: HashMap::new(),
+            },
+            _marker: PhantomData,
+        }
+    }
+}
+
+impl<K1, K2, K3, K4, V, U1, U2, U3> MultiIndexMap<K1, K2, K3, K4, V, U1, U2, U3>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    K4: Eq + Hash + Clone,
+    U1: 'static,
+    U2: 'static,
+    U3: 'static,
+{
+    /// Creates a new empty MultiIndexMap.
+    pub fn new() -> Self {
+        Self {
+            maps: InnerMaps {
+                primary: HashMap::new(),
+                secondary_unique: HashMap::new(),
+                secondary_multi: HashMap::new(),
+                tertiary_unique: HashMap::new(),
+                tertiary_multi: HashMap::new(),
+                quaternary_unique: HashMap::new(),
+                quaternary_multi: HashMap::new(),
+            },
+            _marker: PhantomData,
+        }
+    }
+
+    /// Returns the number of entries in the primary map.
+    pub fn length(&self) -> usize {
+        self.maps.primary.len()
+    }
+
+    /// Inserts a new value and associated keys into the map.
+    /// Inserts the primary key and value first, then updates the secondary, tertiary,
+    /// and quaternary indices based on their uniqueness.
+    /// If there already was a value associated with the primary key, it is updated. All non-primary
+    /// keys will also refer to the new value.
+    pub fn insert_or_update(&mut self, k1: &K1, k2: &K2, k3: &K3, k4: &K4, v: V) {
+        // Insert into primary map first
+        self.maps.primary.insert(k1.clone(), v);
+
+        // Handle secondary index based on uniqueness
+        if std::any::TypeId::of::<U1>() == std::any::TypeId::of::<UniqueTag>() {
+            self.maps.secondary_unique.insert(k2.clone(), k1.clone());
+        } else {
+            self.maps
+                .secondary_multi
+                .entry(k2.clone())
+                .or_default()
+                .insert(k1.clone());
+        }
+
+        // Handle tertiary index based on uniqueness
+        if std::any::TypeId::of::<U2>() == std::any::TypeId::of::<UniqueTag>() {
+            self.maps.tertiary_unique.insert(k3.clone(), k1.clone());
+        } else {
+            self.maps
+                .tertiary_multi
+                .entry(k3.clone())
+                .or_default()
+                .insert(k1.clone());
+        }
+
+        // Handle quaternary index based on uniqueness
+        if std::any::TypeId::of::<U3>() == std::any::TypeId::of::<UniqueTag>() {
+            self.maps.quaternary_unique.insert(k4.clone(), k1.clone());
+        } else {
+            self.maps
+                .quaternary_multi
+                .entry(k4.clone())
+                .or_default()
+                .insert(k1.clone());
+        }
+    }
+
+    /// Removes a value and all its indexes using the primary key.
+    pub fn remove(&mut self, k1: &K1) -> Option<V> {
+        // Remove from primary storage
+        let removed = self.maps.primary.remove(k1)?;
+
+        // Remove from secondary index
+        if std::any::TypeId::of::<U1>() == std::any::TypeId::of::<UniqueTag>() {
+            // For unique indexes, just remove the entry that points to this k1
+            self.maps.secondary_unique.retain(|_, v| v != k1);
+        } else {
+            // For non-unique indexes, remove k1 from any sets it appears in
+            self.maps.secondary_multi.retain(|_, set| {
+                set.remove(k1);
+                !set.is_empty()
+            });
+        }
+
+        // Remove from tertiary index
+        if std::any::TypeId::of::<U2>() == std::any::TypeId::of::<UniqueTag>() {
+            // For unique indexes, just remove the entry that points to this k1
+            self.maps.tertiary_unique.retain(|_, v| v != k1);
+        } else {
+            // For non-unique indexes, remove k1 from any sets it appears in
+            self.maps.tertiary_multi.retain(|_, set| {
+                set.remove(k1);
+                !set.is_empty()
+            });
+        }
+
+        // Remove from quaternary index
+        if std::any::TypeId::of::<U3>() == std::any::TypeId::of::<UniqueTag>() {
+            self.maps.quaternary_unique.retain(|_, v| v != k1);
+        } else {
+            self.maps.quaternary_multi.retain(|_, set| {
+                set.remove(k1);
+                !set.is_empty()
+            });
+        }
+
+        Some(removed)
+    }
+
+    /// Updates an existing value using the primary key.
+    /// Only updates if the primary key exists; indexes remain unchanged.
+    pub fn update(&mut self, k1: &K1, new_value: V) -> Option<V> {
+        if !self.maps.primary.contains_key(k1) {
+            return None;
+        }
+
+        // Only update the value in primary storage
+        self.maps.primary.insert(k1.clone(), new_value)
+    }
+
+    pub fn values(&self) -> impl Iterator<Item = &V> {
+        self.maps.primary.values()
+    }
+}
+
+// Implement unique access for primary key.
+impl<K1, K2, K3, K4, V, U1, U2, U3> UniqueIndex<K1, V, Primary>
+    for MultiIndexMap<K1, K2, K3, K4, V, U1, U2, U3>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    K4: Eq + Hash + Clone,
+{
+    fn get_by(&self, key: &K1) -> Option<&V> {
+        self.maps.primary.get(key)
+    }
+
+    fn get_mut_by(&mut self, key: &K1) -> Option<&mut V> {
+        self.maps.primary.get_mut(key)
+    }
+}
+
+// Implement unique access for secondary key.
+impl<K1, K2, K3, K4, V, U1, U2, U3> UniqueIndex<K2, V, Secondary>
+    for MultiIndexMap<K1, K2, K3, K4, V, U1, U2, U3>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    K4: Eq + Hash + Clone,
+    U1: Unique,
+{
+    fn get_by(&self, key: &K2) -> Option<&V> {
+        let primary_key = self.maps.secondary_unique.get(key)?;
+        self.maps.primary.get(primary_key)
+    }
+
+    fn get_mut_by(&mut self, key: &K2) -> Option<&mut V> {
+        let primary_key = self.maps.secondary_unique.get(key)?.clone();
+        self.maps.primary.get_mut(&primary_key)
+    }
+}
+
+// Implement non-unique access for secondary key.
+impl<K1, K2, K3, K4, V, U1, U2, U3> NonUniqueIndex<K2, V, Secondary>
+    for MultiIndexMap<K1, K2, K3, K4, V, U1, U2, U3>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    K4: Eq + Hash + Clone,
+    U1: NotUnique,
+{
+    fn get_all_by<'a>(&'a self, key: &K2) -> impl Iterator<Item = &'a V> + 'a
+    where
+        V: 'a,
+    {
+        self.maps
+            .secondary_multi
+            .get(key)
+            .into_iter()
+            .flatten()
+            .flat_map(|key| self.maps.primary.get(key))
+    }
+
+    fn modify_all_by<F>(&mut self, key: &K2, mut f: F)
+    where
+        F: FnMut(&mut V),
+    {
+        if let Some(keys) = self.maps.secondary_multi.get(key) {
+            let keys = keys.clone();
+            for primary_key in keys {
+                if let Some(value) = self.maps.primary.get_mut(&primary_key) {
+                    f(value);
+                }
+            }
+        }
+    }
+}
+
+// Implement unique access for tertiary key.
+impl<K1, K2, K3, K4, V, U1, U2, U3> UniqueIndex<K3, V, Tertiary>
+    for MultiIndexMap<K1, K2, K3, K4, V, U1, U2, U3>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    K4: Eq + Hash + Clone,
+    U2: Unique,
+{
+    fn get_by(&self, key: &K3) -> Option<&V> {
+        let primary_key = self.maps.tertiary_unique.get(key)?;
+        self.maps.primary.get(primary_key)
+    }
+
+    fn get_mut_by(&mut self, key: &K3) -> Option<&mut V> {
+        let primary_key = self.maps.tertiary_unique.get(key)?.clone();
+        self.maps.primary.get_mut(&primary_key)
+    }
+}
+
+// Implement non-unique access for tertiary key.
+impl<K1, K2, K3, K4, V, U1, U2, U3> NonUniqueIndex<K3, V, Tertiary>
+    for MultiIndexMap<K1, K2, K3, K4, V, U1, U2, U3>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    K4: Eq + Hash + Clone,
+    U2: NotUnique,
+{
+    fn get_all_by<'a>(&'a self, key: &K3) -> impl Iterator<Item = &'a V> + 'a
+    where
+        V: 'a,
+    {
+        self.maps
+            .tertiary_multi
+            .get(key)
+            .into_iter()
+            .flat_map(|keys| keys.iter().filter_map(|k1| self.maps.primary.get(k1)))
+    }
+
+    fn modify_all_by<F>(&mut self, key: &K3, mut f: F)
+    where
+        F: FnMut(&mut V),
+    {
+        if let Some(keys) = self.maps.tertiary_multi.get(key) {
+            let keys = keys.clone();
+            for primary_key in keys {
+                if let Some(value) = self.maps.primary.get_mut(&primary_key) {
+                    f(value);
+                }
+            }
+        }
+    }
+}
+
+// Implement unique access for quaternary key.
+impl<K1, K2, K3, K4, V, U1, U2, U3> UniqueIndex<K4, V, Quaternary>
+    for MultiIndexMap<K1, K2, K3, K4, V, U1, U2, U3>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    K4: Eq + Hash + Clone,
+    U3: Unique,
+{
+    fn get_by(&self, key: &K4) -> Option<&V> {
+        let primary_key = self.maps.quaternary_unique.get(key)?;
+        self.maps.primary.get(primary_key)
+    }
+
+    fn get_mut_by(&mut self, key: &K4) -> Option<&mut V> {
+        let primary_key = self.maps.quaternary_unique.get(key)?.clone();
+        self.maps.primary.get_mut(&primary_key)
+    }
+}
+
+// Implement non-unique access for quaternary key.
+impl<K1, K2, K3, K4, V, U1, U2, U3> NonUniqueIndex<K4, V, Quaternary>
+    for MultiIndexMap<K1, K2, K3, K4, V, U1, U2, U3>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    K4: Eq + Hash + Clone,
+    U3: NotUnique,
+{
+    fn get_all_by<'a>(&'a self, key: &K4) -> impl Iterator<Item = &'a V> + 'a
+    where
+        V: 'a,
+    {
+        self.maps
+            .quaternary_multi
+            .get(key)
+            .into_iter()
+            .flat_map(|keys| keys.iter().filter_map(|k1| self.maps.primary.get(k1)))
+    }
+
+    fn modify_all_by<F>(&mut self, key: &K4, mut f: F)
+    where
+        F: FnMut(&mut V),
+    {
+        if let Some(keys) = self.maps.quaternary_multi.get(key) {
+            let keys = keys.clone();
+            for primary_key in keys {
+                if let Some(value) = self.maps.primary.get_mut(&primary_key) {
+                    f(value);
+                }
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod multi_index_tests {
+    use super::*;
+
+    #[derive(Clone, Debug, PartialEq)]
+    struct TestValue {
+        id: i32,
+        data: String,
+    }
+
+    #[test]
+    fn test_basic_operations() {
+        // Using unique indices for all secondary, tertiary, and quaternary keys.
+ let mut map: MultiIndexMap< + i32, + String, + bool, + char, + TestValue, + UniqueTag, + UniqueTag, + UniqueTag, + > = MultiIndexMap::new(); + + let value = TestValue { + id: 1, + data: "test".to_string(), + }; + + // Test insertion with quaternary key 'a' + map.insert_or_update(&1, &"key1".to_string(), &true, &'a', value.clone()); + + // Test primary key access + assert_eq!(map.get_by(&1), Some(&value)); + + // Test secondary key access + assert_eq!(map.get_by(&"key1".to_string()), Some(&value)); + + // Test tertiary key access + assert_eq!(map.get_by(&true), Some(&value)); + + // Test quaternary key access + assert_eq!(map.get_by(&'a'), Some(&value)); + + // Test update + let new_value = TestValue { + id: 1, + data: "updated".to_string(), + }; + map.update(&1, new_value.clone()); + assert_eq!(map.get_by(&1), Some(&new_value)); + + // Test removal: all indices should be cleaned up + assert_eq!(map.remove(&1), Some(new_value.clone())); + assert_eq!(map.get_by(&1), None); + assert_eq!(map.get_by(&"key1".to_string()), None); + assert_eq!(map.get_by(&true), None); + assert_eq!(map.get_by(&'a'), None); + } + + #[test] + fn test_non_unique_indices() { + // Using non-unique indices for all secondary, tertiary, and quaternary keys. + let mut map: MultiIndexMap< + i32, + String, + bool, + char, + TestValue, + NonUniqueTag, + NonUniqueTag, + NonUniqueTag, + > = MultiIndexMap::new(); + + let value1 = TestValue { + id: 1, + data: "test1".to_string(), + }; + let value2 = TestValue { + id: 2, + data: "test2".to_string(), + }; + + // Insert multiple values with same secondary, tertiary, and quaternary keys. + map.insert_or_update(&1, &"shared_key".to_string(), &true, &'z', value1.clone()); + map.insert_or_update(&2, &"shared_key".to_string(), &true, &'z', value2.clone()); + + // Test primary key access (still unique) + assert_eq!(map.get_by(&1), Some(&value1)); + assert_eq!(map.get_by(&2), Some(&value2)); + + // Test secondary key access (non-unique) + let secondary_values: Vec<_> = map.get_all_by(&"shared_key".to_string()).collect(); + assert_eq!(secondary_values.len(), 2); + assert!(secondary_values.contains(&&value1)); + assert!(secondary_values.contains(&&value2)); + + // Test tertiary key access (non-unique) + let tertiary_values: Vec<_> = map.get_all_by(&true).collect(); + assert_eq!(tertiary_values.len(), 2); + assert!(tertiary_values.contains(&&value1)); + assert!(tertiary_values.contains(&&value2)); + + // Test quaternary key access (non-unique) + let quaternary_values: Vec<_> = map.get_all_by(&'z').collect(); + assert_eq!(quaternary_values.len(), 2); + assert!(quaternary_values.contains(&&value1)); + assert!(quaternary_values.contains(&&value2)); + + // Test removal maintains other entries + map.remove(&1); + assert_eq!(map.get_by(&1), None); + assert_eq!(map.get_by(&2), Some(&value2)); + + let remaining_secondary: Vec<_> = map.get_all_by(&"shared_key".to_string()).collect(); + assert_eq!(remaining_secondary.len(), 1); + assert_eq!(remaining_secondary[0], &value2); + } + + #[test] + fn test_mixed_uniqueness() { + // Mixed: unique secondary, non-unique tertiary, unique quaternary. + let mut map: MultiIndexMap< + i32, + String, + bool, + char, + TestValue, + UniqueTag, + NonUniqueTag, + UniqueTag, + > = MultiIndexMap::new(); + + let value1 = TestValue { + id: 1, + data: "test1".to_string(), + }; + let value2 = TestValue { + id: 2, + data: "test2".to_string(), + }; + + // Insert values with unique secondary keys but shared tertiary and different quaternary + // keys. 
+ map.insert_or_update(&1, &"key1".to_string(), &true, &'q', value1.clone()); + map.insert_or_update(&2, &"key2".to_string(), &true, &'r', value2.clone()); + + // Test unique secondary key access + assert_eq!(map.get_by(&"key1".to_string()), Some(&value1)); + assert_eq!(map.get_by(&"key2".to_string()), Some(&value2)); + + // Test non-unique tertiary key access + let tertiary_values: Vec<_> = map.get_all_by(&true).collect(); + assert_eq!(tertiary_values.len(), 2); + assert!(tertiary_values.contains(&&value1)); + assert!(tertiary_values.contains(&&value2)); + + // Test unique quaternary key access + assert_eq!(map.get_by(&'q'), Some(&value1)); + assert_eq!(map.get_by(&'r'), Some(&value2)); + } + + #[test] + fn test_empty_cases() { + let mut map: MultiIndexMap< + i32, + String, + bool, + char, + TestValue, + UniqueTag, + UniqueTag, + UniqueTag, + > = MultiIndexMap::new(); + + // Test access on empty map + assert_eq!(map.get_by(&1), None); + assert_eq!(map.get_by(&"key".to_string()), None); + assert_eq!(map.get_by(&true), None); + assert_eq!(map.get_by(&'x'), None); + + // Test remove on empty map + assert_eq!(map.remove(&1), None); + + // Test update on empty map + let value = TestValue { + id: 1, + data: "test".to_string(), + }; + assert_eq!(map.update(&1, value), None); + } + + #[test] + fn test_get_mut_by() { + // Using unique indices for all secondary, tertiary, and quaternary keys. + let mut map: MultiIndexMap< + i32, + String, + bool, + char, + TestValue, + UniqueTag, + UniqueTag, + UniqueTag, + > = MultiIndexMap::new(); + + let value = TestValue { + id: 1, + data: "original".to_string(), + }; + + // Test insertion + map.insert_or_update(&1, &"key1".to_string(), &true, &'a', value.clone()); + + // Test mutable access via primary key + if let Some(mut_ref) = map.get_mut_by(&1) { + mut_ref.data = "modified_primary".to_string(); + } + assert_eq!(map.get_by(&1).unwrap().data, "modified_primary"); + + // Test mutable access via secondary key + if let Some(mut_ref) = map.get_mut_by(&"key1".to_string()) { + mut_ref.data = "modified_secondary".to_string(); + } + assert_eq!(map.get_by(&1).unwrap().data, "modified_secondary"); + + // Test mutable access via tertiary key + if let Some(mut_ref) = map.get_mut_by(&true) { + mut_ref.data = "modified_tertiary".to_string(); + } + assert_eq!(map.get_by(&1).unwrap().data, "modified_tertiary"); + + // Test mutable access via quaternary key + if let Some(mut_ref) = map.get_mut_by(&'a') { + mut_ref.data = "modified_quaternary".to_string(); + } + assert_eq!(map.get_by(&1).unwrap().data, "modified_quaternary"); + + // Test that all index access methods see the same modified value + assert_eq!( + map.get_by(&"key1".to_string()).unwrap().data, + "modified_quaternary" + ); + assert_eq!(map.get_by(&true).unwrap().data, "modified_quaternary"); + assert_eq!(map.get_by(&'a').unwrap().data, "modified_quaternary"); + + // Test access to non-existent keys returns None + assert!(map.get_mut_by(&2).is_none()); + assert!(map.get_mut_by(&"nonexistent".to_string()).is_none()); + assert!(map.get_mut_by(&false).is_none()); + assert!(map.get_mut_by(&'z').is_none()); + } + + #[test] + fn test_modify_all_by() { + // Using non-unique indices for all secondary, tertiary, and quaternary keys. 
+        let mut map: MultiIndexMap<
+            i32,
+            String,
+            bool,
+            char,
+            TestValue,
+            NonUniqueTag,
+            NonUniqueTag,
+            NonUniqueTag,
+        > = MultiIndexMap::new();
+
+        let value1 = TestValue {
+            id: 1,
+            data: "original1".to_string(),
+        };
+        let value2 = TestValue {
+            id: 2,
+            data: "original2".to_string(),
+        };
+        let value3 = TestValue {
+            id: 3,
+            data: "original3".to_string(),
+        };
+
+        // Insert values with shared keys
+        map.insert_or_update(&1, &"shared_key".to_string(), &true, &'z', value1.clone());
+        map.insert_or_update(&2, &"shared_key".to_string(), &true, &'z', value2.clone());
+        map.insert_or_update(&3, &"other_key".to_string(), &false, &'y', value3.clone());
+
+        // Test mutable access via secondary key
+        let mut counter = 0;
+        map.modify_all_by(&"shared_key".to_string(), |value| {
+            counter += 1;
+            value.data = format!("modified_secondary_{counter}");
+        });
+
+        // Verify both values were modified
+        assert!(
+            map.get_by(&1)
+                .unwrap()
+                .data
+                .starts_with("modified_secondary_")
+        );
+        assert!(
+            map.get_by(&2)
+                .unwrap()
+                .data
+                .starts_with("modified_secondary_")
+        );
+        assert_eq!(map.get_by(&3).unwrap().data, "original3"); // Unchanged
+
+        // Test mutable access via tertiary key
+        map.modify_all_by(&true, |value| {
+            value.data = format!("modified_tertiary_{}", value.id);
+        });
+
+        // Verify both values were modified
+        assert_eq!(map.get_by(&1).unwrap().data, "modified_tertiary_1");
+        assert_eq!(map.get_by(&2).unwrap().data, "modified_tertiary_2");
+        assert_eq!(map.get_by(&3).unwrap().data, "original3"); // Unchanged
+
+        // Test mutable access via quaternary key
+        map.modify_all_by(&'z', |value| {
+            value.data = format!("modified_quaternary_{}", value.id);
+        });
+
+        // Verify both values were modified
+        assert_eq!(map.get_by(&1).unwrap().data, "modified_quaternary_1");
+        assert_eq!(map.get_by(&2).unwrap().data, "modified_quaternary_2");
+        assert_eq!(map.get_by(&3).unwrap().data, "original3"); // Unchanged
+
+        // Test access to non-existent keys does nothing
+        map.modify_all_by(&"nonexistent".to_string(), |_value| {
+            panic!("Should not be called for non-existent key");
+        });
+
+        map.modify_all_by(&'x', |_value| {
+            panic!("Should not be called for non-existent key");
+        });
+    }
+}
diff --git a/anchor/database/src/schema.rs b/anchor/database/src/schema.rs
new file mode 100644
index 000000000..d3121ab2d
--- /dev/null
+++ b/anchor/database/src/schema.rs
@@ -0,0 +1,147 @@
+use std::path::Path;
+
+use rusqlite::{Connection, types::Value};
+use ssv_types::domain_type::DomainType;
+
+use crate::{DatabaseError, sql_operations};
+
+type SchemaVersion = u32;
+
+struct Metadata {
+    schema_version: SchemaVersion,
+    domain: DomainType,
+}
+
+enum UpgradeAction {
+    UpToDate,
+    // allow dead code until there are upgrade scripts
+    #[allow(dead_code)]
+    DoUpdate {
+        script: &'static str,
+        new_version: SchemaVersion,
+    },
+    Outdated,
+    Future,
+}
+
+enum DatabaseType {
+    /// If the Option is None, the database is from an older version of Anchor where we did not
+    /// track the schema version yet. We can change the type to "SchemaVersion" at some point and
+    /// treat older versions as "Unknown".
+    Anchor(Option<SchemaVersion>),
+    IncorrectDomain(DomainType),
+    Unknown,
+}
+
+/// Ensure that there is an up-to-date database available at `db_path`. Also check or set the
+/// domain type to ensure the database is for the correct network.
+pub fn ensure_up_to_date(
+    db_path: impl AsRef<Path>,
+    domain: DomainType,
+) -> Result<(), DatabaseError> {
+    let db_path = db_path.as_ref();
+    let is_new_file = !db_path.exists();
+    let conn = Connection::open(db_path)?;
+
+    let mut schema_version = if is_new_file {
+        create_initial_schema(&conn, domain)?
+    } else {
+        match determine_database_type(&conn, domain) {
+            DatabaseType::Anchor(schema_version) => schema_version,
+            DatabaseType::Unknown => {
+                // We do not know what this is. Let's be safe and error out.
+                return Err(DatabaseError::AlreadyPresent(
+                    "Unknown database schema".to_string(),
+                ));
+            }
+            DatabaseType::IncorrectDomain(domain) => {
+                return Err(DatabaseError::AlreadyPresent(format!(
+                    "Existing database for different network: {domain:?}"
+                )));
+            }
+        }
+    };
+
+    // Upgrade scripts are step by step, so we need to loop until we are up to date.
+    loop {
+        match get_upgrade_action(schema_version) {
+            UpgradeAction::UpToDate => return Ok(()),
+            UpgradeAction::DoUpdate {
+                script,
+                new_version,
+            } => {
+                conn.execute_batch(script)?;
+                schema_version = Some(new_version);
+            }
+            UpgradeAction::Outdated => {
+                return Err(DatabaseError::AlreadyPresent(
+                    "Database is outdated - please remove \"anchor_db.sqlite\" or use another data dir.".to_string(),
+                ));
+            }
+            UpgradeAction::Future => {
+                return Err(DatabaseError::AlreadyPresent(
+                    "Database schema is newer than supported by this version of Anchor".to_string(),
+                ));
+            }
+        }
+    }
+}
+
+fn determine_database_type(conn: &Connection, domain: DomainType) -> DatabaseType {
+    let result = conn.query_row(sql_operations::GET_METADATA, [], |row| {
+        Ok(Metadata {
+            schema_version: row.get("schema_version")?,
+            domain: row.get("domain_type")?,
+        })
+    });
+
+    match result {
+        Ok(metadata) => {
+            if metadata.domain == domain {
+                DatabaseType::Anchor(Some(metadata.schema_version))
+            } else {
+                DatabaseType::IncorrectDomain(metadata.domain)
+            }
+        }
+        Err(_) => {
+            // Something failed - this might be a non-Anchor or legacy Anchor database.
+            // To check, try to get the block from the old table before `metadata` was introduced.
+            let legacy = conn
+                .query_row(sql_operations::GET_LEGACY_BLOCK, [], |row| {
+                    // Check if there is the expected column and no further columns.
+                    Ok(
+                        row.get::<_, u64>("block_number").is_ok()
+                            && row.get::<_, Value>(1).is_err(),
+                    )
+                })
+                .unwrap_or(false);
+
+            if legacy {
+                DatabaseType::Anchor(None)
+            } else {
+                DatabaseType::Unknown
+            }
+        }
+    }
+}
+
+// Before release, update the return value of this function if the initial table schema was changed.
+fn create_initial_schema(
+    conn: &rusqlite::Connection,
+    domain: DomainType,
+) -> Result<Option<SchemaVersion>, DatabaseError> {
+    conn.execute_batch(include_str!("table_schema.sql"))?;
+    conn.execute(sql_operations::INSERT_METADATA, [&domain])?;
+    Ok(Some(0))
+}
+
+// Register upgrade scripts in this function and mark the current version. Define any versions for
+// which the schema is not upgradable as "Outdated" and all versions after the current version as
+// "Future".
+fn get_upgrade_action(version: Option<SchemaVersion>) -> UpgradeAction {
+    match version {
+        None => UpgradeAction::Outdated,
+        Some(0) => UpgradeAction::UpToDate,
+        Some(1..) => UpgradeAction::Future,
+    }
+}
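
To make the upgrade loop concrete, this is how a first migration might be registered in `get_upgrade_action` once the schema changes. The `migrations/v1.sql` path and the version numbers are hypothetical; only the `DoUpdate` mechanics come from the code above:

```rust
// Hypothetical future revision of get_upgrade_action, assuming a v0 -> v1
// migration script exists at an invented path.
fn get_upgrade_action(version: Option<SchemaVersion>) -> UpgradeAction {
    match version {
        // Legacy databases without a metadata table cannot be upgraded in place.
        None => UpgradeAction::Outdated,
        // A v0 database runs the script once; the loop in `ensure_up_to_date`
        // then re-evaluates it as v1.
        Some(0) => UpgradeAction::DoUpdate {
            script: include_str!("migrations/v1.sql"),
            new_version: 1,
        },
        // v1 would then be the current version.
        Some(1) => UpgradeAction::UpToDate,
        // Anything newer was written by a later Anchor release.
        Some(2..) => UpgradeAction::Future,
    }
}
```

Because each `DoUpdate` advances the version by exactly one step, `ensure_up_to_date` can walk any old database forward script by script until it reports `UpToDate`.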
diff --git a/anchor/database/src/sql_operations.rs b/anchor/database/src/sql_operations.rs
index a160c71fd..43895606a 100644
--- a/anchor/database/src/sql_operations.rs
+++ b/anchor/database/src/sql_operations.rs
@@ -1,3 +1,8 @@
+// Metadata
+pub const INSERT_METADATA: &str = r#"INSERT INTO metadata (domain_type) VALUES (?1)"#;
+pub const GET_METADATA: &str = r#"SELECT schema_version, domain_type FROM metadata"#;
+pub const GET_LEGACY_BLOCK: &str = r#"SELECT * FROM block"#;
+
 // Operator
 pub const INSERT_OPERATOR: &str = r#"
     INSERT INTO operators
@@ -81,8 +86,8 @@ pub const SET_INDEX: &str = r#"
 "#;
 
 // Blocks
-pub const UPDATE_BLOCK_NUMBER: &str = r#"UPDATE block SET block_number = ?1"#;
-pub const GET_BLOCK_NUMBER: &str = r#"SELECT block_number FROM block"#;
+pub const UPDATE_BLOCK_NUMBER: &str = r#"UPDATE metadata SET block_number = ?1"#;
+pub const GET_BLOCK_NUMBER: &str = r#"SELECT block_number FROM metadata"#;
 
 // Nonce
 pub const GET_ALL_NONCES: &str = r#"SELECT owner, nonce FROM owners"#;
diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs
index 40787d99c..02b060896 100644
--- a/anchor/database/src/state.rs
+++ b/anchor/database/src/state.rs
@@ -13,12 +13,13 @@ use ssv_types::{
 use types::{Address, PublicKeyBytes};
 
 use crate::{
-    ClusterIndexed, DatabaseError, MetadataIndexed, MultiIndexClusterIndexedMap,
-    MultiIndexMetadataIndexedMap, MultiIndexShareIndexedMap, MultiState, Pool, PoolConn,
-    PubkeyOrId, ShareIndexed, SingleState, sql_operations,
+    ClusterMultiIndexMap, DatabaseError, MetadataMultiIndexMap, MultiIndexMap, MultiState,
+    NonUniqueIndex, Pool, PoolConn, PubkeyOrId, ShareMultiIndexMap, SingleState, UniqueIndex,
+    sql_operations,
 };
 
 // Container to hold all network state
+#[derive(Debug)]
 pub struct NetworkState {
     pub(crate) multi_state: MultiState,
     pub(crate) single_state: SingleState,
@@ -58,9 +59,9 @@ impl NetworkState {
         let nonces = Self::fetch_nonces(&conn)?;
 
         // Second phase: Populate all in memory stores with data;
-        let mut shares_multi = MultiIndexShareIndexedMap::default();
-        let mut metadata_multi = MultiIndexMetadataIndexedMap::default();
-        let mut cluster_multi = MultiIndexClusterIndexedMap::default();
+        let mut shares_multi: ShareMultiIndexMap = MultiIndexMap::new();
+        let mut metadata_multi: MetadataMultiIndexMap = MultiIndexMap::new();
+        let mut cluster_multi: ClusterMultiIndexMap = MultiIndexMap::new();
         let single_state = SingleState {
             id,
             last_processed_block,
@@ -82,22 +83,20 @@
             // Process each validator and its associated data
             for validator in validators {
                 // Insert cluster and validator metadata
-                // Only insert the cluster once per cluster_id
-                if cluster_multi.get_by_cluster_id(cluster_id).is_none() {
-                    cluster_multi.insert(ClusterIndexed {
-                        cluster_id: *cluster_id,
-                        owner: cluster.owner,
-                        committee_id: cluster.committee_id(),
-                        cluster: cluster.clone(),
-                    });
-                }
-                metadata_multi.insert(MetadataIndexed {
-                    validator_pubkey: validator.public_key,
-                    cluster_id: *cluster_id,
-                    owner: cluster.owner,
-                    committee_id: cluster.committee_id(),
-                    metadata: validator.clone(),
-                });
+                cluster_multi.insert_or_update(
+                    cluster_id,
+                    &validator.public_key,
+                    &cluster.owner,
+                    &cluster.committee_id(),
+                    cluster.clone(),
+                );
+                metadata_multi.insert_or_update(
+                    &validator.public_key,
+                    cluster_id,
+                    &cluster.owner,
+                    &cluster.committee_id(),
+                    validator.clone(),
+                );
 
                 // Process this validator's shares
                 if let Some(share_map) = &share_map
@@ -105,13 +104,13 @@ {
                     for share in shares {
                        if share.validator_pubkey == validator.public_key {
-                            shares_multi.insert(ShareIndexed {
-                                validator_pubkey: validator.public_key,
-                                cluster_id: *cluster_id,
-                                owner: cluster.owner,
-                                committee_id: cluster.committee_id(),
-                                share: share.clone(),
-                            });
+                            shares_multi.insert_or_update(
+                                &validator.public_key,
+                                cluster_id,
+                                &cluster.owner,
+                                &cluster.committee_id(),
+                                share.clone(),
+                            );
                         }
                     }
                 }
@@ -263,9 +262,10 @@ impl NetworkState {
     fn get_cluster_members(&self, committee_id: &CommitteeId) -> Option<IndexSet<OperatorId>> {
         self.multi_state
             .clusters
-            .get_by_committee_id(committee_id)
-            .first()
-            .map(|cluster_idx| cluster_idx.cluster.cluster_members.clone())
+            .get_all_by(committee_id)
+            .next()
+            .cloned()
+            .map(|cluster| cluster.cluster_members.clone())
     }
 
     pub fn get_cluster_members_for_validator(
         &self,
         validator_pk: &PublicKeyBytes,
     ) -> Option<IndexSet<OperatorId>> {
         let cluster_id = self
             .multi_state
             .validator_metadata
-            .get_by_validator_pubkey(validator_pk)
-            .map(|v| v.metadata.cluster_id)?;
+            .get_by(validator_pk)
+            .map(|v| v.cluster_id)?;
 
         self.multi_state
             .clusters
-            .get_by_cluster_id(&cluster_id)
-            .map(|c| c.cluster.cluster_members.clone())
+            .get_by(&cluster_id)
+            .map(|c| c.cluster_members.clone())
     }
 
     fn get_validator_indices(&self, committee_id: &CommitteeId) -> Vec<ValidatorIndex> {
         self.multi_state
             .validator_metadata
-            .get_by_committee_id(committee_id)
-            .iter()
-            .flat_map(|metadata_idx| metadata_idx.metadata.index)
+            .get_all_by(committee_id)
+            .flat_map(|metadata| metadata.index)
             .collect::<Vec<_>>()
     }
 
     /// Get a reference to the shares map
-    pub fn shares(&self) -> &MultiIndexShareIndexedMap {
+    pub fn shares(&self) -> &ShareMultiIndexMap {
         &self.multi_state.shares
     }
 
     /// Get a reference to the validator metadata map
-    pub fn metadata(&self) -> &MultiIndexMetadataIndexedMap {
+    pub fn metadata(&self) -> &MetadataMultiIndexMap {
         &self.multi_state.validator_metadata
     }
 
     /// Get a reference to the cluster map
-    pub fn clusters(&self) -> &MultiIndexClusterIndexedMap {
+    pub fn clusters(&self) -> &ClusterMultiIndexMap {
         &self.multi_state.clusters
     }
 
@@ -358,9 +357,10 @@ impl NetworkState {
         validator_pk: &PublicKeyBytes,
     ) -> Option {
         let validator_index = self
-            .metadata()
-            .get_by_validator_pubkey(validator_pk)
-            .map(|v| v.metadata.index)?;
+            .multi_state
+            .validator_metadata
+            .get_by(validator_pk)
+            .map(|v| v.index)?;
 
         let committee_members = self.get_cluster_members_for_validator(validator_pk)?;
 
@@ -371,10 +371,10 @@
     }
 
     pub fn validator_indices(&self) -> Vec {
-        self.metadata()
-            .iter()
-            .map(|(_, metadata_idx)| metadata_idx)
-            .filter_map(|metadata_idx| metadata_idx.metadata.index.map(|idx| idx.into()))
+        self.multi_state
+            .validator_metadata
+            .values()
+            .filter_map(|metadata| metadata.index.map(|idx| idx.into()))
             .collect()
     }
 }
diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql
index ae6604d83..5bf50dbb1 100644
--- a/anchor/database/src/table_schema.sql
+++ b/anchor/database/src/table_schema.sql
@@ -1,7 +1,17 @@
-CREATE TABLE block (
+-- SCHEMA VERSION 0
+
+-- we should avoid removing columns from this to keep compatibility between Anchor versions
+CREATE TABLE metadata (
+    schema_version INTEGER NOT NULL DEFAULT 0,
+    domain_type INTEGER NOT NULL,
     block_number INTEGER NOT NULL DEFAULT 0 CHECK (block_number >= 0)
 );
-INSERT INTO block (block_number) VALUES (0);
+CREATE TRIGGER unique_metadata
+    BEFORE INSERT ON metadata
+    WHEN (SELECT COUNT(*) FROM metadata) >= 1
+BEGIN
+    SELECT RAISE(FAIL, 'we can only have one metadata row');
+END;
 
 CREATE TABLE owners (
     owner TEXT PRIMARY KEY NOT NULL,
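
SQLite has no built-in "exactly one row" constraint, so the schema above emulates it with a `BEFORE INSERT` trigger. A standalone sketch of the intended behavior using `rusqlite` (already a dependency of this crate); the abbreviated schema is copied from `table_schema.sql`:

```rust
use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE metadata (
             schema_version INTEGER NOT NULL DEFAULT 0,
             domain_type INTEGER NOT NULL,
             block_number INTEGER NOT NULL DEFAULT 0 CHECK (block_number >= 0)
         );
         CREATE TRIGGER unique_metadata
             BEFORE INSERT ON metadata
             WHEN (SELECT COUNT(*) FROM metadata) >= 1
         BEGIN
             SELECT RAISE(FAIL, 'we can only have one metadata row');
         END;",
    )?;

    // The first insert succeeds...
    conn.execute("INSERT INTO metadata (domain_type) VALUES (?1)", [1])?;
    // ...and the trigger rejects any second row.
    assert!(
        conn.execute("INSERT INTO metadata (domain_type) VALUES (?1)", [2])
            .is_err()
    );
    Ok(())
}
```

This keeps `UPDATE metadata SET block_number = ?1` (see `sql_operations.rs` above) safe without a `WHERE` clause, since at most one row can ever exist.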
diff --git a/anchor/database/src/tests/metadata_tests.rs b/anchor/database/src/tests/metadata_tests.rs
new file mode 100644
index 000000000..320b4f9ab
--- /dev/null
+++ b/anchor/database/src/tests/metadata_tests.rs
@@ -0,0 +1,255 @@
+use std::path::PathBuf;
+
+use rusqlite::Connection;
+use ssv_types::domain_type::DomainType;
+use tempfile::TempDir;
+
+use super::test_prelude::*;
+use crate::{DatabaseError, schema};
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    const TEST_DOMAIN_1: DomainType = DomainType([42, 42, 42, 42]);
+    const TEST_DOMAIN_2: DomainType = DomainType([99, 99, 99, 99]);
+
+    #[test]
+    fn test_new_database_creation() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let db_path = temp_dir.path().join("test.db");
+
+        // Ensure database is created successfully
+        let result = schema::ensure_up_to_date(&db_path, TEST_DOMAIN_1);
+        assert!(result.is_ok(), "Failed to create new database: {result:?}",);
+
+        // Verify database file was created
+        assert!(db_path.exists(), "Database file should exist");
+
+        // Verify metadata table contains correct initial values
+        let conn = Connection::open(&db_path).expect("Failed to open database");
+        let metadata = queries::get_metadata(&conn).expect("Failed to get metadata");
+
+        assert_eq!(
+            metadata.schema_version, 0,
+            "Initial schema version should be 0"
+        );
+        assert_eq!(metadata.domain, TEST_DOMAIN_1, "Domain should match input");
+        assert_eq!(metadata.block_number, 0, "Initial block number should be 0");
+    }
+
+    #[test]
+    fn test_domain_type_validation() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let db_path = temp_dir.path().join("test.db");
+
+        // Create database with first domain
+        schema::ensure_up_to_date(&db_path, TEST_DOMAIN_1).expect("Failed to create database");
+
+        // Try to open with different domain - should fail
+        let result = schema::ensure_up_to_date(&db_path, TEST_DOMAIN_2);
+        assert!(result.is_err(), "Should fail with incorrect domain");
+
+        match result.unwrap_err() {
+            DatabaseError::AlreadyPresent(msg) => {
+                assert!(
+                    msg.contains("different network"),
+                    "Error should mention different network"
+                );
+            }
+            other => panic!("Expected AlreadyPresent error, got: {other:?}"),
+        }
+    }
+
+    #[test]
+    fn test_domain_type_validation_success() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let db_path = temp_dir.path().join("test.db");
+
+        // Create database with domain
+        schema::ensure_up_to_date(&db_path, TEST_DOMAIN_1).expect("Failed to create database");
+
+        // Open with same domain - should succeed
+        let result = schema::ensure_up_to_date(&db_path, TEST_DOMAIN_1);
+        assert!(result.is_ok(), "Should succeed with correct domain");
+    }
+
+    #[test]
+    fn test_unknown_database_rejection() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let db_path = temp_dir.path().join("test.db");
+
+        // Create a completely unknown database
+        create_unknown_database(&db_path);
+
+        // Try to open - should fail
+        let result = schema::ensure_up_to_date(&db_path, TEST_DOMAIN_1);
+        assert!(result.is_err(), "Should reject unknown database");
+
+        match result.unwrap_err() {
+            DatabaseError::AlreadyPresent(msg) => {
+                assert!(
+                    msg.contains("Unknown database schema"),
+                    "Should mention unknown schema"
+                );
+            }
+            other => panic!("Expected AlreadyPresent error, got: {other:?}"),
+        }
+    }
+
+    #[test]
+    fn test_future_schema_version() {
+        let temp_dir = TempDir::new().expect("Failed to create temp dir");
+        let db_path = 
temp_dir.path().join("test.db"); + + // Create database with future schema version + create_future_schema_database(&db_path, TEST_DOMAIN_1); + + // Try to open - should fail + let result = schema::ensure_up_to_date(&db_path, TEST_DOMAIN_1); + assert!(result.is_err(), "Should reject future schema version"); + + match result.unwrap_err() { + DatabaseError::AlreadyPresent(msg) => { + assert!( + msg.contains("newer than supported"), + "Should mention newer version" + ); + } + other => panic!("Expected AlreadyPresent error, got: {other:?}"), + } + } + + #[test] + fn test_block_number_operations() { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let db_path = temp_dir.path().join("test.db"); + let pubkey = generators::pubkey::random_rsa(); + + // Create database + let db = NetworkDatabase::new(&db_path, &pubkey, TEST_DOMAIN_1) + .expect("Failed to create database"); + + // Test initial block number + let initial_block = db.state().get_last_processed_block(); + assert_eq!(initial_block, 0, "Initial block should be 0"); + + // Update block number + let new_block = 12345u64; + let mut conn = db.connection().expect("Failed to get connection"); + let tx = conn.transaction().expect("Failed to start transaction"); + db.processed_block(new_block, &tx) + .expect("Failed to update block"); + tx.commit().expect("Failed to commit transaction"); + + // Verify update + let updated_block = db.state().get_last_processed_block(); + assert_eq!(updated_block, new_block, "Block number should be updated"); + + // Verify persistence after restart + drop(db); + let db2 = NetworkDatabase::new(&db_path, &pubkey, TEST_DOMAIN_1) + .expect("Failed to reopen database"); + let persisted_block = db2.state().get_last_processed_block(); + assert_eq!(persisted_block, new_block, "Block number should persist"); + } + + #[test] + fn test_database_outdated() { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let db_path = temp_dir.path().join("test.db"); + + // Create legacy database + create_legacy_database(&db_path); + + // Ensure up to date - should error + let err = schema::ensure_up_to_date(&db_path, TEST_DOMAIN_1) + .expect_err("Failed to detect outdated database"); + + assert!( + err.to_string().contains("outdated"), + "Error should mention outdated database" + ); + } + + #[test] + fn test_domain_type_serialization() { + // Test DomainType conversion to/from SQL + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let db_path = temp_dir.path().join("test.db"); + let conn = Connection::open(&db_path).expect("Failed to create database"); + + // Create metadata table + conn.execute( + "CREATE TABLE test_metadata (domain_type INTEGER NOT NULL)", + [], + ) + .expect("Failed to create test table"); + + // Insert domain type + conn.execute( + "INSERT INTO test_metadata (domain_type) VALUES (?1)", + [&TEST_DOMAIN_1], + ) + .expect("Failed to insert domain"); + + // Read back domain type + let retrieved_domain: DomainType = conn + .query_row("SELECT domain_type FROM test_metadata", [], |row| { + row.get(0) + }) + .expect("Failed to retrieve domain"); + + assert_eq!( + retrieved_domain, TEST_DOMAIN_1, + "Domain type should round-trip correctly" + ); + } + + // Helper functions for creating test databases + fn create_legacy_database(db_path: &PathBuf) { + let conn = Connection::open(db_path).expect("Failed to create legacy database"); + + // Create the old block table (without metadata) + conn.execute( + "CREATE TABLE block (block_number INTEGER NOT NULL DEFAULT 0)", + [], 
+        )
+        .expect("Failed to create legacy block table");
+
+        conn.execute("INSERT INTO block (block_number) VALUES (42)", [])
+            .expect("Failed to insert legacy block");
+    }
+
+    fn create_unknown_database(db_path: &PathBuf) {
+        let conn = Connection::open(db_path).expect("Failed to create unknown database");
+
+        // Create some random table that doesn't match our schema
+        conn.execute(
+            "CREATE TABLE unknown_table (id INTEGER PRIMARY KEY, data TEXT)",
+            [],
+        )
+        .expect("Failed to create unknown table");
+    }
+
+    fn create_future_schema_database(db_path: &PathBuf, domain: DomainType) {
+        let conn = Connection::open(db_path).expect("Failed to create future schema database");
+
+        // Create metadata table with future version
+        conn.execute(
+            "CREATE TABLE metadata (
+                schema_version INTEGER NOT NULL DEFAULT 999,
+                domain_type INTEGER NOT NULL,
+                block_number INTEGER NOT NULL DEFAULT 0
+            )",
+            [],
+        )
+        .expect("Failed to create future metadata table");
+
+        conn.execute(
+            "INSERT INTO metadata (schema_version, domain_type) VALUES (999, ?1)",
+            [&domain],
+        )
+        .expect("Failed to insert future metadata");
+    }
+}
diff --git a/anchor/database/src/tests/mod.rs b/anchor/database/src/tests/mod.rs
index 8fbffb836..e7770e7f3 100644
--- a/anchor/database/src/tests/mod.rs
+++ b/anchor/database/src/tests/mod.rs
@@ -1,16 +1,17 @@
 mod cluster_tests;
+mod metadata_tests;
 mod operator_tests;
 mod state_tests;
 mod utils;
 mod validator_tests;
 
 pub mod test_prelude {
-    pub use ssv_types::*;
+    pub use ssv_types::{domain_type::DomainType, *};
     pub use tempfile::tempdir;
     pub use types::{Address, Graffiti, PublicKeyBytes};
 
     pub use super::utils::*;
-    pub use crate::NetworkDatabase;
+    pub use crate::{NetworkDatabase, multi_index::UniqueIndex};
 }
 
 #[cfg(test)]
@@ -22,7 +23,7 @@ mod database_test {
         let dir = tempdir().unwrap();
         let file = dir.path().join("db.sqlite");
         let pubkey = generators::pubkey::random_rsa();
-        let db = NetworkDatabase::new(&file, &pubkey);
+        let db = NetworkDatabase::new(&file, &pubkey, DomainType::from([0; 4]));
         assert!(db.is_ok());
     }
 }
diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs
index e6c476abe..69fd5ead4 100644
--- a/anchor/database/src/tests/state_tests.rs
+++ b/anchor/database/src/tests/state_tests.rs
@@ -12,7 +12,7 @@ mod state_database_tests {
 
         // drop the database and then recreate it
         drop(fixture.db);
-        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey)
+        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey, TEST_DOMAIN)
            .expect("Failed to create database");
 
         let mut conn = fixture.db.connection().unwrap();
@@ -34,7 +34,7 @@
 
         // drop the database and then recreate it
         drop(fixture.db);
-        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey)
+        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey, TEST_DOMAIN)
            .expect("Failed to create database");
 
         // confirm all data is what we expect
@@ -50,17 +50,14 @@
 
         // drop and recreate database
         drop(fixture.db);
-        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey)
+        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey, TEST_DOMAIN)
            .expect("Failed to create database");
 
         // Confirm share data, there should be one share in memory for this operator
-        assert_eq!(fixture.db.state().shares().len(), 1);
+        assert_eq!(fixture.db.state().shares().length(), 1);
         let pk = &fixture.validator.public_key;
         let state = fixture.db.state();
-        let share = state
-            .shares()
-
.get_by_validator_pubkey(pk)
-            .expect("The share should exist");
+        let share = state.shares().get_by(pk).expect("The share should exist");
 
         assertions::share::exists_in_memory(&fixture.db, pk, share);
     }
 
@@ -72,7 +69,6 @@
 
         // Generate new validator information
         let cluster = fixture.cluster;
-        let cluster_id = cluster.cluster_id;
         let new_validator = generators::validator::random_metadata(cluster.cluster_id);
         let mut shares: Vec<Share> = Vec::new();
         fixture.operators.iter().for_each(|op| {
@@ -90,23 +86,13 @@
 
         // drop and recreate database
         drop(fixture.db);
-        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey)
+        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey, TEST_DOMAIN)
            .expect("Failed to create database");
 
         // assert that there are two validators, one cluster, and 2 shares in memory
-        assert_eq!(fixture.db.state().metadata().len(), 2);
-        assert_eq!(fixture.db.state().shares().len(), 2);
-        assert_eq!(fixture.db.state().clusters().len(), 1);
-
-        // assert the cluster has the correct id
-        let binding = fixture.db.state();
-        let cluster_idx = binding
-            .clusters()
-            .iter()
-            .map(|(_, value)| value)
-            .next()
-            .unwrap();
-        assert_eq!(cluster_idx.cluster_id, cluster_id);
+        assert_eq!(fixture.db.state().metadata().length(), 2);
+        assert_eq!(fixture.db.state().shares().length(), 2);
+        assert_eq!(fixture.db.state().clusters().length(), 1);
     }
 
     #[test]
@@ -138,7 +124,7 @@
         tx.commit().unwrap();
 
         drop(fixture.db);
-        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey)
+        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey, TEST_DOMAIN)
            .expect("Failed to create database");
         assert_eq!(fixture.db.state().get_last_processed_block(), 10);
     }
@@ -183,7 +169,7 @@
         let tx = conn.transaction().unwrap();
 
         drop(fixture.db);
-        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey)
+        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey, TEST_DOMAIN)
            .expect("Failed to create database");
 
         // confirm that nonce is 1
diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs
index 534bdbf93..77b9c5c4f 100644
--- a/anchor/database/src/tests/utils.rs
+++ b/anchor/database/src/tests/utils.rs
@@ -3,6 +3,7 @@ use std::path::PathBuf;
 use openssl::{pkey::Public, rsa::Rsa};
 use rand::Rng;
 use rusqlite::{Transaction, params};
+use ssv_types::domain_type::DomainType;
 use tempfile::TempDir;
 use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
 
@@ -11,8 +12,10 @@ use super::test_prelude::*;
 const DEFAULT_NUM_OPERATORS: u64 = 4;
 const RSA_KEY_SIZE: u32 = 2048;
 const DEFAULT_SEED: [u8; 16] = [42; 16];
+pub const TEST_DOMAIN: DomainType = DomainType([42, 42, 42, 42]);
 
 // Test fixture for common scenarios
+#[derive(Debug)]
 pub struct TestFixture {
     pub db: NetworkDatabase,
     pub cluster: Cluster,
@@ -40,7 +43,7 @@ impl TestFixture {
         let temp_dir = TempDir::new().expect("Failed to create temporary directory");
         let db_path = temp_dir.path().join("test.db");
 
-        let db = NetworkDatabase::new(&db_path, &us).expect("Failed to create DB");
+        let db = NetworkDatabase::new(&db_path, &us, TEST_DOMAIN).expect("Failed to create DB");
         let mut conn = db.connection().unwrap();
         let tx = conn.transaction().unwrap();
 
@@ -92,7 +95,8 @@ impl TestFixture {
         let db_path = temp_dir.path().join("test.db");
         let pubkey = generators::pubkey::random_rsa();
 
-        let db = NetworkDatabase::new(&db_path, &pubkey).expect("Failed to create test database");
+        let db = 
NetworkDatabase::new(&db_path, &pubkey, TEST_DOMAIN)
+            .expect("Failed to create test database");
 
         let cluster = generators::cluster::random(0);
 
         Self {
@@ -218,6 +222,7 @@ pub mod generators {
 
 pub mod queries {
     use std::str::FromStr;
 
+    use rusqlite::Connection;
     use types::PublicKeyBytes;
 
     use super::*;
@@ -232,6 +237,7 @@
     const GET_SHARES: &str = "SELECT share_pubkey, encrypted_key, cluster_id, operator_id FROM shares WHERE validator_pubkey = ?1";
     const GET_VALIDATOR: &str = "SELECT validator_pubkey, cluster_id, validator_index, graffiti FROM validators WHERE validator_pubkey = ?1";
     const GET_MEMBERS: &str = "SELECT operator_id FROM cluster_members WHERE cluster_id = ?1";
+    const GET_METADATA: &str = "SELECT schema_version, domain_type, block_number FROM metadata";
 
     // Get an operator from the database
     pub fn get_operator(id: OperatorId, tx: &Transaction<'_>) -> Option<Operator> {
@@ -327,6 +333,22 @@
         })
         .ok()
     }
+
+    pub struct Metadata {
+        pub schema_version: u64,
+        pub domain: DomainType,
+        pub block_number: u64,
+    }
+
+    pub fn get_metadata(conn: &Connection) -> Result<Metadata, rusqlite::Error> {
+        conn.query_row(GET_METADATA, [], |row| {
+            Ok(Metadata {
+                schema_version: row.get("schema_version")?,
+                domain: row.get("domain_type")?,
+                block_number: row.get("block_number")?,
+            })
+        })
+    }
 }
 
 /// Database assertions for testing
@@ -396,16 +418,16 @@
         let state = db.state();
         let stored_validator = state
             .metadata()
-            .get_by_validator_pubkey(&v.public_key)
+            .get_by(&v.public_key)
             .expect("Metadata should exist");
-        data(v, &stored_validator.metadata);
+        data(v, stored_validator);
     }
 
     // Verifies that the validator is not in memory
     pub fn exists_not_in_memory(db: &NetworkDatabase, v: &ValidatorMetadata) {
         let state = db.state();
-        let metadata_idx = state.metadata().get_by_validator_pubkey(&v.public_key);
-        assert!(metadata_idx.is_none());
+        let stored_validator = state.metadata().get_by(&v.public_key);
+        assert!(stored_validator.is_none());
     }
 
     // Verify that the cluster is in the database
@@ -436,19 +458,19 @@
     pub fn exists_in_memory(db: &NetworkDatabase, c: &Cluster) {
         assert!(db.state().member_of_cluster(&c.cluster_id));
         let state = db.state();
-        let cluster_idx = state
+        let stored_cluster = state
             .clusters()
-            .get_by_cluster_id(&c.cluster_id)
+            .get_by(&c.cluster_id)
             .expect("Cluster should exist");
-        data(c, &cluster_idx.cluster)
+        data(c, stored_cluster)
     }
 
     // Verifies that the cluster is not in memory
     pub fn exists_not_in_memory(db: &NetworkDatabase, cluster_id: ClusterId) {
         assert!(!db.state().member_of_cluster(&cluster_id));
         let state = db.state();
-        let cluster_idx = state.clusters().get_by_cluster_id(&cluster_id);
-        assert!(cluster_idx.is_none())
+        let stored_cluster = state.clusters().get_by(&cluster_id);
+        assert!(stored_cluster.is_none());
     }
 
     // Verify that the cluster is in the database
@@ -473,7 +495,6 @@
     use types::PublicKeyBytes;
 
     use super::*;
-    use crate::ShareIndexed;
 
     fn data(s1: &Share, s2: &Share) {
         assert_eq!(s1.cluster_id, s2.cluster_id);
         assert_eq!(s1.encrypted_private_key, s2.encrypted_private_key);
@@ -485,20 +506,20 @@
     pub fn exists_in_memory(
         db: &NetworkDatabase,
         validator_pubkey: &PublicKeyBytes,
-        s: &ShareIndexed,
+        s: &Share,
     ) {
         let state = db.state();
-        let share_idx = state
+        let stored_share = state
             .shares()
-            .get_by_validator_pubkey(validator_pubkey)
+            .get_by(validator_pubkey)
             .expect("Share should exist");
-        data(&s.share, &share_idx.share);
+        data(s, stored_share);
     }
 
    // 
Verifies that a share is not in memory pub fn exists_not_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKeyBytes) { let state = db.state(); - let stored_share = state.shares().get_by_validator_pubkey(validator_pubkey); + let stored_share = state.shares().get_by(validator_pubkey); assert!(stored_share.is_none()); } diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index dbd6560ed..f1ab2cc7e 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -5,7 +5,9 @@ use ssv_types::ValidatorIndex; use tracing::debug; use types::{Address, Graffiti, PublicKeyBytes}; -use crate::{DatabaseError, NetworkDatabase, sql_operations}; +use crate::{ + DatabaseError, NetworkDatabase, NonUniqueIndex, multi_index::UniqueIndex, sql_operations, +}; /// Implements all validator specific database functionality impl NetworkDatabase { @@ -24,12 +26,9 @@ impl NetworkDatabase { ])?; self.modify_state(|state| { - state - .multi_state - .clusters - .modify_by_owner(&owner, |cluster_indexed| { - cluster_indexed.cluster.fee_recipient = fee_recipient; - }); + state.multi_state.clusters.modify_all_by(&owner, |cluster| { + cluster.fee_recipient = fee_recipient; + }); }); Ok(()) } @@ -82,13 +81,14 @@ impl NetworkDatabase { ])?; self.modify_state(|state| { - state + if let Some(validator) = state .multi_state .validator_metadata - .modify_by_validator_pubkey(validator_pubkey, |validator| { - // Update in memory - validator.metadata.graffiti = graffiti; - }); + .get_mut_by(validator_pubkey) + { + // Update in memory + validator.graffiti = graffiti; + } }); Ok(()) } @@ -112,19 +112,11 @@ impl NetworkDatabase { self.modify_state(|state| { for (public_key, index) in map { - if state - .multi_state - .validator_metadata - .get_by_validator_pubkey(&public_key) - .is_some() + if let Some(validator) = + state.multi_state.validator_metadata.get_mut_by(&public_key) { - state - .multi_state - .validator_metadata - .modify_by_validator_pubkey(&public_key, |validator| { - // Update in memory - validator.metadata.index = Some(index); - }); + // Update in memory + validator.index = Some(index); } else { debug!(?public_key, "Tried to update index of unknown validator"); } diff --git a/anchor/eth/Cargo.toml b/anchor/eth/Cargo.toml index 87469a83b..a840fcbd1 100644 --- a/anchor/eth/Cargo.toml +++ b/anchor/eth/Cargo.toml @@ -19,6 +19,7 @@ metrics = { workspace = true } reqwest = { workspace = true } rusqlite = { workspace = true } sensitive_url = { workspace = true } +slashing_protection = { workspace = true } slot_clock = { workspace = true } ssv_network_config = { workspace = true } ssv_types = { workspace = true } diff --git a/anchor/eth/src/event_parser.rs b/anchor/eth/src/event_parser.rs index fb8e0b9ba..acb9cba7e 100644 --- a/anchor/eth/src/event_parser.rs +++ b/anchor/eth/src/event_parser.rs @@ -15,7 +15,7 @@ macro_rules! 
impl_event_decoder { type Output = $event_type; fn decode_from_log(log: &Log) -> Result<Self::Output, ExecutionError> { - let decoded = Self::decode_log(&log.inner, true) + let decoded = Self::decode_log(&log.inner) .map_err(|e| { ExecutionError::DecodeError( format!("Failed to decode {} event: {}", stringify!($event_type), e) diff --git a/anchor/eth/src/event_processor.rs b/anchor/eth/src/event_processor.rs index 73441fff4..fb740510a 100644 --- a/anchor/eth/src/event_processor.rs +++ b/anchor/eth/src/event_processor.rs @@ -1,10 +1,11 @@ use std::sync::Arc; use alloy::{primitives::Address, rpc::types::Log, sol_types::SolEvent}; -use database::NetworkDatabase; +use database::{NetworkDatabase, UniqueIndex}; use eth2::types::PublicKeyBytes; use indexmap::IndexSet; use rusqlite::Transaction; +use slashing_protection::SlashingDatabase; use ssv_types::{Cluster, ClusterId, Operator, OperatorId, ValidatorIndex}; use tracing::{debug, error, info, instrument, trace, warn}; @@ -28,6 +29,8 @@ pub enum Mode { index_sync_tx: index_sync::Tx, /// Queue to submit validator exits for processing exit_tx: ExitTx, + /// Slashing protection database for validator registration + slashing_protection: Arc<SlashingDatabase>, }, /// Process added validators only by updating the nonce. /// @@ -268,6 +271,7 @@ impl EventProcessor { // During keysplitting, we only care about the nonce let Mode::Node { index_sync_tx: index_lookup_queue, + slashing_protection, .. } = &self.mode else { @@ -319,6 +323,17 @@ impl EventProcessor { liquidated: false, cluster_members: IndexSet::from_iter(operator_ids), }; + + // First, update the slashing protection database... + slashing_protection + .register_validator(validator_pubkey) + .map_err(|e| { + ExecutionError::Database(format!( + "Failed to insert validator into slashing db: {e}" + )) + })?; + + // ...then the main database. self.db .insert_validator(cluster, &validator_metadata, shares, tx) .map_err(|e| { @@ -362,10 +377,9 @@ let cluster_id = compute_cluster_id(owner, &operatorIds); let state = self.db.state(); - - // Get the cluster that this validator is in - let metadata_idx = match state.metadata().get_by_validator_pubkey(&validator_pubkey) { - Some(m) => m, + // Get the metadata for this validator + let metadata = match state.metadata().get_by(&validator_pubkey) { + Some(data) => data, None => { debug!( cluster_id = ?cluster_id, @@ -376,7 +390,9 @@ )); } }; - let cluster_idx = match state.clusters().get_by_cluster_id(&metadata_idx.cluster_id) { + + // Get the cluster that this validator is in + let cluster = match state.clusters().get_by(&validator_pubkey) { Some(data) => data, None => { debug!( @@ -390,24 +406,24 @@ }; // Make sure the right owner is removing this validator - if owner != cluster_idx.owner { + if owner != cluster.owner { debug!( cluster_id = ?cluster_id, - expected_owner = ?cluster_idx.owner, + expected_owner = ?cluster.owner, actual_owner = ?owner, "Owner mismatch for validator removal" ); return Err(ExecutionError::InvalidEvent(format!( "Cluster already exists with a different owner address. Expected {}. 
Got {}", - cluster_idx.owner, owner + cluster.owner, owner ))); } // Make sure this is the correct validator - if validator_pubkey != metadata_idx.metadata.public_key { + if validator_pubkey != metadata.public_key { debug!( cluster_id = ?cluster_id, - expected_pubkey = %metadata_idx.metadata.public_key, + expected_pubkey = %metadata.public_key, actual_pubkey = %validator_pubkey, "Validator pubkey mismatch" ); @@ -617,11 +633,7 @@ impl EventProcessor { } fn is_our_validator(&self, validator_pubkey: &PublicKeyBytes) -> bool { - self.db - .state() - .shares() - .get_by_validator_pubkey(validator_pubkey) - .is_some() + self.db.state().shares().get_by(validator_pubkey).is_some() } /// Retrieves the validator index for a given validator public key from the database. @@ -639,7 +651,7 @@ ) -> Result<Option<ValidatorIndex>, ExecutionError> { // Get the validator metadata including its index let state = self.db.state(); - let metadata_idx = match state.metadata().get_by_validator_pubkey(validator_pubkey) { + let validator_metadata = match state.metadata().get_by(validator_pubkey) { Some(metadata) => metadata, None => { error!( @@ -653,7 +665,7 @@ }; // Check if we have a validator index (required for exits) - let validator_index = match metadata_idx.metadata.index { + let validator_index = match validator_metadata.index { Some(index) => Some(index), None => { warn!( @@ -694,26 +706,11 @@ // Get validator's metadata from the database let state = self.db.state(); - // Get metadata first to find the cluster ID - let metadata_idx = match state.metadata().get_by_validator_pubkey(validator_pubkey) { - Some(m) => m, - None => { - debug!( - ?validator_pubkey, - "Failed to fetch validator metadata from database" - ); - return Err(ExecutionError::Database( - "Failed to fetch validator metadata from database".to_string(), - )); - } - }; - - // Now get the cluster using the cluster ID from metadata - let cluster_idx = match state.clusters().get_by_cluster_id(&metadata_idx.cluster_id) { + // Get the cluster for this validator to access owner information + let cluster = match state.clusters().get_by(validator_pubkey) { Some(cluster) => cluster, None => { error!( - cluster_id = ?&metadata_idx.cluster_id, validator_pubkey = %validator_pubkey, "Cluster not found for validator" ); @@ -723,11 +720,11 @@ } }; - if cluster_idx.cluster_id != *computed_cluster_id { + if cluster.cluster_id != *computed_cluster_id { error!( validator_pubkey = %validator_pubkey, computed_cluster_id = ?computed_cluster_id, - cluster_id = ?cluster_idx.cluster_id, + cluster_id = ?cluster.cluster_id, "Validator's cluster id is not the same as the computed cluster id" ); return Err(ExecutionError::InvalidEvent( @@ -735,7 +732,7 @@ )); } - if cluster_idx.cluster.liquidated { + if cluster.liquidated { warn!( validator_pubkey = %validator_pubkey, computed_cluster_id = ?computed_cluster_id, @@ -748,10 +745,10 @@ // Verify that the owner from the contract event is the one who registered the validator // (which is stored as the cluster's owner in our database) - if &cluster_idx.owner != owner { + if &cluster.owner != owner { error!( validator_pubkey = %validator_pubkey, - registered_owner = ?cluster_idx.owner, + registered_owner = ?cluster.owner, contract_event_owner = ?owner, "Owner mismatch: the address in the contract event is not the validator's registered owner" ); diff --git a/anchor/eth/src/index_sync.rs b/anchor/eth/src/index_sync.rs 
index 10438b03a..bda265fde 100644 --- a/anchor/eth/src/index_sync.rs +++ b/anchor/eth/src/index_sync.rs @@ -1,7 +1,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; use beacon_node_fallback::BeaconNodeFallback; -use database::{MultiIndexClusterIndexedMap, NetworkDatabase}; +use database::{ClusterMultiIndexMap, NetworkDatabase, UniqueIndex}; use eth2::types::{StateId, ValidatorId}; use slot_clock::SlotClock; use ssv_types::{ValidatorIndex, ValidatorMetadata}; @@ -82,8 +82,7 @@ async fn validator_index_syncer( let clusters = state.clusters(); let mut from_database = state .metadata() - .iter() - .map(|(_, v)| &v.metadata) + .values() .filter_map(|v| needs_index(v, &batch, clusters)) .collect::<Vec<_>>(); drop(state); @@ -138,12 +137,12 @@ async fn validator_index_syncer( fn needs_index( metadata: &ValidatorMetadata, current_batch: &[PublicKeyBytes], - clusters: &MultiIndexClusterIndexedMap, + clusters: &ClusterMultiIndexMap, ) -> Option<PublicKeyBytes> { (metadata.index.is_none() && !current_batch.contains(&metadata.public_key) && clusters - .get_by_cluster_id(&metadata.cluster_id) - .is_some_and(|c| !c.cluster.liquidated)) + .get_by(&metadata.cluster_id) + .is_some_and(|c| !c.liquidated)) .then_some(metadata.public_key) } diff --git a/anchor/eth/src/sync.rs b/anchor/eth/src/sync.rs index a8ab1bc9d..7a0d9520c 100644 --- a/anchor/eth/src/sync.rs +++ b/anchor/eth/src/sync.rs @@ -16,6 +16,7 @@ use database::NetworkDatabase; use futures::{FutureExt, StreamExt, stream::FuturesOrdered}; use reqwest::Url; use sensitive_url::SensitiveUrl; +use slashing_protection::SlashingDatabase; use ssv_network_config::SsvNetworkConfig; use tokio::{select, sync::watch, task::spawn_blocking, time::Duration}; use tracing::{debug, error, info, instrument, trace, warn}; @@ -117,6 +118,7 @@ impl SsvEventSyncer { db: Arc<NetworkDatabase>, index_sync_tx: index_sync::Tx, exit_tx: ExitTx, + slashing_protection: Arc<SlashingDatabase>, config: Config, ) -> Result<Self, ExecutionError> { info!("Creating new SSV Event Syncer"); @@ -127,9 +129,15 @@ // Construct Websocket Provider let ws = WsConnect::new(config.ws_url.full.as_str()); - let ws_client = ProviderBuilder::default().on_ws(ws).await.map_err(|e| { - ExecutionError::SyncError(format!("Failed to bind to WS: {}, {}", &config.ws_url, e)) - })?; + let ws_client = ProviderBuilder::default() + .connect_ws(ws) + .await + .map_err(|e| { + ExecutionError::SyncError(format!( + "Failed to bind to WS: {}, {}", + &config.ws_url, e + )) + })?; debug!("Created ws client"); // Construct an EventProcessor with access to the DB @@ -138,6 +146,7 @@ Mode::Node { index_sync_tx, exit_tx, + slashing_protection, }, ); debug!("Created event processor - done"); @@ -161,7 +170,7 @@ network: SsvNetworkConfig, ) -> Self { let http_url: Url = rpc_endpoint.parse().expect("Failed to parse HTTP URL"); - let rpc_client = ProviderBuilder::default().on_http(http_url.clone()); + let rpc_client = ProviderBuilder::default().connect_http(http_url.clone()); let event_processor = EventProcessor::new(db, Mode::KeySplit); @@ -169,7 +178,7 @@ // so that we don't have to switch the ws fields to Option and clutter up the rest of the // application unnecessarily let ws_url = String::from(""); - let ws_client = ProviderBuilder::default().on_http(http_url); + let ws_client = ProviderBuilder::default().connect_http(http_url); Self { rpc_client, @@ -269,7 +278,7 @@ loop { let ws = WsConnect::new(&self.ws_url); - if let Ok(ws_client) = 
ProviderBuilder::default().on_ws(ws).await { + if let Ok(ws_client) = ProviderBuilder::default().connect_ws(ws).await { self.ws_client = ws_client; break; } diff --git a/anchor/eth/src/util.rs b/anchor/eth/src/util.rs index edc7ae69d..41e51f436 100644 --- a/anchor/eth/src/util.rs +++ b/anchor/eth/src/util.rs @@ -149,7 +149,7 @@ pub fn validate_operators( // make sure count is valid let threshold = (num_operators - 1) / 3; - if (num_operators - 1) % 3 != 0 || !(1..=4).contains(&threshold) { + if !(num_operators - 1).is_multiple_of(3) || !(1..=4).contains(&threshold) { return Err(ExecutionError::InvalidEvent(format!( "Given {num_operators} operators. Cannot build a 3f+1 quorum" ))); @@ -238,7 +238,7 @@ fn provider_from_transports( // Construct the final client let client = RpcClient::builder().transport(transport, false); - ProviderBuilder::default().on_client(client) + ProviderBuilder::default().connect_client(client) } #[cfg(test)] diff --git a/anchor/http_api/src/router.rs b/anchor/http_api/src/router.rs index 6f8b10174..bcd470cf2 100644 --- a/anchor/http_api/src/router.rs +++ b/anchor/http_api/src/router.rs @@ -46,12 +46,12 @@ async fn get_validators( let validators = database_state .borrow() .metadata() - .iter() - .map(|(_, v)| ValidatorData { - public_key: v.metadata.public_key.to_string(), + .values() + .map(|v| ValidatorData { + public_key: v.public_key.to_string(), cluster_id: format!("{:?}", v.cluster_id), - index: v.metadata.index.map(|i| i.0), - graffiti: hex::encode(v.metadata.graffiti.0), + index: v.index.map(|i| i.0), + graffiti: hex::encode(v.graffiti.0), }) .collect::<Vec<_>>(); @@ -68,8 +68,8 @@ async fn get_committees( let state = database_state.borrow(); let committee_ids = state .clusters() - .iter() - .map(|(_, cluster)| cluster.cluster.committee_id()) + .values() + .map(|cluster| cluster.committee_id()) .collect::<Vec<_>>(); let committee_data = committee_ids diff --git a/anchor/http_metrics/Cargo.toml b/anchor/http_metrics/Cargo.toml index 9c205d798..2c3692f51 100644 --- a/anchor/http_metrics/Cargo.toml +++ b/anchor/http_metrics/Cargo.toml @@ -8,9 +8,11 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"] anchor_validator_store = { workspace = true } axum = { workspace = true } health_metrics = { workspace = true } -lighthouse_network = { workspace = true } +libp2p = { workspace = true, default-features = false, features = ["metrics"] } metrics = { workspace = true } +network_utils = { workspace = true } parking_lot = { workspace = true } +prometheus-client = { workspace = true } serde = { workspace = true } slot_clock = { workspace = true } tokio = { workspace = true } diff --git a/anchor/http_metrics/src/lib.rs b/anchor/http_metrics/src/lib.rs index d6dc52c49..a761b4b1e 100644 --- a/anchor/http_metrics/src/lib.rs +++ b/anchor/http_metrics/src/lib.rs @@ -19,8 +19,9 @@ use axum::{ response::{IntoResponse, Response}, routing::get, }; -use lighthouse_network::{libp2p::metrics::Registry, prometheus_client::encoding::text::encode}; +use libp2p::metrics::Registry; use parking_lot::RwLock; +use prometheus_client::encoding::text::encode; use serde::{Deserialize, Serialize}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use tokio::net::TcpListener; @@ -128,7 +129,7 @@ async fn metrics_handler( } health_metrics::metrics::scrape_health_metrics(); - lighthouse_network::metrics::scrape_discovery_metrics(); + network_utils::discovery_metrics::scrape_discovery_metrics(); if let Err(e) = encoder.encode_utf8(&gather(), &mut buffer) { return ( diff --git a/anchor/keygen/Cargo.toml b/anchor/keygen/Cargo.toml index 
77764fb5f..45f7dfb58 100644 --- a/anchor/keygen/Cargo.toml +++ b/anchor/keygen/Cargo.toml @@ -7,6 +7,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"] [dependencies] base64 = { workspace = true } clap = { workspace = true } +global_config = { workspace = true } openssl = { workspace = true } operator_key = { workspace = true } rpassword = "7.4.0" diff --git a/anchor/keygen/src/lib.rs b/anchor/keygen/src/lib.rs index 55199140b..8dbc02d32 100644 --- a/anchor/keygen/src/lib.rs +++ b/anchor/keygen/src/lib.rs @@ -1,9 +1,7 @@ -use std::{ - fs, io, - path::{Path, PathBuf}, -}; +use std::{fs, io, path::PathBuf}; use clap::Parser; +use global_config::data_dir::DataDir; use openssl::{error::ErrorStack, pkey::Private, rsa::Rsa}; use operator_key::{ ConversionError, @@ -67,7 +65,7 @@ pub struct Keygen { } // Run RSA key generation -pub fn run_keygen(keygen: Keygen, data_dir: &Path) -> Result<Rsa<Private>, KeygenError> { +pub fn run_keygen(keygen: Keygen, data_dir: &DataDir) -> Result<Rsa<Private>, KeygenError> { // Generate the new rsa private key let private_key = Rsa::generate(2048)?; @@ -75,50 +73,53 @@ pub fn run_keygen(keygen: Keygen, data_dir: &Path) -> Result<Rsa<Private>, KeygenError> { // Create output paths for both files let private_key_file = if keygen.encrypt { - data_dir.join("encrypted_private_key.json") + data_dir.encrypted_private_key_file() } else { - data_dir.join("private_key.txt") + data_dir.unencrypted_private_key_file() }; - let pubkey_file = data_dir.join("public_key.txt"); - - if keygen.force || (!private_key_file.exists() && !pubkey_file.exists()) { - // If the user would like to password encrypt the key - if keygen.encrypt { - let password = if let Some(password_file) = keygen.password_file { - // Zeroize the original allocation - let full = Zeroizing::new( - fs::read_to_string(password_file).map_err(KeygenError::Password)?, - ); - // Zeroize the allocation with the trimmed string - Zeroizing::new(full.trim().to_string()) - } else { - read_password_from_user(true)? - }; - - // Encrypt the private key - let encrypted_private = EncryptedKey::encrypt(&private_key, &password)?; - - fs::write(&private_key_file, &String::try_from(encrypted_private)?) - .map_err(KeygenError::KeyOutput)?; - info!( - "Encrypted private key written to: {}", - private_key_file.display() - ); + let pubkey_file = data_dir.public_key_file(); + + if !keygen.force && private_key_file.exists() { + return Err(KeygenError::Exists(private_key_file.display().to_string())); + } + + if !keygen.force && pubkey_file.exists() { + return Err(KeygenError::Exists(pubkey_file.display().to_string())); + } + + // If the user would like to password encrypt the key + if keygen.encrypt { + let password = if let Some(password_file) = keygen.password_file { + // Zeroize the original allocation + let full = + Zeroizing::new(fs::read_to_string(password_file).map_err(KeygenError::Password)?); + // Zeroize the allocation with the trimmed string + Zeroizing::new(full.trim().to_string()) } else { - info!("Password not supplied. Private key will NOT be encrypted"); + read_password_from_user(true)? + }; - fs::write(&private_key_file, &unencrypted::to_base64(&private_key)?) - .map_err(KeygenError::KeyOutput)?; - info!("Private key written to: {}", private_key_file.display()); - } + // Encrypt the private key + let encrypted_private = EncryptedKey::encrypt(&private_key, &password)?; - // Log the public key - info!("Generated public key: {public_key}"); - fs::write(&pubkey_file, &public_key).map_err(KeygenError::KeyOutput)?; + fs::write(&private_key_file, &String::try_from(encrypted_private)?) 
+ .map_err(KeygenError::KeyOutput)?; + info!( + "Encrypted private key written to: {}", + private_key_file.display() + ); } else { - return Err(KeygenError::Exists(data_dir.to_string_lossy().into_owned())); + info!("Password not supplied. Private key will NOT be encrypted"); + + fs::write(&private_key_file, &unencrypted::to_base64(&private_key)?) + .map_err(KeygenError::KeyOutput)?; + info!("Private key written to: {}", private_key_file.display()); } + // Log the public key + info!("Generated public key: {public_key}"); + fs::write(&pubkey_file, &public_key).map_err(KeygenError::KeyOutput)?; + Ok(private_key) } diff --git a/anchor/keysplit/Cargo.toml b/anchor/keysplit/Cargo.toml index b3ad413f1..bac23c1a4 100644 --- a/anchor/keysplit/Cargo.toml +++ b/anchor/keysplit/Cargo.toml @@ -19,6 +19,7 @@ openssl = { workspace = true } operator_key = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } +ssv_types = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } types = { workspace = true } diff --git a/anchor/keysplit/src/split.rs b/anchor/keysplit/src/split.rs index d65b05860..074f30762 100644 --- a/anchor/keysplit/src/split.rs +++ b/anchor/keysplit/src/split.rs @@ -4,6 +4,7 @@ use database::NetworkDatabase; use eth::SsvEventSyncer; use global_config::GlobalConfig; use openssl::rsa::Rsa; +use ssv_types::domain_type::DomainType; use types::SecretKey; use crate::{KeyShare, KeysplitError, Manual, Onchain, split_keys}; @@ -99,5 +100,11 @@ fn build_db() -> Arc<NetworkDatabase> { let public_key = Rsa::from_public_components(rsa.n().to_owned().unwrap(), rsa.e().to_owned().unwrap()) .expect("Keygen will not fail"); let path = Path::new("keysplit.sqlite"); - Arc::new(NetworkDatabase::new(path, &public_key).expect("Database construction will not fail")) + // TODO: The way keysplit is currently implemented, we do not have easy access to the domain + // type. This is easier once https://github.com/sigp/anchor/pull/347 is merged and irrelevant + // if we implement https://github.com/sigp/anchor/issues/386. + Arc::new( + NetworkDatabase::new(path, &public_key, DomainType([0xff; 4])) + .expect("Database construction will not fail"), + ) } diff --git a/anchor/message_receiver/src/manager.rs b/anchor/message_receiver/src/manager.rs index fb6ff9807..2c26b442d 100644 --- a/anchor/message_receiver/src/manager.rs +++ b/anchor/message_receiver/src/manager.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use database::NetworkState; +use database::{NetworkState, NonUniqueIndex, UniqueIndex}; use gossipsub::{Message, MessageAcceptance, MessageId}; use libp2p::PeerId; use message_validator::{ @@ -112,7 +112,7 @@ impl MessageReceiver .network_state_rx .borrow() .shares() - .get_by_validator_pubkey(&validator) + .get_by(&validator) .is_none() { // We are not a signer for this validator, return without passing. @@ -131,9 +131,9 @@ // of operators. 
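// A hedged standalone sketch (hypothetical types, not part of the PR) of the membership check performed just below: any single cluster indexed under the committee id decides membership, mirroring the `.get_all_by(..).next()` chain.
fn is_member_of_committee<'a>(
    mut clusters_for_committee: impl Iterator<Item = &'a Vec<u64>>,
    own_id: u64,
) -> bool {
    // One matching cluster is enough; absence of any cluster means "not a member".
    clusters_for_committee
        .next()
        .map(|members| members.contains(&own_id))
        .unwrap_or(false)
}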
let is_member = state .clusters() - .get_by_committee_id(&committee_id) - .first() - .map(|c| c.cluster.cluster_members.contains(&own_id)) + .get_all_by(&committee_id) + .next() + .map(|c| c.cluster_members.contains(&own_id)) .unwrap_or(false); if !is_member { diff --git a/anchor/message_sender/Cargo.toml b/anchor/message_sender/Cargo.toml index ea2409dd0..e41102145 100644 --- a/anchor/message_sender/Cargo.toml +++ b/anchor/message_sender/Cargo.toml @@ -16,5 +16,6 @@ processor = { workspace = true } slot_clock = { workspace = true } ssv_types = { workspace = true } subnet_service = { workspace = true } +thiserror = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } diff --git a/anchor/message_sender/src/lib.rs b/anchor/message_sender/src/lib.rs index 70cb5e7bc..478f774f0 100644 --- a/anchor/message_sender/src/lib.rs +++ b/anchor/message_sender/src/lib.rs @@ -4,7 +4,9 @@ pub mod impostor; #[cfg(feature = "testing")] pub mod testing; +use openssl::error::ErrorStack; use ssv_types::{CommitteeId, consensus::UnsignedSSVMessage, message::SignedSSVMessage}; +use thiserror::Error as ThisError; pub use crate::network::*; @@ -27,3 +29,11 @@ pub enum Error { OwnOperatorIdUnknown, NotSynced, } + +#[derive(Debug, ThisError)] +enum SigningError { + #[error("Signing error: {0}")] + SignerError(#[from] ErrorStack), + #[error("Ciphertext has {0} bytes, expected 256")] + IncorrectCiphertextLength(usize), +} diff --git a/anchor/message_sender/src/network.rs b/anchor/message_sender/src/network.rs index 51197dd43..f51b8884d 100644 --- a/anchor/message_sender/src/network.rs +++ b/anchor/message_sender/src/network.rs @@ -3,7 +3,6 @@ use std::sync::Arc; use database::OwnOperatorId; use message_validator::{DutiesProvider, MessageAcceptance, Validator}; use openssl::{ - error::ErrorStack, hash::MessageDigest, pkey::{PKey, Private}, rsa::Rsa, @@ -16,7 +15,7 @@ use subnet_service::SubnetId; use tokio::sync::{mpsc, mpsc::error::TrySendError, watch}; use tracing::{debug, error, warn}; -use crate::{Error, MessageCallback, MessageSender}; +use crate::{Error, MessageCallback, MessageSender, SigningError}; const SIGNER_NAME: &str = "message_sign_and_send"; const SENDER_NAME: &str = "message_send"; @@ -152,10 +151,15 @@ } } - fn sign(&self, message: &UnsignedSSVMessage) -> Result<Vec<u8>, ErrorStack> { + fn sign(&self, message: &UnsignedSSVMessage) -> Result<[u8; 256], SigningError> { let serialized = message.ssv_message.as_ssz_bytes(); let mut signer = Signer::new(MessageDigest::sha256(), &self.private_key)?; signer.update(&serialized)?; - signer.sign_to_vec() + let mut signature = [0u8; 256]; + let len = signer.sign(&mut signature)?; + if len != 256 { + return Err(SigningError::IncorrectCiphertextLength(len)); + } + Ok(signature) } } diff --git a/anchor/message_sender/src/testing.rs b/anchor/message_sender/src/testing.rs index 1a5291c24..c4dfdd716 100644 --- a/anchor/message_sender/src/testing.rs +++ b/anchor/message_sender/src/testing.rs @@ -1,7 +1,6 @@ use ssv_types::{ - CommitteeId, OperatorId, - consensus::UnsignedSSVMessage, - message::{RSA_SIGNATURE_SIZE, SignedSSVMessage}, + CommitteeId, OperatorId, RSA_SIGNATURE_SIZE, consensus::UnsignedSSVMessage, + message::SignedSSVMessage, }; use tokio::sync::mpsc; @@ -20,7 +19,7 @@ impl MessageSender for MockMessageSender { additional_message_callback: Option<Box<dyn MessageCallback>>, ) -> Result<(), Error> { let message = SignedSSVMessage::new( - vec![vec![0u8; RSA_SIGNATURE_SIZE]], + vec![[0u8; RSA_SIGNATURE_SIZE]], vec![self.operator_id], 
message.ssv_message, message.full_data, diff --git a/anchor/message_validator/src/consensus_message.rs b/anchor/message_validator/src/consensus_message.rs index 3d1a1f345..ce0be1f4f 100644 --- a/anchor/message_validator/src/consensus_message.rs +++ b/anchor/message_validator/src/consensus_message.rs @@ -412,10 +412,10 @@ mod tests { use bls::{Hash256, PublicKeyBytes}; use openssl::hash::MessageDigest; use ssv_types::{ - OperatorId, + OperatorId, RSA_SIGNATURE_SIZE, VariableList, consensus::{QbftMessage, QbftMessageType}, domain_type::DomainType, - message::{MsgType, RSA_SIGNATURE_SIZE, SSVMessage, SignedSSVMessage}, + message::{MsgType, SSVMessage, SignedSSVMessage}, msgid::{DutyExecutor, MessageId, Role}, }; use ssz::Encode; @@ -639,7 +639,7 @@ mod tests { let ssv_msg = SSVMessage::new(MsgType::SSVConsensusMsgType, msg_id, invalid_data) .expect("SSVMessage should be created"); let signed_msg = SignedSSVMessage::new( - vec![vec![0xAA; RSA_SIGNATURE_SIZE]], + vec![[0xAA; RSA_SIGNATURE_SIZE]], vec![OperatorId(1)], ssv_msg, vec![], @@ -832,15 +832,15 @@ mod tests { identifier: (&msg_id_b).into(), // Mismatched ID root: Hash256::from([0u8; 32]), data_round: 1, - round_change_justification: vec![], - prepare_justification: vec![], + round_change_justification: VariableList::empty(), + prepare_justification: VariableList::empty(), }; let qbft_bytes = qbft_msg.as_ssz_bytes(); let ssv_msg = SSVMessage::new(MsgType::SSVConsensusMsgType, msg_id_a, qbft_bytes) .expect("SSVMessage should be created"); let signed_msg = SignedSSVMessage::new( - vec![vec![0xAA; RSA_SIGNATURE_SIZE]], + vec![[0xAA; RSA_SIGNATURE_SIZE]], vec![OperatorId(42)], ssv_msg, vec![], @@ -876,7 +876,7 @@ mod tests { let ssv_msg = SSVMessage::new(MsgType::SSVConsensusMsgType, msg_id, qbft_bytes) .expect("SSVMessage should be created"); let signed_msg = SignedSSVMessage::new( - vec![vec![0xAA; RSA_SIGNATURE_SIZE]], + vec![[0xAA; RSA_SIGNATURE_SIZE]], vec![OperatorId(1)], ssv_msg, vec![], @@ -1134,12 +1134,14 @@ mod tests { let signature = signer.sign_to_vec().expect("Failed to create signature"); // Pad signature to RSA_SIGNATURE_SIZE if needed - let padded_signature = if signature.len() < RSA_SIGNATURE_SIZE { - let mut padded = vec![0; RSA_SIGNATURE_SIZE]; + let padded_signature: [u8; RSA_SIGNATURE_SIZE] = if signature.len() < RSA_SIGNATURE_SIZE { + let mut padded = [0; RSA_SIGNATURE_SIZE]; padded[..signature.len()].copy_from_slice(&signature); padded } else { signature + .try_into() + .expect("Signature should not be longer than RSA_SIGNATURE_SIZE bytes") }; // Create signed message @@ -1195,7 +1197,7 @@ mod tests { .expect("SSVMessage should be created"); // Create an invalid signature (just random bytes) - let invalid_signature = vec![0xBB; RSA_SIGNATURE_SIZE]; + let invalid_signature = [0xBB; RSA_SIGNATURE_SIZE]; // Create signed message with invalid signature let signed_msg = SignedSSVMessage::new( @@ -1270,7 +1272,7 @@ mod tests { // Create a signed SSV message let signed_msg = SignedSSVMessage::new( - vec![vec![0xAA; RSA_SIGNATURE_SIZE]], + vec![[0xAA; RSA_SIGNATURE_SIZE]], vec![OperatorId(1)], ssv_msg, vec![], diff --git a/anchor/message_validator/src/duty_state.rs b/anchor/message_validator/src/duty_state.rs index 7b793703f..971ea2147 100644 --- a/anchor/message_validator/src/duty_state.rs +++ b/anchor/message_validator/src/duty_state.rs @@ -313,7 +313,7 @@ impl SignerState { if signed_ssv_message.operator_ids().len() > 1 { self.seen_signers - .insert(signed_ssv_message.operator_ids().as_slice().into()); + 
.insert(signed_ssv_message.operator_ids().into()); } self.message_counts.record_consensus_message( diff --git a/anchor/message_validator/src/lib.rs b/anchor/message_validator/src/lib.rs index 37db4c51b..42a56d660 100644 --- a/anchor/message_validator/src/lib.rs +++ b/anchor/message_validator/src/lib.rs @@ -380,7 +380,7 @@ impl Validator { &self, message_id: &MessageId, slots_per_epoch: u64, - ) -> RefMut<MessageId, DutyState> { + ) -> RefMut<'_, MessageId, DutyState> { self.duty_state_map .entry(message_id.clone()) .or_insert_with(|| { @@ -791,14 +791,14 @@ mod tests { sign::Signer, }; use ssv_types::{ - CommitteeId, CommitteeInfo, IndexSet, OperatorId, ValidatorIndex, + CommitteeId, CommitteeInfo, IndexSet, OperatorId, RSA_SIGNATURE_SIZE, ValidatorIndex, consensus::{QbftMessage, QbftMessageType}, domain_type::DomainType, - message::{MsgType, RSA_SIGNATURE_SIZE, SSVMessage, SignedSSVMessage}, + message::{MsgType, SSVMessage, SignedSSVMessage}, msgid::{DutyExecutor, MessageId, Role}, }; use ssz::Encode; - use types::{Epoch, Slot}; + use types::{Epoch, Slot, VariableList}; use crate::{ValidationFailure, compute_quorum_size, hash_data}; @@ -854,6 +854,23 @@ } pub(crate) fn build(self) -> QbftMessage { + // Convert Vec<SignedSSVMessage> to VariableList<_, U13> + let round_change_justification_vec: Vec<_> = self + .round_change_justification + .into_iter() + .map(|msg| msg.without_full_data()) + .map(|msg| VariableList::from(msg.as_ssz_bytes())) + .collect(); + let round_change_justification = VariableList::from(round_change_justification_vec); + + let prepare_justification_vec: Vec<_> = self + .prepare_justification + .into_iter() + .map(|msg| msg.without_full_data()) + .map(|msg| VariableList::from(msg.as_ssz_bytes())) + .collect(); + let prepare_justification = VariableList::from(prepare_justification_vec); + QbftMessage { qbft_message_type: self.msg_type, height: 1, @@ -861,8 +878,8 @@ identifier: (&self.identifier).into(), root: Hash256::from([0u8; 32]), data_round: 1, - round_change_justification: self.round_change_justification, - prepare_justification: self.prepare_justification, + round_change_justification, + prepare_justification, } } } @@ -897,7 +914,7 @@ signers .iter() .enumerate() - .map(|(i, _)| vec![0xAA + i as u8; RSA_SIGNATURE_SIZE]) + .map(|(i, _)| [0xAA + i as u8; RSA_SIGNATURE_SIZE]) .collect::<Vec<_>>() } else { pks.iter() @@ -905,7 +922,11 @@ let p_key = PKey::from_rsa(pk.clone()).unwrap(); let mut signer = Signer::new(MessageDigest::sha256(), &p_key).unwrap(); signer.update(&ssv_msg.as_ssz_bytes()).unwrap(); - signer.sign_to_vec().expect("Failed to sign message") + signer + .sign_to_vec() + .expect("Failed to sign message") + .try_into() + .expect("Signature should be 256 bytes") }) .collect::<Vec<_>>() }; diff --git a/anchor/message_validator/src/partial_signature.rs b/anchor/message_validator/src/partial_signature.rs index d3ff1e36f..bc9babe5d 100644 --- a/anchor/message_validator/src/partial_signature.rs +++ b/anchor/message_validator/src/partial_signature.rs @@ -279,8 +279,8 @@ mod tests { }; use slot_clock::{ManualSlotClock, SlotClock}; use ssv_types::{ - OperatorId, ValidatorIndex, - message::{MsgType, RSA_SIGNATURE_SIZE, SSVMessage, SignedSSVMessage}, + OperatorId, RSA_SIGNATURE_SIZE, ValidatorIndex, + message::{MsgType, SSVMessage, SignedSSVMessage}, partial_sig::PartialSignatureMessage, }; use ssz::Encode; @@ -325,7 +325,7 @@ let partial_sig_messages = PartialSignatureMessages { kind, slot: Slot::new(0), - messages, + messages: messages.into(), }; let 
msg_id = create_message_id_for_test(role); @@ -343,9 +343,15 @@ mod tests { let p_key = PKey::from_rsa(pk.clone()).unwrap(); let mut signer = Signer::new(MessageDigest::sha256(), &p_key).unwrap(); signer.update(&ssv_msg.as_ssz_bytes()).unwrap(); - vec![signer.sign_to_vec().expect("Failed to sign message")] + vec![ + signer + .sign_to_vec() + .expect("Failed to sign message") + .try_into() + .expect("Signature should be 256 bytes"), + ] } else { - vec![vec![0xAA; RSA_SIGNATURE_SIZE]] + vec![[0xAA; RSA_SIGNATURE_SIZE]] }; let signed_msg = SignedSSVMessage::new(signature, vec![signer], ssv_msg, full_data) @@ -440,10 +446,7 @@ mod tests { // Multiple signers - this should fail let signers = vec![OperatorId(1), OperatorId(2)]; - let signatures = vec![ - vec![0xAA; RSA_SIGNATURE_SIZE], - vec![0xBB; RSA_SIGNATURE_SIZE], - ]; + let signatures = vec![[0xAA; RSA_SIGNATURE_SIZE], [0xBB; RSA_SIGNATURE_SIZE]]; let signed_msg = SignedSSVMessage::new(signatures, signers, ssv_msg, vec![]) .expect("SignedSSVMessage should be created"); @@ -715,7 +718,7 @@ mod tests { let partial_sig_messages = PartialSignatureMessages { kind: PartialSignatureKind::PostConsensus, slot: Slot::new(0), - messages, + messages: messages.into(), }; let msg_id = create_message_id_for_test(Role::Proposer); // Not committee role @@ -724,7 +727,7 @@ mod tests { .expect("SSVMessage should be created"); let signed_msg = SignedSSVMessage::new( - vec![vec![0xAA; RSA_SIGNATURE_SIZE]], + vec![[0xAA; RSA_SIGNATURE_SIZE]], vec![OperatorId(1)], ssv_msg, vec![], @@ -765,7 +768,7 @@ mod tests { let partial_sig_messages = PartialSignatureMessages { kind: PartialSignatureKind::PostConsensus, slot: Slot::new(0), - messages, + messages: messages.into(), }; let msg_id = create_message_id_for_test(Role::Committee); @@ -774,7 +777,7 @@ mod tests { .expect("SSVMessage should be created"); let signed_msg = SignedSSVMessage::new( - vec![vec![0xAA; RSA_SIGNATURE_SIZE]], + vec![[0xAA; RSA_SIGNATURE_SIZE]], vec![OperatorId(1)], ssv_msg, vec![], diff --git a/anchor/network/Cargo.toml b/anchor/network/Cargo.toml index 926e43791..9650c08ac 100644 --- a/anchor/network/Cargo.toml +++ b/anchor/network/Cargo.toml @@ -10,6 +10,7 @@ dirs = { workspace = true } discv5 = { workspace = true } ethereum_ssz = { workspace = true } futures = { workspace = true } +global_config = { workspace = true } gossipsub = { workspace = true } hex = "0.4.3" libp2p = { workspace = true, default-features = false, features = [ @@ -23,17 +24,19 @@ libp2p = { workspace = true, default-features = false, features = [ "quic", "ping", "request-response", + "dns", ] } -lighthouse_network = { workspace = true } message_receiver = { workspace = true } -peer-store = { package = "libp2p-peer-store", git = "https://github.com/libp2p/rust-libp2p.git", rev = "082eb16" } +metrics = { workspace = true } +network_utils = { workspace = true } +peer-store = { package = "libp2p-peer-store", git = "https://github.com/libp2p/rust-libp2p.git", rev = "ad9a1b2" } prometheus-client = { workspace = true } quick-protobuf = "0.8.1" rand = { workspace = true } serde = { workspace = true } serde_json = "1.0.137" ssv_types = { workspace = true } -ssz_types = "0.10" +ssz_types = "0.11.0" subnet_service = { workspace = true } task_executor = { workspace = true } thiserror = { workspace = true } @@ -43,7 +46,7 @@ types = { workspace = true } version = { workspace = true } [dev-dependencies] -libp2p-swarm-test = "0.5.0" +libp2p-swarm-test = "0.6.0" message_receiver = { workspace = true } tokio = { workspace = true, 
features = ["rt", "macros", "time", "test-util"] } tracing-subscriber = { workspace = true } diff --git a/anchor/network/src/behaviour.rs b/anchor/network/src/behaviour.rs index fa168a6fb..6054e0ceb 100644 --- a/anchor/network/src/behaviour.rs +++ b/anchor/network/src/behaviour.rs @@ -1,11 +1,9 @@ use std::time::Duration; +use discv5::enr::k256::sha2::{Digest, Sha256}; use gossipsub::{ConfigBuilderError, MessageAuthenticity, ValidationMode}; use libp2p::{identify, ping, swarm::NetworkBehaviour}; -use lighthouse_network::{ - discv5::enr::k256::sha2::{Digest, Sha256}, - prometheus_client::registry::Registry, -}; +use prometheus_client::registry::Registry; use thiserror::Error; use types::{ChainSpec, EthSpec}; use version::version_with_platform; @@ -96,13 +94,13 @@ impl AnchorBehaviour { .validate_messages() .build()?; - let mut gossipsub = gossipsub::Behaviour::new_with_metrics( - MessageAuthenticity::RandomAuthor, - gossipsub_config, + let mut gossipsub = + gossipsub::Behaviour::new(MessageAuthenticity::RandomAuthor, gossipsub_config) + .map_err(|e| Gossipsub(e.to_string()))?; + gossipsub = gossipsub.with_metrics( metrics_registry.sub_registry_with_prefix("gossipsub"), gossipsub::MetricsConfig::default(), - ) - .map_err(|e| Gossipsub(e.to_string()))?; + ); // Add peer scoring if not disabled if !network_config.disable_gossipsub_peer_scoring { diff --git a/anchor/network/src/config.rs b/anchor/network/src/config.rs index 028e729c3..c444e3f5f 100644 --- a/anchor/network/src/config.rs +++ b/anchor/network/src/config.rs @@ -1,12 +1,12 @@ use std::{ net::{Ipv4Addr, Ipv6Addr}, num::NonZeroU16, - path::PathBuf, }; use discv5::Enr; +use global_config::data_dir::NetworkDir; use libp2p::Multiaddr; -use lighthouse_network::{ListenAddr, ListenAddress}; +use network_utils::listen_addr::{ListenAddr, ListenAddress}; use ssv_types::domain_type::DomainType; /// This is a default network directory, but it will be overridden by the cli defaults. @@ -21,7 +21,7 @@ pub const DEFAULT_QUIC_PORT: u16 = 13002; #[derive(Clone)] pub struct Config { /// Data directory where node's keyfile is stored - pub network_dir: PathBuf, + pub network_dir: NetworkDir, /// IP addresses to listen on. pub listen_addresses: ListenAddress, @@ -75,14 +75,8 @@ pub struct Config { pub domain_type: DomainType, } -impl Default for Config { - fn default() -> Self { - // WARNING: this directory default should be always overwritten with parameters - // from cli for specific networks. 
- let network_dir = dirs::home_dir() - .unwrap_or_else(|| PathBuf::from(".")) - .join(DEFAULT_NETWORK_DIR); - +impl Config { + pub fn new(network_dir: NetworkDir) -> Self { let listen_addresses = ListenAddress::V4(ListenAddr { addr: DEFAULT_IPV4_ADDRESS, disc_port: DEFAULT_DISC_PORT, diff --git a/anchor/network/src/discovery.rs b/anchor/network/src/discovery.rs index c58336416..2316ef3e7 100644 --- a/anchor/network/src/discovery.rs +++ b/anchor/network/src/discovery.rs @@ -24,20 +24,14 @@ use libp2p::{ THandlerOutEvent, ToSwarm, dummy, }, }; -use lighthouse_network::{ - CombinedKeyExt, EnrExt, - discovery::{ - UpdatePorts, - enr_ext::{QUIC_ENR_KEY, QUIC6_ENR_KEY}, - }, -}; +use network_utils::enr_ext::{CombinedKeyExt, EnrExt, QUIC_ENR_KEY, QUIC6_ENR_KEY}; use ssv_types::domain_type::DomainType; use ssz::{Decode, Encode}; -use ssz_types::{BitVector, Bitfield, length::Fixed, typenum::U128}; use subnet_service::SubnetId; use thiserror::Error; use tokio::sync::mpsc; use tracing::{debug, error, info, trace, warn}; +use types::{BitVector, typenum::U128}; use crate::{ Config, @@ -125,6 +119,13 @@ impl EventStream { } } +struct UpdatePorts { + tcp4: bool, + tcp6: bool, + quic4: bool, + quic6: bool, +} + pub struct ProtocolId {} impl ProtocolIdentity for ProtocolId { @@ -155,11 +156,11 @@ pub struct Discovery { /// Specifies whether various port numbers should be updated after the discovery service has /// been started - pub update_ports: UpdatePorts, + update_ports: UpdatePorts, domain_type: DomainType, - enr_dir: PathBuf, + enr_file_path: PathBuf, } impl Discovery { @@ -167,7 +168,7 @@ impl Discovery { local_keypair: Keypair, network_config: &Config, ) -> Result { - let enr_dir = network_config.network_dir.clone(); + let enr_file_path = network_config.network_dir.enr_file(); let discv5_listen_config = discv5::ListenConfig::from_two_sockets( network_config @@ -187,9 +188,9 @@ impl Discovery { let enr_key: CombinedKey = CombinedKey::from_libp2p(local_keypair).map_err(|e| EnrKey(e.to_string()))?; - let previous_enr = load_enr_from_disk(&enr_dir); + let previous_enr = load_enr_from_disk(&enr_file_path); let enr = build_enr(&enr_key, network_config, previous_enr)?; - save_enr_to_disk(&enr_dir, &enr); + save_enr_to_disk(&enr_file_path, &enr); let local_node_id = enr.node_id(); info!(%enr, "Created local ENR"); @@ -297,9 +298,9 @@ impl Discovery { discv5, event_stream, started: !network_config.disable_discovery, - domain_type: network_config.domain_type.clone(), + domain_type: network_config.domain_type, update_ports, - enr_dir, + enr_file_path, }) } @@ -353,7 +354,7 @@ impl Discovery { error!(?err, "Unable to update ENR"); } else { debug!(enr=?self.discv5.local_enr(), "Updated subnets in ENR"); - save_enr_to_disk(&self.enr_dir, &self.discv5.local_enr()); + save_enr_to_disk(&self.enr_file_path, &self.discv5.local_enr()); } } @@ -397,7 +398,7 @@ impl Discovery { .enr_insert(key, &new_port) .map_err(|e| format!("{e:?}"))?; - save_enr_to_disk(Path::new(&self.enr_dir), &self.discv5.local_enr()); + save_enr_to_disk(&self.enr_file_path, &self.discv5.local_enr()); Ok(true) } @@ -416,7 +417,7 @@ impl Discovery { let tcp_predicate = move |enr: &Enr| enr.tcp4().is_some() || enr.tcp6().is_some(); // Capture a copy of the domain type so the closure no longer references `self`. 
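// Hedged aside, not part of the PR: the copy below works because `DomainType` is a small newtype that can be `Copy`, so a `move` closure can own a copied value and stay `'static` instead of borrowing `self`. A standalone sketch with hypothetical names:
#[derive(Clone, Copy, PartialEq)]
struct ExampleDomain([u8; 4]);

fn domain_filter(domain: ExampleDomain) -> impl Fn(&ExampleDomain) -> bool + Send + 'static {
    let local = domain; // copied value; the closure borrows nothing from the caller
    move |candidate| *candidate == local
}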
- let local_domain_type = self.domain_type.clone(); + let local_domain_type = self.domain_type; let domain_type_predicate = move |enr: &Enr| { if let Some(Ok(domain_type)) = enr.get_decodable::<[u8; 4]>("domaintype") { @@ -679,24 +680,21 @@ } /// Loads an ENR from disk -pub fn load_enr_from_disk(dir: &Path) -> Option<Enr> { - fs::read_to_string(dir.join(Path::new(ENR_FILENAME))) +pub fn load_enr_from_disk(path: &Path) -> Option<Enr> { + fs::read_to_string(path) .ok() .and_then(|enr| Enr::from_str(&enr).ok()) } /// Saves an ENR to disk -pub fn save_enr_to_disk(dir: &Path, enr: &Enr) { - let _ = std::fs::create_dir_all(dir); - match File::create(dir.join(Path::new(ENR_FILENAME))) - .and_then(|mut f| f.write_all(enr.to_base64().as_bytes())) - { +pub fn save_enr_to_disk(path: &Path, enr: &Enr) { + match File::create(path).and_then(|mut f| f.write_all(enr.to_base64().as_bytes())) { Ok(_) => { debug!("ENR written to disk"); } Err(e) => { warn!( - file = format!("{:?}{:?}",dir, ENR_FILENAME), + file = %path.display(), error = %e, "Could not write ENR to file" ); @@ -704,7 +702,7 @@ } } -pub fn committee_bitfield(enr: &Enr) -> Result<Bitfield<Fixed<U128>>, &'static str> { +pub fn committee_bitfield(enr: &Enr) -> Result<BitVector<U128>, &'static str> { let bitfield_bytes: Bytes = enr .get_decodable("subnets") .ok_or("ENR subnet bitfield non-existent")? @@ -717,7 +715,7 @@ /// Returns the predicate for a given subnet. pub fn subnet_predicate(subnets: Vec<SubnetId>) -> impl Fn(&Enr) -> bool + Send { move |enr: &Enr| { - let committee_bitfield: Bitfield<Fixed<U128>> = match committee_bitfield(enr) { + let committee_bitfield: BitVector<U128> = match committee_bitfield(enr) { Ok(b) => b, Err(_e) => return false, }; diff --git a/anchor/network/src/handshake/mod.rs b/anchor/network/src/handshake/mod.rs index 7e8070cce..572c86552 100644 --- a/anchor/network/src/handshake/mod.rs +++ b/anchor/network/src/handshake/mod.rs @@ -188,9 +188,9 @@ mod tests { let local_key = Keypair::generate_ed25519(); let remote_key = Keypair::generate_ed25519(); - let mut local_swarm = Swarm::new_ephemeral(|_| create_behaviour(local_key)); + let mut local_swarm = Swarm::new_ephemeral_tokio(|_| create_behaviour(local_key)); let local_node_info = node_info("test", "local"); - let mut remote_swarm = Swarm::new_ephemeral(|_| create_behaviour(remote_key)); + let mut remote_swarm = Swarm::new_ephemeral_tokio(|_| create_behaviour(remote_key)); let remote_node_info = node_info("test", "remote"); tokio::spawn(async move { @@ -250,9 +250,9 @@ mod tests { let local_key = Keypair::generate_ed25519(); let remote_key = Keypair::generate_ed25519(); - let mut local_swarm = Swarm::new_ephemeral(|_| create_behaviour(local_key)); + let mut local_swarm = Swarm::new_ephemeral_tokio(|_| create_behaviour(local_key)); let local_node_info = node_info("test1", "local"); - let mut remote_swarm = Swarm::new_ephemeral(|_| create_behaviour(remote_key)); + let mut remote_swarm = Swarm::new_ephemeral_tokio(|_| create_behaviour(remote_key)); let remote_node_info = node_info("test2", "remote"); tokio::spawn(async move { diff --git a/anchor/network/src/keypair_utils.rs b/anchor/network/src/keypair_utils.rs index 7c63d348d..2dc367d06 100644 --- a/anchor/network/src/keypair_utils.rs +++ b/anchor/network/src/keypair_utils.rs @@ -1,22 +1,19 @@ use std::{ fs::File, io::{Read, Write}, - path::PathBuf, + path::Path, }; use libp2p::identity::{Keypair, secp256k1}; use tracing::{debug, warn}; -pub const 
NETWORK_KEY_FILENAME: &str = "key"; - /// Loads a private key from disk. If this fails, a new key is /// generated and is then saved to disk. /// /// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5. -pub fn load_private_key(network_dir: &PathBuf) -> Keypair { +pub fn load_private_key(network_key_file: &Path) -> Keypair { // check for key from disk - let network_key_f = network_dir.join(NETWORK_KEY_FILENAME); - if let Ok(mut network_key_file) = File::open(network_key_f.clone()) { + if let Ok(mut network_key_file) = File::open(network_key_file) { let mut key_bytes: Vec<u8> = Vec::with_capacity(36); match network_key_file.read_to_end(&mut key_bytes) { Err(_) => debug!("Could not read network key file"), @@ -35,8 +32,7 @@ pub fn load_private_key(network_dir: &PathBuf) -> Keypair { // if a key could not be loaded from disk, generate a new one and save it let local_private_key = secp256k1::Keypair::generate(); - let _ = std::fs::create_dir_all(network_dir); - match File::create(network_key_f.clone()) + match File::create(network_key_file) .and_then(|mut f| f.write_all(&local_private_key.secret().to_bytes())) { Ok(_) => { @@ -44,7 +40,7 @@ } Err(e) => { warn!( - file = ?network_key_f, + file = ?network_key_file, error = ?e, "Could not write node key to file" ); diff --git a/anchor/network/src/lib.rs b/anchor/network/src/lib.rs index c0fb2d0da..2bedb5e00 100644 --- a/anchor/network/src/lib.rs +++ b/anchor/network/src/lib.rs @@ -5,12 +5,13 @@ mod config; mod discovery; mod handshake; mod keypair_utils; +mod metrics; mod network; mod peer_manager; mod scoring; mod transport; pub use config::{Config, DEFAULT_DISC_PORT, DEFAULT_QUIC_PORT, DEFAULT_TCP_PORT}; -pub use lighthouse_network::{ListenAddr, ListenAddress}; pub use network::Network; +pub use network_utils::listen_addr::{ListenAddr, ListenAddress}; pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>; diff --git a/anchor/network/src/metrics.rs b/anchor/network/src/metrics.rs new file mode 100644 index 000000000..952be3047 --- /dev/null +++ b/anchor/network/src/metrics.rs @@ -0,0 +1,7 @@ +use std::sync::LazyLock; + +use metrics::*; + +pub static PEERS_CONNECTED: LazyLock<Result<IntGauge>> = LazyLock::new(|| { + try_create_int_gauge("libp2p_peers", "Count of libp2p peers currently connected") +}); diff --git a/anchor/network/src/network.rs b/anchor/network/src/network.rs index 069989a8f..0b58adcb8 100644 --- a/anchor/network/src/network.rs +++ b/anchor/network/src/network.rs @@ -93,7 +93,7 @@ impl Network { executor: TaskExecutor, spec: Arc, ) -> Result, Box> { - let local_keypair: Keypair = load_private_key(&config.network_dir); + let local_keypair: Keypair = load_private_key(&config.network_dir.key_file()); let transport = build_transport(local_keypair.clone(), !config.disable_quic_support)?; @@ -105,7 +105,7 @@ .map_err(|e| Box::new(NetworkError::Behaviour(e)))?; let peer_id = local_keypair.public().to_peer_id(); - let domain_type: String = config.domain_type.clone().into(); + let domain_type: String = config.domain_type.into(); let node_info = NodeInfo::new( domain_type, Some(NodeMetadata { @@ -130,7 +130,7 @@ node_info, message_receiver, outcome_rx, - domain_type: config.domain_type.clone(), + domain_type: config.domain_type, metrics_registry: Some(metrics_registry), spec, }; diff --git a/anchor/network/src/peer_manager/connection.rs b/anchor/network/src/peer_manager/connection.rs index 179371669..c31501340 100644 --- 
a/anchor/network/src/peer_manager/connection.rs +++ b/anchor/network/src/peer_manager/connection.rs @@ -11,7 +11,7 @@ use peer_store::memory_store::MemoryStore; use ssz_types::{Bitfield, length::Fixed, typenum::U128}; use subnet_service::SubnetId; -use crate::{Config, Enr, discovery}; +use crate::{Config, Enr, discovery, metrics::PEERS_CONNECTED}; /// A fraction of `target_peers` that we allow to connect to us in excess of /// `target_peers`. For clarity, if `target_peers` is 50 and @@ -161,8 +161,8 @@ impl ConnectionManager { /// Update metrics if connection state changed pub fn update_metrics_if_changed(&self, changed: bool) { if changed { - lighthouse_network::metrics::set_gauge( - &lighthouse_network::metrics::PEERS_CONNECTED, + metrics::set_gauge( + &PEERS_CONNECTED, self.connected.len().try_into().unwrap_or(0), ); } diff --git a/anchor/network/src/peer_manager/discovery.rs b/anchor/network/src/peer_manager/discovery.rs index 5b7326883..bbcf255a2 100644 --- a/anchor/network/src/peer_manager/discovery.rs +++ b/anchor/network/src/peer_manager/discovery.rs @@ -3,9 +3,12 @@ use std::collections::{HashMap, HashSet, hash_map::Entry}; use discv5::libp2p_identity::PeerId; use libp2p::{ Multiaddr, - swarm::dial_opts::{DialOpts, PeerCondition}, + swarm::{ + FromSwarm, NewExternalAddrOfPeer, + dial_opts::{DialOpts, PeerCondition}, + }, }; -use lighthouse_network::EnrExt; +use network_utils::enr_ext::EnrExt; use peer_store::{ Store, memory_store::{MemoryStore, PeerRecord}, @@ -37,9 +40,14 @@ impl PeerDiscovery { ) -> Option { let id = enr.peer_id(); + let multiaddrs = enr.multiaddr(); + // Update peer store with the discovered peer - for multiaddr in enr.multiaddr() { - peer_store.update_address(&id, &multiaddr); + for multiaddr in multiaddrs.iter() { + peer_store.on_swarm_event(&FromSwarm::NewExternalAddrOfPeer(NewExternalAddrOfPeer { + peer_id: id, + addr: multiaddr, + })); } peer_store.insert_custom_data(&id, enr.clone()); diff --git a/anchor/network/src/peer_manager/types.rs b/anchor/network/src/peer_manager/types.rs index e879024b1..455014c7b 100644 --- a/anchor/network/src/peer_manager/types.rs +++ b/anchor/network/src/peer_manager/types.rs @@ -1,5 +1,4 @@ use libp2p::swarm::dial_opts::DialOpts; -use peer_store::memory_store; use subnet_service::SubnetId; /// Actions that the peer manager can request from the network @@ -25,6 +24,6 @@ impl ConnectActions { /// Events emitted by the peer manager #[derive(Debug)] pub enum Event { - PeerStore(peer_store::Event), + PeerStore(peer_store::memory_store::Event), Heartbeat(crate::peer_manager::heartbeat::Event), } diff --git a/anchor/signature_collector/src/lib.rs b/anchor/signature_collector/src/lib.rs index 24de51699..3f54e78d7 100644 --- a/anchor/signature_collector/src/lib.rs +++ b/anchor/signature_collector/src/lib.rs @@ -103,7 +103,7 @@ impl SignatureCollectorManager { self: &Arc<Self>, metadata: SignatureMetadata, requester: SignatureRequester, - signing_data: SigningData, + validator_signing_data: ValidatorSigningData, ) -> Result, CollectionError> { let Some(signer) = self.operator_id.get() else { return Err(CollectionError::OwnOperatorIdUnknown); }; @@ -114,8 +114,8 @@ debug!( ?metadata, ?requester, - root=?signing_data.root, - index=?signing_data.index, + root=?validator_signing_data.root, + index=?validator_signing_data.index, "sign_and_collect called", ); @@ -125,8 +125,8 @@ self.processor.permitless.send_immediate( move |drop_on_finish| { let sender = manager.get_or_spawn( - 
signing_data.root, - signing_data.index, + validator_signing_data.root, + validator_signing_data.index, cloned_metadata.slot, ); let _ = sender.send(CollectorMessage { @@ -144,21 +144,21 @@ let manager = self.clone(); self.processor.urgent_consensus.send_blocking( move || { - trace!(root = ?signing_data.root, "Signing..."); + trace!(root = ?validator_signing_data.root, "Signing..."); // If we have no share, we cannot actually sign the message, because we are running // in impostor mode. - let partial_signature = if let Some(share) = &signing_data.share { - share.sign(signing_data.root) + let partial_signature = if let Some(share) = &validator_signing_data.share { + share.sign(validator_signing_data.root) } else { Signature::empty() }; - trace!(root = ?signing_data.root, "Signed"); + trace!(root = ?validator_signing_data.root, "Signed"); let message = PartialSignatureMessage { partial_signature, - signing_root: signing_data.root, + signing_root: validator_signing_data.root, signer, - validator_index: signing_data.index, + validator_index: validator_signing_data.index, }; match requester { SignatureRequester::SingleValidator { pubkey } => { @@ -178,12 +178,13 @@ } SignatureRequester::Committee { num_signatures_to_collect, + base_hash, } => { // We have to collect all signatures from the given validators. // To check this, create or get an entry from the `committee_signatures` map. let mut entry = match manager .committee_signatures - .entry((signing_data.root, metadata.committee_id)) + .entry((base_hash, metadata.committee_id)) { Entry::Occupied(occupied) => occupied, Entry::Vacant(vacant) => vacant.insert_entry(CommitteeSignatures { @@ -224,7 +225,7 @@ // Finally, make the local instance aware of the partial signature, if it is a real // signature. - if signing_data.share.is_some() { + if validator_signing_data.share.is_some() { let _ = manager.receive_partial_signature(message, metadata.slot); } }, @@ -245,7 +246,7 @@ let partial_sig_messages = PartialSignatureMessages { kind: metadata.kind, slot: metadata.slot, - messages: signatures, + messages: signatures.into(), }; UnsignedSSVMessage { @@ -391,11 +392,16 @@ pub enum SignatureRequester { Committee { /// The number of signatures we have to wait for. num_signatures_to_collect: usize, + /// A hash that identifies what we are signing. We hold off on sending the message until we + /// have created enough signatures with this `base_hash`. We need this to differentiate + /// "groups" of signatures. We cannot use the signing root, as we need to group signatures + /// with differing signing roots. 
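// A hedged standalone sketch (hypothetical types, not part of the PR) of the grouping idea described above: partial signatures for one committee duty are keyed by the duty-level `base_hash`, so messages with different signing roots still accumulate in the same group.
use std::collections::HashMap;

type BaseHash = [u8; 32];
type GroupKey = (BaseHash, u64); // (base_hash, committee id)

fn record_signature(
    groups: &mut HashMap<GroupKey, Vec<[u8; 32]>>,
    key: GroupKey,
    signing_root: [u8; 32],
    needed: usize,
) -> bool {
    let group = groups.entry(key).or_default();
    group.push(signing_root);
    // The combined message is only sent once the whole group is complete.
    group.len() >= needed
}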
+ base_hash: Hash256, }, } #[derive(Clone)] -pub struct SigningData { +pub struct ValidatorSigningData { pub root: Hash256, pub index: ValidatorIndex, pub share: Option, diff --git a/anchor/spec_tests/Cargo.toml b/anchor/spec_tests/Cargo.toml new file mode 100644 index 000000000..42fcab4e5 --- /dev/null +++ b/anchor/spec_tests/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "spec_tests" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime <contact@sigmaprime.io>"] + +[dependencies] +base64 = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +hex = { workspace = true } +openssl = { workspace = true } +operator_key = { path = "../common/operator_key" } +parking_lot = { workspace = true } +qbft = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_with = "3.0" +sha2 = { workspace = true } +ssv_types = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } +types = { workspace = true } +walkdir = "2.5.0" diff --git a/anchor/spec_tests/src/lib.rs b/anchor/spec_tests/src/lib.rs new file mode 100644 index 000000000..0b55fba17 --- /dev/null +++ b/anchor/spec_tests/src/lib.rs @@ -0,0 +1,278 @@ +#![allow(dead_code)] + +mod types; +mod utils; +use std::{ + collections::{HashMap, HashSet}, + fmt, fs, + path::Path, + sync::LazyLock, +}; + +use serde::de::DeserializeOwned; +use types::TypesSpecTestType; +use walkdir::WalkDir; + +use crate::types::*; + +// All Spec Test Variants. Maps to an inner variant type that describes specific tests +#[derive(Eq, PartialEq, Hash, Debug)] +enum SpecTestType { + Types(TypesSpecTestType), +} + +// Maps a test category to its respective spec test location. Do not change! +impl fmt::Display for SpecTestType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + SpecTestType::Types(_) => write!(f, "ssv-spec/types/spectest/generate/tests"), + } + } +} + +impl SpecTestType { + /// Some tests are encoding tests. They share a prefix but have a different filename and + /// structure + pub fn is_encoding(&self) -> bool { + matches!(self, SpecTestType::Types(type_test) if type_test.is_encoding()) + } +} + +// Import the debug_encoding module + +// Core trait to orchestrate setting up and running spec tests. The spec tests are broken up into +// different categories with different file structures. For each file structure, implementing the +// required functions allows for a smooth testing process +trait SpecTest { + // Retrieve the name of the test + fn name(&self) -> &str; + + // Set up a runner for the test. This will configure and construct everything required to + // execute the test + fn setup(&mut self); + + // Run the test and verify that the output is what we were expecting. + fn run(&self) -> bool; + + // Return the type of this test. Used as a Key for the loaders and path construction + fn test_type() -> SpecTestType + where + Self: Sized; +} + +// Abstract away repeated logic for registering a test type with the loader macro_rules! register_test_loaders { ($($test_type:ty),* $(,)?) 
=> { + LazyLock::new(|| { + let mut loaders = HashMap::new(); + $( + register_test::<$test_type>(&mut loaders); + )* + loaders + }) + }; +} + +type Loaders = HashMap<SpecTestType, fn(&str) -> Box<dyn SpecTest>>; +static TEST_LOADERS: LazyLock<Loaders> = register_test_loaders!( + // Types tests + // ----------- + BeaconVoteEncodingTest, + ConsensusDataProposerTest, + EncryptionSpecTest, + PartialSigMsgSpecTest, + PartialSigMessageEncodingTest, + SignedSSVMessageTest, + SignedSSVMessageEncodingTest, + SSVMessageTest, + SSVMessageEncodingTest, + SSZSpecTest, + ValidatorConsensusDataTest, + ValidatorConsensusDataEncodingTest, +); + +// Register a test in the loader. This inserts a mapping from SpecTestType -> loading closure +// into a map for later access. This is needed so that we can parse from an arbitrary test file to a +// specific test type T +fn register_test<T: SpecTest + DeserializeOwned + 'static>(map: &mut Loaders) { + map.insert(T::test_type(), |path| { + let contents = + fs::read_to_string(path).unwrap_or_else(|_| panic!("Failed to read test file: {path}")); + + let test: T = serde_json::from_str(&contents).unwrap_or_else(|e| { + eprintln!("=== JSON PARSING ERROR ==="); + eprintln!("File: {path}"); + eprintln!("Error: {e}"); + eprintln!("========================"); + panic!("Failed to parse test {path}: {e}") + }); + + Box::new(test) + }); +} + +// Core function to run the tests. Given a SpecTestType, it will navigate to the proper directory, +// read in all of the tests, make sure they are all set up, and then run each one +fn run_tests(test_type: SpecTestType) -> bool { + let dir_name = test_type.to_string(); + let test_dir = Path::new(&dir_name); + + let tests: Vec<Box<dyn SpecTest>> = WalkDir::new(test_dir) + .into_iter() + .filter_map(Result::ok) + .filter_map(|entry| { + let path = entry.path(); + + // Check if it is an encoding test + let is_encoding = test_type.is_encoding(); + + // Get the inner variant string to check in filenames + let variant = match &test_type { + SpecTestType::Types(inner) => inner.to_string(), + }; + + if path.is_file() { + let filename = path.file_name().map(|name| name.to_string_lossy()); + + let matches = filename + .map(|name| { + let split: HashSet<String> = name.split('.').map(String::from).collect(); + + let contains_prefix = split.contains(&variant); + if is_encoding { + // if it is an encoding test, we also have to check that the file + // contains "EncodingTest" + contains_prefix & name.contains("EncodingTest") + } else { + contains_prefix & !name.contains("EncodingTest") + } + }) + .unwrap_or(false); + + if matches { + println!("Loading {path:?}"); + let loader = TEST_LOADERS + .get(&test_type) + .unwrap_or_else(|| panic!("No loader registered for: {test_type}")); + return Some(loader(&path.to_string_lossy())); + } + } + None + }) + .collect(); + + assert!(!tests.is_empty()); + println!("Loaded {} tests", tests.len()); + + let mut result = true; + for mut test in tests { + test.setup(); + result &= test.run(); + } + result +} + +#[cfg(test)] +mod spec_tests { + use super::*; + + // All type specific spec tests + mod type_tests { + use super::*; + + #[test] + // Beacon vote encoding + fn test_types_encoding_beacon_vote() { + assert!(run_tests(SpecTestType::Types( + TypesSpecTestType::BeaconVoteEncoding + ))) + } + + #[test] + #[ignore = "need to implement validation"] + // Consensus data proposer test + fn test_types_consensus_data_proposer() { + assert!(run_tests(SpecTestType::Types( + TypesSpecTestType::ConsensusDataProposer + ))) + } + + #[test] + // Encryption test + fn test_types_encryption_test() { + assert!(run_tests(SpecTestType::Types(
TypesSpecTestType::Encryption + ))) + } + + #[test] + // Partial sig message + fn test_types_partial_sig_message() { + assert!(run_tests(SpecTestType::Types( + TypesSpecTestType::PartialSigMessage + ))) + } + + #[test] + // Partial sig message encoding + fn test_types_encoding_partial_sig_message() { + assert!(run_tests(SpecTestType::Types( + TypesSpecTestType::PartialSigMessageEncoding + ))) + } + + #[test] + // Signed ssv message test + fn test_types_signed_ssv_message() { + assert!(run_tests(SpecTestType::Types( + TypesSpecTestType::SignedSSVMsg + ))) + } + + #[test] + // Signed SSV Message Encoding + fn test_types_encoding_signed_ssv_message() { + assert!(run_tests(SpecTestType::Types( + TypesSpecTestType::SignedSSVMsgEncoding + ))) + } + + #[test] + // SSV Message test + fn test_types_ssv_message() { + assert!(run_tests(SpecTestType::Types(TypesSpecTestType::SSVMsg))) + } + + #[test] + // SSV Message Encoding + fn test_types_encoding_ssv_message() { + assert!(run_tests(SpecTestType::Types( + TypesSpecTestType::SSVMsgEncoding + ))) + } + + #[test] + #[ignore = "invalid signature in test data"] + // SSZ withdrawals marshalling test + fn test_types_ssz() { + assert!(run_tests(SpecTestType::Types(TypesSpecTestType::Ssz))) + } + + #[test] + #[ignore = "need to implement validation"] + // Validator consensus data + fn test_types_validator_consensus_data() { + assert!(run_tests(SpecTestType::Types( + TypesSpecTestType::ValidatorConsensusData + ))) + } + + #[test] + // Validator consensus data encoding + fn test_types_encoding_validator_consensus_data() { + assert!(run_tests(SpecTestType::Types( + TypesSpecTestType::ValidatorConsensusDataEncoding + ))) + } + } +} diff --git a/anchor/spec_tests/src/types/beacon_vote_encoding.rs b/anchor/spec_tests/src/types/beacon_vote_encoding.rs new file mode 100644 index 000000000..b67422294 --- /dev/null +++ b/anchor/spec_tests/src/types/beacon_vote_encoding.rs @@ -0,0 +1,72 @@ +use serde::Deserialize; +use ssv_types::consensus::BeaconVote; +use ssz::{Decode, Encode}; +use tree_hash::TreeHash; +use types::Hash256; + +use crate::{ + SpecTest, SpecTestType, + types::TypesSpecTestType, + utils::deserializers::{deserialize_base64, deserialize_bytes_to_hash256}, +}; + +// BeaconVote encoding test +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase", deny_unknown_fields)] +pub struct BeaconVoteEncodingTest { + #[serde(rename = "Type")] + pub r#type: Option<String>, + pub documentation: Option<String>, + pub name: String, + #[serde(deserialize_with = "deserialize_base64")] + pub data: Vec<u8>, + #[serde(deserialize_with = "deserialize_bytes_to_hash256")] + pub expected_root: Hash256, +} + +impl SpecTest for BeaconVoteEncodingTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // No-op + } + + fn run(&self) -> bool { + // Decode the BeaconVote from the provided data + let beacon_vote = match BeaconVote::from_ssz_bytes(&self.data) { + Ok(bv) => bv, + Err(e) => { + println!("Failed to decode BeaconVote: {e:?}"); + return false; + } + }; + + // Compute the hash tree root and verify it matches the expected root + if self.expected_root != beacon_vote.tree_hash_root() { + return false; + } + + // Test round trip encoding + let re_encoded = beacon_vote.as_ssz_bytes(); + match BeaconVote::from_ssz_bytes(&re_encoded) { + Ok(re_decoded) => { + if re_decoded != beacon_vote { + println!("Roundtrip encoding failed"); + return false; + } + } + Err(e) => { + println!("Failed to decode re-encoded data: {e:?}"); + return false; + } + }
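+ + // All checks passed: decode succeeded, the expected root matched, and the round trip was lossless.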
+ + true + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::BeaconVoteEncoding) + } +} diff --git a/anchor/spec_tests/src/types/consensus_data_proposer.rs b/anchor/spec_tests/src/types/consensus_data_proposer.rs new file mode 100644 index 000000000..992b15524 --- /dev/null +++ b/anchor/spec_tests/src/types/consensus_data_proposer.rs @@ -0,0 +1,94 @@ +use serde::Deserialize; +use ssv_types::consensus::ValidatorConsensusData; +use ssz::{Decode, Encode}; +use tree_hash::TreeHash; +use types::Hash256; + +use crate::{ + SpecTest, SpecTestType, + types::TypesSpecTestType, + utils::deserializers::{ + deserialize_base64, deserialize_base64_option, deserialize_hex_hash256, + }, +}; + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ConsensusDataProposerTest { + #[serde(rename = "Name")] + pub name: String, + #[serde(rename = "Type")] + pub test_type: String, + #[serde(rename = "Documentation")] + pub documentation: String, + #[serde(rename = "Blinded")] + pub blinded: bool, + #[serde(rename = "DataCd", deserialize_with = "deserialize_base64")] + pub data_cd: Vec<u8>, + #[serde(rename = "DataBlk", deserialize_with = "deserialize_base64_option")] + pub data_blk: Option<Vec<u8>>, + #[serde( + rename = "ExpectedBlkRoot", + deserialize_with = "deserialize_hex_hash256" + )] + pub expected_blk_root: Hash256, + #[serde( + rename = "ExpectedCdRoot", + deserialize_with = "deserialize_hex_hash256" + )] + pub expected_cd_root: Hash256, + #[serde(rename = "ExpectedError")] + pub expected_error: String, +} + +impl SpecTest for ConsensusDataProposerTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // Set up any required test state + } + + fn run(&self) -> bool { + let consensus_data = match ValidatorConsensusData::from_ssz_bytes(&self.data_cd) { + Ok(data) => data, + Err(e) => { + let has_error = !self.expected_error.is_empty(); + if !has_error { + eprintln!( + "Test '{}' failed: unexpected SSZ decode error: {:?}", + self.name, e + ); + } + return has_error; + } + }; + + // todo!() need block validation logic + // https://github.com/sigp/anchor/issues/258 + + // Compute tree hash root and compare with expected + let computed_root = consensus_data.tree_hash_root(); + if self.expected_cd_root != computed_root { + eprintln!( + "Test '{}' failed: CD root mismatch.
Expected: {:?}, Got: {:?}", + self.name, self.expected_cd_root, computed_root + ); + return false; + } + + // Test roundtrip encoding + let re_encoded = consensus_data.as_ssz_bytes(); + if re_encoded != self.data_cd { + eprintln!("Test '{}' failed: re-encoding mismatch", self.name); + return false; + } + + true + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::ConsensusDataProposer) + } +} diff --git a/anchor/spec_tests/src/types/encryption.rs b/anchor/spec_tests/src/types/encryption.rs new file mode 100644 index 000000000..cd2349b73 --- /dev/null +++ b/anchor/spec_tests/src/types/encryption.rs @@ -0,0 +1,71 @@ +use base64::prelude::*; +use operator_key::{encrypted::EncryptedKey, unencrypted}; +use serde::Deserialize; + +use crate::{ + SpecTest, SpecTestType, types::TypesSpecTestType, utils::deserializers::deserialize_base64, +}; + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase", deny_unknown_fields)] +pub struct EncryptionSpecTest { + #[serde(rename = "Type")] + pub r#type: String, + pub documentation: String, + pub name: String, + #[serde(rename = "SKPem", deserialize_with = "deserialize_base64")] + pub sk_pem: Vec<u8>, + #[serde(rename = "PKPem", deserialize_with = "deserialize_base64")] + pub pk_pem: Vec<u8>, + #[serde(deserialize_with = "deserialize_base64")] + pub plain_text: Vec<u8>, +} + +impl EncryptionSpecTest {} + +impl SpecTest for EncryptionSpecTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // No-op + } + + fn run(&self) -> bool { + // Parse the private key using operator_key's unencrypted module + let sk_pem_base64 = BASE64_STANDARD.encode(&self.sk_pem); + let private_key = match unencrypted::from_base64(sk_pem_base64.as_bytes()) { + Ok(key) => key, + Err(_) => return false, + }; + + // Use the plaintext as a password to test the actual client encryption logic + // If it's not valid UTF-8, base64 encode it to make it a valid password string + let password = String::from_utf8(self.plain_text.clone()) + .unwrap_or_else(|_| BASE64_STANDARD.encode(&self.plain_text)); + + // Test the actual client key encryption logic: encrypt the private key with the password + let encrypted_key = match EncryptedKey::encrypt(&private_key, &password) { + Ok(key) => key, + Err(_) => return false, + }; + + // Test the actual client key decryption logic: decrypt back to the original key + let decrypted_key = match encrypted_key.decrypt(&password) { + Ok(key) => key, + Err(_) => return false, + }; + + // Verify round-trip: decrypted key should match original private key + if private_key.p() != decrypted_key.p() || private_key.q() != decrypted_key.q() { + return false; + } + + true + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::Encryption) + } +} diff --git a/anchor/spec_tests/src/types/mod.rs b/anchor/spec_tests/src/types/mod.rs new file mode 100644 index 000000000..abeb3653d --- /dev/null +++ b/anchor/spec_tests/src/types/mod.rs @@ -0,0 +1,81 @@ +mod beacon_vote_encoding; +mod consensus_data_proposer; +mod encryption; +mod partial_sig_message; +mod partial_sig_message_encoding; +mod signed_ssv_msg; +mod signed_ssv_msg_encoding; +mod ssv_msg; +mod ssv_msg_encoding; +mod ssz; +mod validator_consensus_data; +mod validator_consensus_data_encoding; + +use std::fmt; + +// Re-export test implementations +pub use beacon_vote_encoding::*; +pub use consensus_data_proposer::*; +pub use encryption::*; +pub use partial_sig_message::*; +pub use partial_sig_message_encoding::*; +pub use signed_ssv_msg::*; +pub
use signed_ssv_msg_encoding::*; +pub use ssv_msg::*; +pub use ssv_msg_encoding::*; +pub use ssz::*; +pub use validator_consensus_data::*; +pub use validator_consensus_data_encoding::*; + +// Types-specific test type enumeration +#[derive(Eq, PartialEq, Hash, Debug)] +pub(crate) enum TypesSpecTestType { + BeaconVoteEncoding, + ConsensusDataProposer, + Encryption, + PartialSigMessage, + PartialSigMessageEncoding, + SignedSSVMsg, + SignedSSVMsgEncoding, + SSVMsg, + SSVMsgEncoding, + Ssz, + ValidatorConsensusData, + ValidatorConsensusDataEncoding, +} + +impl TypesSpecTestType { + // Determine if this is an encoding test + pub fn is_encoding(&self) -> bool { + matches!( + self, + TypesSpecTestType::BeaconVoteEncoding + | TypesSpecTestType::PartialSigMessageEncoding + | TypesSpecTestType::SignedSSVMsgEncoding + | TypesSpecTestType::SSVMsgEncoding + | TypesSpecTestType::ValidatorConsensusDataEncoding + ) + } +} + +// Contains specific identifier for the test file matching Go test naming +impl fmt::Display for TypesSpecTestType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + TypesSpecTestType::BeaconVoteEncoding => write!(f, "beaconvote"), + TypesSpecTestType::ConsensusDataProposer => write!(f, "consensusdataproposer"), + TypesSpecTestType::Encryption => write!(f, "encryption"), + TypesSpecTestType::PartialSigMessage => write!(f, "partialsigmessage"), + TypesSpecTestType::PartialSigMessageEncoding => write!(f, "partialsigmessage"), + TypesSpecTestType::SignedSSVMsg => write!(f, "signedssvmsg"), + TypesSpecTestType::SignedSSVMsgEncoding => write!(f, "signedssvmsg"), + TypesSpecTestType::SSVMsg => write!(f, "ssvmsg"), + TypesSpecTestType::SSVMsgEncoding => write!(f, "ssvmsg"), + TypesSpecTestType::Ssz => write!(f, "ssz"), + TypesSpecTestType::ValidatorConsensusData => write!(f, "validatorconsensusdata"), + TypesSpecTestType::ValidatorConsensusDataEncoding => { + write!(f, "validatorconsensusdata") + } + } + } +} diff --git a/anchor/spec_tests/src/types/partial_sig_message.rs b/anchor/spec_tests/src/types/partial_sig_message.rs new file mode 100644 index 000000000..1f2c7a71e --- /dev/null +++ b/anchor/spec_tests/src/types/partial_sig_message.rs @@ -0,0 +1,111 @@ +use serde::Deserialize; +use ssv_types::partial_sig::{PartialSignatureError, PartialSignatureMessages}; +use ssz::{Decode, Encode}; +use tree_hash::TreeHash; +use types::Hash256; + +use crate::{ + SpecTest, SpecTestType, + types::TypesSpecTestType, + utils::deserializers::{deserialize_base64_list_option, deserialize_hash256_list_option}, +}; + +// Partial signature message test +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase", deny_unknown_fields)] +pub struct PartialSigMsgSpecTest { + #[serde(rename = "Type")] + pub r#type: String, + pub documentation: String, + pub name: String, + pub messages: Vec<PartialSignatureMessages>, + #[serde(deserialize_with = "deserialize_base64_list_option", default)] + pub encoded_messages: Option<Vec<Vec<u8>>>, + #[serde(deserialize_with = "deserialize_hash256_list_option", default)] + pub expected_roots: Option<Vec<Hash256>>, + pub expected_error: String, +} + +impl SpecTest for PartialSigMsgSpecTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // No-op + } + + fn run(&self) -> bool { + let mut last_error: Option<PartialSignatureError> = None; + + for (i, msg) in self.messages.iter().enumerate() { + // Test validation + if let Err(err) = msg.validate() { + last_error = Some(err); + } + + // Test encoding/decoding if we have encoded messages + if let Some(ref encoded_messages) = self.encoded_messages { + //
Test encoding + let encoded = msg.as_ssz_bytes(); + if encoded != encoded_messages[i] { + return false; + } + + // Test decoding + let decoded = match PartialSignatureMessages::from_ssz_bytes(&encoded) { + Ok(decoded) => decoded, + Err(_) => return false, + }; + + // Verify decoded matches original + if decoded != *msg { + return false; + } + + // Verify tree hash roots match + if decoded.tree_hash_root() != msg.tree_hash_root() { + return false; + } + } + + // Test expected roots if provided + if let Some(ref expected_roots) = self.expected_roots { + if msg.tree_hash_root() != expected_roots[i] { + return false; + } + } + } + + if !self.expected_error.is_empty() { + // We have an expected error, so last_error should be Some and it should match + self.check_error_message(&last_error) + } else { + // If we do not have an expected error, then last_error should be None. + last_error.is_none() + } + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::PartialSigMessage) + } +} + +impl PartialSigMsgSpecTest { + /// Check if the error message matches the expected error from Go tests + fn check_error_message(&self, error: &Option<PartialSignatureError>) -> bool { + let error = match error { + Some(error) => error, + None => return false, + }; + + // Map Rust errors to Go error messages + let go_error = match error { + PartialSignatureError::NoMessages => "no PartialSignatureMessages messages", + PartialSignatureError::InconsistentSigners => "inconsistent signers", + PartialSignatureError::ZeroSigner => "message invalid: signer ID 0 not allowed", + }; + + self.expected_error == go_error + } +} diff --git a/anchor/spec_tests/src/types/partial_sig_message_encoding.rs b/anchor/spec_tests/src/types/partial_sig_message_encoding.rs new file mode 100644 index 000000000..2ff3fc6c7 --- /dev/null +++ b/anchor/spec_tests/src/types/partial_sig_message_encoding.rs @@ -0,0 +1,76 @@ +use serde::Deserialize; +use ssv_types::partial_sig::PartialSignatureMessages; +use ssz::{Decode, Encode}; +use tree_hash::TreeHash; + +use crate::{ + SpecTest, SpecTestType, + types::TypesSpecTestType, + utils::deserializers::{deserialize_base64, deserialize_bytes_to_hash256}, +}; + +// Encoding test for partial signature messages +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase", deny_unknown_fields)] +pub struct PartialSigMessageEncodingTest { + #[serde(rename = "Type")] + pub r#type: String, + pub documentation: String, + pub name: String, + #[serde(deserialize_with = "deserialize_base64")] + pub data: Vec<u8>, + #[serde(deserialize_with = "deserialize_bytes_to_hash256")] + pub expected_root: types::Hash256, +} + +impl SpecTest for PartialSigMessageEncodingTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // No-op + } + + fn run(&self) -> bool { + // Decode the PartialSignatureMessages from the provided data + let partial_sig_messages = match PartialSignatureMessages::from_ssz_bytes(&self.data) { + Ok(psm) => psm, + Err(e) => { + println!("Failed to decode PartialSignatureMessages: {e:?}"); + return false; + } + }; + + // Compute tree hash root and compare with expected + let computed_root = partial_sig_messages.tree_hash_root(); + if self.expected_root != computed_root { + println!( + "Tree hash root mismatch.
Expected: {:?}, Got: {:?}", + self.expected_root, computed_root + ); + return false; + } + + // Test roundtrip encoding + let re_encoded = partial_sig_messages.as_ssz_bytes(); + match PartialSignatureMessages::from_ssz_bytes(&re_encoded) { + Ok(re_decoded) => { + if re_decoded != partial_sig_messages { + println!("Roundtrip encoding failed"); + return false; + } + } + Err(e) => { + println!("Failed to decode re-encoded data: {e:?}"); + return false; + } + } + + true + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::PartialSigMessageEncoding) + } +} diff --git a/anchor/spec_tests/src/types/signed_ssv_msg.rs b/anchor/spec_tests/src/types/signed_ssv_msg.rs new file mode 100644 index 000000000..ca1f0d2e5 --- /dev/null +++ b/anchor/spec_tests/src/types/signed_ssv_msg.rs @@ -0,0 +1,174 @@ +use openssl::{hash::MessageDigest, pkey::PKey, sign::Verifier}; +use operator_key::public; +use serde::Deserialize; +use ssv_types::{ + OperatorId, + message::{SSVMessage, SignedSSVMessage, SignedSSVMessageError}, +}; +use ssz::Encode; + +use crate::{ + SpecTest, SpecTestType, + types::TypesSpecTestType, + utils::deserializers::{deserialize_base64_list, deserialize_hex_option}, +}; + +// Test message structure that directly handles null SSVMessage +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct TestSignedSSVMessage { + #[serde(deserialize_with = "deserialize_base64_list")] + pub signatures: Vec<Vec<u8>>, + #[serde(rename = "OperatorIDs")] + pub operator_ids: Vec<OperatorId>, + #[serde(rename = "SSVMessage")] + pub ssv_message: Option<SSVMessage>, + #[serde(deserialize_with = "deserialize_hex_option", default)] + pub full_data: Option<Vec<u8>>, +} + +// SignedSSVMessage validation tests +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct SignedSSVMessageTest { + #[serde(rename = "Type")] + pub test_type: String, + pub name: String, + pub documentation: String, + pub messages: Vec<TestSignedSSVMessage>, + pub expected_error: String, + #[serde(rename = "RSAPublicKey")] + pub rsa_public_key: Option<Vec<String>>, +} + +impl SpecTest for SignedSSVMessageTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // No-op + } + + fn run(&self) -> bool { + for test_msg in &self.messages { + if let Err(error) = self.validate_message(test_msg) { + return self.check_expected_error(&error); + } + } + true + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::SignedSSVMsg) + } +} + +impl SignedSSVMessageTest { + fn validate_message(&self, test_msg: &TestSignedSSVMessage) -> Result<(), String> { + // Handle null SSVMessage case + let ssv_message = test_msg.ssv_message.as_ref().ok_or("nil SSVMessage")?; + + // Convert and validate signatures + let signatures = self.prepare_signatures(test_msg)?; + + // Create SignedSSVMessage + let full_data = test_msg.full_data.clone().unwrap_or_default(); + let signed_msg = SignedSSVMessage::new( + signatures, + test_msg.operator_ids.clone(), + ssv_message.clone(), + full_data, + ) + .map_err(|e| self.error_to_string(&e))?; + + // Validate the message by calling our internal validate function + signed_msg + .validate() + .map_err(|_| "validation failed".to_string())?; + + // Verify RSA signatures if provided + self.verify_rsa_signatures(&signed_msg, ssv_message) + } + + fn prepare_signatures( + &self, + test_msg: &TestSignedSSVMessage, + ) -> Result<Vec<[u8; 256]>, String> { + let mut signatures = Vec::new(); + + for sig_bytes in &test_msg.signatures { + if sig_bytes.is_empty() { + return Err("empty signature".to_string()); + } + + // Pad or
truncate signature to 256 bytes for RSA signature format + let mut sig_array = [0u8; 256]; + if sig_bytes.len() <= 256 { + sig_array[..sig_bytes.len()].copy_from_slice(sig_bytes); + } else { + sig_array.copy_from_slice(&sig_bytes[..256]); + } + signatures.push(sig_array); + } + + Ok(signatures) + } + + fn verify_rsa_signatures( + &self, + signed_msg: &SignedSSVMessage, + ssv_message: &SSVMessage, + ) -> Result<(), String> { + let Some(ref pk_strings) = self.rsa_public_key else { + return Ok(()); + }; + + let encoded_ssv_msg = ssv_message.as_ssz_bytes(); + + for (i, pk_string) in pk_strings.iter().enumerate() { + let rsa_key = public::from_base64(pk_string.as_bytes()) + .map_err(|_| "failed to parse RSA public key")?; + + let pkey = PKey::from_rsa(rsa_key).map_err(|_| "failed to convert RSA key to PKey")?; + + let mut verifier = Verifier::new(MessageDigest::sha256(), &pkey) + .map_err(|_| "failed to create verifier")?; + + verifier + .update(&encoded_ssv_msg) + .map_err(|_| "failed to update verifier")?; + + let signature: &[u8] = &signed_msg.signatures()[i]; + verifier + .verify(signature) + .map_err(|_| "signature verification failed")?; + } + + Ok(()) + } + + fn error_to_string(&self, error: &SignedSSVMessageError) -> String { + match error { + SignedSSVMessageError::NoSigners => "no signers".to_string(), + SignedSSVMessageError::ZeroSigner => "signer ID 0 not allowed".to_string(), + SignedSSVMessageError::DuplicatedSigner => "non unique signer".to_string(), + SignedSSVMessageError::SignersAndSignaturesWithDifferentLength => { + "number of signatures is different than number of signers".to_string() + } + SignedSSVMessageError::NoSignatures => "no signatures".to_string(), + SignedSSVMessageError::TooManySignatures { .. } => "too many signatures".to_string(), + SignedSSVMessageError::WrongRSASignatureSize { .. } => { + "wrong RSA signature size".to_string() + } + SignedSSVMessageError::TooManyOperatorIDs { .. } => "too many operator IDs".to_string(), + SignedSSVMessageError::FullDataTooLong { .. 
} => "full data too long".to_string(), + SignedSSVMessageError::SignersNotSorted => "signers not sorted".to_string(), + SignedSSVMessageError::SSVMessageError(_) => "invalid SSV message".to_string(), + } + } + + fn check_expected_error(&self, error_msg: &str) -> bool { + !self.expected_error.is_empty() && self.expected_error == error_msg + } +} diff --git a/anchor/spec_tests/src/types/signed_ssv_msg_encoding.rs b/anchor/spec_tests/src/types/signed_ssv_msg_encoding.rs new file mode 100644 index 000000000..eb1f7a713 --- /dev/null +++ b/anchor/spec_tests/src/types/signed_ssv_msg_encoding.rs @@ -0,0 +1,47 @@ +use serde::Deserialize; +use ssv_types::message::SignedSSVMessage; +use ssz::{Decode, Encode}; + +use crate::{ + SpecTest, SpecTestType, types::TypesSpecTestType, utils::deserializers::deserialize_base64, +}; + +// Encoding test structure +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase", deny_unknown_fields)] +pub struct SignedSSVMessageEncodingTest { + #[serde(rename = "Type")] + pub r#type: String, + pub documentation: String, + pub name: String, + #[serde(deserialize_with = "deserialize_base64")] + pub data: Vec, +} + +impl SpecTest for SignedSSVMessageEncodingTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // Setup any required test state + } + + fn run(&self) -> bool { + let signed_message = match SignedSSVMessage::from_ssz_bytes(&self.data) { + Ok(msg) => msg, + Err(_) => return false, + }; + + // Test roundtrip encoding + let re_encoded = signed_message.as_ssz_bytes(); + match SignedSSVMessage::from_ssz_bytes(&re_encoded) { + Ok(re_decoded) => re_decoded == signed_message, + Err(_) => false, + } + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::SignedSSVMsgEncoding) + } +} diff --git a/anchor/spec_tests/src/types/ssv_msg.rs b/anchor/spec_tests/src/types/ssv_msg.rs new file mode 100644 index 000000000..eaea12fd1 --- /dev/null +++ b/anchor/spec_tests/src/types/ssv_msg.rs @@ -0,0 +1,66 @@ +use serde::Deserialize; +use ssv_types::{ + ValidatorIndex, + msgid::{DutyExecutor, MessageId}, +}; + +use crate::{ + SpecTest, SpecTestType, + types::TypesSpecTestType, + utils::{ + deserializers::{deserialize_hex_message_id_list, deserialize_string_to_validator_index}, + test_keys::TESTING_VALIDATOR_PUBKEY, + }, +}; + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase", deny_unknown_fields)] +pub struct SSVMessageTest { + #[serde(rename = "Type")] + pub r#type: String, + pub documentation: String, + pub name: String, + #[serde( + rename = "MessageIDs", + deserialize_with = "deserialize_hex_message_id_list" + )] + pub message_ids: Vec, + #[serde(deserialize_with = "deserialize_string_to_validator_index")] + pub validator_index: ValidatorIndex, + pub belongs_to_validator: bool, +} + +impl SpecTest for SSVMessageTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // No-op + } + + fn run(&self) -> bool { + // Setup the 4 share set + let mut result = true; + for msg_id in &self.message_ids { + // Some of message ids have an invalid role + if let Some(duty_executor) = msg_id.duty_executor() { + let validator_pubkey = match duty_executor { + DutyExecutor::Validator(key) => key, + _ => return false, + }; + + if self.belongs_to_validator { + result &= validator_pubkey == *TESTING_VALIDATOR_PUBKEY; + } else { + result &= validator_pubkey != *TESTING_VALIDATOR_PUBKEY; + } + } + } + result + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::SSVMsg) + } +} diff 
--git a/anchor/spec_tests/src/types/ssv_msg_encoding.rs b/anchor/spec_tests/src/types/ssv_msg_encoding.rs new file mode 100644 index 000000000..7015d0c00 --- /dev/null +++ b/anchor/spec_tests/src/types/ssv_msg_encoding.rs @@ -0,0 +1,59 @@ +use serde::Deserialize; +use ssv_types::message::SSVMessage; +use ssz::{Decode, Encode}; +use tree_hash::TreeHash; +use types::Hash256; + +use crate::{ + SpecTest, SpecTestType, + types::TypesSpecTestType, + utils::deserializers::{deserialize_base64, deserialize_bytes_to_hash256}, +}; + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase", deny_unknown_fields)] +pub struct SSVMessageEncodingTest { + #[serde(rename = "Type")] + pub r#type: String, + pub documentation: String, + pub name: String, + #[serde(deserialize_with = "deserialize_base64")] + pub data: Vec<u8>, + #[serde(deserialize_with = "deserialize_bytes_to_hash256")] + pub expected_root: Hash256, +} + +impl SpecTest for SSVMessageEncodingTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // No-op + } + + fn run(&self) -> bool { + // Decode the SSVMessage from the provided data + let ssv_message = match SSVMessage::from_ssz_bytes(&self.data) { + Ok(bv) => bv, + Err(_) => return false, + }; + + // Compute tree hash root and compare with expected + let computed_root = ssv_message.tree_hash_root(); + if self.expected_root != computed_root { + return false; + } + + // Test roundtrip encoding + let re_encoded = ssv_message.as_ssz_bytes(); + match SSVMessage::from_ssz_bytes(&re_encoded) { + Ok(re_decoded) => re_decoded == ssv_message, + Err(_) => false, + } + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::SSVMsgEncoding) + } +} diff --git a/anchor/spec_tests/src/types/ssz.rs b/anchor/spec_tests/src/types/ssz.rs new file mode 100644 index 000000000..dc10fd5ac --- /dev/null +++ b/anchor/spec_tests/src/types/ssz.rs @@ -0,0 +1,78 @@ +use serde::Deserialize; +use ssv_types::consensus::ValidatorConsensusData; +use ssz::Decode; +use types::{BeaconBlock, ExecPayload, ForkName, Hash256, MainnetEthSpec}; + +use crate::{ + SpecTest, SpecTestType, + types::TypesSpecTestType, + utils::deserializers::{deserialize_base64, deserialize_hex_hash256}, +}; + +/// SSZ test for validating SSZ encoding and decoding operations +/// +/// This test validates SSZ marshaling and hash tree root calculations, +/// particularly for Capella withdrawals and other consensus objects. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase", deny_unknown_fields)] +pub struct SSZSpecTest { + /// The name of the test case + pub name: String, + /// The type of test being performed (e.g.
"SSZ: validation of SSZ encoding and decoding") + #[serde(rename = "Type")] + pub test_type: Option, + /// Documentation describing what the test does + pub documentation: Option, + /// Base64 encoded SSZ data to decode and validate + #[serde(deserialize_with = "deserialize_base64")] + pub data: Vec, + /// The expected hash tree root as hex string + #[serde(deserialize_with = "deserialize_hex_hash256")] + pub expected_root: Hash256, + /// Expected error message (empty string if no error expected) + pub expected_error: String, +} + +impl SpecTest for SSZSpecTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // No setup required + } + + fn run(&self) -> bool { + let cd = match ValidatorConsensusData::from_ssz_bytes(&self.data) { + Ok(cd) => cd, + Err(_) => return !self.expected_error.is_empty(), + }; + + // Convert DataVersion to ForkName for deserialization + let fork = ForkName::from(cd.version); + + // Try to deserialize as full BeaconBlock first + let withdrawals_root = + match BeaconBlock::::from_ssz_bytes_for_fork(&cd.data_ssz, fork) { + Ok(full_block) => match fork { + ForkName::Capella | ForkName::Deneb | ForkName::Electra => { + match full_block.body().execution_payload() { + Ok(payload) => match payload.withdrawals_root() { + Ok(root) => root, + Err(_) => return false, + }, + Err(_) => return false, + } + } + _ => return false, + }, + Err(_) => return false, + }; + + withdrawals_root == self.expected_root + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::Ssz) + } +} diff --git a/anchor/spec_tests/src/types/validator_consensus_data.rs b/anchor/spec_tests/src/types/validator_consensus_data.rs new file mode 100644 index 000000000..625a18356 --- /dev/null +++ b/anchor/spec_tests/src/types/validator_consensus_data.rs @@ -0,0 +1,128 @@ +use serde::{Deserialize, Deserializer, de::Error}; +use ssv_types::{ + ValidatorIndex, + consensus::{ + BeaconRole, DataVersion, ValidatorConsensusData as SSVValidatorConsensusData, + ValidatorDuty as SSVValidatorDuty, + }, + message::ValidatorConsensusDataLen, +}; +use types::{CommitteeIndex, PublicKeyBytes, Slot, VariableList, typenum::U13}; + +use crate::{ + SpecTest, SpecTestType, + types::TypesSpecTestType, + utils::deserializers::{ + deserialize_base64, deserialize_beacon_role, deserialize_data_version, + deserialize_hex_public_key, deserialize_string_to_committee_index, + deserialize_string_to_slot, deserialize_string_to_u64, + deserialize_string_to_validator_index, deserialize_sync_committee_indices, + }, +}; + +/// Deserialize VariableList from base64 for DataSSZ +fn deserialize_data_ssz<'de, D>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let bytes = deserialize_base64(deserializer)?; + VariableList::new(bytes).map_err(|e| Error::custom(format!("DataSSZ too large: {e:?}"))) +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ValidatorDuty { + #[serde(rename = "Type", deserialize_with = "deserialize_beacon_role")] + pub r#type: BeaconRole, + #[serde(rename = "PubKey", deserialize_with = "deserialize_hex_public_key")] + pub pub_key: PublicKeyBytes, + #[serde(deserialize_with = "deserialize_string_to_slot")] + pub slot: Slot, + #[serde(deserialize_with = "deserialize_string_to_validator_index")] + pub validator_index: ValidatorIndex, + #[serde(deserialize_with = "deserialize_string_to_committee_index")] + pub committee_index: CommitteeIndex, + #[serde(deserialize_with = "deserialize_string_to_u64")] + pub 
committee_length: u64, + #[serde(deserialize_with = "deserialize_string_to_u64")] + pub committees_at_slot: u64, + #[serde(deserialize_with = "deserialize_string_to_u64")] + pub validator_committee_index: u64, + #[serde(deserialize_with = "deserialize_sync_committee_indices")] + pub validator_sync_committee_indices: VariableList<u64, U13>, +} + +impl ValidatorDuty { + /// Convert to SSV ValidatorDuty type + pub fn to_ssv_duty(self) -> SSVValidatorDuty { + SSVValidatorDuty { + r#type: self.r#type, + pub_key: self.pub_key, + slot: self.slot, + validator_index: self.validator_index, + committee_index: self.committee_index, + committee_length: self.committee_length, + committees_at_slot: self.committees_at_slot, + validator_committee_index: self.validator_committee_index, + validator_sync_committee_indices: self.validator_sync_committee_indices, + } + } +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ValidatorConsensusData { + pub duty: ValidatorDuty, + #[serde(deserialize_with = "deserialize_data_version")] + pub version: DataVersion, + #[serde(rename = "DataSSZ", deserialize_with = "deserialize_data_ssz")] + pub data_ssz: VariableList<u8, ValidatorConsensusDataLen>, +} + +impl ValidatorConsensusData { + /// Convert to SSV ValidatorConsensusData type + pub fn to_ssv_consensus_data(self) -> SSVValidatorConsensusData { + SSVValidatorConsensusData { + duty: self.duty.to_ssv_duty(), + version: self.version, + data_ssz: self.data_ssz, + } + } +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct ValidatorConsensusDataTest { + #[serde(rename = "Type")] + pub r#type: Option<String>, + pub documentation: Option<String>, + pub name: String, + pub consensus_data: ValidatorConsensusData, + pub expected_error: String, +} + +impl SpecTest for ValidatorConsensusDataTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // No setup needed + } + + fn run(&self) -> bool { + // Convert to SSV types + let _consensus_data = self.consensus_data.clone().to_ssv_consensus_data(); + + // todo!() need block validation logic + // https://github.com/sigp/anchor/issues/258 + + true + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::ValidatorConsensusData) + } +} diff --git a/anchor/spec_tests/src/types/validator_consensus_data_encoding.rs b/anchor/spec_tests/src/types/validator_consensus_data_encoding.rs new file mode 100644 index 000000000..6b6e96784 --- /dev/null +++ b/anchor/spec_tests/src/types/validator_consensus_data_encoding.rs @@ -0,0 +1,60 @@ +use serde::Deserialize; +use ssv_types::consensus::ValidatorConsensusData; +use ssz::{Decode, Encode}; +use tree_hash::TreeHash; +use types::Hash256; + +use crate::{ + SpecTest, SpecTestType, + types::TypesSpecTestType, + utils::deserializers::{deserialize_base64, deserialize_bytes_to_hash256}, +}; + +// Validator consensus data encoding test +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase", deny_unknown_fields)] +pub struct ValidatorConsensusDataEncodingTest { + #[serde(rename = "Type")] + pub r#type: String, + pub documentation: String, + pub name: String, + #[serde(deserialize_with = "deserialize_base64")] + pub data: Vec<u8>, + #[serde(deserialize_with = "deserialize_bytes_to_hash256")] + pub expected_root: Hash256, +} + +impl SpecTest for ValidatorConsensusDataEncodingTest { + fn name(&self) -> &str { + &self.name + } + + fn setup(&mut self) { + // No-op + } + + fn run(&self) -> bool { + // Decode the ValidatorConsensusData from SSZ bytes + let consensus_data = match
ValidatorConsensusData::from_ssz_bytes(&self.data) { + Ok(data) => data, + Err(_) => return false, + }; + + // Compute tree hash root and compare with expected + let computed_root = consensus_data.tree_hash_root(); + if self.expected_root != computed_root { + return false; + } + + // Test roundtrip encoding + let re_encoded = consensus_data.as_ssz_bytes(); + match ValidatorConsensusData::from_ssz_bytes(&re_encoded) { + Ok(re_decoded) => re_decoded == consensus_data, + Err(_) => false, + } + } + + fn test_type() -> SpecTestType { + SpecTestType::Types(TypesSpecTestType::ValidatorConsensusDataEncoding) + } +} diff --git a/anchor/spec_tests/src/utils/deserializers.rs b/anchor/spec_tests/src/utils/deserializers.rs new file mode 100644 index 000000000..8ed858472 --- /dev/null +++ b/anchor/spec_tests/src/utils/deserializers.rs @@ -0,0 +1,587 @@ +//! Unified serde deserializers for SSV spec tests +//! +//! This module provides clean, reusable deserializers for common patterns in SSV spec tests. +//! All deserializers are designed to be simple, idiomatic, and maintainable. + +use base64::{Engine as _, engine::general_purpose::STANDARD}; +use serde::{Deserialize, Deserializer, de::Error}; +use ssv_types::{ + ValidatorIndex, + consensus::{ + BEACON_ROLE_AGGREGATOR, BEACON_ROLE_ATTESTER, BEACON_ROLE_PROPOSER, + BEACON_ROLE_SYNC_COMMITTEE, BEACON_ROLE_SYNC_COMMITTEE_CONTRIBUTION, + BEACON_ROLE_VALIDATOR_REGISTRATION, BEACON_ROLE_VOLUNTARY_EXIT, BeaconRole, DataVersion, + }, + msgid::MessageId, +}; +use types::{ + CommitteeIndex, ForkName, Hash256, PublicKeyBytes, Signature, Slot, VariableList, typenum::U13, +}; + +// ============================================================================= +// Base64 Deserializers +// ============================================================================= + +/// Deserialize a base64 string to bytes +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_base64")] +/// data: Vec<u8>, +/// ``` +pub fn deserialize_base64<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error> +where + D: Deserializer<'de>, +{ + let base64_string = String::deserialize(deserializer)?; + STANDARD + .decode(&base64_string) + .map_err(|e| Error::custom(format!("Failed to decode base64: {e}"))) +} + +/// Deserialize an optional base64 string to optional bytes +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_base64_option")] +/// data: Option<Vec<u8>>, +/// ``` +pub fn deserialize_base64_option<'de, D>(deserializer: D) -> Result<Option<Vec<u8>>, D::Error> +where + D: Deserializer<'de>, +{ + let opt: Option<String> = Option::deserialize(deserializer)?; + match opt { + Some(s) => STANDARD + .decode(&s) + .map(Some) + .map_err(|e| Error::custom(format!("Failed to decode base64: {e}"))), + None => Ok(None), + } +} + +/// Deserialize a vector of base64 strings to vector of byte arrays +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_base64_list")] +/// data: Vec<Vec<u8>>, +/// ``` +pub fn deserialize_base64_list<'de, D>(deserializer: D) -> Result<Vec<Vec<u8>>, D::Error> +where + D: Deserializer<'de>, +{ + let strings: Vec<String> = Vec::deserialize(deserializer)?; + let mut result = Vec::with_capacity(strings.len()); + for s in strings { + let bytes = STANDARD + .decode(&s) + .map_err(|e| Error::custom(format!("Failed to decode base64: {e}")))?; + result.push(bytes); + } + Ok(result) +} + +/// Deserialize an optional vector of base64 strings to optional vector of byte arrays +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_base64_list_option")] +///
data: Option<Vec<Vec<u8>>>, +/// ``` +pub fn deserialize_base64_list_option<'de, D>( + deserializer: D, +) -> Result<Option<Vec<Vec<u8>>>, D::Error> +where + D: Deserializer<'de>, +{ + let opt: Option<Vec<String>> = Option::deserialize(deserializer)?; + match opt { + None => Ok(None), + Some(strings) => { + let mut result = Vec::with_capacity(strings.len()); + for s in strings { + let bytes = STANDARD + .decode(&s) + .map_err(|e| Error::custom(format!("Failed to decode base64: {e}")))?; + result.push(bytes); + } + Ok(Some(result)) + } + } +} + +// ============================================================================= +// Hex String Deserializers +// ============================================================================= + +/// Deserialize a hex string (with or without 0x prefix) to bytes +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_hex")] +/// data: Vec<u8>, +/// ``` +pub fn deserialize_hex<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error> +where + D: Deserializer<'de>, +{ + let hex_str = String::deserialize(deserializer)?; + let hex_str = hex_str.strip_prefix("0x").unwrap_or(&hex_str); + hex::decode(hex_str).map_err(|e| Error::custom(format!("Failed to decode hex: {e}"))) +} + +/// Deserialize an optional hex string to optional bytes +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_hex_option")] +/// data: Option<Vec<u8>>, +/// ``` +pub fn deserialize_hex_option<'de, D>(deserializer: D) -> Result<Option<Vec<u8>>, D::Error> +where + D: Deserializer<'de>, +{ + let opt: Option<String> = Option::deserialize(deserializer)?; + match opt { + None => Ok(None), + Some(hex_str) => { + let hex_str = hex_str.strip_prefix("0x").unwrap_or(&hex_str); + hex::decode(hex_str) + .map(Some) + .map_err(|e| Error::custom(format!("Failed to decode hex: {e}"))) + } + } +} + +/// Deserialize a hex string to Hash256 +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_hex_hash256")] +/// hash: Hash256, +/// ``` +pub fn deserialize_hex_hash256<'de, D>(deserializer: D) -> Result<Hash256, D::Error> +where + D: Deserializer<'de>, +{ + let hex_str = String::deserialize(deserializer)?; + let hex_str = hex_str.strip_prefix("0x").unwrap_or(&hex_str); + let bytes = + hex::decode(hex_str).map_err(|e| Error::custom(format!("Failed to decode hex: {e}")))?; + + if bytes.len() != 32 { + return Err(Error::custom(format!( + "Expected 32 bytes for Hash256, got {}", + bytes.len() + ))); + } + + Ok(Hash256::from_slice(&bytes)) +} + +/// Deserialize a Signature from hex string +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_hex_signature")] +/// signature: Signature, +/// ``` +pub fn deserialize_hex_signature<'de, D>(deserializer: D) -> Result<Signature, D::Error> +where + D: Deserializer<'de>, +{ + let hex_str = String::deserialize(deserializer)?; + let hex_str = hex_str.strip_prefix("0x").unwrap_or(&hex_str); + let bytes = + hex::decode(hex_str).map_err(|e| Error::custom(format!("Failed to decode hex: {e}")))?; + + // Handle both 72 bytes (spec tests) and 96 bytes (full BLS) signatures + if bytes.len() != 72 && bytes.len() != 96 { + return Err(Error::custom(format!( + "Expected 72 or 96 bytes for signature, got {}", + bytes.len() + ))); + } + + // If it's 72 bytes, pad it to 96 bytes (this might not be correct, but let's see) + let bytes = if bytes.len() == 72 { + let mut padded = vec![0u8; 96]; + padded[..72].copy_from_slice(&bytes); + padded + } else { + bytes + }; + + Signature::deserialize(&bytes) + .map_err(|e| Error::custom(format!("Failed to parse signature: {e:?}"))) +} + +/// Deserialize an optional Signature from
hex string +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_hex_signature_option")] +/// signature: Option<Signature>, +/// ``` +pub fn deserialize_hex_signature_option<'de, D>( + deserializer: D, +) -> Result<Option<Signature>, D::Error> +where + D: Deserializer<'de>, +{ + let opt: Option<String> = Option::deserialize(deserializer)?; + match opt { + None => Ok(None), + Some(hex_str) => { + let hex_str = hex_str.strip_prefix("0x").unwrap_or(&hex_str); + let bytes = hex::decode(hex_str) + .map_err(|e| Error::custom(format!("Failed to decode hex: {e}")))?; + + if bytes.len() != 96 { + return Err(Error::custom(format!( + "Expected 96 bytes for signature, got {}", + bytes.len() + ))); + } + + let sig = Signature::deserialize(&bytes) + .map_err(|e| Error::custom(format!("Failed to parse signature: {e:?}")))?; + Ok(Some(sig)) + } + } +} + +/// Deserialize a PublicKeyBytes from hex string +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_hex_public_key")] +/// pubkey: PublicKeyBytes, +/// ``` +pub fn deserialize_hex_public_key<'de, D>(deserializer: D) -> Result<PublicKeyBytes, D::Error> +where + D: Deserializer<'de>, +{ + let hex_str = String::deserialize(deserializer)?; + let hex_str = hex_str.strip_prefix("0x").unwrap_or(&hex_str); + let hex_with_prefix = format!("0x{hex_str}"); + hex_with_prefix + .parse() + .map_err(|e| Error::custom(format!("Invalid public key: {e}"))) +} + +// ============================================================================= +// Hash256 Deserializers +// ============================================================================= + +/// Convert byte array to Hash256 +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_bytes_to_hash256")] +/// hash: Hash256, +/// ``` +pub fn deserialize_bytes_to_hash256<'de, D>(deserializer: D) -> Result<Hash256, D::Error> +where + D: Deserializer<'de>, +{ + let bytes = <Vec<u8>>::deserialize(deserializer)?; + if bytes.len() != 32 { + return Err(Error::custom(format!( + "Expected 32 bytes for Hash256, got {}", + bytes.len() + ))); + } + Ok(Hash256::from_slice(&bytes)) +} + +/// Deserialize optional vector of Hash256 from byte arrays +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_hash256_list_option")] +/// hashes: Option<Vec<Hash256>>, +/// ``` +pub fn deserialize_hash256_list_option<'de, D>( + deserializer: D, +) -> Result<Option<Vec<Hash256>>, D::Error> +where + D: Deserializer<'de>, +{ + let opt: Option<Vec<Vec<u8>>> = Option::deserialize(deserializer)?; + match opt { + None => Ok(None), + Some(byte_arrays) => { + let mut result = Vec::with_capacity(byte_arrays.len()); + for bytes in byte_arrays { + if bytes.len() != 32 { + return Err(Error::custom(format!( + "Expected 32 bytes for Hash256, got {}", + bytes.len() + ))); + } + result.push(Hash256::from_slice(&bytes)); + } + Ok(Some(result)) + } + } +} + +// ============================================================================= +// String to Number Converters +// ============================================================================= + +/// Parse string as u64 +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_string_to_u64")] +/// value: u64, +/// ``` +pub fn deserialize_string_to_u64<'de, D>(deserializer: D) -> Result<u64, D::Error> +where + D: Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + s.parse() + .map_err(|e| Error::custom(format!("Invalid u64: {e}"))) +} + +/// Parse string as usize +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_string_to_usize")] +/// value: usize, +/// ``` +pub fn
deserialize_string_to_usize<'de, D>(deserializer: D) -> Result<usize, D::Error> +where + D: Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + s.parse() + .map_err(|e| Error::custom(format!("Invalid usize: {e}"))) +} + +/// Parse string as Slot +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_string_to_slot")] +/// slot: Slot, +/// ``` +pub fn deserialize_string_to_slot<'de, D>(deserializer: D) -> Result<Slot, D::Error> +where + D: Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + let slot_num: u64 = s + .parse() + .map_err(|e| Error::custom(format!("Invalid slot: {e}")))?; + Ok(Slot::new(slot_num)) +} + +/// Parse string as ValidatorIndex +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_string_to_validator_index")] +/// index: ValidatorIndex, +/// ``` +pub fn deserialize_string_to_validator_index<'de, D>( + deserializer: D, +) -> Result<ValidatorIndex, D::Error> +where + D: Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + let index: usize = s + .parse() + .map_err(|e| Error::custom(format!("Invalid validator index: {e}")))?; + Ok(ValidatorIndex(index)) +} + +/// Parse string as CommitteeIndex +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_string_to_committee_index")] +/// index: CommitteeIndex, +/// ``` +pub fn deserialize_string_to_committee_index<'de, D>( + deserializer: D, +) -> Result<CommitteeIndex, D::Error> +where + D: Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + let index: u64 = s + .parse() + .map_err(|e| Error::custom(format!("Invalid committee index: {e}")))?; + Ok(CommitteeIndex::from(index)) +} + +// ============================================================================= +// Enum Deserializers +// ============================================================================= + +/// Deserialize BeaconRole from numeric value +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_beacon_role")] +/// role: BeaconRole, +/// ``` +pub fn deserialize_beacon_role<'de, D>(deserializer: D) -> Result<BeaconRole, D::Error> +where + D: Deserializer<'de>, +{ + let num = u64::deserialize(deserializer)?; + match num { + 0 => Ok(BEACON_ROLE_ATTESTER), + 1 => Ok(BEACON_ROLE_AGGREGATOR), + 2 => Ok(BEACON_ROLE_PROPOSER), + 3 => Ok(BEACON_ROLE_SYNC_COMMITTEE), + 4 => Ok(BEACON_ROLE_SYNC_COMMITTEE_CONTRIBUTION), + 5 => Ok(BEACON_ROLE_VALIDATOR_REGISTRATION), + 6 => Ok(BEACON_ROLE_VOLUNTARY_EXIT), + _ => Err(Error::custom(format!("Unknown beacon role: {num}"))), + } +} + +/// Deserialize DataVersion from fork name string +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_data_version")] +/// version: DataVersion, +/// ``` +pub fn deserialize_data_version<'de, D>(deserializer: D) -> Result<DataVersion, D::Error> +where + D: Deserializer<'de>, +{ + let version_str = String::deserialize(deserializer)?; + let fork_name = match version_str.as_str() { + "phase0" => ForkName::Base, + "altair" => ForkName::Altair, + "bellatrix" => ForkName::Bellatrix, + "capella" => ForkName::Capella, + "deneb" => ForkName::Deneb, + "electra" => ForkName::Electra, + "fulu" => ForkName::Fulu, + _ => return Err(Error::custom(format!("Invalid fork name: {version_str}"))), + }; + Ok(DataVersion::from(fork_name)) +} + +// ============================================================================= +// Utility Deserializers +// ============================================================================= + +/// Deserialize sync committee indices from JSON array +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with =
"deserialize_sync_committee_indices")] +/// indices: VariableList, +/// ``` +pub fn deserialize_sync_committee_indices<'de, D>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let opt: Option> = Option::deserialize(deserializer)?; + let indices = opt.unwrap_or_default(); + VariableList::new(indices) + .map_err(|e| Error::custom(format!("Too many sync committee indices: {e:?}"))) +} + +/// Deserialize MessageId from hex string +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_hex_message_id")] +/// message_id: MessageId, +/// ``` +pub fn deserialize_hex_message_id<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let hex_str = String::deserialize(deserializer)?; + let hex_str = hex_str.strip_prefix("0x").unwrap_or(&hex_str); + let bytes = + hex::decode(hex_str).map_err(|e| Error::custom(format!("Failed to decode hex: {e}")))?; + + if bytes.len() != 56 { + return Err(Error::custom(format!( + "Expected 56 bytes for MessageId, got {}", + bytes.len() + ))); + } + + let array: [u8; 56] = bytes + .try_into() + .map_err(|_| Error::custom("Failed to convert to array"))?; + Ok(MessageId::from(array)) +} + +/// Deserialize vector of MessageIds from hex strings +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_hex_message_id_list")] +/// message_ids: Vec, +/// ``` +pub fn deserialize_hex_message_id_list<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let hex_strings: Vec = Vec::deserialize(deserializer)?; + let mut result = Vec::with_capacity(hex_strings.len()); + + for hex_str in hex_strings { + let hex_str = hex_str.strip_prefix("0x").unwrap_or(&hex_str); + let bytes = hex::decode(hex_str) + .map_err(|e| Error::custom(format!("Failed to decode hex: {e}")))?; + + if bytes.len() != 56 { + return Err(Error::custom(format!( + "Expected 56 bytes for MessageId, got {}", + bytes.len() + ))); + } + + let array: [u8; 56] = bytes + .try_into() + .map_err(|_| Error::custom("Failed to convert to array"))?; + result.push(MessageId::from(array)); + } + + Ok(result) +} + +/// Deserialize sync committee indices from JSON array (optional) +/// +/// # Usage +/// ```ignore +/// #[serde(deserialize_with = "deserialize_sync_committee_indices_option")] +/// indices: Option>, +/// ``` +pub fn deserialize_sync_committee_indices_option<'de, D>( + deserializer: D, +) -> Result>, D::Error> +where + D: Deserializer<'de>, +{ + let opt: Option> = Option::deserialize(deserializer)?; + match opt { + None => Ok(None), + Some(indices) => { + let var_list = VariableList::new(indices) + .map_err(|e| Error::custom(format!("Too many sync committee indices: {e:?}")))?; + Ok(Some(var_list)) + } + } +} diff --git a/anchor/spec_tests/src/utils/mod.rs b/anchor/spec_tests/src/utils/mod.rs new file mode 100644 index 000000000..60d6c2b99 --- /dev/null +++ b/anchor/spec_tests/src/utils/mod.rs @@ -0,0 +1,2 @@ +pub mod deserializers; +pub mod test_keys; diff --git a/anchor/spec_tests/src/utils/test_keys.rs b/anchor/spec_tests/src/utils/test_keys.rs new file mode 100644 index 000000000..3f120c5eb --- /dev/null +++ b/anchor/spec_tests/src/utils/test_keys.rs @@ -0,0 +1,102 @@ +use std::{collections::HashMap, str::FromStr, sync::LazyLock}; + +use hex::FromHex; +use openssl::{pkey::Private, rsa::Rsa}; +use ssv_types::OperatorId; +use types::{PublicKeyBytes, SecretKey}; + +// Reimplementation of required testing infrastruture +// https://github.com/ssvlabs/ssv-spec/blob/main/types/testingutils/keys.go#L76 + +pub 
static TESTING_VALIDATOR_PUBKEY: LazyLock = LazyLock::new(|| { + PublicKeyBytes::from_str("0x8e80066551a81b318258709edaf7dd1f63cd686a0e4db8b29bbb7acfe65608677af5a527d9448ee47835485e02b50bc0").expect("Failed to create public key") +}); + +pub static TESTING_WRONG_VALIDATOR_PUBKEY: LazyLock = LazyLock::new(|| { + PublicKeyBytes::from_str("0x948fb44582ce25336fdb17122eac64fe5a1afc39174ce92d6013becac116766dc5a778c880dd47de7dfff6a0f86ba42b").expect("Failed to create public key") +}); + +pub static VALIDATOR_SECRET_KEY: LazyLock = LazyLock::new(|| { + secret_key_from_hex("3515c7d08e5affd729e9579f7588d30f2342ee6f6a9334acf006345262162c6f") +}); + +pub const FOUR_OPERATOR_ONE_PRIVATE: &str = "308204a40201000282010100c8ccf66fe299248cc1cd1670b696f22effe0ed8e0f14bf054dbe1c178a97b045f1261bb49462614f4618602c5809abcc65fe743500cee1009d7b796ca046016b0d1e7ed917362d8b5ffc708a3ddc37ce7a7761a8d2161fd81115d89137a337524abe5c862fda4efd7797c68c61c8d6d3c972033940533dd782f4627552a6c7186300f1137f73e6a6ec216a8dd89ffc0bf1147a3808c2111e0aed173fe6f9ca8ef061e7b95241fed814e7567e094770c7177f0539f9ebda12645a8a8a1acd2072d352a8d910c1a45fee13beec75fd42eb94d026fa0acb61742506efd0e4134a35b408fc34852daf3b304d53ebc01045f8c2a063cf75fe3c3bbd2dbc00e785a6510203010001028201000a16219ae52b0426fde52b676604970dbd54b31a1bafd318951b23961b241b7aa7ee5e1de80639151e544320771ba541932e00f058a60baf5839c793a9495af0e1abd27b5d2b1f868cbfc5776c3c0fa1938d439e934f01327d4937a3b3c3c317a32184cc48c3128cb0e132dc025d704d1b255afc193b15342a23d47e48349073965e376ad6adda5b2b0ca0079211e57a4333975e40cb7be59c1496514f179efb29055f4940aa7b0ebd05d2534f3cd84333b2a7d14782f85bc65dfbb9e39c3829b94aef63072ebc8f54a03cb696dde520cdd5f47213fedcbd72220d63df197a882d4cfa488d7067bbc5ec3b6c5c07effbf85b310bc3572c07048e653814cf54b102818100e49816bb4cc65881dfedd4f766074c4ba5d25dea4a241de0d043c0ebef275db2ddf4182611f2ae4e66e409555f190f82e6a6fff8e9da984afe5e23beaa85511b534d559f2ded88ba16f2d177f75fb2496d528a4ff80bf1714e0a91b7f73eb403f54cda1c524c8fd7b7238a350c392f141c9ea88ba37a7628dce811c07c6cfdc302818100e0dfd9544d600582e0ab69b6a262990818696a3c500e7fc07f3bf83dc5fb0331ec70468186518374d5299cb395f35166195ebef5d6d04190c05de9233553a5d1f1816250b45804286461d5984c7f6490812bf0066c4255dbbbf67e9bd792d3a7a612540e44395f24332ccb3913ef0a25a20738b3c5d81e8e0260e6677429a65b02818100c03df36802f20f7ef19e66eac44040f6a176a00aa7dd65cf29f6c0e8ea10362975a591257b14976851f956ac1834d029aae62900e152379f61fa339f667285ba303d2a539ae1578a0040a6ce78185fac86a6d2b0dc0ed7370d85aff4819696f77934ef7cbfeda94ea5b2dac930056b4543a85e6048d475487a3724aeb73545d702818100cef777b10d5dd8f4b20f51c694022752ba151b7fd336e501a898eb4aff929d482f92ce719bcc1e2f43997eee128ed55620f780ce071db99a9e5250a6e507cdd0427490a632b5e76dbda605ce9c698b872c3be238271f8ea2248723d40f3ec5aac140913868365d8895c91e69b41d07bbc73ada472b4a5424e3af879fa3dc498d02818053120a3e1ed0900b6c63e844b5068d92e5f84e0268d5aa83a8e31d39cab5c2a75dbbfeee601f928c4f7250f8daaf7b1c64f05d5e41b5b731c7837978a5f230f281adcbffc685876a41fee167e7fddd6c18a147aa68bd4d6cb47563f71f93dfb2125549c6080cab6b991fb799f6f2877293b682228b30100eb1619a467bf37e14"; + +pub const FOUR_OPERATOR_TWO_PRIVATE: &str = 
"308204a4020100028201010099864fd861b8ab755145a89d9a099a382c33cfcf064cbdbfb6ce2cc767cc689070b7699c46919224bb8c021f9024422110f7166926eb6146bb35e1fa4ef470b8c210bd6b0ec6c1027ceea0bdf6cb84cc43ae17e4160e86ccc80960199442ea842296c859ff905b684211b077b86bfcaa2d3b888d123b4cfc29c7f1054056ea4aba0c8b73de527a81534ded2e1302755d2a3ebad2719b9c709ef513f7e1f92b4ca9d0d06a020fcc6f135ed25a563c1ab6ac0e5225e75a9b44396f12b20ab7f0ea02761d7cdfbf3caccb2362de3dc70742e5898cff50bd832192bfd4560af473c464ee792f6b391b92429b931793d89c3cfd5ee1ff26b887c6283f3d812b0350710203010001028201003e105c2affa6663a3136d5e990a21d24644a35d25d9b9c81ea67031741d112dc8194c42f172036527f37248c99faee78eba0d8007e695d93f88ed9e215152094b06f9003bd9f7fdb7fa2007d8b4dcf4bbc789ed3e84ecb13f23248154f28962200d1b001221dbbb6342f6e85979aa03433c1037cf447e0e1780a8a5733216fe9516c582a2d8aa33782aba179a7813cd2b1e8396d053490019ffe1f64b4b56acf10882085973283f59c9067a7f0090caf4832752f9ee9bc659487fdd036c9d5bc0d76b6ddd9555fb5e9b8af6189624d5ae9e9fedd2ab99356e5eecdddad1815da4d5c9ccc50a65b9df76a40c5feb628019a7c9262e3a4067d790ede5d630d1c2102818100c8888e3a44e0711f4cbcb0ed1077885b80aa68ee38becd5e596d9e3e83af835b6becba0186fedb0ded6987855e423650e4e60c8d8cf55fe0958a5ce14cc9e3119b57a92ed16ec90cad7db1e45308c49dafae51d80f2efcc00c42c5f28e232d46eebb006d825e97e15a1b635eec55d987a73506f3d2f665f478bbddeeeeef53fd02818100c3fd247330ed61b28412b168e827befe777ad131843c0cfcbe504ae43ff231f530c1f7ef64659366703485a28d6867d809f7804dda90dff30611b711cdc6aa17100852e7d7e6f70b110a9da6229829d1f48262cc1a7961f2d196bdc27125bc8141cd4271e3b9d54260b861cf450c3b8cfb1f017cb51c08065ec9ef5904c3c68502818100c22d98ab4bae995b598f0d3340d2be32fc70069346575bbd9492d4bc6bff340efe7e87ce9acd858802f040ce1febb574b7711b8ea583a4876fc63f11daad5336e55908f5d0ce99d7b0d719bea1b8c7ca79272f112c02afb3b72ba149b1e0d622ed601e95ebbb750e3d966faea6e2aa74f4b0203f51744e5d5fdb6a97c6bdf0710281800a22840904e5b1a0a69dc4d8d4f0813aed78c76a9518f9def40478eaf6b79287c85eaf708cb387fccb1e9c2e7cbb826b3490bcecc9b9a62b0e0c4a783c38e2c0d08e6da3199213025a7e3f0ac14d3714695d78b86f4209a3a1dcf6b12062c02dbaf65f523e6174babaffade726fdebf26d65fc10b3d8e03d5c177b2e1246017502818100a7cf5af2ea38792850e778b9bff3fb03119e2b8805cecc264419ec8e1213f7a763c98c7d5ac085f26e7829ed42cc40244b0b2c1a9eb23da6046be2da4f16b894f043897ba60f50a3b0483aa0e4d77c7f9ec984ca66620e58e9ad1f39d17bbd34a32ced9047b4befebd5d0377235f318728c58f2b96e95abc03e1c9bed8d5a4b1"; + +pub const FOUR_OPERATOR_THREE_PRIVATE: &str = 
"308204a40201000282010100b688854d4a89a4bf38c6f84c15200e600a9b1188b30c13e5fe5336735f0ab06f480120d2cdb35fc469dcda90b7ee38ebee1ddb952c4435848a3985475b6376abc6f32c1bb4ea42b4833b150c324c35cf23a55a8df8422b3e52233582f69a9e593676b3c9a580695564358c8c69a8c80334f2ad9fbac2ea104688aecd2825bb447a7ae59065ef02722113d590eb0b4462867f9b201a377bbbf4cc501ec374305372251688f3b49e6b8fe25f97241a522ff34c2cbda933e7596140ced199e857bfd37ba154dd2cf16670b76cb55756e21d800f633eb67e2b42578e18ae4cc43a62128119f75288cece4f2f345626702478d4bdb0fe8d7b4740f72e0f27c42e8c25020301000102820101008074b922f8a6bf3b1750e7225be79056448076a9761fb4cd31db0bc1cb8bf1388f3ac407b65d5ab3163127db9aa55a87a6ae7a7e938579084a624a8a3a255839712c66c924db8b900f9e7fa472ad315d11dfe7476c03dcfce1bf07849fd996408054af17e491e70f0213b1528b750d353c88e0693d7cb84e35e530e70e2ee7870be016c12bbc5e40a90883ba6d94514a9608142e79d57c25b9ba815b7cd107831383e470666a15c2f6b2e4766ba7c082e83f27103e338fe53f021eb208b58e53e6aa009e9dbff86a88d4d5fec44d85ce26ad84cd0f05b8cfa37f57129aa56be6a128bc0c164ec8be430970c7e3f03fd6412ad4af7f72f732de3fa0df32c1078102818100cd04a971ccb3472473203c5fd025075d949432eab85926297f6a16761aecdc131ef9cc0cc7934dedb7f183c1f2b35bfef654e5b7b13ac263a176549baa2d24d9b17f3d56ebeee78439516db1bcb39fb79aa76b7f994b145b5713e6028246456b5f57ce64f860769eb5f74e30576f061e5df7d0993f1bd5fc9f83fccded11b3c502818100e3ec7b4fd5daab0763f48f5b3fcbb506312d1ea0603b351107a9d2790f9141e0cddd9c3e778c99efe3b57e7564ec0c433378ffca40ee7e0107506bc4629ec4b334b56b38924bb84f19fa649203bb43e32d2928ed66d190e890bfc425c1998bc4a2091f680b1caa6b064e68641da7e4c626c57d23450b9317e56b35d4d97e1ce102818017189a5a269c5fbc5c77da35550686e0e4f7191156393cd259f74296858bff72ebff6a1c5a735ec913fad2440c2a6687bf8a6ae299c5abd67b7f10230535d6bbeb82110ff4be52389418774a199f06b4316900f43bf9b84e5dedf0f0816a9731746938e8290efcedfe43e0fc132d7fbbf60c0fe4e3b62812308a36f59fea699d02818100bc04221cd37ed4c2fdf38a266dd3eefab2aa53af5c72baedd772818b180a6d5bb2b6f2e29cdfc144a084e15299f4169180ee79a330390c7c70ba288c120682a08a0475f46eca43ba0ce5fefc6c539846d8c4315cd50a5f0d5a0ab715a644b1857d5d252940b15eeb76824b9efacfbaeab2a50afb83436f0db154e54d3634d0410281803c24edfc42dcfc9e94245260120129e89f3c1a7c671373a36dfc060753a2d6f732a016cef726c34740f8bc0b0881919deb97204e4ca78adbb4e92ac38f008d80db808989fa5c55b8ffd69e1574d0eb86f390183e9db8f6baa3f207eea3de1f3ae6d52f12cea8c9f9b20e1ff97731214d3aaccc24f597616c8b7d83be30281f8e"; + +pub const FOUR_OPERATOR_FOUR_PRIVATE: &str = 
"308204a40201000282010100a905f3abfe97b5511f25367fbf53f09334a43515dba42ff8d5af4b490bec924202746d9d1b0f906a090d558a6f290b11df003105f0e842a74ca04bfc1a1f7105a65a7fa90b5a49da55860d25e5a7e9b1220e65e35580ccab976197da1df5484ae04613f2b21fe5a95fc846bdf96b3da1e00b4b6d5c54fa513e86d01f1b17f31a3db900ab2f13aa738116f36f392a3e6f9d095fa461b6d561417db1c64785daf9a98e7d328f9512e579550ccf05feee978627fe47de3a4b165fa815aaf60bf6031ff109cf4f8daba1899bbd6227b31cd7e7343fa14e6b2e99a99f990e3f5da4977f99ba98cf2deb2ba6cfa3c36f3446074897ce443e0a8cd308b384b5ca9c592302030100010282010060dd2e562523601fcb4f923a07b5dd2b1f81f382414b88ca7bfb6793c7279e7201e2236763b8b9b46ad79f6c24644b19c4c8e14f5c4e5ed46dcf777c54a42c2b66b87a6cb03ae01425eb1ae1db092d9dfbbc709ba5c69884c5ce822dd7f957a2c180a7b1f06ee338fbd154e94e652cfef5dcc32f3b38dff36b77eb11c87f232bb9be79e7039dc61af7ac15e608369c479f23cd99887bc01dadbe5aeefee4b579a7b9858705a4cb2a3f66c13ae304cd52d6a60f0cc445025d872883b419ea6f2fc90d794b82f107afa191239642d97b85e2f7069b560bcc855c9ea5119d9f98d2b4e207102ebc23153a956207b62295172f725655c46756ef7c57ce6c117659e102818100c2b75c4ce021fef5ca4ec0a5581e7383c7ec0a0342bae6082ea3b2d9c3a9ce157b190a1eb2bb7a7e5407332f8e28ac16926156b4b47f25e1392bf5fcd35e0de463f928ecb1d3c6311c6f69b4244d666eb4f29dc10622ad124ed33c95abaec5d1443036725a92831ab1aa956f18f4f5a713f48e3a12b1a210f0d3b6ec7c0907c902818100de3875b1ee8f03e42274bb26b34739d4e4b33e48280a72ff9b2c7e5954308a5faaf7bceef3c45d495082f1825217646fd490cf2bc0df90fe807c13b4f7c2e8106438f856f04089a6130f0974cbb619709be2ee988f0362f8900f37444e5e53ed85b07574063cfa275b8f4636d5e94cbcd1c7a655dd3a1cd66209daa7f319a78b0281807665198960eb2ae4f6db45c603bb984f73bb712724671241bd6229f8c141399ed4179890abead50385424f7c45fb331012777f4a2749fc9562b6f93e7ea2fcdd777063d2f019adb3e4ef559d84494fd456d002de00460b684b67a3b9fa072e1f1d50177b16d969404cf14525a54e25242f3d0f51fe55e60e58f0d2941ea33b0902818100bc66abca3612445f47a32604b29c6178908932f5a414efd8ababb6576fdc5384b683a148099de2e544802fd7a857b2cc693078a484ba46c8af1002f93bd1a0443d645b9001d305a0aaa9e5ff82b299b0f2491cb675118ef863d2b2ad93afbf823205201f4526af836cc9f4e28acb6846f1a84deaa04c23a4d2abbe19042f2cef02818100bb9e385d27f693f7981cad37aa856e24b651e26e8f8040e9e29b9da15b9a54f51cf49cd718dfc70948436ee8d4a4625b6cab3da065ba1f286fd423f55e10a778634a4286e1838df70e2525fc5cc48a3f4e1e28859526e1f8a2563f6bd635484ce101d1a7158d2702f25399d0013d4fff927547b828286a3f882d3c39869f3836"; + +// A SSV Operator that is responsible for signing +pub struct TestingSigner { + private_key: Rsa, + operator_id: OperatorId, +} + +impl TestingSigner { + pub fn new_testing_signer(keyset: &TestKeySet, id: u64) -> TestingSigner { + let id = OperatorId::from(id); + TestingSigner { + private_key: keyset.operator_keys.get(&id).unwrap().clone(), + operator_id: id, + } + } +} + +pub struct TestKeySet { + pub secret_key: SecretKey, + pub public_key: PublicKeyBytes, + pub share_count: u64, + pub threshold: u64, + pub partial_threshold: u64, + pub shares: HashMap, + pub operator_keys: HashMap>, +} + +impl TestKeySet { + #[rustfmt::skip] + pub fn four_share_set() -> TestKeySet { + TestKeySet { + secret_key: VALIDATOR_SECRET_KEY.clone(), + public_key: *TESTING_VALIDATOR_PUBKEY, + share_count: 4, + threshold: 3, + partial_threshold: 2, + shares: HashMap::from([ + (OperatorId::from(1),secret_key_from_hex("5f4711a796c1116b5118ec35279fb64d551d9b38813d2939954dd2df5160d3d9")), + (OperatorId::from(2),secret_key_from_hex("48e4c0a38e90f9352d1d09489446443ebd17b1904f4f0002fe894c2c3f62457a")), + 
(OperatorId::from(3),secret_key_from_hex("65dc7c179f68347cf12f86e1c51e54e8aeeed579d4c715082bb8a0382c1a8153")), + (OperatorId::from(4),secret_key_from_hex("42409cb09fa945fa6a168cf8b0861045d6e562f211a70c4a1cdbcf0417898763")), + ]), + operator_keys: HashMap::from([ + (OperatorId::from(1),rsa_secret_from_hex(FOUR_OPERATOR_ONE_PRIVATE)), + (OperatorId::from(2),rsa_secret_from_hex(FOUR_OPERATOR_TWO_PRIVATE)), + (OperatorId::from(3),rsa_secret_from_hex(FOUR_OPERATOR_THREE_PRIVATE)), + (OperatorId::from(4),rsa_secret_from_hex(FOUR_OPERATOR_FOUR_PRIVATE)), + ]), + } + } + + fn seven_share_set() -> TestKeySet { + todo!() + } + + fn ten_share_set() -> TestKeySet { + todo!() + } + + fn thirteen_share_set() -> TestKeySet { + todo!() + } +} + +pub fn secret_key_from_hex(hex: &str) -> SecretKey { + let bytes = <[u8; 32]>::from_hex(hex).expect("Invalid hex string"); + SecretKey::deserialize(&bytes).expect("Failed to create secret key") +} + +pub fn rsa_secret_from_hex(key: &str) -> Rsa { + let pem_bytes = hex::decode(key).expect("Valid key"); + Rsa::private_key_from_der(&pem_bytes).expect("Valid key bytes") +} diff --git a/anchor/spec_tests/ssv-spec b/anchor/spec_tests/ssv-spec new file mode 160000 index 000000000..171927625 --- /dev/null +++ b/anchor/spec_tests/ssv-spec @@ -0,0 +1 @@ +Subproject commit 17192762564b2fed35e8c237122c24861bfb248d diff --git a/anchor/src/main.rs b/anchor/src/main.rs index 3814204f6..555226819 100644 --- a/anchor/src/main.rs +++ b/anchor/src/main.rs @@ -1,9 +1,4 @@ -use std::fs; - use clap::Parser; -use tracing::{Level, error, info}; - -mod environment; use client::{Client, Node, config}; use environment::Environment; use global_config::{GlobalConfig, GlobalFlags}; @@ -14,10 +9,13 @@ use logging::{ utils::build_workspace_filter, }; use task_executor::ShutdownReason; +use tracing::{Level, error, info}; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{EnvFilter, Layer, fmt, layer::SubscriberExt, util::SubscriberInitExt}; use types::EthSpecId; +mod environment; + #[derive(Parser, Clone, Debug)] struct Cli { #[clap(flatten)] @@ -53,12 +51,6 @@ fn main() { } }; - // Try and create the data directory if it doesn't exist. 
-    if let Err(err) = fs::create_dir_all(&global_config.data_dir) {
-        eprintln!("Failed to create data directory: {err}");
-        return;
-    }
-
     let file_logging_flags = if let AnchorSubcommands::Node(node) = &cli.subcommand {
         Some(&node.logging_flags)
     } else {
@@ -102,7 +94,7 @@ fn start_anchor(anchor_config: &Node, global_config: GlobalConfig, mut environme
         }
     };
 
-    config.network.domain_type = config.global_config.ssv_network.ssv_domain_type.clone();
+    config.network.domain_type = config.global_config.ssv_network.ssv_domain_type;
 
     // Build the core task executor
     let core_executor = environment.executor();
@@ -199,7 +191,7 @@ pub fn enable_logging(
     let logs_dir = file_logging_flags
         .logfile_dir
        .clone()
-        .unwrap_or_else(|| global_config.data_dir.join("logs"));
+        .unwrap_or_else(|| global_config.data_dir.default_logs_dir());
 
     let filter_level: Level = file_logging_flags.logfile_debug_level;
 
diff --git a/anchor/subnet_service/src/lib.rs b/anchor/subnet_service/src/lib.rs
index 7f165717f..cfa9d61dc 100644
--- a/anchor/subnet_service/src/lib.rs
+++ b/anchor/subnet_service/src/lib.rs
@@ -1,7 +1,7 @@
 use std::{collections::HashSet, ops::Deref, sync::Arc, time::Duration};
 
 use alloy::primitives::ruint::aliases::U256;
-use database::NetworkState;
+use database::{NetworkState, NonUniqueIndex, UniqueIndex};
 use serde::{Deserialize, Serialize};
 use slot_clock::SlotClock;
 use ssv_types::{CommitteeId, CommitteeInfo};
@@ -202,9 +202,8 @@ async fn handle_subnet_changes(
     {
         let state = db.borrow();
         for cluster_id in state.get_own_clusters() {
-            if let Some(cluster_idx) = state.clusters().get_by_cluster_id(cluster_id) {
-                let subnet_id =
-                    SubnetId::from_committee(cluster_idx.cluster.committee_id(), subnet_count);
+            if let Some(cluster) = state.clusters().get_by(cluster_id) {
+                let subnet_id = SubnetId::from_committee(cluster.committee_id(), subnet_count);
                 current_subnets.insert(subnet_id);
             }
         }
@@ -298,24 +297,21 @@ pub fn get_committee_info_for_subnet(
 ) -> Vec<CommitteeInfo> {
     network_state
         .clusters()
-        .iter()
-        .map(|(_, cluster_idx)| cluster_idx)
-        .filter(|cluster_idx| {
-            let cluster_subnet =
-                SubnetId::from_committee(cluster_idx.cluster.committee_id(), SUBNET_COUNT);
+        .values()
+        .filter(|cluster| {
+            let cluster_subnet = SubnetId::from_committee(cluster.committee_id(), SUBNET_COUNT);
             cluster_subnet == *subnet
         })
-        .map(|cluster_idx| {
+        .map(|cluster| {
             // Convert cluster to CommitteeInfo by getting validator indices
             let validator_indices = network_state
                 .metadata()
-                .get_by_cluster_id(&cluster_idx.cluster_id)
-                .iter()
-                .flat_map(|metadata| metadata.metadata.index)
+                .get_all_by(&cluster.cluster_id)
+                .flat_map(|metadata| metadata.index)
                 .collect::<Vec<_>>();
 
             CommitteeInfo {
-                committee_members: cluster_idx.cluster.cluster_members.clone(),
+                committee_members: cluster.cluster_members.clone(),
                 validator_indices,
             }
         })
diff --git a/anchor/validator_store/Cargo.toml b/anchor/validator_store/Cargo.toml
index b92910864..b0c69db2a 100644
--- a/anchor/validator_store/Cargo.toml
+++ b/anchor/validator_store/Cargo.toml
@@ -6,11 +6,11 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
 
 [dependencies]
 beacon_node_fallback = { workspace = true }
-dashmap = { workspace = true }
 database = { workspace = true }
 eth2 = { workspace = true }
 ethereum_ssz = { workspace = true }
 hex = { workspace = true }
+lru = { workspace = true }
 metrics = { workspace = true }
 openssl = { workspace = true }
 parking_lot = { workspace = true }
diff --git a/anchor/validator_store/src/lib.rs b/anchor/validator_store/src/lib.rs
index 92681d465..31e2ef4bf 100644
--- 
a/anchor/validator_store/src/lib.rs +++ b/anchor/validator_store/src/lib.rs @@ -2,17 +2,18 @@ pub mod metadata_service; mod metrics; use std::{ - collections::{HashMap, HashSet}, + collections::HashMap, fmt::Debug, future::Future, + num::NonZeroUsize, str::from_utf8, sync::{Arc, LazyLock}, time::Duration, }; -use dashmap::DashMap; -use database::NetworkState; +use database::{NetworkDatabase, NonUniqueIndex, UniqueIndex}; use eth2::types::{BlockContents, FullBlockContents, PublishBlockRequest}; +use lru::LruCache; use openssl::{ pkey::Private, rsa::{Padding, Rsa}, @@ -24,22 +25,22 @@ use qbft_manager::{ }; use safe_arith::{ArithError, SafeArith}; use signature_collector::{ - CollectionError, SignatureCollectorManager, SignatureMetadata, SignatureRequester, SigningData, + CollectionError, SignatureCollectorManager, SignatureMetadata, SignatureRequester, + ValidatorSigningData, }; use slashing_protection::{NotSafe, Safe, SlashingDatabase}; use slot_clock::SlotClock; use ssv_types::{ - Cluster, CommitteeId, ValidatorIndex, ValidatorMetadata, + Cluster, ClusterId, ENCRYPTED_KEY_LENGTH, ValidatorIndex, ValidatorMetadata, consensus::{ BEACON_ROLE_AGGREGATOR, BEACON_ROLE_PROPOSER, BEACON_ROLE_SYNC_COMMITTEE_CONTRIBUTION, - BeaconVote, Contribution, ContributionWrapper, Contributions, ValidatorConsensusData, - ValidatorDuty, + BeaconVote, Contribution, ContributionWrapper, Contributions, QbftData, + ValidatorConsensusData, ValidatorDuty, }, msgid::Role, partial_sig::PartialSignatureKind, }; use ssz::{Decode, DecodeError, Encode}; -use task_executor::TaskExecutor; use tokio::{ select, sync::{Barrier, RwLock, watch}, @@ -77,6 +78,10 @@ use validator_store::{ /// This acts as a maximum safe-guard against clock drift. const SLASHING_PROTECTION_HISTORY_EPOCHS: u64 = 512; +// We use 2000 here as some networks (e.g. hoodi-stage) already use a validator limit of 2000. 
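+// The constant only bounds the LRU cache of decrypted key shares below: when the cache is full,
+// the least recently used share is evicted and is simply decrypted again on its next use, so
+// exceeding the limit costs extra RSA decryptions rather than correctness.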
+const MAX_VALIDATORS_PER_OPERATOR: NonZeroUsize =
+    NonZeroUsize::new(2000).expect("2000 is non-zero");
+
 const RANDAO_REVEAL_LOG_NAME: &str = "RANDAO reveal";
 const BLOCK_LOG_NAME: &str = "block";
 const ATTESTATION_LOG_NAME: &str = "attestation";
@@ -87,19 +92,12 @@ const SYNC_SELECTION_PROOF_LOG_NAME: &str = "sync selection proof";
 const SYNC_COMMITTEE_SIGNATURE_LOG_NAME: &str = "sync committee signature";
 const SYNC_COMMITTEE_CONTRIBUTION_LOG_NAME: &str = "sync committee contribution";
 
-#[derive(Clone)]
-struct InitializedValidator {
-    cluster: Cluster,
-    metadata: ValidatorMetadata,
-    decrypted_key_share: Option<SecretKey>,
-}
-
 pub struct AnchorValidatorStore<T: SlotClock + 'static, E: EthSpec> {
-    validators: DashMap<PublicKeyBytes, InitializedValidator>,
-    validators_per_committee: DashMap<CommitteeId, HashSet<ValidatorIndex>>,
+    database: Arc<NetworkDatabase>,
+    decrypted_keys: Mutex<LruCache<[u8; ENCRYPTED_KEY_LENGTH], SecretKey>>,
     signature_collector: Arc<SignatureCollectorManager>,
     qbft_manager: Arc<QbftManager>,
-    slashing_protection: SlashingDatabase,
+    slashing_protection: Arc<SlashingDatabase>,
     slashing_protection_last_prune: Mutex<Epoch>,
     disable_slashing_protection: bool,
     slot_clock: T,
@@ -119,25 +117,24 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
-        database_state: watch::Receiver<NetworkState>,
+        database: Arc<NetworkDatabase>,
         signature_collector: Arc<SignatureCollectorManager>,
         qbft_manager: Arc<QbftManager>,
-        slashing_protection: SlashingDatabase,
+        slashing_protection: Arc<SlashingDatabase>,
         disable_slashing_protection: bool,
         slot_clock: T,
         spec: Arc<ChainSpec>,
         genesis_validators_root: Hash256,
         private_key: Option<Rsa<Private>>,
-        task_executor: TaskExecutor,
         gas_limit: u64,
         builder_proposals: bool,
         builder_boost_factor: Option<u64>,
         prefer_builder_proposals: bool,
         is_synced: watch::Receiver<bool>,
     ) -> Arc<AnchorValidatorStore<T, E>> {
-        let ret = Arc::new(Self {
-            validators: DashMap::new(),
-            validators_per_committee: DashMap::new(),
+        Arc::new(Self {
+            database,
+            decrypted_keys: Mutex::new(LruCache::new(MAX_VALIDATORS_PER_OPERATOR)),
             signature_collector,
             qbft_manager,
             slashing_protection,
@@ -153,180 +150,40 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
             builder_boost_factor,
             prefer_builder_proposals,
             is_synced,
-        });
-
-        task_executor.spawn(
-            Arc::clone(&ret).updater(database_state),
-            "validator_store_updater",
-        );
-
-        ret
-    }
-
-    async fn updater(self: Arc<Self>, mut database_state: watch::Receiver<NetworkState>) {
-        while database_state.changed().await.is_ok() {
-            self.load_validators(&database_state.borrow());
-        }
-    }
-
-    fn load_validators(&self, state: &NetworkState) {
-        let mut unseen_validators = self
-            .validators
-            .iter()
-            .map(|v| *v.key())
-            .collect::<HashSet<_>>();
-        let db_clusters_ids = state.get_own_clusters().iter().collect::<Vec<_>>();
-
-        for (cluster_idx, validator) in db_clusters_ids
-            .into_iter()
-            .flat_map(|id| state.clusters().get_by_cluster_id(id))
-            .filter(|cluster_idx| !cluster_idx.cluster.liquidated)
-            .flat_map(|cluster_idx| {
-                state
-                    .metadata()
-                    .get_by_cluster_id(&cluster_idx.cluster_id)
-                    .into_iter()
-                    .map(move |metadata| (cluster_idx, metadata))
-            })
-        {
-            if unseen_validators.remove(&validator.metadata.public_key) {
-                // Validator was present: check if the cluster has changed
-                if let Some(mut entry) = self.validators.get_mut(&validator.metadata.public_key) {
-                    let current_cluster = &mut entry.value_mut().cluster;
-                    if current_cluster.cluster_id != cluster_idx.cluster_id {
-                        // Update the validator with the new cluster
-                        *current_cluster = cluster_idx.cluster.clone();
-                    }
-                }
-            } else {
-                // value was not present: add to store
-                if let Ok(secret_key) = self.get_share_from_state(
-                    state,
-                    &validator.metadata,
-                    validator.metadata.public_key,
-                ) {
-                    let result = self.add_validator(
-                        validator.metadata.public_key,
-                        &cluster_idx.cluster,
-                        validator.metadata.clone(),
-                        secret_key,
-                    );
-                    if let Err(err) = result {
-                        error!(?err, "Unable to initialize validator");
-                    }
-                }
-            }
-        }
-
-        for validator in unseen_validators {
-            self.remove_validator(&validator);
-            info!(%validator, "Validator disabled");
-        }
-
-        let count = self.validators.len() as i64;
-        validator_metrics::set_gauge(&validator_metrics::ENABLED_VALIDATORS_COUNT, count);
-        validator_metrics::set_gauge(&validator_metrics::TOTAL_VALIDATORS_COUNT, count);
-    }
-
-    fn get_share_from_state(
-        &self,
-        state: &NetworkState,
-        validator: &ValidatorMetadata,
-        pubkey_bytes: PublicKeyBytes,
-    ) -> Result<Option<SecretKey>, ()> {
-        // If we have no private key, we are running in impostor mode - so we can not decrypt the
-        // share. Return `None` to let the signature collector mock the signing.
-        let Some(private_key) = &self.private_key else {
-            return Ok(None);
-        };
-
-        let share = state
-            .shares()
-            .get_by_validator_pubkey(&validator.public_key)
-            .ok_or_else(|| warn!(validator = %pubkey_bytes, "Key share not found"))?;
-
-        // the buffer size must be larger than or equal the modulus size
-        let mut key_hex = [0; 2048 / 8];
-        let length = private_key
-            .private_decrypt(
-                &share.share.encrypted_private_key,
-                &mut key_hex,
-                Padding::PKCS1,
-            )
-            .map_err(|e| error!(?e, validator = %pubkey_bytes, "Share decryption failed"))?;
-
-        let key_hex = from_utf8(&key_hex[..length]).map_err(|err| {
-            error!(
-                ?err,
-                validator = %pubkey_bytes,
-                "Share decryption yielded non-utf8 data"
-            )
-        })?;
-
-        let mut secret_key = [0; 32];
-        hex::decode_to_slice(
-            key_hex.strip_prefix("0x").unwrap_or(key_hex),
-            &mut secret_key,
-        )
-        .map_err(|err| {
-            error!(
-                ?err,
-                validator = %pubkey_bytes,
-                "Decrypted share is not a hex string of size 64"
-            )
-        })?;
-
-        SecretKey::deserialize(&secret_key)
-            .map(Some)
-            .map_err(|err| error!(?err, validator = %pubkey_bytes, "Invalid secret key decrypted"))
+        })
     }
 
-    fn add_validator(
+    fn get_validator_and_cluster(
         &self,
-        pubkey_bytes: PublicKeyBytes,
-        cluster: &Cluster,
-        validator_metadata: ValidatorMetadata,
-        decrypted_key_share: Option<SecretKey>,
-    ) -> Result<(), Error> {
-        if let Some(index) = validator_metadata.index {
-            self.validators_per_committee
-                .entry(cluster.committee_id())
-                .or_default()
-                .insert(index);
+        validator_pubkey: PublicKeyBytes,
+    ) -> Result<(ValidatorMetadata, Cluster), Error> {
+        let state = self.database.state();
+        let validator = state
+            .metadata()
+            .get_by(&validator_pubkey)
+            .ok_or(Error::UnknownPubkey(validator_pubkey))?
+            .clone();
+
+        // First, attempt to get the cluster normally
+        if let Some(cluster) = state.clusters().get_by(&validator.cluster_id) {
+            return Ok((validator, cluster.clone()));
         }
 
-        self.validators.insert(
-            pubkey_bytes,
-            InitializedValidator {
-                cluster: cluster.clone(),
-                metadata: validator_metadata,
-                decrypted_key_share,
-            },
+        // If cluster is missing, this indicates a database inconsistency
+        // Log the error with context
+        error!(
+            validator_pubkey = %validator_pubkey,
+            cluster_id = ?validator.cluster_id,
+            "Database inconsistency detected: validator references non-existent cluster"
         );
-        self.slashing_protection
-            .register_validator(pubkey_bytes)
-            .map_err(Error::Slashable)?;
-        info!(validator = %pubkey_bytes, "Validator enabled");
-        Ok(())
-    }
-
-    fn remove_validator(&self, pubkey_bytes: &PublicKeyBytes) {
-        let Some((_, validator)) = self.validators.remove(pubkey_bytes) else {
-            return;
-        };
-        if let Some(idx) = validator.metadata.index {
-            for mut committee in self.validators_per_committee.iter_mut() {
-                committee.remove(&idx);
-            }
-        }
-    }
-
-    fn validator(&self, validator_pubkey: PublicKeyBytes) -> Result<InitializedValidator, Error> {
-        self.validators
-            .get(&validator_pubkey)
-            .map(|c| c.value().clone())
-            .ok_or(Error::UnknownPubkey(validator_pubkey))
+        // Return specific error with context for potential recovery
+        Err(Error::SpecificError(
+            SpecificError::ValidatorClusterMismatch {
+                validator_pubkey,
+                cluster_id: validator.cluster_id,
+            },
+        ))
     }
 
     fn get_domain(&self, epoch: Epoch, domain: Domain) -> Hash256 {
@@ -338,20 +195,22 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
         )
     }
 
+    #[allow(clippy::too_many_arguments)]
     async fn collect_signature(
         &self,
         signature_kind: PartialSignatureKind,
         role: Role,
-        validator: InitializedValidator,
+        base_hash: Option<Hash256>,
+        validator: &ValidatorMetadata,
+        cluster: &Cluster,
         signing_root: Hash256,
         slot: Slot,
     ) -> Result<Signature, Error> {
-        let committee_id = validator.cluster.committee_id();
+        let committee_id = cluster.committee_id();
         let metadata = SignatureMetadata {
             kind: signature_kind,
             role,
-            threshold: validator
-                .cluster
+            threshold: cluster
                 .get_f()
                 .safe_mul(2)
                 .and_then(|x| x.safe_add(1))
@@ -360,42 +219,63 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
             committee_id,
         };
 
-        let requester = if role == Role::Committee {
-            let metadata = self.get_slot_metadata(slot).await?;
+        let slot_metadata = self.get_slot_metadata(slot).await?;
+        let (num_signatures_to_collect, encrypted_private_key) = {
+            let state = self.database.state();
+            let num_signatures_to_collect = state
+                .metadata()
+                .get_all_by(&committee_id)
+                .map(|validator| {
+                    let mut duties = 0;
+                    if let Some(idx) = &validator.index {
+                        if slot_metadata.attesting_validators.contains(idx) {
+                            duties += 1;
+                        }
+                        if slot_metadata.sync_validators.contains(idx) {
+                            duties += 1;
+                        }
+                    }
+                    duties
+                })
+                .sum();
+            let encrypted_private_key = state
+                .shares()
+                .get_by(&validator.public_key)
+                .ok_or(Error::UnknownPubkey(validator.public_key))?
+                .encrypted_private_key;
+            (num_signatures_to_collect, encrypted_private_key)
+        };
+
+        let decrypted_key_share = if let Some(operator_key) = &self.private_key {
+            let key = self
+                .decrypted_keys
+                .lock()
+                .try_get_or_insert(encrypted_private_key, || {
+                    decrypt_key_share(operator_key, encrypted_private_key, validator.public_key)
+                        .map_err(|_| SpecificError::KeyShareDecryptionFailed)
+                })
+                .cloned()?;
+            Some(key)
+        } else {
+            // We are in impostor mode and cannot decrypt the share.
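+            // Returning `None` lets the signature collector mock the signing for this share.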
+            None
+        };
+
+        let requester = if let Some(base_hash) = base_hash {
             SignatureRequester::Committee {
-                num_signatures_to_collect: self
-                    .validators_per_committee
-                    .get(&committee_id)
-                    .map(|indices| {
-                        indices
-                            .iter()
-                            .map(|idx| {
-                                let mut duties = 0;
-                                if metadata.attesting_validators.contains(idx) {
-                                    duties += 1;
-                                }
-                                if metadata.sync_validators.contains(idx) {
-                                    duties += 1;
-                                }
-                                duties
-                            })
-                            .sum()
-                    })
-                    .unwrap_or_default(),
+                num_signatures_to_collect,
+                base_hash,
             }
         } else {
             SignatureRequester::SingleValidator {
-                pubkey: validator.metadata.public_key,
+                pubkey: validator.public_key,
             }
         };
 
-        let signing_data = SigningData {
+        let signing_data = ValidatorSigningData {
             root: signing_root,
-            index: validator
-                .metadata
-                .index
-                .ok_or(SpecificError::MissingIndex)?,
-            share: validator.decrypted_key_share.clone(),
+            index: validator.index.ok_or(SpecificError::MissingIndex)?,
+            share: decrypted_key_share,
         };
 
         let _timer =
@@ -409,11 +289,10 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
 
     async fn decide_abstract_block(
         &self,
-        validator_pubkey: PublicKeyBytes,
+        validator: &ValidatorMetadata,
+        cluster: &Cluster,
         signable_block: impl SignableBlock<E>,
     ) -> Result<UnsignedBlock<E>, Error> {
-        let validator = self.validator(validator_pubkey)?;
-
         let block = signable_block.as_block();
         let slot = block.slot();
 
@@ -423,16 +302,13 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
         // Define the validator instance identity for QBFT consensus
         let instance_id = ValidatorInstanceId {
-            validator: validator_pubkey,
+            validator: validator.public_key,
             duty: ValidatorDutyKind::Proposal,
             instance_height: slot.as_usize().into(),
         };
 
         // Get the validator index, ensuring it exists
-        let validator_index = validator
-            .metadata
-            .index
-            .ok_or(SpecificError::MissingIndex)?;
+        let validator_index = validator.index.ok_or(SpecificError::MissingIndex)?;
 
         // Determine the appropriate version based on block type
         let block_version = block.fork_name_unchecked().into();
@@ -440,7 +316,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
         // Create the validator duty information
         let validator_duty = ValidatorDuty {
             r#type: BEACON_ROLE_PROPOSER,
-            pub_key: validator_pubkey,
+            pub_key: validator.public_key,
             slot,
             validator_index,
             committee_index: 0,
@@ -454,13 +330,13 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
         let consensus_data = ValidatorConsensusData {
             duty: validator_duty,
             version: block_version,
-            data_ssz: signable_block.as_ssz_bytes(),
+            data_ssz: signable_block.as_ssz_bytes().into(),
         };
 
         // Initiate QBFT consensus for this block proposal
         let completed = self
             .qbft_manager
-            .decide_instance(instance_id, consensus_data, start_time, &validator.cluster)
+            .decide_instance(instance_id, consensus_data, start_time, cluster)
             .await
             .map_err(SpecificError::from)?;
         drop(timer);
@@ -483,7 +359,8 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
 
     async fn sign_abstract_block(
         &self,
-        validator_pubkey: PublicKeyBytes,
+        validator: &ValidatorMetadata,
+        cluster: &Cluster,
         signable_block: impl SignableBlock<E>,
         current_slot: Slot,
     ) -> Result<SignedBlock<E>, Error> {
@@ -510,7 +387,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
 
         if !self.disable_slashing_protection {
             convert_slashing_result(self.slashing_protection.check_and_insert_block_proposal(
-                &validator_pubkey,
+                &validator.public_key,
                 &header,
                 domain_hash,
             ))?;
@@ -521,7 +398,9 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
             .collect_signature(
                 PartialSignatureKind::PostConsensus,
                 Role::Proposer,
-                self.validator(validator_pubkey)?,
+                None,
+                validator,
+                cluster,
                 signing_root,
                 header.slot,
             )
@@ -618,12 +497,15 @@ impl<T: SlotClock + 'static, E: EthSpec> AnchorValidatorStore<T, E> {
         let spec = self.spec.clone();
         let domain_hash =
            voluntary_exit.get_domain(self.genesis_validators_root, &spec);
         let signing_root = voluntary_exit.signing_root(domain_hash);
+        let (validator, cluster) = self.get_validator_and_cluster(validator_pubkey)?;
 
         let signature = self
             .collect_signature(
                 PartialSignatureKind::VoluntaryExit,
                 Role::VoluntaryExit,
-                self.validator(validator_pubkey)?,
+                None,
+                &validator,
+                &cluster,
                 signing_root,
                 slot,
             )
@@ -680,6 +562,42 @@ async fn run_and_update_metrics(
     result
 }
 
+fn decrypt_key_share(
+    operator_key: &Rsa<Private>,
+    encrypted_private_key: [u8; ENCRYPTED_KEY_LENGTH],
+    pubkey_bytes: PublicKeyBytes,
+) -> Result<SecretKey, ()> {
+    // the buffer size must be larger than or equal the modulus size
+    let mut key_hex = [0; 2048 / 8];
+    let length = operator_key
+        .private_decrypt(&encrypted_private_key, &mut key_hex, Padding::PKCS1)
+        .map_err(|e| error!(?e, validator = %pubkey_bytes, "Share decryption failed"))?;
+
+    let key_hex = from_utf8(&key_hex[..length]).map_err(|err| {
+        error!(
+            ?err,
+            validator = %pubkey_bytes,
+            "Share decryption yielded non-utf8 data"
+        )
+    })?;
+
+    let mut secret_key = [0; 32];
+    hex::decode_to_slice(
+        key_hex.strip_prefix("0x").unwrap_or(key_hex),
+        &mut secret_key,
+    )
+    .map_err(|err| {
+        error!(
+            ?err,
+            validator = %pubkey_bytes,
+            "Decrypted share is not a hex string of size 64"
+        )
+    })?;
+
+    SecretKey::deserialize(&secret_key)
+        .map_err(|err| error!(?err, validator = %pubkey_bytes, "Invalid secret key decrypted"))
+}
+
 struct SlotMetadata {
     /// The slot this metadata is about.
     slot: Slot,
@@ -742,6 +660,13 @@ pub enum SpecificError {
     MissingIndex,
     SlotClock,
     NotSynced,
+    InconsistentDatabase,
+    /// Database inconsistency: validator references a cluster that doesn't exist
+    ValidatorClusterMismatch {
+        validator_pubkey: PublicKeyBytes,
+        cluster_id: ClusterId,
+    },
+    KeyShareDecryptionFailed,
 }
 
 impl From<ArithError> for SpecificError {
@@ -777,9 +702,11 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
     type E = E;
 
     fn validator_index(&self, pubkey: &PublicKeyBytes) -> Option<u64> {
-        self.validator(*pubkey)
-            .ok()
-            .and_then(|v| v.metadata.index.map(|idx| *idx as u64))
+        self.database
+            .state()
+            .metadata()
+            .get_by(pubkey)
+            .and_then(|v| v.index.map(|idx| *idx as u64))
     }
 
     fn voting_pubkeys<I, F>(&self, filter_func: F) -> I
     where
         I: FromIterator<PublicKeyBytes>,
         F: Fn(DoppelgangerStatus) -> Option<PublicKeyBytes>,
     {
-        // Treat all validators as `SigningEnabled`
-        self.validators
-            .iter()
-            .filter_map(|v| filter_func(DoppelgangerStatus::SigningEnabled(*v.key())))
+        // Treat all shares as `SigningEnabled`
+        self.database
+            .state()
+            .shares()
+            .values()
+            .filter_map(|v| filter_func(DoppelgangerStatus::SigningEnabled(v.validator_pubkey)))
             .collect()
     }
 
@@ -800,19 +729,25 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
     }
 
     fn num_voting_validators(&self) -> usize {
-        self.validators.len()
+        self.database.state().shares().length()
     }
 
     fn graffiti(&self, validator_pubkey: &PublicKeyBytes) -> Option<Graffiti> {
-        self.validator(*validator_pubkey)
-            .ok()
-            .map(|v| v.metadata.graffiti)
+        self.database
+            .state()
+            .metadata()
+            .get_by(validator_pubkey)
+            .map(|metadata| metadata.graffiti)
     }
 
     fn get_fee_recipient(&self, validator_pubkey: &PublicKeyBytes) -> Option<Address> {
-        self.validator(*validator_pubkey)
-            .ok()
-            .map(|v| v.cluster.fee_recipient)
+        let state = self.database.state();
+        state.metadata().get_by(validator_pubkey).and_then(|v| {
+            state
+                .clusters()
+                .get_by(&v.cluster_id)
+                .map(|cluster| cluster.fee_recipient)
+        })
     }
 
     fn determine_builder_boost_factor(&self, _validator_pubkey: &PublicKeyBytes) -> Option<u64> {
@@ -833,48 +768,63 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
         validator_pubkey: PublicKeyBytes,
         signing_epoch: Epoch,
     ) -> Result<Signature, Error> {
-        let domain_hash = self.get_domain(signing_epoch, Domain::Randao);
-        let signing_root = signing_epoch.signing_root(domain_hash);
+        let future = async {
+            let domain_hash = self.get_domain(signing_epoch, Domain::Randao);
+            let signing_root = signing_epoch.signing_root(domain_hash);
+
+            let (validator, cluster) = self.get_validator_and_cluster(validator_pubkey)?;
 
-        run_and_update_metrics(
-            RANDAO_REVEAL_LOG_NAME,
-            &metrics::SIGNED_RANDAO_REVEALS_TOTAL,
             self.collect_signature(
                 PartialSignatureKind::RandaoPartialSig,
                 Role::Proposer,
-                self.validator(validator_pubkey)?,
+                None,
+                &validator,
+                &cluster,
                 signing_root,
                 self.slot_clock.now().ok_or(SpecificError::SlotClock)?,
-            ),
+            )
+            .await
+        };
+
+        run_and_update_metrics(
+            RANDAO_REVEAL_LOG_NAME,
+            &metrics::SIGNED_RANDAO_REVEALS_TOTAL,
+            future,
        )
        .await
     }
 
     fn set_validator_index(&self, validator_pubkey: &PublicKeyBytes, index: u64) {
-        match self.validators.get_mut(validator_pubkey) {
-            None => warn!(
+        let Some(maybe_old_idx) = self
+            .database
+            .state()
+            .metadata()
+            .get_by(validator_pubkey)
+            .map(|v| v.index)
+        else {
+            warn!(
                 validator = validator_pubkey.as_hex_string(),
                 "Trying to set index for unknown validator"
-            ),
-            Some(mut v) => {
-                let index = ValidatorIndex(index as usize);
-                let mut index_set = self
-                    .validators_per_committee
-                    .entry(v.cluster.committee_id())
-                    .or_default();
-                if let Some(old_idx) = v.metadata.index
-                    && old_idx != index
-                {
-                    error!(
-                        ?validator_pubkey,
-                        db=?old_idx,
-                        got=?index,
-                        "Inconsistent validator index - database corrupt?"
-                    );
-                    index_set.remove(&old_idx);
-                }
-                v.metadata.index = Some(index);
-                index_set.insert(index);
+            );
+            return;
+        };
+
+        let index = ValidatorIndex(index as usize);
+        if let Some(old_idx) = maybe_old_idx {
+            if old_idx != index {
+                error!(
+                    ?validator_pubkey,
+                    db=?old_idx,
+                    got=?index,
+                    "Inconsistent validator index - database corrupt?"
+                );
+            }
+        } else {
+            let result = self
+                .database
+                .set_validator_indices(HashMap::from([(*validator_pubkey, index)]));
+            if let Err(err) = result {
+                error!(?err, "Failed to set validator index");
            }
         }
     }
@@ -889,31 +839,35 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
         if !*self.is_synced.borrow() {
             return Err(Error::SpecificError(SpecificError::NotSynced));
         }
+        let (validator, cluster) = self.get_validator_and_cluster(validator_pubkey)?;
 
         let block = match block {
             UnsignedBlock::Full(FullBlockContents::BlockContents(contents)) => {
-                self.decide_abstract_block(validator_pubkey, contents).await
+                self.decide_abstract_block(&validator, &cluster, contents)
+                    .await
             }
             UnsignedBlock::Full(FullBlockContents::Block(block)) => {
-                self.decide_abstract_block(validator_pubkey, block).await
+                self.decide_abstract_block(&validator, &cluster, block)
+                    .await
             }
             UnsignedBlock::Blinded(block) => {
-                self.decide_abstract_block(validator_pubkey, block).await
+                self.decide_abstract_block(&validator, &cluster, block)
+                    .await
             }
         }?;
 
         // yay - we agree! let's sign the block we agreed on
         match block {
             UnsignedBlock::Full(FullBlockContents::BlockContents(contents)) => {
-                self.sign_abstract_block(validator_pubkey, contents, current_slot)
+                self.sign_abstract_block(&validator, &cluster, contents, current_slot)
                     .await
             }
             UnsignedBlock::Full(FullBlockContents::Block(block)) => {
-                self.sign_abstract_block(validator_pubkey, block, current_slot)
+                self.sign_abstract_block(&validator, &cluster, block, current_slot)
                     .await
             }
             UnsignedBlock::Blinded(block) => {
-                self.sign_abstract_block(validator_pubkey, block, current_slot)
+                self.sign_abstract_block(&validator, &cluster, block, current_slot)
                     .await
             }
         }
     }
@@ -948,7 +902,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
             });
         }
 
-        let validator = self.validator(validator_pubkey)?;
+        let (validator, cluster) = self.get_validator_and_cluster(validator_pubkey)?;
 
         let timer =
             metrics::start_timer_vec(&metrics::CONSENSUS_TIMES, &[metrics::BEACON_VOTE]);
@@ -960,7 +914,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
             .qbft_manager
             .decide_instance(
                 CommitteeInstanceId {
-                    committee: validator.cluster.committee_id(),
+                    committee: cluster.committee_id(),
                     instance_height: attestation.data().slot.as_usize().into(),
                 },
                 BeaconVote {
@@ -969,7 +923,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
                     target: attestation.data().target,
                 },
                 start_time,
-                &validator.cluster,
+                &cluster,
             )
             .await
             .map_err(SpecificError::from)?;
@@ -979,6 +933,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
             Completed::TimedOut => return Err(Error::SpecificError(SpecificError::Timeout)),
             Completed::Success(data) => data,
         };
+        let data_hash = data.hash();
         attestation.data_mut().beacon_block_root = data.block_root;
         attestation.data_mut().source = data.source;
         attestation.data_mut().target = data.target;
@@ -999,7 +954,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
             .collect_signature(
                 PartialSignatureKind::PostConsensus,
                 Role::Committee,
-                validator,
+                Some(data_hash),
+                &validator,
+                &cluster,
                 signing_root,
                 attestation.data().slot,
             )
@@ -1027,6 +984,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
         let domain_hash = self.spec.get_builder_domain();
         let signing_root = validator_registration_data.signing_root(domain_hash);
 
+        let (validator, cluster) =
+            self.get_validator_and_cluster(validator_registration_data.pubkey)?;
+
         // SSV always uses the start of the current epoch, so we need to convert to that
         let epoch = self
             .slot_clock
@@ -1043,7 +1003,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
             .collect_signature(
                 PartialSignatureKind::ValidatorRegistration,
                 Role::ValidatorRegistration,
-                self.validator(validator_registration_data.pubkey)?,
+                None,
+                &validator,
+                &cluster,
                 signing_root,
                 validity_slot,
             )
@@ -1072,7 +1034,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
     ) -> Result<SignedAggregateAndProof<E>, Error> {
         let future = async {
             let signing_epoch = aggregate.data().target.epoch;
-            let validator = self.validator(validator_pubkey)?;
+            let (validator, cluster) = self.get_validator_and_cluster(validator_pubkey)?;
 
             let version = match &aggregate {
                 Attestation::Base(_) => ForkName::Base.into(),
@@ -1104,10 +1066,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
                         r#type: BEACON_ROLE_AGGREGATOR,
                         pub_key: validator_pubkey,
                         slot: message.aggregate().data().slot,
-                        validator_index: validator
-                            .metadata
-                            .index
-                            .ok_or(SpecificError::MissingIndex)?,
+                        validator_index: validator.index.ok_or(SpecificError::MissingIndex)?,
                         committee_index: message.aggregate().data().index,
                         // TODO: it seems the below are not needed (anymore?)
                        // potentially related: https://github.com/sigp/anchor/issues/263
                        committee_length: 0,
                        committees_at_slot: 0,
@@ -1117,10 +1076,10 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
                         validator_sync_committee_indices: Default::default(),
                     },
                     version,
-                    data_ssz: message.as_ssz_bytes(),
+                    data_ssz: message.as_ssz_bytes().into(),
                 },
                 start_time,
-                &validator.cluster,
+                &cluster,
             )
             .await
             .map_err(SpecificError::from)?;
@@ -1156,7 +1115,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
                 .collect_signature(
                     PartialSignatureKind::PostConsensus,
                     Role::Aggregator,
-                    validator,
+                    None,
+                    &validator,
+                    &cluster,
                     signing_root,
                     message.aggregate().get_slot(),
                 )
@@ -1184,6 +1145,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
         let epoch = slot.epoch(E::slots_per_epoch());
         let domain_hash = self.get_domain(epoch, Domain::SelectionProof);
         let signing_root = slot.signing_root(domain_hash);
+        let (validator, cluster) = self.get_validator_and_cluster(validator_pubkey)?;
 
         // We do not want to spend too long on the selection proof. We will not produce an
         // aggregation anyway if the proof is not known at 2/3rds into the slot - so we abort
@@ -1197,7 +1159,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
             self.collect_signature(
                 PartialSignatureKind::SelectionProofPartialSig,
                 Role::Aggregator,
-                self.validator(validator_pubkey)?,
+                None,
+                &validator,
+                &cluster,
                 signing_root,
                 slot,
             ),
@@ -1228,6 +1192,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
             subcommittee_index: subnet_id.into(),
         }
         .signing_root(domain_hash);
+        let (validator, cluster) = self.get_validator_and_cluster(*validator_pubkey)?;
 
         // We do not want to spend too long on the selection proof. We will not produce an
         // aggregation anyway if the proof is not known at 2/3rds into the slot - so we abort
@@ -1241,7 +1206,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
             self.collect_signature(
                 PartialSignatureKind::ContributionProofs,
                 Role::SyncCommittee,
-                self.validator(*validator_pubkey)?,
+                None,
+                &validator,
+                &cluster,
                 signing_root,
                 slot,
             ),
@@ -1268,7 +1235,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
     ) -> Result<Signature, Error> {
         let future = async {
             let epoch = slot.epoch(E::slots_per_epoch());
-            let validator = self.validator(*validator_pubkey)?;
+            let (validator, cluster) = self.get_validator_and_cluster(*validator_pubkey)?;
 
             let metadata = self.get_slot_metadata(slot).await?;
             let timer =
@@ -1279,12 +1246,12 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
                 .qbft_manager
                 .decide_instance(
                     CommitteeInstanceId {
-                        committee: validator.cluster.committee_id(),
+                        committee: cluster.committee_id(),
                         instance_height: slot.as_usize().into(),
                     },
                     metadata.beacon_vote.clone(),
                     start_time,
-                    &validator.cluster,
+                    &cluster,
                 )
                 .await
                 .map_err(SpecificError::from)?;
@@ -1301,7 +1268,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
                 .collect_signature(
                     PartialSignatureKind::PostConsensus,
                     Role::Committee,
-                    validator,
+                    Some(data.hash()),
+                    &validator,
+                    &cluster,
                     signing_root,
                     slot,
                 )
@@ -1333,6 +1302,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
         let future = async {
             let slot = contribution.slot;
             let epoch = slot.epoch(E::slots_per_epoch());
+            let (validator, cluster) = self.get_validator_and_cluster(aggregator_pubkey)?;
 
             let subcommittee_index = contribution.subcommittee_index;
@@ -1341,11 +1311,6 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
                 selection_proof,
             };
 
-            let validator = match self.validator(aggregator_pubkey) {
-                Ok(cluster) => cluster,
-                Err(_) => return Err(Error::UnknownPubkey(aggregator_pubkey)),
-            };
-
             let metadata = self.get_slot_metadata(slot).await?;
             let signing_data = match metadata.multi_sync_aggregators.get(&aggregator_pubkey) {
@@ -1396,10 +1361,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
                         r#type: BEACON_ROLE_SYNC_COMMITTEE_CONTRIBUTION,
                         pub_key: aggregator_pubkey,
                         slot,
-                        validator_index: validator
-                            .metadata
-                            .index
-                            .ok_or(SpecificError::MissingIndex)?,
+                        validator_index: validator.index.ok_or(SpecificError::MissingIndex)?,
                         committee_index: 0,
                         committee_length: 0,
                         committees_at_slot: 0,
@@ -1407,10 +1369,10 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
                         validator_sync_committee_indices: Default::default(),
                     },
                     version: ForkName::Altair.into(),
-                    data_ssz: data.as_ssz_bytes(),
+                    data_ssz: data.as_ssz_bytes().into(),
                 },
                 start_time,
-                &validator.cluster,
+                &cluster,
             )
             .await;
             drop(timer);
@@ -1448,7 +1410,9 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
                 self.collect_signature(
                     PartialSignatureKind::PostConsensus,
                     Role::SyncCommittee,
-                    validator,
+                    None,
+                    &validator,
+                    &cluster,
                     signing_root,
                     slot,
                 )
@@ -1528,9 +1492,16 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore for AnchorValidatorStore<T, E> {
     }
 
     fn proposal_data(&self, pubkey: &PublicKeyBytes) -> Option<ProposalData> {
-        self.validator(*pubkey).ok().map(|v| ProposalData {
-            validator_index: v.metadata.index.map(|idx| *idx as u64),
-            fee_recipient: Some(v.cluster.fee_recipient),
+        let state = self.database.state();
+        let validator = state.metadata().get_by(pubkey)?;
+
+        let validator_index = validator.index.map(|idx| *idx as u64);
+        let cluster = state.clusters().get_by(&validator.cluster_id);
+        let fee_recipient = cluster.map(|c| c.fee_recipient);
+
+        Some(ProposalData {
+            validator_index,
+            fee_recipient,
             gas_limit: self.gas_limit,
             builder_proposals: self.builder_proposals,
         })
@@ -1540,14 +1511,14 @@
 trait SignableBlock<E: EthSpec>: Debug + Encode {
     type Payload: AbstractExecPayload<E>;
 
-    fn as_block(&self) -> BeaconBlockRef<E, Self::Payload>;
+    fn as_block(&self) -> BeaconBlockRef<'_, E, Self::Payload>;
     fn to_signed_block(self, signature: Signature) -> SignedBlock<E>;
 }
 
 impl<E: EthSpec> SignableBlock<E> for BlockContents<E> {
     type Payload = FullPayload<E>;
 
-    fn as_block(&self) -> BeaconBlockRef<E, Self::Payload> {
+    fn as_block(&self) -> BeaconBlockRef<'_, E, Self::Payload> {
         self.block.to_ref()
     }
 
@@ -1562,7 +1533,7 @@ impl<E: EthSpec> SignableBlock<E> for BlockContents<E> {
 impl<E: EthSpec> SignableBlock<E> for BeaconBlock<E, FullPayload<E>> {
     type Payload = FullPayload<E>;
 
-    fn as_block(&self) -> BeaconBlockRef<E, Self::Payload> {
+    fn as_block(&self) -> BeaconBlockRef<'_, E, Self::Payload> {
         self.to_ref()
     }
 
@@ -1577,7 +1548,7 @@ impl<E: EthSpec> SignableBlock<E> for BeaconBlock<E, FullPayload<E>> {
 impl<E: EthSpec> SignableBlock<E> for BeaconBlock<E, BlindedPayload<E>> {
     type Payload = BlindedPayload<E>;
 
-    fn as_block(&self) -> BeaconBlockRef<E, Self::Payload> {
+    fn as_block(&self) -> BeaconBlockRef<'_, E, Self::Payload> {
         self.to_ref()
     }