From 5ff6552fff58e1abef8d71d6524488373ed37284 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Wed, 22 Jan 2025 15:10:31 +0000 Subject: [PATCH 01/34] Added Makefile and nimble support --- Makefile | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index bb7d5efaef..001242c6af 100644 --- a/Makefile +++ b/Makefile @@ -108,6 +108,7 @@ VERIF_PROXY_OUT_PATH ?= build/libverifproxy/ nimbus \ nimbus_execution_client \ nimbus_portal_client \ + nimbus_unified \ fluffy \ nimbus_verified_proxy \ libverifproxy \ @@ -378,13 +379,24 @@ txparse: | build deps # usual cleaning clean: | clean-common - rm -rf build/{nimbus,nimbus_execution_client,nimbus_portal_client,fluffy,portal_bridge,libverifproxy,nimbus_verified_proxy,$(TOOLS_CSV),$(PORTAL_TOOLS_CSV),all_tests,test_kvstore_rocksdb,test_rpc,all_portal_tests,all_history_network_custom_chain_tests,test_portal_testnet,utp_test_app,utp_test,*.dSYM} + rm -rf build/{nimbus_unified,nimbus,nimbus_execution_client,nimbus_portal_client,fluffy,portal_bridge,libverifproxy,nimbus_verified_proxy,$(TOOLS_CSV),$(PORTAL_TOOLS_CSV),all_tests,test_kvstore_rocksdb,test_rpc,all_portal_tests,all_history_network_custom_chain_tests,test_portal_testnet,utp_test_app,utp_test,nimbus_unified_test,*.dSYM} rm -rf tools/t8n/{t8n,t8n_test} rm -rf tools/evmstate/{evmstate,evmstate_test} ifneq ($(USE_LIBBACKTRACE), 0) + $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT) endif +# Nimbus unified related targets + +# builds the unified client +nimbus_unified: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:release --parallelBuild:1 -d:libp2p_pki_schemes=secp256k1 -u:metrics -o:build/$@ "nimbus_unified/$@.nim" + +all_tests_unified: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim c -r $(NIM_PARAMS) --threads:on -d:chronicles_log_level=ERROR -o:build/$@ "nimbus_unified/tests/$@.nim" + # Note about building Nimbus as a library: # # There were `wrappers`, `wrappers-static`, `libnimbus.so` and `libnimbus.a` From 241c0a4cb9b88c76725c2249edfc3ab7cd59f2cf Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Thu, 23 Jan 2025 09:13:31 +0000 Subject: [PATCH 02/34] Added README file --- nimbus_unified/README.md | 45 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 nimbus_unified/README.md diff --git a/nimbus_unified/README.md b/nimbus_unified/README.md new file mode 100644 index 0000000000..cba7373882 --- /dev/null +++ b/nimbus_unified/README.md @@ -0,0 +1,45 @@ +# Nimbus Unified + + +[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) + +[![Discord: Nimbus](https://img.shields.io/badge/discord-nimbus-orange.svg)](https://discord.gg/XRxWahP) +[![Status: #nimbus-general](https://img.shields.io/badge/status-nimbus--general-orange.svg)](https://join.status.im/nimbus-general) + + +# description + +For in-depth configuration and functionality of Nimbus execution and consensus layer refer to: +- [Nimbus-eth1 - Execution layer client](https://github.com/status-im/nimbus-eth1) Documentation +- [Nimbus-eth2 - Consensus layer client](https://github.com/status-im/nimbus-eth2) Documentation + +tbc +# dependencies +tbd +# how to +## configuration + todo +## commands + todo +## compile +tbd + - mac os, windows, and linux + + ]$ make nimbus_unified +## colaborate +We welcome 
contributions to Nimbus Unified! Please adhere to the following guidelines: + +- Use the [Status Nim style guide](https://status-im.github.io/nim-style-guide/) to maintain code consistency. +- Format your code using the [Nim Pretty Printer (nph)](https://github.com/nim-lang/nimpretty) to ensure consistency across the codebase. Run it as part of your pull request process. +## License + +Licensed and distributed under either of + +* MIT license: [LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT + +or + +* Apache License, Version 2.0: [LICENSE-APACHEv2](LICENSE-APACHEv2) or https://www.apache.org/licenses/LICENSE-2.0 + +at your option. These files may not be copied, modified, or distributed except according to those terms. From d2014da932257a6b8bd362e8123e3e50cd090cb3 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Mon, 27 Jan 2025 09:15:53 +0000 Subject: [PATCH 03/34] added version file --- nimbus_unified/version.nim | 62 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 nimbus_unified/version.nim diff --git a/nimbus_unified/version.nim b/nimbus_unified/version.nim new file mode 100644 index 0000000000..4eaaf15007 --- /dev/null +++ b/nimbus_unified/version.nim @@ -0,0 +1,62 @@ +# Nimbus +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import std/strutils, stew/byteutils, metrics + +const + versionMajor* = 0 + versionMinor* = 1 + versionBuild* = 0 + + gitRevision* = strip(staticExec("git rev-parse --short HEAD"))[0 .. 5] + + versionAsStr* = $versionMajor & "." & $versionMinor & "." & $versionBuild + + fullVersionStr* = "v" & versionAsStr & "-" & gitRevision + + clientName* = "Nimbus" + + nimFullBanner = staticExec("nim --version") + nimBanner* = staticExec("nim --version | grep Version") + + # The web3_clientVersion + clientVersion* = + clientName & "/" & fullVersionStr & "/" & hostOS & "-" & hostCPU & "/" & "Nim" & + NimVersion + + compileYear = CompileDate[0 ..< 4] # YYYY-MM-DD (UTC) + copyrightBanner* = + "Copyright (c) " & compileYear & " Status Research & Development GmbH" + + # Short debugging identifier to be placed in the ENR + enrClientInfoShort* = toBytes("f") + +func getNimGitHash*(): string = + const gitPrefix = "git hash: " + let tmp = splitLines(nimFullBanner) + if tmp.len == 0: + return + for line in tmp: + if line.startsWith(gitPrefix) and line.len > 8 + gitPrefix.len: + result = line[gitPrefix.len ..< gitPrefix.len + 8] + +# TODO: Currently prefixing these metric names as the non prefixed names give +# a collector already registered conflict at runtime. This is due to the same +# names in nimbus-eth2 nimbus_binary_common.nim even though there are no direct +# imports of that file. 
+ +declareGauge versionGauge, + "Nimbus version info (as metric labels)", + ["version", "commit"], + name = "nimbus_version" +versionGauge.set(1, labelValues = [fullVersionStr, gitRevision]) + +declareGauge nimVersionGauge, + "Nim version info", ["version", "nim_commit"], name = "nimbus_nim_version" +nimVersionGauge.set(1, labelValues = [NimVersion, getNimGitHash()]) From 490dac998c7795ad64e6d65b43d4006968e7dc48 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Tue, 28 Jan 2025 10:23:24 +0000 Subject: [PATCH 04/34] minimal functional project structure: - chronicle support - Added config files with support for both nimbus-eth clients configuration. - thread model suggestion. - Minimal nimbus config for given thread model. - layers handlers. --- nimbus_unified/configs/nimbus_configs.nim | 70 +++++++++ nimbus_unified/consensus/consensus_layer.nim | 32 ++++ nimbus_unified/execution/execution_layer.nim | 31 ++++ nimbus_unified/nimbus_unified.cfg | 17 ++ nimbus_unified/nimbus_unified.nim | 156 +++++++++++++++++++ 5 files changed, 306 insertions(+) create mode 100644 nimbus_unified/configs/nimbus_configs.nim create mode 100644 nimbus_unified/consensus/consensus_layer.nim create mode 100644 nimbus_unified/execution/execution_layer.nim create mode 100644 nimbus_unified/nimbus_unified.cfg create mode 100644 nimbus_unified/nimbus_unified.nim diff --git a/nimbus_unified/configs/nimbus_configs.nim b/nimbus_unified/configs/nimbus_configs.nim new file mode 100644 index 0000000000..3a55f6dbc5 --- /dev/null +++ b/nimbus_unified/configs/nimbus_configs.nim @@ -0,0 +1,70 @@ +# nimbus_unified +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+ +import + std/[os, atomics], + chronicles, + options, + #eth2-configs + beacon_chain/nimbus_binary_common, + #eth1-configs + ../../nimbus/nimbus_desc + +export BeaconNodeConf, NimbusConf + +## Exceptions +type NimbusServiceError* = object of CatchableError + +## Constants +## TODO: evaluate the proposed timeouts +const cNimbusMaxServices* = 2 +const cNimbusServiceTimeoutMs* = 3000 + +## log +logScope: + topics = "Service manager" + +## Nimbus service arguments +type + ConfigKind* = enum + Execution + Consensus + + LayerConfig* = object + case kind*: ConfigKind + of Consensus: + consensusConfig*: BeaconNodeConf + of Execution: + executionConfig*: NimbusConf + + ServiceParameters* = object + name*: string + layerConfig*: LayerConfig + + NimbusService* = ref object + name*: string + timeoutMs*: uint32 + serviceHandler*: Thread[ServiceParameters] + + Nimbus* = ref object + serviceList*: array[cNimbusMaxServices, Option[NimbusService]] + +## Service shutdown +var isShutDownRequired*: Atomic[bool] +isShutDownRequired.store(false) + +# filesystem specs +proc defaultDataDir*(): string = + let dataDir = + when defined(windows): + "AppData" / "Roaming" / "Nimbus_unified" + elif defined(macosx): + "Library" / "Application Support" / "Nimbus_unified" + else: + ".cache" / "nimbus_unified" + + getHomeDir() / dataDir diff --git a/nimbus_unified/consensus/consensus_layer.nim b/nimbus_unified/consensus/consensus_layer.nim new file mode 100644 index 0000000000..90cbcd1f9e --- /dev/null +++ b/nimbus_unified/consensus/consensus_layer.nim @@ -0,0 +1,32 @@ +# nimbus_unified +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import std/[atomics, os], chronicles, ../configs/nimbus_configs + +export nimbus_configs + +## log +logScope: + topics = "Consensus layer" + +proc consensusLayer*(params: ServiceParameters) {.raises: [CatchableError].} = + var config = params.layerConfig + + doAssert config.kind == Consensus + + try: + while isShutDownRequired.load() == false: + info "consensus ..." + sleep(cNimbusServiceTimeoutMs + 1000) + + isShutDownRequired.store(true) + except CatchableError as e: + fatal "error", message = e.msg + isShutDownRequired.store(true) + + isShutDownRequired.store(true) + warn "\tExiting consensus layer" diff --git a/nimbus_unified/execution/execution_layer.nim b/nimbus_unified/execution/execution_layer.nim new file mode 100644 index 0000000000..5cf09b8122 --- /dev/null +++ b/nimbus_unified/execution/execution_layer.nim @@ -0,0 +1,31 @@ +# nimbus_unified +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
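A brief illustration (not part of the patch) of how the LayerConfig variant defined in nimbus_configs.nim above is built and consumed; both layer handlers depend on this kind dispatch through their `doAssert config.kind == ...` checks. The local name cfg is made up; the types are the ones declared above, with BeaconNodeConf and NimbusConf re-exported by nimbus_configs.

    # Illustrative only: construct a per-layer config and branch on its kind.
    let cfg = LayerConfig(kind: Consensus, consensusConfig: BeaconNodeConf())
    case cfg.kind
    of Consensus:
      discard cfg.consensusConfig   # BeaconNodeConf (nimbus-eth2)
    of Execution:
      discard cfg.executionConfig   # NimbusConf (nimbus-eth1)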
+ +import std/[atomics, os], chronicles, ../configs/nimbus_configs + +export nimbus_configs + +logScope: + topics = "Execution layer" + +proc executionLayer*(params: ServiceParameters) {.raises: [CatchableError].} = + var config = params.layerConfig + + doAssert config.kind == Execution + + try: + while isShutDownRequired.load() == false: + info "execution ..." + sleep(cNimbusServiceTimeoutMs) + + isShutDownRequired.store(true) + except CatchableError as e: + fatal "error", message = e.msg + isShutDownRequired.store(true) + + isShutDownRequired.store(true) + warn "\tExiting execution layer" diff --git a/nimbus_unified/nimbus_unified.cfg b/nimbus_unified/nimbus_unified.cfg new file mode 100644 index 0000000000..36a4fd937d --- /dev/null +++ b/nimbus_unified/nimbus_unified.cfg @@ -0,0 +1,17 @@ +# nimbus_unified +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +-d:"libp2p_pki_schemes=secp256k1" + +-d:"chronicles_sinks=textlines[dynamic],json[dynamic]" +-d:"chronicles_runtime_filtering=on" +-d:"chronicles_disable_thread_id" + +@if release: + -d:"chronicles_line_numbers:0" +@end + diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim new file mode 100644 index 0000000000..8a24f0fbfd --- /dev/null +++ b/nimbus_unified/nimbus_unified.nim @@ -0,0 +1,156 @@ +# nimbus_unified +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import + std/[atomics, os, exitprocs], + chronicles, + stew/io2, + options, + consensus/consensus_layer, + execution/execution_layer, + configs/nimbus_configs, + #eth2-configs + beacon_chain/nimbus_binary_common, + #eth1-configs + ../nimbus/nimbus_desc + +# ------------------------------------------------------------------------------ +# Private +# ------------------------------------------------------------------------------ + +## Execution Layer handler +proc executionLayerHandler(parameters: ServiceParameters) {.thread.} = + info "Started service:", service = parameters.name + executionLayer(parameters) + info "\tExited service", service = parameters.name + +## Consensus Layer handler +proc consensusLayerHandler(parameters: ServiceParameters) {.thread.} = + info "Started service:", service = parameters.name + + info "Waiting for execution layer bring up ..." + consensusLayer(parameters) + info "\tExit service", service = parameters.name + +# lock file +var gPidFile: string +proc createPidFile(filename: string) {.raises: [IOError].} = + writeFile filename, $os.getCurrentProcessId() + gPidFile = filename + addExitProc ( + proc() = + discard io2.removeFile(filename) + ) + +## adds a new service to nimbus services list. 
+## returns position on services list +proc addService( + nimbus: var Nimbus, + serviceHandler: proc(config: ServiceParameters) {.thread.}, + parameters: var ServiceParameters, + timeout: uint32, +): int = + #search next available free worker + var currentIndex = -1 + for i in 0 .. cNimbusMaxServices - 1: + if nimbus.serviceList[i].isNone: + nimbus.serviceList[i] = + some(NimbusService(name: parameters.name, timeoutMs: timeout)) + currentIndex = i + parameters.name = parameters.name + break + + if currentIndex < 0: + raise newException(NimbusServiceError, "No available slots on nimbus services list") + + info "Created service:", service = nimbus.serviceList[currentIndex].get().name + + currentIndex + +# ------------------------------------------------------------------------------ +# Public +# ------------------------------------------------------------------------------ + +## Block execution and waits for services to finish +proc exitServices*(nimbus: Nimbus) = + + for i in 0 .. cNimbusMaxServices - 1: + if nimbus.serviceList[i].isSome: + let thread = nimbus.serviceList[i].get() + if thread.serviceHandler.running(): + joinThread(thread.serviceHandler) + nimbus.serviceList[i] = none(NimbusService) + info "Exited service ", service = thread.name + + notice "Exited all services" + +## Service monitoring +proc monitor*(nimbus: Nimbus) = + info "started service monitoring" + + while isShutDownRequired.load() == false: + sleep(cNimbusServiceTimeoutMs) + + if isShutDownRequired.load() == true: + nimbus.exitServices() + + notice "Shutting down now" + +## create and configure service +proc startService*( + nimbus: var Nimbus, + config: var LayerConfig, + service: string, + fun: proc(config: ServiceParameters) {.thread.}, + timeout: uint32 = cNimbusServiceTimeoutMs, +) {.raises: [CatchableError].} = + var params: ServiceParameters = ServiceParameters(name: service, layerConfig: config) + let serviceId = nimbus.addService(fun, params, timeout) + + try: + createThread(nimbus.serviceList[serviceId].get().serviceHandler, fun, params) + except CatchableError as e: + fatal "error creating service (thread)", msg = e.msg + + info "Starting service ", service = service + +# ------ +when isMainModule: + notice "Starting Nimbus" + + setupFileLimits() + + var nimbus: Nimbus = Nimbus.new + + ## Graceful shutdown by handling of Ctrl+C signal + proc controlCHandler() {.noconv.} = + when defined(windows): + # workaround for https://github.com/nim-lang/Nim/issues/4057 + try: + setupForeignThreadGc() + except NimbusServiceError as exc: + raiseAssert exc.msg # shouldn't happen + + notice "\tCtrl+C pressed. 
Shutting down services" + isShutDownRequired.store(true) + nimbus.exitServices() + + setControlCHook(controlCHandler) + + var + execution = LayerConfig(kind: Execution, executionConfig: NimbusConf()) + consensus = LayerConfig(kind: Consensus, consensusConfig: BeaconNodeConf()) + + try: + nimbus.startService(execution, "Execution Layer", executionLayerHandler) + nimbus.startService(consensus, "Consensus Layer", consensusLayerHandler) + except Exception: + isShutDownRequired.store(true) + nimbus.exitServices() + quit QuitFailure + + nimbus.monitor() From d643900a96d9893ee02ecad6b92b63192c60e0fd Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Tue, 28 Jan 2025 23:30:51 +0000 Subject: [PATCH 05/34] added tests --- nimbus_unified/tests/all_tests_unified.nim | 13 ++ .../tests/consensus/test_consensus_layer.nim | 30 +++++ .../tests/execution/test_execution_layer.nim | 30 +++++ nimbus_unified/tests/nim.cfg | 14 ++ nimbus_unified/tests/test_nimbus_unified.nim | 126 ++++++++++++++++++ 5 files changed, 213 insertions(+) create mode 100644 nimbus_unified/tests/all_tests_unified.nim create mode 100644 nimbus_unified/tests/consensus/test_consensus_layer.nim create mode 100644 nimbus_unified/tests/execution/test_execution_layer.nim create mode 100644 nimbus_unified/tests/nim.cfg create mode 100644 nimbus_unified/tests/test_nimbus_unified.nim diff --git a/nimbus_unified/tests/all_tests_unified.nim b/nimbus_unified/tests/all_tests_unified.nim new file mode 100644 index 0000000000..54c93f124d --- /dev/null +++ b/nimbus_unified/tests/all_tests_unified.nim @@ -0,0 +1,13 @@ +# nimbus_unified +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.warning[UnusedImport]: off.} + +import + ./test_nimbus_unified, + ./consensus/test_consensus_layer, + ./execution/test_execution_layer diff --git a/nimbus_unified/tests/consensus/test_consensus_layer.nim b/nimbus_unified/tests/consensus/test_consensus_layer.nim new file mode 100644 index 0000000000..be4398372d --- /dev/null +++ b/nimbus_unified/tests/consensus/test_consensus_layer.nim @@ -0,0 +1,30 @@ +# nimbus_unified +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
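As a usage sketch (not part of the patch): startService above accepts any proc matching `proc(config: ServiceParameters) {.thread.}`, and the monitor/exitServices pair assumes such handlers poll isShutDownRequired and return on shutdown. Assuming the symbols of nimbus_unified.nim and configs/nimbus_configs.nim are in scope, a minimal conforming handler and its registration could look like the following; exampleHandler and "Example Layer" are made-up names, and the tests added below register a no-op handlerMock with the same signature.

    # Sketch only: cooperate with monitor()/exitServices() by polling the
    # shared shutdown flag, as the real layer handlers above do.
    proc exampleHandler(params: ServiceParameters) {.thread.} =
      while isShutDownRequired.load() == false:
        sleep(cNimbusServiceTimeoutMs)

    var nimbus = Nimbus.new
    var cfg = LayerConfig(kind: Execution, executionConfig: NimbusConf())
    nimbus.startService(cfg, "Example Layer", exampleHandler)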
+ +import + std/atomics, unittest2, ../../consensus/consensus_layer, ../../configs/nimbus_configs + +# ---------------------------------------------------------------------------- +# Unit Tests +# ---------------------------------------------------------------------------- + +suite "Nimbus Consensus Layer Tests": + # Test: consensusLayer handles CatchableError gracefully + test "consensusLayer handles CatchableError and sets shutdown flag": + var params = ServiceParameters( + name: "ErrorTest", + layerConfig: LayerConfig(kind: Consensus, consensusConfig: BeaconNodeConf()), + ) + + check: + try: + consensusLayer(params) + true # No uncaught exceptions + except CatchableError: + false # If an exception is raised, the test fails + + check isShutDownRequired.load() == true # Verify shutdown flag is set diff --git a/nimbus_unified/tests/execution/test_execution_layer.nim b/nimbus_unified/tests/execution/test_execution_layer.nim new file mode 100644 index 0000000000..3a408ce1ac --- /dev/null +++ b/nimbus_unified/tests/execution/test_execution_layer.nim @@ -0,0 +1,30 @@ +# nimbus_unified +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import + std/atomics, unittest2, ../../execution/execution_layer, ../../configs/nimbus_configs + +# ---------------------------------------------------------------------------- +# Unit Tests +# ---------------------------------------------------------------------------- + +suite "Nimbus Execution Layer Tests": + # Test: executionLayer handles CatchableError gracefully + test "executionLayer handles CatchableError and sets shutdown flag": + var params = ServiceParameters( + name: "ErrorTest", + layerConfig: LayerConfig(kind: Execution, executionConfig: NimbusConf()), + ) + + check: + try: + executionLayer(params) + true # No uncaught exceptions + except CatchableError: + false # If an exception is raised, the test fails + + check isShutDownRequired.load() == true # Verify shutdown flag is set diff --git a/nimbus_unified/tests/nim.cfg b/nimbus_unified/tests/nim.cfg new file mode 100644 index 0000000000..e6add8a439 --- /dev/null +++ b/nimbus_unified/tests/nim.cfg @@ -0,0 +1,14 @@ +# Nimbus +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +# Use only `secp256k1` public key cryptography as an identity in LibP2P. 
+-d:"libp2p_pki_schemes=secp256k1" +-d:"chronicles_runtime_filtering=on" + +--styleCheck:usages +--styleCheck:hint +--hint[Processing]:offAd \ No newline at end of file diff --git a/nimbus_unified/tests/test_nimbus_unified.nim b/nimbus_unified/tests/test_nimbus_unified.nim new file mode 100644 index 0000000000..3cb8acba28 --- /dev/null +++ b/nimbus_unified/tests/test_nimbus_unified.nim @@ -0,0 +1,126 @@ +# nimbus_unified +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import + std/[os, atomics], + unittest2, + ../nimbus_unified, + ../configs/nimbus_configs, + #eth1-configs + ../../nimbus/nimbus_desc + +# ---------------------------------------------------------------------------- +# Helper Functions +# ---------------------------------------------------------------------------- + +template fileExists(filename: string): bool = + try: + discard readFile(filename) + true + except IOError: + false + +template removeFile(filename: string) = + try: + discard io2.removeFile(filename) + except IOError: + discard # Ignore if the file does not exist + +proc handlerMock(parameters: ServiceParameters) {.thread.} = return + +# ---------------------------------------------------------------------------- +# Unit Tests +# ---------------------------------------------------------------------------- + +suite "Nimbus Service Management Tests": + var nimbus: Nimbus + setup: + nimbus = Nimbus.new + + # Test: Creating a new service successfully + test "startService successfully adds a service": + var layerConfig = LayerConfig(kind: Execution, executionConfig: NimbusConf()) + + nimbus.startService(layerConfig, "TestService", handlerMock) + + check nimbus.serviceList[0].isSome + check nimbus.serviceList[0].get().name == "TestService" + + # Test: Adding more services than the maximum allowed + test "startService fails when Nimbus is full": + for i in 0 ..< cNimbusMaxServices: + var layerConfig = LayerConfig(kind: Execution, executionConfig: NimbusConf()) + nimbus.startService(layerConfig, "service" & $i, handlerMock) + + # Attempt to add one more service than allowed + var extraConfig = LayerConfig(kind: Execution, executionConfig: NimbusConf()) + check: + try: + nimbus.startService(extraConfig, "ExtraService", handlerMock) + false # If no exception, test fails + except NimbusServiceError: + true # Exception was correctly raised + + # Test: Services finish properly and exitServices correctly joins all threads + test "exitServices waits for all services to finish": + for i in 0 ..< cNimbusMaxServices: + var layerConfig = LayerConfig(kind: Execution, executionConfig: NimbusConf()) + nimbus.startService(layerConfig, "service" & $i, handlerMock) + + nimbus.exitServices() + + # Check that all service slots are empty (thread was stopped, joined and its spot cleared) + for i in 0 ..< cNimbusMaxServices - 1: + check nimbus.serviceList[i].isNone + + # Test: startServices initializes both the execution and consensus layer services + test "startServices initializes execution and consensus services": + var execLayer = LayerConfig(kind: Execution, executionConfig: NimbusConf()) + var consensusLayer = LayerConfig(kind: Execution, executionConfig: NimbusConf()) + 
+ nimbus.startService(execLayer, "service1", handlerMock) + nimbus.startService(consensusLayer, "service2", handlerMock) + + # Check that at least two services were created + check not nimbus.serviceList[0].isNone + check not nimbus.serviceList[1].isNone + + # Test: Monitor detects shutdown and calls exitServices + test "monitor stops on shutdown signal and calls exitServices": + var layer = LayerConfig(kind: Execution, executionConfig: NimbusConf()) + nimbus.startService(layer, "service1", handlerMock) + + #simulates a shutdown signal + isShutDownRequired.store(true) + nimbus.monitor() + + # Check that the monitor loop exits correctly + # services running should be 0 + check isShutDownRequired.load() == true + for i in 0 .. cNimbusMaxServices - 1: + check nimbus.serviceList[i].isNone + + # Test: Control-C handler properly initiates shutdown + test "controlCHandler triggers shutdown sequence": + var layer = LayerConfig(kind: Execution, executionConfig: NimbusConf()) + nimbus.startService(layer, "service1", handlerMock) + + proc localControlCHandler() {.noconv.} = + isShutDownRequired.store(true) + nimbus.exitServices() + + # Set up a simulated control-C hook + setControlCHook(localControlCHandler) + + # Trigger the hook manually + localControlCHandler() + + check isShutDownRequired.load() == true + + #services running should be 0 + for i in 0 .. cNimbusMaxServices - 1: + check nimbus.serviceList[i].isNone From 866ecc85f2397d53f197c8b530563720301a16b3 Mon Sep 17 00:00:00 2001 From: Pedro Miranda Date: Wed, 29 Jan 2025 00:00:41 +0000 Subject: [PATCH 06/34] nph formatting and correct typos --- nimbus_unified/README.md | 2 +- nimbus_unified/consensus/consensus_layer.nim | 1 - nimbus_unified/nimbus_unified.nim | 3 +-- nimbus_unified/tests/nim.cfg | 2 +- nimbus_unified/tests/test_nimbus_unified.nim | 3 ++- 5 files changed, 5 insertions(+), 6 deletions(-) diff --git a/nimbus_unified/README.md b/nimbus_unified/README.md index cba7373882..2d750a3c45 100644 --- a/nimbus_unified/README.md +++ b/nimbus_unified/README.md @@ -9,7 +9,7 @@ # description - +tbd For in-depth configuration and functionality of Nimbus execution and consensus layer refer to: - [Nimbus-eth1 - Execution layer client](https://github.com/status-im/nimbus-eth1) Documentation - [Nimbus-eth2 - Consensus layer client](https://github.com/status-im/nimbus-eth2) Documentation diff --git a/nimbus_unified/consensus/consensus_layer.nim b/nimbus_unified/consensus/consensus_layer.nim index 90cbcd1f9e..5f20237c5f 100644 --- a/nimbus_unified/consensus/consensus_layer.nim +++ b/nimbus_unified/consensus/consensus_layer.nim @@ -9,7 +9,6 @@ import std/[atomics, os], chronicles, ../configs/nimbus_configs export nimbus_configs -## log logScope: topics = "Consensus layer" diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index 8a24f0fbfd..e9cb60901c 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -77,14 +77,13 @@ proc addService( ## Block execution and waits for services to finish proc exitServices*(nimbus: Nimbus) = - for i in 0 .. 
cNimbusMaxServices - 1: if nimbus.serviceList[i].isSome: let thread = nimbus.serviceList[i].get() if thread.serviceHandler.running(): joinThread(thread.serviceHandler) - nimbus.serviceList[i] = none(NimbusService) info "Exited service ", service = thread.name + nimbus.serviceList[i] = none(NimbusService) notice "Exited all services" diff --git a/nimbus_unified/tests/nim.cfg b/nimbus_unified/tests/nim.cfg index e6add8a439..11d9576ec2 100644 --- a/nimbus_unified/tests/nim.cfg +++ b/nimbus_unified/tests/nim.cfg @@ -11,4 +11,4 @@ --styleCheck:usages --styleCheck:hint ---hint[Processing]:offAd \ No newline at end of file +--hint[Processing]:off \ No newline at end of file diff --git a/nimbus_unified/tests/test_nimbus_unified.nim b/nimbus_unified/tests/test_nimbus_unified.nim index 3cb8acba28..6d32d2016c 100644 --- a/nimbus_unified/tests/test_nimbus_unified.nim +++ b/nimbus_unified/tests/test_nimbus_unified.nim @@ -30,7 +30,8 @@ template removeFile(filename: string) = except IOError: discard # Ignore if the file does not exist -proc handlerMock(parameters: ServiceParameters) {.thread.} = return +proc handlerMock(parameters: ServiceParameters) {.thread.} = + return # ---------------------------------------------------------------------------- # Unit Tests From 3511254be61219308215c18c513f929db1fc6b78 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Wed, 5 Feb 2025 18:00:14 +0000 Subject: [PATCH 07/34] removed deadcode --- nimbus_unified/nimbus_unified.nim | 10 ---- nimbus_unified/tests/test_nimbus_unified.nim | 23 +------- nimbus_unified/version.nim | 62 -------------------- 3 files changed, 3 insertions(+), 92 deletions(-) delete mode 100644 nimbus_unified/version.nim diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus_unified/nimbus_unified.nim index e9cb60901c..2d7d0d33de 100644 --- a/nimbus_unified/nimbus_unified.nim +++ b/nimbus_unified/nimbus_unified.nim @@ -36,16 +36,6 @@ proc consensusLayerHandler(parameters: ServiceParameters) {.thread.} = consensusLayer(parameters) info "\tExit service", service = parameters.name -# lock file -var gPidFile: string -proc createPidFile(filename: string) {.raises: [IOError].} = - writeFile filename, $os.getCurrentProcessId() - gPidFile = filename - addExitProc ( - proc() = - discard io2.removeFile(filename) - ) - ## adds a new service to nimbus services list. 
## returns position on services list proc addService( diff --git a/nimbus_unified/tests/test_nimbus_unified.nim b/nimbus_unified/tests/test_nimbus_unified.nim index 6d32d2016c..0fb752cffc 100644 --- a/nimbus_unified/tests/test_nimbus_unified.nim +++ b/nimbus_unified/tests/test_nimbus_unified.nim @@ -16,20 +16,6 @@ import # ---------------------------------------------------------------------------- # Helper Functions # ---------------------------------------------------------------------------- - -template fileExists(filename: string): bool = - try: - discard readFile(filename) - true - except IOError: - false - -template removeFile(filename: string) = - try: - discard io2.removeFile(filename) - except IOError: - discard # Ignore if the file does not exist - proc handlerMock(parameters: ServiceParameters) {.thread.} = return @@ -75,8 +61,7 @@ suite "Nimbus Service Management Tests": nimbus.exitServices() # Check that all service slots are empty (thread was stopped, joined and its spot cleared) - for i in 0 ..< cNimbusMaxServices - 1: - check nimbus.serviceList[i].isNone + for s in nimbus.serviceList: check s.isNone # Test: startServices initializes both the execution and consensus layer services test "startServices initializes execution and consensus services": @@ -102,8 +87,7 @@ suite "Nimbus Service Management Tests": # Check that the monitor loop exits correctly # services running should be 0 check isShutDownRequired.load() == true - for i in 0 .. cNimbusMaxServices - 1: - check nimbus.serviceList[i].isNone + for s in nimbus.serviceList: check s.isNone # Test: Control-C handler properly initiates shutdown test "controlCHandler triggers shutdown sequence": @@ -123,5 +107,4 @@ suite "Nimbus Service Management Tests": check isShutDownRequired.load() == true #services running should be 0 - for i in 0 .. cNimbusMaxServices - 1: - check nimbus.serviceList[i].isNone + for s in nimbus.serviceList: check s.isNone diff --git a/nimbus_unified/version.nim b/nimbus_unified/version.nim deleted file mode 100644 index 4eaaf15007..0000000000 --- a/nimbus_unified/version.nim +++ /dev/null @@ -1,62 +0,0 @@ -# Nimbus -# Copyright (c) 2025 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). -# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. - -{.push raises: [].} - -import std/strutils, stew/byteutils, metrics - -const - versionMajor* = 0 - versionMinor* = 1 - versionBuild* = 0 - - gitRevision* = strip(staticExec("git rev-parse --short HEAD"))[0 .. 5] - - versionAsStr* = $versionMajor & "." & $versionMinor & "." 
& $versionBuild - - fullVersionStr* = "v" & versionAsStr & "-" & gitRevision - - clientName* = "Nimbus" - - nimFullBanner = staticExec("nim --version") - nimBanner* = staticExec("nim --version | grep Version") - - # The web3_clientVersion - clientVersion* = - clientName & "/" & fullVersionStr & "/" & hostOS & "-" & hostCPU & "/" & "Nim" & - NimVersion - - compileYear = CompileDate[0 ..< 4] # YYYY-MM-DD (UTC) - copyrightBanner* = - "Copyright (c) " & compileYear & " Status Research & Development GmbH" - - # Short debugging identifier to be placed in the ENR - enrClientInfoShort* = toBytes("f") - -func getNimGitHash*(): string = - const gitPrefix = "git hash: " - let tmp = splitLines(nimFullBanner) - if tmp.len == 0: - return - for line in tmp: - if line.startsWith(gitPrefix) and line.len > 8 + gitPrefix.len: - result = line[gitPrefix.len ..< gitPrefix.len + 8] - -# TODO: Currently prefixing these metric names as the non prefixed names give -# a collector already registered conflict at runtime. This is due to the same -# names in nimbus-eth2 nimbus_binary_common.nim even though there are no direct -# imports of that file. - -declareGauge versionGauge, - "Nimbus version info (as metric labels)", - ["version", "commit"], - name = "nimbus_version" -versionGauge.set(1, labelValues = [fullVersionStr, gitRevision]) - -declareGauge nimVersionGauge, - "Nim version info", ["version", "nim_commit"], name = "nimbus_nim_version" -nimVersionGauge.set(1, labelValues = [NimVersion, getNimGitHash()]) From fc7ca1b19f7a8d5703d50056c0965a65f8c2099f Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Thu, 13 Feb 2025 19:22:52 +0000 Subject: [PATCH 08/34] renamed from unified to nimbus --- {nimbus_unified => nimbus}/README.md | 0 {nimbus_unified => nimbus}/configs/nimbus_configs.nim | 0 {nimbus_unified => nimbus}/consensus/consensus_layer.nim | 0 {nimbus_unified => nimbus}/execution/execution_layer.nim | 0 nimbus_unified/nimbus_unified.cfg => nimbus/nimbus.cfg | 0 nimbus_unified/nimbus_unified.nim => nimbus/nimbus.nim | 0 {nimbus_unified => nimbus}/tests/all_tests_unified.nim | 0 .../tests/consensus/test_consensus_layer.nim | 0 .../tests/execution/test_execution_layer.nim | 0 {nimbus_unified => nimbus}/tests/nim.cfg | 0 {nimbus_unified => nimbus}/tests/test_nimbus_unified.nim | 0 11 files changed, 0 insertions(+), 0 deletions(-) rename {nimbus_unified => nimbus}/README.md (100%) rename {nimbus_unified => nimbus}/configs/nimbus_configs.nim (100%) rename {nimbus_unified => nimbus}/consensus/consensus_layer.nim (100%) rename {nimbus_unified => nimbus}/execution/execution_layer.nim (100%) rename nimbus_unified/nimbus_unified.cfg => nimbus/nimbus.cfg (100%) rename nimbus_unified/nimbus_unified.nim => nimbus/nimbus.nim (100%) rename {nimbus_unified => nimbus}/tests/all_tests_unified.nim (100%) rename {nimbus_unified => nimbus}/tests/consensus/test_consensus_layer.nim (100%) rename {nimbus_unified => nimbus}/tests/execution/test_execution_layer.nim (100%) rename {nimbus_unified => nimbus}/tests/nim.cfg (100%) rename {nimbus_unified => nimbus}/tests/test_nimbus_unified.nim (100%) diff --git a/nimbus_unified/README.md b/nimbus/README.md similarity index 100% rename from nimbus_unified/README.md rename to nimbus/README.md diff --git a/nimbus_unified/configs/nimbus_configs.nim b/nimbus/configs/nimbus_configs.nim similarity index 100% rename from nimbus_unified/configs/nimbus_configs.nim rename to nimbus/configs/nimbus_configs.nim diff --git a/nimbus_unified/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim 
similarity index 100% rename from nimbus_unified/consensus/consensus_layer.nim rename to nimbus/consensus/consensus_layer.nim diff --git a/nimbus_unified/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim similarity index 100% rename from nimbus_unified/execution/execution_layer.nim rename to nimbus/execution/execution_layer.nim diff --git a/nimbus_unified/nimbus_unified.cfg b/nimbus/nimbus.cfg similarity index 100% rename from nimbus_unified/nimbus_unified.cfg rename to nimbus/nimbus.cfg diff --git a/nimbus_unified/nimbus_unified.nim b/nimbus/nimbus.nim similarity index 100% rename from nimbus_unified/nimbus_unified.nim rename to nimbus/nimbus.nim diff --git a/nimbus_unified/tests/all_tests_unified.nim b/nimbus/tests/all_tests_unified.nim similarity index 100% rename from nimbus_unified/tests/all_tests_unified.nim rename to nimbus/tests/all_tests_unified.nim diff --git a/nimbus_unified/tests/consensus/test_consensus_layer.nim b/nimbus/tests/consensus/test_consensus_layer.nim similarity index 100% rename from nimbus_unified/tests/consensus/test_consensus_layer.nim rename to nimbus/tests/consensus/test_consensus_layer.nim diff --git a/nimbus_unified/tests/execution/test_execution_layer.nim b/nimbus/tests/execution/test_execution_layer.nim similarity index 100% rename from nimbus_unified/tests/execution/test_execution_layer.nim rename to nimbus/tests/execution/test_execution_layer.nim diff --git a/nimbus_unified/tests/nim.cfg b/nimbus/tests/nim.cfg similarity index 100% rename from nimbus_unified/tests/nim.cfg rename to nimbus/tests/nim.cfg diff --git a/nimbus_unified/tests/test_nimbus_unified.nim b/nimbus/tests/test_nimbus_unified.nim similarity index 100% rename from nimbus_unified/tests/test_nimbus_unified.nim rename to nimbus/tests/test_nimbus_unified.nim From 0b6407d418b6f190ceeb62a9bceda22dc7634393 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Thu, 13 Feb 2025 19:24:02 +0000 Subject: [PATCH 09/34] renamed references from unified to nimbus --- Makefile | 18 +++++-------- nimbus.nimble | 7 +++++ nimbus/README.md | 6 ++--- .../{configs/nimbus_configs.nim => conf.nim} | 26 +++++++++---------- nimbus/consensus/consensus_layer.nim | 2 +- nimbus/execution/execution_layer.nim | 2 +- nimbus/nimbus.cfg | 2 +- nimbus/nimbus.nim | 9 +++---- ...tests_unified.nim => all_tests_nimbus.nim} | 4 +-- .../tests/consensus/test_consensus_layer.nim | 2 +- .../tests/execution/test_execution_layer.nim | 2 +- ...est_nimbus_unified.nim => test_nimbus.nim} | 6 ++--- 12 files changed, 42 insertions(+), 44 deletions(-) rename nimbus/{configs/nimbus_configs.nim => conf.nim} (75%) rename nimbus/tests/{all_tests_unified.nim => all_tests_nimbus.nim} (92%) rename nimbus/tests/{test_nimbus_unified.nim => test_nimbus.nim} (98%) diff --git a/Makefile b/Makefile index 001242c6af..2df35594d0 100644 --- a/Makefile +++ b/Makefile @@ -108,7 +108,6 @@ VERIF_PROXY_OUT_PATH ?= build/libverifproxy/ nimbus \ nimbus_execution_client \ nimbus_portal_client \ - nimbus_unified \ fluffy \ nimbus_verified_proxy \ libverifproxy \ @@ -216,9 +215,6 @@ nimbus_execution_client: | build deps rocksdb echo -e $(BUILD_MSG) "build/nimbus_execution_client" && \ $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:chronicles_log_level=TRACE -o:build/nimbus_execution_client "execution_chain/nimbus_execution_client.nim" -nimbus: nimbus_execution_client - echo "The nimbus target is deprecated and will soon change meaning, use 'nimbus_execution_client' instead" - # symlink nimbus.nims: ln -s nimbus.nimble $@ @@ -379,23 +375,21 @@ txparse: | build deps 
# usual cleaning clean: | clean-common - rm -rf build/{nimbus_unified,nimbus,nimbus_execution_client,nimbus_portal_client,fluffy,portal_bridge,libverifproxy,nimbus_verified_proxy,$(TOOLS_CSV),$(PORTAL_TOOLS_CSV),all_tests,test_kvstore_rocksdb,test_rpc,all_portal_tests,all_history_network_custom_chain_tests,test_portal_testnet,utp_test_app,utp_test,nimbus_unified_test,*.dSYM} + rm -rf build/{nimbus,nimbus_execution_client,nimbus_portal_client,fluffy,portal_bridge,libverifproxy,nimbus_verified_proxy,$(TOOLS_CSV),$(PORTAL_TOOLS_CSV),all_tests,test_kvstore_rocksdb,test_rpc,all_portal_tests,all_history_network_custom_chain_tests,test_portal_testnet,utp_test_app,utp_test,*.dSYM} rm -rf tools/t8n/{t8n,t8n_test} rm -rf tools/evmstate/{evmstate,evmstate_test} ifneq ($(USE_LIBBACKTRACE), 0) + $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT) endif -# Nimbus unified related targets - -# builds the unified client -nimbus_unified: | build deps +# Nimbus +nimbus: | build deps rocksdb echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:release --parallelBuild:1 -d:libp2p_pki_schemes=secp256k1 -u:metrics -o:build/$@ "nimbus_unified/$@.nim" + $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:release --parallelBuild:1 -d:libp2p_pki_schemes=secp256k1 -u:metrics -o:build/nimbus "nimbus/nimbus.nim" -all_tests_unified: | build deps +all_tests_nimbus: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim c -r $(NIM_PARAMS) --threads:on -d:chronicles_log_level=ERROR -o:build/$@ "nimbus_unified/tests/$@.nim" + $(ENV_SCRIPT) nim c -r $(NIM_PARAMS) --threads:on -d:chronicles_log_level=ERROR -o:build/$@ "nimbus/tests/$@.nim" # Note about building Nimbus as a library: # diff --git a/nimbus.nimble b/nimbus.nimble index 8306149a9b..ea8b6991b5 100644 --- a/nimbus.nimble +++ b/nimbus.nimble @@ -135,3 +135,10 @@ task build_fuzzers, "Build fuzzer test cases": for file in walkDirRec("tests/networking/fuzzing/"): if file.endsWith("nim"): exec "nim c -c -d:release " & file +## nimbus tasks + +task nimbus, "Build Nimbus": + buildBinary "nimbus", "nimbus/", "-d:chronicles_log_level=TRACE" + +task nimbus_test, "Run Nimbus tests": + test "nimbus/tests/","all_tests", "-d:chronicles_log_level=ERROR" diff --git a/nimbus/README.md b/nimbus/README.md index 2d750a3c45..4336132d84 100644 --- a/nimbus/README.md +++ b/nimbus/README.md @@ -1,4 +1,4 @@ -# Nimbus Unified +# Nimbus [![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) @@ -26,9 +26,9 @@ tbd tbd - mac os, windows, and linux - ]$ make nimbus_unified + ]$ make nimbus ## colaborate -We welcome contributions to Nimbus Unified! Please adhere to the following guidelines: +We welcome contributions to Nimbus! Please adhere to the following guidelines: - Use the [Status Nim style guide](https://status-im.github.io/nim-style-guide/) to maintain code consistency. - Format your code using the [Nim Pretty Printer (nph)](https://github.com/nim-lang/nimpretty) to ensure consistency across the codebase. Run it as part of your pull request process. 
diff --git a/nimbus/configs/nimbus_configs.nim b/nimbus/conf.nim similarity index 75% rename from nimbus/configs/nimbus_configs.nim rename to nimbus/conf.nim index 3a55f6dbc5..fa821fb734 100644 --- a/nimbus/configs/nimbus_configs.nim +++ b/nimbus/conf.nim @@ -1,4 +1,4 @@ -# nimbus_unified +# Nimbus # Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). @@ -12,7 +12,7 @@ import #eth2-configs beacon_chain/nimbus_binary_common, #eth1-configs - ../../nimbus/nimbus_desc + ../../execution_chain/nimbus_desc export BeaconNodeConf, NimbusConf @@ -37,22 +37,20 @@ type LayerConfig* = object case kind*: ConfigKind of Consensus: - consensusConfig*: BeaconNodeConf + consensusConfig*: seq[string] of Execution: - executionConfig*: NimbusConf - - ServiceParameters* = object - name*: string - layerConfig*: LayerConfig + executionConfig*: seq[string] NimbusService* = ref object name*: string - timeoutMs*: uint32 - serviceHandler*: Thread[ServiceParameters] + layerConfig*: LayerConfig + serviceHandler*: Thread[ptr Channel[pointer]] + serviceChannel: ptr Channel[pointer] Nimbus* = ref object - serviceList*: array[cNimbusMaxServices, Option[NimbusService]] + serviceList*: seq[NimbusService] +#replace with cond var ## Service shutdown var isShutDownRequired*: Atomic[bool] isShutDownRequired.store(false) @@ -61,10 +59,10 @@ isShutDownRequired.store(false) proc defaultDataDir*(): string = let dataDir = when defined(windows): - "AppData" / "Roaming" / "Nimbus_unified" + "AppData" / "Roaming" / "Nimbus" elif defined(macosx): - "Library" / "Application Support" / "Nimbus_unified" + "Library" / "Application Support" / "Nimbus" else: - ".cache" / "nimbus_unified" + ".cache" / "Nimbus" getHomeDir() / dataDir diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index 5f20237c5f..e7f13a0b4e 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -1,4 +1,4 @@ -# nimbus_unified +# Nimbus # Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index 5cf09b8122..5f46714b8b 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -1,4 +1,4 @@ -# nimbus_unified +# Nimbus # Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). diff --git a/nimbus/nimbus.cfg b/nimbus/nimbus.cfg index 36a4fd937d..0383c2d483 100644 --- a/nimbus/nimbus.cfg +++ b/nimbus/nimbus.cfg @@ -1,4 +1,4 @@ -# nimbus_unified +# Nimbus # Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index 2d7d0d33de..4becfdb95d 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -1,4 +1,4 @@ -# nimbus_unified +# Nimbus # Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). @@ -6,9 +6,8 @@ # at your option. 
This file may not be copied, modified, or distributed except according to those terms. import - std/[atomics, os, exitprocs], + std/[atomics, os], chronicles, - stew/io2, options, consensus/consensus_layer, execution/execution_layer, @@ -16,7 +15,7 @@ import #eth2-configs beacon_chain/nimbus_binary_common, #eth1-configs - ../nimbus/nimbus_desc + ../execution_chain/nimbus_desc # ------------------------------------------------------------------------------ # Private @@ -65,7 +64,7 @@ proc addService( # Public # ------------------------------------------------------------------------------ -## Block execution and waits for services to finish +## Gracefully exits all services proc exitServices*(nimbus: Nimbus) = for i in 0 .. cNimbusMaxServices - 1: if nimbus.serviceList[i].isSome: diff --git a/nimbus/tests/all_tests_unified.nim b/nimbus/tests/all_tests_nimbus.nim similarity index 92% rename from nimbus/tests/all_tests_unified.nim rename to nimbus/tests/all_tests_nimbus.nim index 54c93f124d..a835856a05 100644 --- a/nimbus/tests/all_tests_unified.nim +++ b/nimbus/tests/all_tests_nimbus.nim @@ -1,4 +1,4 @@ -# nimbus_unified +# Nimbus # Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). @@ -8,6 +8,6 @@ {.warning[UnusedImport]: off.} import - ./test_nimbus_unified, + ./test_nimbus, ./consensus/test_consensus_layer, ./execution/test_execution_layer diff --git a/nimbus/tests/consensus/test_consensus_layer.nim b/nimbus/tests/consensus/test_consensus_layer.nim index be4398372d..274b2589ef 100644 --- a/nimbus/tests/consensus/test_consensus_layer.nim +++ b/nimbus/tests/consensus/test_consensus_layer.nim @@ -1,4 +1,4 @@ -# nimbus_unified +# Nimbus # Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). diff --git a/nimbus/tests/execution/test_execution_layer.nim b/nimbus/tests/execution/test_execution_layer.nim index 3a408ce1ac..7f78921206 100644 --- a/nimbus/tests/execution/test_execution_layer.nim +++ b/nimbus/tests/execution/test_execution_layer.nim @@ -1,4 +1,4 @@ -# nimbus_unified +# Nimbus # Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). diff --git a/nimbus/tests/test_nimbus_unified.nim b/nimbus/tests/test_nimbus.nim similarity index 98% rename from nimbus/tests/test_nimbus_unified.nim rename to nimbus/tests/test_nimbus.nim index 0fb752cffc..501fc264f1 100644 --- a/nimbus/tests/test_nimbus_unified.nim +++ b/nimbus/tests/test_nimbus.nim @@ -1,4 +1,4 @@ -# nimbus_unified +# Nimbus # Copyright (c) 2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). @@ -8,10 +8,10 @@ import std/[os, atomics], unittest2, - ../nimbus_unified, + ../nimbus, ../configs/nimbus_configs, #eth1-configs - ../../nimbus/nimbus_desc + ../../execution_chain/nimbus_desc # ---------------------------------------------------------------------------- # Helper Functions From c89970042df121a40d46569e35ea2a447f0402e8 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Mon, 17 Feb 2025 16:36:25 +0000 Subject: [PATCH 10/34] refined initial project: - removed dead code. 
- removed some structures not needed yet - thread communication with shared data via channels. --- Makefile | 9 +- nimbus.nimble | 4 +- nimbus/common/utils.nim | 38 ++++ nimbus/conf.nim | 41 ++-- nimbus/consensus/consensus_layer.nim | 27 ++- nimbus/execution/execution_layer.nim | 27 ++- nimbus/nimbus.nim | 202 +++++++++--------- .../tests/consensus/test_consensus_layer.nim | 29 +-- .../tests/execution/test_execution_layer.nim | 29 +-- nimbus/tests/test_nimbus.nim | 103 ++------- 10 files changed, 237 insertions(+), 272 deletions(-) create mode 100644 nimbus/common/utils.nim diff --git a/Makefile b/Makefile index 2df35594d0..f6536c4171 100644 --- a/Makefile +++ b/Makefile @@ -375,7 +375,10 @@ txparse: | build deps # usual cleaning clean: | clean-common - rm -rf build/{nimbus,nimbus_execution_client,nimbus_portal_client,fluffy,portal_bridge,libverifproxy,nimbus_verified_proxy,$(TOOLS_CSV),$(PORTAL_TOOLS_CSV),all_tests,test_kvstore_rocksdb,test_rpc,all_portal_tests,all_history_network_custom_chain_tests,test_portal_testnet,utp_test_app,utp_test,*.dSYM} + rm -rf build/{nimbus_client,nimbus_execution_client,nimbus_portal_client,fluffy,portal_bridge,libverifproxy,nimbus_verified_proxy} + rm -rf build/{$(TOOLS_CSV),$(PORTAL_TOOLS_CSV)} + rm -rf build/{all_tests_nimbus,all_tests,test_kvstore_rocksdb,test_rpc,all_portal_tests,all_history_network_custom_chain_tests,test_portal_testnet,utp_test_app,utp_test} + rm -rf build/{*.dSYM} rm -rf tools/t8n/{t8n,t8n_test} rm -rf tools/evmstate/{evmstate,evmstate_test} ifneq ($(USE_LIBBACKTRACE), 0) @@ -383,9 +386,9 @@ ifneq ($(USE_LIBBACKTRACE), 0) endif # Nimbus -nimbus: | build deps rocksdb +nimbus: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:release --parallelBuild:1 -d:libp2p_pki_schemes=secp256k1 -u:metrics -o:build/nimbus "nimbus/nimbus.nim" + $(ENV_SCRIPT) nim c $(NIM_PARAMS) --threads:on -d:chronicles_log_level=TRACE -o:build/nimbus_client "nimbus/nimbus.nim" all_tests_nimbus: | build deps echo -e $(BUILD_MSG) "build/$@" && \ diff --git a/nimbus.nimble b/nimbus.nimble index ea8b6991b5..bb44f0fdee 100644 --- a/nimbus.nimble +++ b/nimbus.nimble @@ -40,6 +40,7 @@ when declared(namedBin): "execution_chain/nimbus_execution_client": "nimbus_execution_client", "portal/client/nimbus_portal_client": "nimbus_portal_client", "nimbus_verified_proxy/nimbus_verified_proxy": "nimbus_verified_proxy", + "nimbus/nimbus_client": "nimbus_client", }.toTable() import std/[os, strutils] @@ -135,7 +136,8 @@ task build_fuzzers, "Build fuzzer test cases": for file in walkDirRec("tests/networking/fuzzing/"): if file.endsWith("nim"): exec "nim c -c -d:release " & file -## nimbus tasks + +## Nimbus tasks task nimbus, "Build Nimbus": buildBinary "nimbus", "nimbus/", "-d:chronicles_log_level=TRACE" diff --git a/nimbus/common/utils.nim b/nimbus/common/utils.nim new file mode 100644 index 0000000000..24e6ebda73 --- /dev/null +++ b/nimbus/common/utils.nim @@ -0,0 +1,38 @@ +# Nimbus +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+ +{.push raises: [].} + +import results +export results + +#Parses specific data from a given channel if given in following binary format: +# (array size Uint) | [ (element size Uint) (element data)] +proc parseChannelData*(p: pointer): Result[seq[string], string] = + # Start reading from base pointer + var readOffset = cast[uint](p) + var recoveredStrings: seq[string] + var totalSize: uint = 0 + + # length + copyMem(addr totalSize, cast[pointer](readOffset), sizeof(uint)) + readOffset += uint(sizeof(uint)) + + while readOffset < cast[uint](p) + totalSize: + #seq element size + var strLen: uint + copyMem(addr strLen, cast[pointer](readOffset), sizeof(uint)) + readOffset += uint(sizeof(uint)) + + #element + var strData = newString(strLen) + copyMem(addr strData[0], cast[pointer](readOffset), uint(strLen)) + readOffset += uint(strLen) + + recoveredStrings.add(strData) + + ok recoveredStrings diff --git a/nimbus/conf.nim b/nimbus/conf.nim index fa821fb734..2094e9f323 100644 --- a/nimbus/conf.nim +++ b/nimbus/conf.nim @@ -5,28 +5,31 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. +{.push raises: [].} + import std/[os, atomics], chronicles, - options, - #eth2-configs - beacon_chain/nimbus_binary_common, - #eth1-configs - ../../execution_chain/nimbus_desc + #eth2 + beacon_chain/nimbus_binary_common + +export setupFileLimits -export BeaconNodeConf, NimbusConf +## log +logScope: + topics = "Service manager" ## Exceptions type NimbusServiceError* = object of CatchableError ## Constants -## TODO: evaluate the proposed timeouts -const cNimbusMaxServices* = 2 -const cNimbusServiceTimeoutMs* = 3000 +const + cNimbusServiceTimeoutMs* = 3000 + cThreadTimeAck* = 10 -## log -logScope: - topics = "Service manager" +# configuration read by threads +var isConfigRead*: Atomic[bool] +isConfigRead.store(false) ## Nimbus service arguments type @@ -37,25 +40,21 @@ type LayerConfig* = object case kind*: ConfigKind of Consensus: - consensusConfig*: seq[string] + consensusOptions*: seq[string] of Execution: - executionConfig*: seq[string] + executionOptions*: seq[string] NimbusService* = ref object name*: string layerConfig*: LayerConfig serviceHandler*: Thread[ptr Channel[pointer]] - serviceChannel: ptr Channel[pointer] + serviceChannel*: ptr Channel[pointer] = nil + serviceFunc*: proc(ch: ptr Channel[pointer]) {.thread.} Nimbus* = ref object serviceList*: seq[NimbusService] -#replace with cond var -## Service shutdown -var isShutDownRequired*: Atomic[bool] -isShutDownRequired.store(false) - -# filesystem specs +## filesystem specs proc defaultDataDir*(): string = let dataDir = when defined(windows): diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index e7f13a0b4e..efd46e62f9 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -5,27 +5,34 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
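For reference, a minimal sketch (not part of the patch) of the writer side implied by the layout documented above in nimbus/common/utils.nim, (total size: uint) | [(element size: uint)(element bytes)]...; the actual writer is the startService code in nimbus/nimbus.nim later in this patch, and packOptions is an illustrative name only.

    # Sketch only: pack a seq[string] into the layout parseChannelData expects.
    proc packOptions(options: seq[string]): pointer =
      var total = uint(sizeof(uint))                # leading total-size field
      for s in options:
        total += uint(sizeof(uint)) + uint(s.len)   # per-element length + data
      let base = allocShared(int(total))
      var offset = cast[uint](base)
      copyMem(cast[pointer](offset), addr total, sizeof(uint))
      offset += uint(sizeof(uint))
      for s in options:
        var elemLen = uint(s.len)
        copyMem(cast[pointer](offset), addr elemLen, sizeof(uint))
        offset += uint(sizeof(uint))
        if s.len > 0:
          copyMem(cast[pointer](offset), unsafeAddr s[0], s.len)
          offset += uint(s.len)
      base

The receiving thread gets this pointer over the Channel[pointer] declared in conf.nim above and hands it to parseChannelData, which walks the same offsets back into a seq[string].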
-import std/[atomics, os], chronicles, ../configs/nimbus_configs +{.push raises: [].} -export nimbus_configs +import std/[atomics, os], chronicles, ../conf, ../common/utils logScope: topics = "Consensus layer" -proc consensusLayer*(params: ServiceParameters) {.raises: [CatchableError].} = - var config = params.layerConfig +## Consensus Layer handler +proc consensusLayerHandler*(channel: ptr Channel[pointer]) = + var p: pointer + try: + p = channel[].recv() + except Exception as e: + fatal " service unable to receive configuration", err = e.msg + quit(QuitFailure) + + let configs = parseChannelData(p).valueOr: + fatal "unable to parse service data", message = error + quit(QuitFailure) - doAssert config.kind == Consensus + #signal main thread that data is read + isConfigRead.store(true) try: - while isShutDownRequired.load() == false: + while true: info "consensus ..." sleep(cNimbusServiceTimeoutMs + 1000) - - isShutDownRequired.store(true) except CatchableError as e: fatal "error", message = e.msg - isShutDownRequired.store(true) - isShutDownRequired.store(true) warn "\tExiting consensus layer" diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index 5f46714b8b..c8482e5053 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -5,27 +5,34 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -import std/[atomics, os], chronicles, ../configs/nimbus_configs +{.push raises: [].} -export nimbus_configs +import std/[atomics, os], chronicles, ../conf, ../common/utils logScope: topics = "Execution layer" -proc executionLayer*(params: ServiceParameters) {.raises: [CatchableError].} = - var config = params.layerConfig +## Execution Layer handler +proc executionLayerHandler*(channel: ptr Channel[pointer]) = + var p: pointer + try: + p = channel[].recv() + except Exception as e: + fatal "service unable to receive configuration", err = e.msg + quit(QuitFailure) + + let configs = parseChannelData(p).valueOr: + fatal "unable to parse service data", message = error + quit(QuitFailure) - doAssert config.kind == Execution + #signal main thread that data is read + isConfigRead.store(true) try: - while isShutDownRequired.load() == false: + while true: info "execution ..." sleep(cNimbusServiceTimeoutMs) - - isShutDownRequired.store(true) except CatchableError as e: fatal "error", message = e.msg - isShutDownRequired.store(true) - isShutDownRequired.store(true) warn "\tExiting execution layer" diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index 4becfdb95d..1e820cb486 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -6,105 +6,125 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. 
import - std/[atomics, os], + std/[concurrency/atomics, os], chronicles, - options, consensus/consensus_layer, execution/execution_layer, - configs/nimbus_configs, - #eth2-configs - beacon_chain/nimbus_binary_common, - #eth1-configs - ../execution_chain/nimbus_desc + conf # ------------------------------------------------------------------------------ # Private # ------------------------------------------------------------------------------ -## Execution Layer handler -proc executionLayerHandler(parameters: ServiceParameters) {.thread.} = - info "Started service:", service = parameters.name - executionLayer(parameters) - info "\tExited service", service = parameters.name - -## Consensus Layer handler -proc consensusLayerHandler(parameters: ServiceParameters) {.thread.} = - info "Started service:", service = parameters.name - - info "Waiting for execution layer bring up ..." - consensusLayer(parameters) - info "\tExit service", service = parameters.name - -## adds a new service to nimbus services list. -## returns position on services list -proc addService( - nimbus: var Nimbus, - serviceHandler: proc(config: ServiceParameters) {.thread.}, - parameters: var ServiceParameters, - timeout: uint32, -): int = - #search next available free worker - var currentIndex = -1 - for i in 0 .. cNimbusMaxServices - 1: - if nimbus.serviceList[i].isNone: - nimbus.serviceList[i] = - some(NimbusService(name: parameters.name, timeoutMs: timeout)) - currentIndex = i - parameters.name = parameters.name - break - - if currentIndex < 0: - raise newException(NimbusServiceError, "No available slots on nimbus services list") - - info "Created service:", service = nimbus.serviceList[currentIndex].get().name - - currentIndex +## create and configure service +proc startService(nimbus: var Nimbus, service: var NimbusService) = + #channel creation (shared memory) + service.serviceChannel = + cast[ptr Channel[pointer]](allocShared0(sizeof(Channel[pointer]))) + + service.serviceChannel[].open() + + #thread read ack + isConfigRead.store(false) + + #start thread + createThread(service.serviceHandler, service.serviceFunc, service.serviceChannel) + + let optionsList = block: + case service.layerConfig.kind + of Consensus: service.layerConfig.consensusOptions + of Execution: service.layerConfig.executionOptions + + #configs list total size + var totalSize: uint = 0 + totalSize += uint(sizeof(uint)) + for word in optionsList: + totalSize += uint(sizeof(uint)) # element type size + totalSize += uint(word.len) # element length + + # Allocate shared memory + # schema: (array size Uint) | [ (element size Uint) (element data)] + var byteArray = cast[ptr byte](allocShared(totalSize)) + if byteArray.isNil: + fatal "Memory allocation failed" + quit QuitFailure -# ------------------------------------------------------------------------------ -# Public -# ------------------------------------------------------------------------------ + # Writing to shared memory + var writeOffset = cast[uint](byteArray) -## Gracefully exits all services -proc exitServices*(nimbus: Nimbus) = - for i in 0 .. 
cNimbusMaxServices - 1: - if nimbus.serviceList[i].isSome: - let thread = nimbus.serviceList[i].get() - if thread.serviceHandler.running(): - joinThread(thread.serviceHandler) - info "Exited service ", service = thread.name - nimbus.serviceList[i] = none(NimbusService) + #write total size of array + copyMem(cast[pointer](writeOffset), addr totalSize, sizeof(uint)) + writeOffset += uint(sizeof(uint)) - notice "Exited all services" + for word in optionsList: + #elem size + let strLen = uint(word.len) + copyMem(cast[pointer](writeOffset), addr strLen, sizeof(uint)) + writeOffset += uint(sizeof(uint)) -## Service monitoring -proc monitor*(nimbus: Nimbus) = - info "started service monitoring" + #element data + copyMem(cast[pointer](writeOffset), unsafeAddr word[0], word.len) + writeOffset += uint(word.len) - while isShutDownRequired.load() == false: - sleep(cNimbusServiceTimeoutMs) + service.serviceChannel[].send(byteArray) - if isShutDownRequired.load() == true: - nimbus.exitServices() + #wait for service read ack + while not isConfigRead.load(): + sleep(cThreadTimeAck) + isConfigRead.store(true) - notice "Shutting down now" + #close channel + service.serviceChannel[].close() -## create and configure service -proc startService*( - nimbus: var Nimbus, - config: var LayerConfig, - service: string, - fun: proc(config: ServiceParameters) {.thread.}, - timeout: uint32 = cNimbusServiceTimeoutMs, -) {.raises: [CatchableError].} = - var params: ServiceParameters = ServiceParameters(name: service, layerConfig: config) - let serviceId = nimbus.addService(fun, params, timeout) + #dealloc shared data + deallocShared(byteArray) + deallocShared(service.serviceChannel) + +## Gracefully exits all services +proc monitorServices(nimbus: Nimbus) = + for service in nimbus.serviceList: + if service.serviceHandler.running(): + joinThread(service.serviceHandler) + info "Exited service ", service = service.name + + notice "Exited all services" + +# ------------------------------------------------------------------------------ +# Public +# ------------------------------------------------------------------------------ + +## start nimbus client +proc run*(nimbus: var Nimbus) = + # todo + # parse cmd, read options and create configs + var + execOpt = newSeq[string]() + consOpt = newSeq[string]() + executionService: NimbusService = NimbusService( + name: "Execution Layer", + serviceFunc: executionLayerHandler, + layerConfig: LayerConfig(kind: Execution, executionOptions: execOpt), + ) + + consensusService: NimbusService = NimbusService( + name: "Consensus Layer", + serviceFunc: consensusLayerHandler, + layerConfig: LayerConfig(kind: Consensus, consensusOptions: consOpt), + ) + + nimbus.serviceList.add(executionService) + nimbus.serviceList.add(consensusService) try: - createThread(nimbus.serviceList[serviceId].get().serviceHandler, fun, params) - except CatchableError as e: - fatal "error creating service (thread)", msg = e.msg + for service in nimbus.serviceList.mitems(): + info "Starting service ", service = service.name + nimbus.startService(service) + except Exception as e: + fatal "error", msg = e.msg + quit QuitFailure - info "Starting service ", service = service + ## wait for shutdown + nimbus.monitorServices() # ------ when isMainModule: @@ -123,22 +143,12 @@ when isMainModule: except NimbusServiceError as exc: raiseAssert exc.msg # shouldn't happen - notice "\tCtrl+C pressed. Shutting down services" - isShutDownRequired.store(true) - nimbus.exitServices() + notice "\tCtrl+C pressed. Shutting down services ..." 
+ quit 0 setControlCHook(controlCHandler) + nimbus.run() - var - execution = LayerConfig(kind: Execution, executionConfig: NimbusConf()) - consensus = LayerConfig(kind: Consensus, consensusConfig: BeaconNodeConf()) - - try: - nimbus.startService(execution, "Execution Layer", executionLayerHandler) - nimbus.startService(consensus, "Consensus Layer", consensusLayerHandler) - except Exception: - isShutDownRequired.store(true) - nimbus.exitServices() - quit QuitFailure - - nimbus.monitor() +# ----- +when defined(testing): + export monitorServices, startService diff --git a/nimbus/tests/consensus/test_consensus_layer.nim b/nimbus/tests/consensus/test_consensus_layer.nim index 274b2589ef..1c3be2a49d 100644 --- a/nimbus/tests/consensus/test_consensus_layer.nim +++ b/nimbus/tests/consensus/test_consensus_layer.nim @@ -5,26 +5,13 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -import - std/atomics, unittest2, ../../consensus/consensus_layer, ../../configs/nimbus_configs - -# ---------------------------------------------------------------------------- -# Unit Tests -# ---------------------------------------------------------------------------- +{.push raises: [].} -suite "Nimbus Consensus Layer Tests": - # Test: consensusLayer handles CatchableError gracefully - test "consensusLayer handles CatchableError and sets shutdown flag": - var params = ServiceParameters( - name: "ErrorTest", - layerConfig: LayerConfig(kind: Consensus, consensusConfig: BeaconNodeConf()), - ) - - check: - try: - consensusLayer(params) - true # No uncaught exceptions - except CatchableError: - false # If an exception is raised, the test fails +import + unittest2, + std/[concurrency/atomics, os], + ../../../nimbus/common/utils, + ../../../nimbus/conf, + ../../../nimbus/consensus/consensus_layer - check isShutDownRequired.load() == true # Verify shutdown flag is set +#tbd \ No newline at end of file diff --git a/nimbus/tests/execution/test_execution_layer.nim b/nimbus/tests/execution/test_execution_layer.nim index 7f78921206..c7ed42727e 100644 --- a/nimbus/tests/execution/test_execution_layer.nim +++ b/nimbus/tests/execution/test_execution_layer.nim @@ -5,26 +5,13 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-import - std/atomics, unittest2, ../../execution/execution_layer, ../../configs/nimbus_configs - -# ---------------------------------------------------------------------------- -# Unit Tests -# ---------------------------------------------------------------------------- +{.push raises: [].} -suite "Nimbus Execution Layer Tests": - # Test: executionLayer handles CatchableError gracefully - test "executionLayer handles CatchableError and sets shutdown flag": - var params = ServiceParameters( - name: "ErrorTest", - layerConfig: LayerConfig(kind: Execution, executionConfig: NimbusConf()), - ) - - check: - try: - executionLayer(params) - true # No uncaught exceptions - except CatchableError: - false # If an exception is raised, the test fails +import + unittest2, + std/[concurrency/atomics, os], + ../../../nimbus/common/utils, + ../../../nimbus/conf, + ../../../nimbus/execution/execution_layer - check isShutDownRequired.load() == true # Verify shutdown flag is set +#tbd diff --git a/nimbus/tests/test_nimbus.nim b/nimbus/tests/test_nimbus.nim index 501fc264f1..ef852fb547 100644 --- a/nimbus/tests/test_nimbus.nim +++ b/nimbus/tests/test_nimbus.nim @@ -5,18 +5,12 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -import - std/[os, atomics], - unittest2, - ../nimbus, - ../configs/nimbus_configs, - #eth1-configs - ../../execution_chain/nimbus_desc +import std/[os, atomics], unittest2, ../nimbus, ../conf -# ---------------------------------------------------------------------------- -# Helper Functions -# ---------------------------------------------------------------------------- -proc handlerMock(parameters: ServiceParameters) {.thread.} = +# # ---------------------------------------------------------------------------- +# # Helper Functions +# # ---------------------------------------------------------------------------- +proc handlerMock(channel: ptr Channel[pointer]) = return # ---------------------------------------------------------------------------- @@ -30,81 +24,12 @@ suite "Nimbus Service Management Tests": # Test: Creating a new service successfully test "startService successfully adds a service": - var layerConfig = LayerConfig(kind: Execution, executionConfig: NimbusConf()) - - nimbus.startService(layerConfig, "TestService", handlerMock) - - check nimbus.serviceList[0].isSome - check nimbus.serviceList[0].get().name == "TestService" - - # Test: Adding more services than the maximum allowed - test "startService fails when Nimbus is full": - for i in 0 ..< cNimbusMaxServices: - var layerConfig = LayerConfig(kind: Execution, executionConfig: NimbusConf()) - nimbus.startService(layerConfig, "service" & $i, handlerMock) - - # Attempt to add one more service than allowed - var extraConfig = LayerConfig(kind: Execution, executionConfig: NimbusConf()) - check: - try: - nimbus.startService(extraConfig, "ExtraService", handlerMock) - false # If no exception, test fails - except NimbusServiceError: - true # Exception was correctly raised - - # Test: Services finish properly and exitServices correctly joins all threads - test "exitServices waits for all services to finish": - for i in 0 ..< cNimbusMaxServices: - var layerConfig = LayerConfig(kind: Execution, executionConfig: NimbusConf()) - nimbus.startService(layerConfig, "service" & $i, handlerMock) - - nimbus.exitServices() - - # Check that all service slots are empty (thread 
was stopped, joined and its spot cleared) - for s in nimbus.serviceList: check s.isNone - - # Test: startServices initializes both the execution and consensus layer services - test "startServices initializes execution and consensus services": - var execLayer = LayerConfig(kind: Execution, executionConfig: NimbusConf()) - var consensusLayer = LayerConfig(kind: Execution, executionConfig: NimbusConf()) - - nimbus.startService(execLayer, "service1", handlerMock) - nimbus.startService(consensusLayer, "service2", handlerMock) - - # Check that at least two services were created - check not nimbus.serviceList[0].isNone - check not nimbus.serviceList[1].isNone - - # Test: Monitor detects shutdown and calls exitServices - test "monitor stops on shutdown signal and calls exitServices": - var layer = LayerConfig(kind: Execution, executionConfig: NimbusConf()) - nimbus.startService(layer, "service1", handlerMock) - - #simulates a shutdown signal - isShutDownRequired.store(true) - nimbus.monitor() - - # Check that the monitor loop exits correctly - # services running should be 0 - check isShutDownRequired.load() == true - for s in nimbus.serviceList: check s.isNone - - # Test: Control-C handler properly initiates shutdown - test "controlCHandler triggers shutdown sequence": - var layer = LayerConfig(kind: Execution, executionConfig: NimbusConf()) - nimbus.startService(layer, "service1", handlerMock) - - proc localControlCHandler() {.noconv.} = - isShutDownRequired.store(true) - nimbus.exitServices() - - # Set up a simulated control-C hook - setControlCHook(localControlCHandler) - - # Trigger the hook manually - localControlCHandler() - - check isShutDownRequired.load() == true - - #services running should be 0 - for s in nimbus.serviceList: check s.isNone + var someService: NimbusService = NimbusService( + name: "FooBar service", + serviceFunc: handlerMock, + layerConfig: LayerConfig(kind: Consensus, consensusOptions: @["foo", "bar"]), + ) + nimbus.serviceList.add(someService) + + check nimbus.serviceList.len == 1 + check nimbus.serviceList[0].name == "FooBar service" From 1f5288146644a5699033d3caa2b729bbbcb4d0d7 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Mon, 17 Feb 2025 23:39:45 +0000 Subject: [PATCH 11/34] ignore nimbus on CI and Kurtosis git workflows --- .github/workflows/ci.yml | 1 + .github/workflows/kurtosis.yml | 2 ++ Makefile | 2 +- nimbus/conf.nim | 12 ------------ 4 files changed, 4 insertions(+), 13 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7ea027aaaf..f8555fe02b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,6 +18,7 @@ on: - '**/*.yml' - 'hive_integration/**' - 'portal/**' + - 'nimbus/**' - '.github/workflows/portal*.yml' - 'nimbus_verified_proxy/**' - '.github/workflows/nimbus_verified_proxy.yml' diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index 6098d46d4f..443121b24a 100644 --- a/.github/workflows/kurtosis.yml +++ b/.github/workflows/kurtosis.yml @@ -20,6 +20,7 @@ on: - '**/*.md' - 'hive_integration/**' - 'portal/**' + - 'nimbus/**' - '.github/workflows/portal*.yml' - 'nimbus_verified_proxy/**' - '.github/workflows/nimbus_verified_proxy.yml' @@ -32,6 +33,7 @@ on: - '**/*.md' - 'hive_integration/**' - 'portal/**' + - 'nimbus/**' - '.github/workflows/portal*.yml' - 'nimbus_verified_proxy/**' - '.github/workflows/nimbus_verified_proxy.yml' diff --git a/Makefile b/Makefile index f6536c4171..cccb3b343a 100644 --- a/Makefile +++ b/Makefile @@ -378,7 +378,7 @@ clean: | clean-common rm 
-rf build/{nimbus_client,nimbus_execution_client,nimbus_portal_client,fluffy,portal_bridge,libverifproxy,nimbus_verified_proxy} rm -rf build/{$(TOOLS_CSV),$(PORTAL_TOOLS_CSV)} rm -rf build/{all_tests_nimbus,all_tests,test_kvstore_rocksdb,test_rpc,all_portal_tests,all_history_network_custom_chain_tests,test_portal_testnet,utp_test_app,utp_test} - rm -rf build/{*.dSYM} + rm -rf build/*.dSYM rm -rf tools/t8n/{t8n,t8n_test} rm -rf tools/evmstate/{evmstate,evmstate_test} ifneq ($(USE_LIBBACKTRACE), 0) diff --git a/nimbus/conf.nim b/nimbus/conf.nim index 2094e9f323..e18c7ad8e1 100644 --- a/nimbus/conf.nim +++ b/nimbus/conf.nim @@ -53,15 +53,3 @@ type Nimbus* = ref object serviceList*: seq[NimbusService] - -## filesystem specs -proc defaultDataDir*(): string = - let dataDir = - when defined(windows): - "AppData" / "Roaming" / "Nimbus" - elif defined(macosx): - "Library" / "Application Support" / "Nimbus" - else: - ".cache" / "Nimbus" - - getHomeDir() / dataDir From 717da17a638fc4f5065724493c84ed8db47bd7e9 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Fri, 28 Feb 2025 12:31:08 +0000 Subject: [PATCH 12/34] - swap sequence with table data type in order to show the idea of service layers creating and managing client configurations life time. - Improved unit tests and some unit tests skeleton for upcoming changes. - minor corrections in project --- Makefile | 2 +- nimbus.nimble | 2 +- nimbus/README.md | 8 +- nimbus/common/utils.nim | 63 ++++++++++----- nimbus/conf.nim | 9 ++- nimbus/consensus/consensus_layer.nim | 4 +- nimbus/execution/execution_layer.nim | 2 +- nimbus/nimbus.nim | 44 +++++----- .../tests/consensus/test_consensus_layer.nim | 12 ++- .../tests/execution/test_execution_layer.nim | 11 ++- nimbus/tests/test_nimbus.nim | 80 +++++++++++++++++-- 11 files changed, 159 insertions(+), 78 deletions(-) diff --git a/Makefile b/Makefile index cccb3b343a..0e4704ad48 100644 --- a/Makefile +++ b/Makefile @@ -392,7 +392,7 @@ nimbus: | build deps all_tests_nimbus: | build deps echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim c -r $(NIM_PARAMS) --threads:on -d:chronicles_log_level=ERROR -o:build/$@ "nimbus/tests/$@.nim" + $(ENV_SCRIPT) nim c -r $(NIM_PARAMS) -d:testing --threads:on -d:chronicles_log_level=ERROR -o:build/$@ "nimbus/tests/$@.nim" # Note about building Nimbus as a library: # diff --git a/nimbus.nimble b/nimbus.nimble index bb44f0fdee..3d90229211 100644 --- a/nimbus.nimble +++ b/nimbus.nimble @@ -143,4 +143,4 @@ task nimbus, "Build Nimbus": buildBinary "nimbus", "nimbus/", "-d:chronicles_log_level=TRACE" task nimbus_test, "Run Nimbus tests": - test "nimbus/tests/","all_tests", "-d:chronicles_log_level=ERROR" + test "nimbus/tests/", "all_tests_nimbus", "-d:chronicles_log_level=ERROR -d:testing" diff --git a/nimbus/README.md b/nimbus/README.md index 4336132d84..7dc638ffc1 100644 --- a/nimbus/README.md +++ b/nimbus/README.md @@ -27,11 +27,9 @@ tbd - mac os, windows, and linux ]$ make nimbus -## colaborate -We welcome contributions to Nimbus! Please adhere to the following guidelines: - -- Use the [Status Nim style guide](https://status-im.github.io/nim-style-guide/) to maintain code consistency. -- Format your code using the [Nim Pretty Printer (nph)](https://github.com/nim-lang/nimpretty) to ensure consistency across the codebase. Run it as part of your pull request process. +## collaborate +- Use [Status Nim style guide](https://status-im.github.io/nim-style-guide/) to maintain code consistency. 
+- Format your code using the [Nim Pretty Printer (nph)](https://github.com/arnetheduck/nph) to ensure consistency across the codebase. Run it as part of your pull request process. ## License Licensed and distributed under either of diff --git a/nimbus/common/utils.nim b/nimbus/common/utils.nim index 24e6ebda73..79b1a68ba9 100644 --- a/nimbus/common/utils.nim +++ b/nimbus/common/utils.nim @@ -7,32 +7,53 @@ {.push raises: [].} -import results -export results - -#Parses specific data from a given channel if given in following binary format: -# (array size Uint) | [ (element size Uint) (element data)] -proc parseChannelData*(p: pointer): Result[seq[string], string] = +import results, ../conf, chronicles + +## Serialize table string elements +proc serializeTableElem*(offset: var uint, elem: string) = + if offset <= 0: + fatal "memory offset can't be zero" + quit(QuitFailure) + + #element size + let optLen = uint(elem.len) + copyMem(cast[pointer](offset), addr optLen, sizeof(uint)) + offset += uint(sizeof(uint)) + + #element data + copyMem(cast[pointer](offset), unsafeAddr elem[0], elem.len) + offset += uint(elem.len) + +## Deserialize table string elements +proc deserializeTableElem*(offset: var uint): string = + #element size + var strLen: uint + copyMem(addr strLen, cast[pointer](offset), sizeof(uint)) + offset += uint(sizeof(uint)) + + #element + var strData = newString(strLen) + copyMem(addr strData[0], cast[pointer](offset), uint(strLen)) + offset += uint(strLen) + + strData + +## Parse data from a given channel. +## schema: (table size:Uint) | [ (option size:Uint) (option data:byte) (arg size: Uint) (arg data:byte)] +proc parseChannelData*(p: pointer): Result[NimbusConfigTable, string] = # Start reading from base pointer - var readOffset = cast[uint](p) - var recoveredStrings: seq[string] - var totalSize: uint = 0 + var + readOffset = cast[uint](p) + confTable = NimbusConfigTable() + totalSize: uint = 0 # length copyMem(addr totalSize, cast[pointer](readOffset), sizeof(uint)) readOffset += uint(sizeof(uint)) while readOffset < cast[uint](p) + totalSize: - #seq element size - var strLen: uint - copyMem(addr strLen, cast[pointer](readOffset), sizeof(uint)) - readOffset += uint(sizeof(uint)) - - #element - var strData = newString(strLen) - copyMem(addr strData[0], cast[pointer](readOffset), uint(strLen)) - readOffset += uint(strLen) - - recoveredStrings.add(strData) + let opt = deserializeTableElem(readOffset) + let arg = deserializeTableElem(readOffset) + confTable[opt] = arg - ok recoveredStrings + ok confTable diff --git a/nimbus/conf.nim b/nimbus/conf.nim index e18c7ad8e1..808e02bd04 100644 --- a/nimbus/conf.nim +++ b/nimbus/conf.nim @@ -8,7 +8,7 @@ {.push raises: [].} import - std/[os, atomics], + std/[atomics, tables], chronicles, #eth2 beacon_chain/nimbus_binary_common @@ -33,6 +33,8 @@ isConfigRead.store(false) ## Nimbus service arguments type + NimbusConfigTable* = Table[string, string] + ConfigKind* = enum Execution Consensus @@ -40,15 +42,14 @@ type LayerConfig* = object case kind*: ConfigKind of Consensus: - consensusOptions*: seq[string] + consensusOptions*: NimbusConfigTable of Execution: - executionOptions*: seq[string] + executionOptions*: NimbusConfigTable NimbusService* = ref object name*: string layerConfig*: LayerConfig serviceHandler*: Thread[ptr Channel[pointer]] - serviceChannel*: ptr Channel[pointer] = nil serviceFunc*: proc(ch: ptr Channel[pointer]) {.thread.} Nimbus* = ref object diff --git a/nimbus/consensus/consensus_layer.nim 
b/nimbus/consensus/consensus_layer.nim index efd46e62f9..a6848ae8c0 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -7,7 +7,7 @@ {.push raises: [].} -import std/[atomics, os], chronicles, ../conf, ../common/utils +import std/[atomics, os], chronos, chronicles, ../conf, ../common/utils, results logScope: topics = "Consensus layer" @@ -31,7 +31,7 @@ proc consensusLayerHandler*(channel: ptr Channel[pointer]) = try: while true: info "consensus ..." - sleep(cNimbusServiceTimeoutMs + 1000) + sleep(cNimbusServiceTimeoutMs) except CatchableError as e: fatal "error", message = e.msg diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index c8482e5053..22988599be 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -7,7 +7,7 @@ {.push raises: [].} -import std/[atomics, os], chronicles, ../conf, ../common/utils +import std/[atomics, os], chronicles, ../conf, ../common/utils, results logScope: topics = "Execution layer" diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index 1e820cb486..923f7cb8b7 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -10,6 +10,7 @@ import chronicles, consensus/consensus_layer, execution/execution_layer, + common/utils, conf # ------------------------------------------------------------------------------ @@ -19,31 +20,31 @@ import ## create and configure service proc startService(nimbus: var Nimbus, service: var NimbusService) = #channel creation (shared memory) - service.serviceChannel = + var serviceChannel = cast[ptr Channel[pointer]](allocShared0(sizeof(Channel[pointer]))) - service.serviceChannel[].open() + serviceChannel[].open() #thread read ack isConfigRead.store(false) #start thread - createThread(service.serviceHandler, service.serviceFunc, service.serviceChannel) + createThread(service.serviceHandler, service.serviceFunc, serviceChannel) - let optionsList = block: + let optionsTable = block: case service.layerConfig.kind of Consensus: service.layerConfig.consensusOptions of Execution: service.layerConfig.executionOptions - #configs list total size + #configs table total size var totalSize: uint = 0 totalSize += uint(sizeof(uint)) - for word in optionsList: - totalSize += uint(sizeof(uint)) # element type size - totalSize += uint(word.len) # element length + for opt, arg in optionsTable: + totalSize += uint(sizeof(uint)) + uint(opt.len) # option + totalSize += uint(sizeof(uint)) + uint(arg.len) # arg # Allocate shared memory - # schema: (array size Uint) | [ (element size Uint) (element data)] + # schema: (table size:Uint) | [ (option size:Uint) (option data:byte) (arg size: Uint) (arg data:byte)] var byteArray = cast[ptr byte](allocShared(totalSize)) if byteArray.isNil: fatal "Memory allocation failed" @@ -56,17 +57,11 @@ proc startService(nimbus: var Nimbus, service: var NimbusService) = copyMem(cast[pointer](writeOffset), addr totalSize, sizeof(uint)) writeOffset += uint(sizeof(uint)) - for word in optionsList: - #elem size - let strLen = uint(word.len) - copyMem(cast[pointer](writeOffset), addr strLen, sizeof(uint)) - writeOffset += uint(sizeof(uint)) + for opt, arg in optionsTable: + serializeTableElem(writeOffset, opt) + serializeTableElem(writeOffset, arg) - #element data - copyMem(cast[pointer](writeOffset), unsafeAddr word[0], word.len) - writeOffset += uint(word.len) - - service.serviceChannel[].send(byteArray) + serviceChannel[].send(byteArray) #wait for service read ack while not isConfigRead.load(): @@ -74,11 +69,11 
@@ proc startService(nimbus: var Nimbus, service: var NimbusService) = isConfigRead.store(true) #close channel - service.serviceChannel[].close() + serviceChannel[].close() #dealloc shared data deallocShared(byteArray) - deallocShared(service.serviceChannel) + deallocShared(serviceChannel) ## Gracefully exits all services proc monitorServices(nimbus: Nimbus) = @@ -95,11 +90,10 @@ proc monitorServices(nimbus: Nimbus) = ## start nimbus client proc run*(nimbus: var Nimbus) = - # todo - # parse cmd, read options and create configs + # to be filled with command line options after parsed according to service var - execOpt = newSeq[string]() - consOpt = newSeq[string]() + consOpt, execOpt = NimbusConfigTable() + executionService: NimbusService = NimbusService( name: "Execution Layer", serviceFunc: executionLayerHandler, diff --git a/nimbus/tests/consensus/test_consensus_layer.nim b/nimbus/tests/consensus/test_consensus_layer.nim index 1c3be2a49d..6df458ba23 100644 --- a/nimbus/tests/consensus/test_consensus_layer.nim +++ b/nimbus/tests/consensus/test_consensus_layer.nim @@ -7,11 +7,9 @@ {.push raises: [].} -import - unittest2, - std/[concurrency/atomics, os], - ../../../nimbus/common/utils, - ../../../nimbus/conf, - ../../../nimbus/consensus/consensus_layer +import unittest2 -#tbd \ No newline at end of file +suite "Nimbus consensus layer": + #tbd, given that layer is in development + test "tbd": + check true \ No newline at end of file diff --git a/nimbus/tests/execution/test_execution_layer.nim b/nimbus/tests/execution/test_execution_layer.nim index c7ed42727e..2008a838e4 100644 --- a/nimbus/tests/execution/test_execution_layer.nim +++ b/nimbus/tests/execution/test_execution_layer.nim @@ -8,10 +8,9 @@ {.push raises: [].} import - unittest2, - std/[concurrency/atomics, os], - ../../../nimbus/common/utils, - ../../../nimbus/conf, - ../../../nimbus/execution/execution_layer + unittest2 -#tbd +suite "Nimbus execution layer": + #tbd, given that layer is in development + test "tbd": + check true diff --git a/nimbus/tests/test_nimbus.nim b/nimbus/tests/test_nimbus.nim index ef852fb547..93d33cba07 100644 --- a/nimbus/tests/test_nimbus.nim +++ b/nimbus/tests/test_nimbus.nim @@ -5,31 +5,101 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
-import std/[os, atomics], unittest2, ../nimbus, ../conf +import unittest2, std/atomics, ../[nimbus, conf], ../common/utils, tables, results # # ---------------------------------------------------------------------------- -# # Helper Functions +# # Helpers # # ---------------------------------------------------------------------------- + +# checks result computed in thread procedures +var checkResult: ptr bool = createShared(bool) + +# simple mock proc handlerMock(channel: ptr Channel[pointer]) = return +#handles data for a given service +proc handlerService_1(channel: ptr Channel[pointer]) = + const expectedConfigTable = {"0": "zero", "1": "one", "2": "two"}.toTable + + let p = channel[].recv() + + let configs = parseChannelData(p).valueOr: + quit(QuitFailure) + + isConfigRead.store(true) + checkResult[] = configs == expectedConfigTable + +#handles data for a given service +proc handlerService_2(channel: ptr Channel[pointer]) = + const expectedConfigTable = {"4": "four", "5": "five", "6": "six"}.toTable + let p = channel[].recv() + + let configs = parseChannelData(p).valueOr: + quit(QuitFailure) + + isConfigRead.store(true) + checkResult[] = configs == expectedConfigTable + # ---------------------------------------------------------------------------- -# Unit Tests +# # Unit Tests # ---------------------------------------------------------------------------- -suite "Nimbus Service Management Tests": +suite "Nimbus Service Management": var nimbus: Nimbus setup: nimbus = Nimbus.new + const configTable_1 = {"0": "zero", "1": "one", "2": "two"}.toTable + const configTable_2 = {"4": "four", "5": "five", "6": "six"}.toTable + # Test: Creating a new service successfully test "startService successfully adds a service": var someService: NimbusService = NimbusService( name: "FooBar service", serviceFunc: handlerMock, - layerConfig: LayerConfig(kind: Consensus, consensusOptions: @["foo", "bar"]), + layerConfig: LayerConfig(kind: Consensus, consensusOptions: configTable_1), + ) + nimbus.serviceList.add(someService) + + check nimbus.serviceList.len == 1 + check nimbus.serviceList[0].name == "FooBar service" + + test "nimbus sends correct data for a service": + var someService: NimbusService = NimbusService( + name: "FooBar service", + serviceFunc: handlerService_1, + layerConfig: LayerConfig(kind: Consensus, consensusOptions: configTable_1), ) + nimbus.serviceList.add(someService) + nimbus.startService(someService) + check nimbus.serviceList.len == 1 check nimbus.serviceList[0].name == "FooBar service" + check checkResult[] == true + + test "nimbus sends correct data for multiple services": + var someService: NimbusService = NimbusService( + name: "FooBar service", + serviceFunc: handlerService_1, + layerConfig: LayerConfig(kind: Consensus, consensusOptions: configTable_1), + ) + var anotherService: NimbusService = NimbusService( + name: "Xpto service", + serviceFunc: handlerService_2, + layerConfig: LayerConfig(kind: Execution, executionOptions: configTable_2), + ) + nimbus.serviceList.add(someService) + nimbus.serviceList.add(anotherService) + + nimbus.startService(someService) + check checkResult[] == true + + nimbus.startService(anotherService) + check checkResult[] == true + + check nimbus.serviceList.len == 2 + check nimbus.serviceList[0].name == "FooBar service" + check nimbus.serviceList[1].name == "Xpto service" From 098a09e480e22d91e1444a2d9eef5835ba11cf3f Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Mon, 17 Mar 2025 10:49:14 +0000 Subject: [PATCH 13/34] fixes: - fixed files ending missing 
new line- - fixed serializing configurations without arguments. --- nimbus/common/utils.nim | 32 +++++++++---------- nimbus/nimbus.cfg | 1 - nimbus/nimbus.nim | 4 +-- nimbus/tests/all_tests_nimbus.nim | 5 +-- .../tests/consensus/test_consensus_layer.nim | 2 +- nimbus/tests/nim.cfg | 2 +- nimbus/tests/test_nimbus.nim | 4 +-- 7 files changed, 22 insertions(+), 28 deletions(-) diff --git a/nimbus/common/utils.nim b/nimbus/common/utils.nim index 79b1a68ba9..8168a917d4 100644 --- a/nimbus/common/utils.nim +++ b/nimbus/common/utils.nim @@ -10,50 +10,48 @@ import results, ../conf, chronicles ## Serialize table string elements -proc serializeTableElem*(offset: var uint, elem: string) = +proc serializeTableElement*(offset: var uint, elem: string) = if offset <= 0: fatal "memory offset can't be zero" quit(QuitFailure) - #element size - let optLen = uint(elem.len) + let optLen = uint(elem.len) #element size + copyMem(cast[pointer](offset), addr optLen, sizeof(uint)) offset += uint(sizeof(uint)) - #element data - copyMem(cast[pointer](offset), unsafeAddr elem[0], elem.len) - offset += uint(elem.len) + if optLen > 0: + copyMem(cast[pointer](offset), unsafeAddr elem[0], elem.len) #element data + offset += uint(elem.len) ## Deserialize table string elements -proc deserializeTableElem*(offset: var uint): string = - #element size - var strLen: uint +proc deserializeTableElement*(offset: var uint): string = + var strLen: uint #element size copyMem(addr strLen, cast[pointer](offset), sizeof(uint)) offset += uint(sizeof(uint)) - #element - var strData = newString(strLen) - copyMem(addr strData[0], cast[pointer](offset), uint(strLen)) - offset += uint(strLen) + var strData = "" + if strLen > 0: + strData = newString(strLen) #element + copyMem(addr strData[0], cast[pointer](offset), uint(strLen)) + offset += uint(strLen) strData ## Parse data from a given channel. 
## schema: (table size:Uint) | [ (option size:Uint) (option data:byte) (arg size: Uint) (arg data:byte)] proc parseChannelData*(p: pointer): Result[NimbusConfigTable, string] = - # Start reading from base pointer var readOffset = cast[uint](p) confTable = NimbusConfigTable() totalSize: uint = 0 - # length copyMem(addr totalSize, cast[pointer](readOffset), sizeof(uint)) readOffset += uint(sizeof(uint)) while readOffset < cast[uint](p) + totalSize: - let opt = deserializeTableElem(readOffset) - let arg = deserializeTableElem(readOffset) + let opt = deserializeTableElement(readOffset) + let arg = deserializeTableElement(readOffset) confTable[opt] = arg ok confTable diff --git a/nimbus/nimbus.cfg b/nimbus/nimbus.cfg index 0383c2d483..ab67ecad13 100644 --- a/nimbus/nimbus.cfg +++ b/nimbus/nimbus.cfg @@ -14,4 +14,3 @@ @if release: -d:"chronicles_line_numbers:0" @end - diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index 923f7cb8b7..53db75f830 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -58,8 +58,8 @@ proc startService(nimbus: var Nimbus, service: var NimbusService) = writeOffset += uint(sizeof(uint)) for opt, arg in optionsTable: - serializeTableElem(writeOffset, opt) - serializeTableElem(writeOffset, arg) + serializeTableElement(writeOffset, opt) + serializeTableElement(writeOffset, arg) serviceChannel[].send(byteArray) diff --git a/nimbus/tests/all_tests_nimbus.nim b/nimbus/tests/all_tests_nimbus.nim index a835856a05..42aba2e018 100644 --- a/nimbus/tests/all_tests_nimbus.nim +++ b/nimbus/tests/all_tests_nimbus.nim @@ -7,7 +7,4 @@ {.warning[UnusedImport]: off.} -import - ./test_nimbus, - ./consensus/test_consensus_layer, - ./execution/test_execution_layer +import ./test_nimbus, ./consensus/test_consensus_layer, ./execution/test_execution_layer diff --git a/nimbus/tests/consensus/test_consensus_layer.nim b/nimbus/tests/consensus/test_consensus_layer.nim index 6df458ba23..601088203b 100644 --- a/nimbus/tests/consensus/test_consensus_layer.nim +++ b/nimbus/tests/consensus/test_consensus_layer.nim @@ -12,4 +12,4 @@ import unittest2 suite "Nimbus consensus layer": #tbd, given that layer is in development test "tbd": - check true \ No newline at end of file + check true diff --git a/nimbus/tests/nim.cfg b/nimbus/tests/nim.cfg index 11d9576ec2..81cc747b32 100644 --- a/nimbus/tests/nim.cfg +++ b/nimbus/tests/nim.cfg @@ -11,4 +11,4 @@ --styleCheck:usages --styleCheck:hint ---hint[Processing]:off \ No newline at end of file +--hint[Processing]:off diff --git a/nimbus/tests/test_nimbus.nim b/nimbus/tests/test_nimbus.nim index 93d33cba07..2add1b6648 100644 --- a/nimbus/tests/test_nimbus.nim +++ b/nimbus/tests/test_nimbus.nim @@ -32,7 +32,7 @@ proc handlerService_1(channel: ptr Channel[pointer]) = #handles data for a given service proc handlerService_2(channel: ptr Channel[pointer]) = - const expectedConfigTable = {"4": "four", "5": "five", "6": "six"}.toTable + const expectedConfigTable = {"4": "four", "5": "", "6": "six"}.toTable let p = channel[].recv() let configs = parseChannelData(p).valueOr: @@ -51,7 +51,7 @@ suite "Nimbus Service Management": nimbus = Nimbus.new const configTable_1 = {"0": "zero", "1": "one", "2": "two"}.toTable - const configTable_2 = {"4": "four", "5": "five", "6": "six"}.toTable + const configTable_2 = {"4": "four", "5": "", "6": "six"}.toTable # Test: Creating a new service successfully test "startService successfully adds a service": From 514a05f293817a4ccb98a244f5c34d05f37b91d3 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Mon, 12 May 2025 14:11:08 +0100 
Subject: [PATCH 14/34] renamings --- nimbus/common/utils.nim | 41 ++++++++++++++++------------ nimbus/consensus/consensus_layer.nim | 2 +- nimbus/execution/execution_layer.nim | 2 +- nimbus/nimbus.nim | 4 +-- 4 files changed, 27 insertions(+), 22 deletions(-) diff --git a/nimbus/common/utils.nim b/nimbus/common/utils.nim index 8168a917d4..3008d1ecce 100644 --- a/nimbus/common/utils.nim +++ b/nimbus/common/utils.nim @@ -7,51 +7,56 @@ {.push raises: [].} -import results, ../conf, chronicles +import std/[strutils], results, ../conf, chronicles, stew/shims/macros -## Serialize table string elements -proc serializeTableElement*(offset: var uint, elem: string) = +logScope: + topics = "utils" + +## Write a string into a raw memory buffer (prefixed with length) +proc writeConfigString*(offset: var uint, elem: string) = if offset <= 0: fatal "memory offset can't be zero" quit(QuitFailure) - let optLen = uint(elem.len) #element size - + let optLen = uint(elem.len) copyMem(cast[pointer](offset), addr optLen, sizeof(uint)) offset += uint(sizeof(uint)) if optLen > 0: - copyMem(cast[pointer](offset), unsafeAddr elem[0], elem.len) #element data + copyMem(cast[pointer](offset), unsafeAddr elem[0], elem.len) offset += uint(elem.len) -## Deserialize table string elements -proc deserializeTableElement*(offset: var uint): string = - var strLen: uint #element size +## Read a string from a raw memory buffer (expects length prefix) +proc readConfigString*(offset: var uint): string = + var strLen: uint copyMem(addr strLen, cast[pointer](offset), sizeof(uint)) offset += uint(sizeof(uint)) var strData = "" if strLen > 0: - strData = newString(strLen) #element + strData = newString(strLen) copyMem(addr strData[0], cast[pointer](offset), uint(strLen)) offset += uint(strLen) strData -## Parse data from a given channel. -## schema: (table size:Uint) | [ (option size:Uint) (option data:byte) (arg size: Uint) (arg data:byte)] -proc parseChannelData*(p: pointer): Result[NimbusConfigTable, string] = +## Parse configuration options from a memory block. 
+## Format: (table size:uint) | [ (key size:uint)(key:string) (val size:uint)(val:string) ]* +proc deserializeConfigArgs*(p: pointer): Result[seq[string], string] = var readOffset = cast[uint](p) - confTable = NimbusConfigTable() + optionsList = newSeq[string]() totalSize: uint = 0 copyMem(addr totalSize, cast[pointer](readOffset), sizeof(uint)) readOffset += uint(sizeof(uint)) while readOffset < cast[uint](p) + totalSize: - let opt = deserializeTableElement(readOffset) - let arg = deserializeTableElement(readOffset) - confTable[opt] = arg + let + optName = readConfigString(readOffset) + arg = readConfigString(readOffset) + option = "--" & optName & "=" & arg + + optionsList.add(option) - ok confTable + ok optionsList diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index a6848ae8c0..d6bf4e23c6 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -21,7 +21,7 @@ proc consensusLayerHandler*(channel: ptr Channel[pointer]) = fatal " service unable to receive configuration", err = e.msg quit(QuitFailure) - let configs = parseChannelData(p).valueOr: + let configs = deserializeConfigArgs(p).valueOr: fatal "unable to parse service data", message = error quit(QuitFailure) diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index 22988599be..5dd44d5aac 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -21,7 +21,7 @@ proc executionLayerHandler*(channel: ptr Channel[pointer]) = fatal "service unable to receive configuration", err = e.msg quit(QuitFailure) - let configs = parseChannelData(p).valueOr: + let configs = deserializeConfigArgs(p).valueOr: fatal "unable to parse service data", message = error quit(QuitFailure) diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index 53db75f830..99a4c41d0c 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -58,8 +58,8 @@ proc startService(nimbus: var Nimbus, service: var NimbusService) = writeOffset += uint(sizeof(uint)) for opt, arg in optionsTable: - serializeTableElement(writeOffset, opt) - serializeTableElement(writeOffset, arg) + writeConfigString(writeOffset, opt) + writeConfigString(writeOffset, arg) serviceChannel[].send(byteArray) From 9d026b3e433492df1519e7347363245f9af41b0a Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Fri, 16 May 2025 16:02:48 +0100 Subject: [PATCH 15/34] Filter command line arguments according with each client configuration definitions. --- nimbus/common/utils.nim | 20 ++++++++++++++ nimbus/nimbus.nim | 58 +++++++++++++++++++++++++++++------------ 2 files changed, 61 insertions(+), 17 deletions(-) diff --git a/nimbus/common/utils.nim b/nimbus/common/utils.nim index 3008d1ecce..ebb88840db 100644 --- a/nimbus/common/utils.nim +++ b/nimbus/common/utils.nim @@ -12,6 +12,26 @@ import std/[strutils], results, ../conf, chronicles, stew/shims/macros logScope: topics = "utils" +## Macro that collects name-layer pairs from configuration +macro extractFieldNames*(configType: typed): untyped = + # TODO: add abbreviations support + var names: seq[string] = newSeq[string]() + let recDef = configType.getImpl() + + for field in recordFields(recDef): + let namePragma: NimNode = field.readPragma("name") + if namePragma.kind == nnkNilLit: + # let a: NimNode = field.readPragma("defaultValue") + # echo a + #error "missing configuration name" + continue + + let nameVal = $namePragma + names.add(nameVal) + + result = quote: + `names . 
mapIt ( newLit ( it ))` + ## Write a string into a raw memory buffer (prefixed with length) proc writeConfigString*(offset: var uint, elem: string) = if offset <= 0: diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index 99a4c41d0c..e10a5d7a83 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -11,7 +11,10 @@ import consensus/consensus_layer, execution/execution_layer, common/utils, - conf + conf, + confutils/[cli_parser, toml/defs], + ../execution_chain/config, + beacon_chain/conf # ------------------------------------------------------------------------------ # Private @@ -84,37 +87,56 @@ proc monitorServices(nimbus: Nimbus) = notice "Exited all services" -# ------------------------------------------------------------------------------ -# Public -# ------------------------------------------------------------------------------ +# Setup services +proc setup(nimbus: var Nimbus) = + let + executionConfigNames = extractFieldNames(NimbusConf) + consensusConfigNames = extractFieldNames(BeaconNodeConf) -## start nimbus client -proc run*(nimbus: var Nimbus) = - # to be filled with command line options after parsed according to service - var - consOpt, execOpt = NimbusConfigTable() + var consensusParams, executionParams = NimbusConfigTable() - executionService: NimbusService = NimbusService( - name: "Execution Layer", - serviceFunc: executionLayerHandler, - layerConfig: LayerConfig(kind: Execution, executionOptions: execOpt), - ) + for _, cmdKey, cmdArg in getopt(commandLineParams()): + var found = false + if cmdKey in consensusConfigNames: + consensusParams[cmdKey] = cmdArg + found = true + + if cmdKey in executionConfigNames: + executionParams[cmdKey] = cmdArg + found = true + + if not found: + error "Unrecognized option ", option = cmdKey + #TODO: invoke configurations helpers + quit 0 - consensusService: NimbusService = NimbusService( + let + consensusService = NimbusService( name: "Consensus Layer", serviceFunc: consensusLayerHandler, - layerConfig: LayerConfig(kind: Consensus, consensusOptions: consOpt), + layerConfig: LayerConfig(kind: Consensus, consensusOptions: consensusParams), + ) + executionService = NimbusService( + name: "Execution Layer", + serviceFunc: executionLayerHandler, + layerConfig: LayerConfig(kind: Execution, executionOptions: executionParams), ) nimbus.serviceList.add(executionService) nimbus.serviceList.add(consensusService) +# ------------------------------------------------------------------------------ +# Public +# ------------------------------------------------------------------------------ + +## start nimbus client +proc run*(nimbus: var Nimbus) = try: for service in nimbus.serviceList.mitems(): info "Starting service ", service = service.name nimbus.startService(service) except Exception as e: - fatal "error", msg = e.msg + fatal "error starting service:", msg = e.msg quit QuitFailure ## wait for shutdown @@ -141,6 +163,8 @@ when isMainModule: quit 0 setControlCHook(controlCHandler) + + nimbus.setup() nimbus.run() # ----- From ebb62803865cf4f35ecfc62cfe8b99d9ead04f44 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Sun, 18 May 2025 11:48:36 +0100 Subject: [PATCH 16/34] Added support for reading abbreviations from command line --- nimbus/common/utils.nim | 23 +++++++++--------- nimbus/consensus/consensus_layer.nim | 4 +++- nimbus/execution/execution_layer.nim | 2 ++ nimbus/nimbus.nim | 35 +++++++++++++++++++++------- 4 files changed, 44 insertions(+), 20 deletions(-) diff --git a/nimbus/common/utils.nim b/nimbus/common/utils.nim index ebb88840db..c9360fbdcb 
100644 --- a/nimbus/common/utils.nim +++ b/nimbus/common/utils.nim @@ -7,27 +7,28 @@ {.push raises: [].} -import std/[strutils], results, ../conf, chronicles, stew/shims/macros +import std/[strutils], results, chronicles, stew/shims/macros, confutils, ../conf logScope: topics = "utils" -## Macro that collects name-layer pairs from configuration +## Macro that collects names and abbreviations per layer from configuration macro extractFieldNames*(configType: typed): untyped = - # TODO: add abbreviations support var names: seq[string] = newSeq[string]() let recDef = configType.getImpl() for field in recordFields(recDef): - let namePragma: NimNode = field.readPragma("name") - if namePragma.kind == nnkNilLit: - # let a: NimNode = field.readPragma("defaultValue") - # echo a - #error "missing configuration name" + let + name = field.readPragma("name") + abbr = field.readPragma("abbr") + + if name.kind == nnkNilLit: continue - let nameVal = $namePragma - names.add(nameVal) + names.add($name) + + if abbr.kind != nnkNilLit: + names.add($abbr) result = quote: `names . mapIt ( newLit ( it ))` @@ -75,7 +76,7 @@ proc deserializeConfigArgs*(p: pointer): Result[seq[string], string] = let optName = readConfigString(readOffset) arg = readConfigString(readOffset) - option = "--" & optName & "=" & arg + option = optName & arg optionsList.add(option) diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index d6bf4e23c6..5f96b52894 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -3,7 +3,7 @@ # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. +# at your option. This file may not be copied, modified, or distributed except according to those terms . {.push raises: [].} @@ -28,6 +28,8 @@ proc consensusLayerHandler*(channel: ptr Channel[pointer]) = #signal main thread that data is read isConfigRead.store(true) + info "consensus configs ", configs = configs + try: while true: info "consensus ..." diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index 5dd44d5aac..906997351e 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -28,6 +28,8 @@ proc executionLayerHandler*(channel: ptr Channel[pointer]) = #signal main thread that data is read isConfigRead.store(true) + info "execution configs ", configs = configs + try: while true: info "execution ..." 
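The option tables above travel from the main thread to each service thread as a raw, length-prefixed buffer whose layout — (total size: uint) | [ (key size: uint)(key bytes) (value size: uint)(value bytes) ]* — is only spelled out in comments. As a rough standalone sketch of that same layout (single process, int offsets instead of raw pointer bumping; the packOptions/unpackOptions names and the sample option strings are illustrative assumptions, not code from these patches), a round trip might look like:

    import std/tables

    proc packOptions(opts: Table[string, string]): seq[byte] =
      ## Pack as: (total size: uint) | (key len)(key bytes)(val len)(val bytes) ...
      proc put(buf: var seq[byte], offset: var int, s: string) =
        var slen = uint(s.len)
        copyMem(addr buf[offset], addr slen, sizeof(uint))
        offset += sizeof(uint)
        if s.len > 0:
          copyMem(addr buf[offset], unsafeAddr s[0], s.len)
          offset += s.len

      # total buffer size, including its own uint prefix
      var total = sizeof(uint)
      for k, v in opts:
        total += sizeof(uint) + k.len + sizeof(uint) + v.len
      result = newSeq[byte](total)

      var offset = 0
      var totalU = uint(total)
      copyMem(addr result[offset], addr totalU, sizeof(uint))
      offset += sizeof(uint)
      for k, v in opts:
        put(result, offset, k)
        put(result, offset, v)

    proc unpackOptions(p: pointer): Table[string, string] =
      ## Mirror of packOptions, reading key/value pairs until the stored total size.
      proc get(base: int, offset: var int): string =
        var slen: uint
        copyMem(addr slen, cast[pointer](base + offset), sizeof(uint))
        offset += sizeof(uint)
        result = newString(int(slen))
        if result.len > 0:
          copyMem(addr result[0], cast[pointer](base + offset), result.len)
          offset += result.len

      let base = cast[int](p)
      var totalU: uint
      copyMem(addr totalU, p, sizeof(uint))
      var offset = sizeof(uint)
      while offset < int(totalU):
        let key = get(base, offset)
        let val = get(base, offset)
        result[key] = val

    when isMainModule:
      let opts = {"data-dir": "/tmp/nimbus", "log-level": "INFO"}.toTable
      let buf = packOptions(opts)
      doAssert unpackOptions(unsafeAddr buf[0]) == opts

In the patches themselves the packed buffer is allocated with allocShared and its pointer sent over a Channel[pointer] to the service thread; the sketch skips that handoff and only checks that packing and unpacking are inverses of each other.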
diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index e10a5d7a83..afaaa7678c 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -87,22 +87,45 @@ proc monitorServices(nimbus: Nimbus) = notice "Exited all services" +# ------------------------------------------------------------------------------ +# Public +# ------------------------------------------------------------------------------ + +# aux function to prepare arguments and options for eth1 and eth2 +func addArg( + paramTable: var NimbusConfigTable, cmdKind: CmdLineKind, key: string, arg: string +) = + var + newKey = "" + newArg = "" + + if cmdKind == cmdLongOption: + newKey = "--" & key + + if cmdKind == cmdShortOption: + newKey = "-" & key + + if arg != "": + newArg = "=" & arg + + paramTable[newKey] = newArg + # Setup services -proc setup(nimbus: var Nimbus) = +proc setup*(nimbus: var Nimbus) = let executionConfigNames = extractFieldNames(NimbusConf) consensusConfigNames = extractFieldNames(BeaconNodeConf) var consensusParams, executionParams = NimbusConfigTable() - for _, cmdKey, cmdArg in getopt(commandLineParams()): + for cmdKind, cmdKey, cmdArg in getopt(commandLineParams()): var found = false if cmdKey in consensusConfigNames: - consensusParams[cmdKey] = cmdArg + consensusParams.addArg(cmdKind, cmdKey, cmdArg) found = true if cmdKey in executionConfigNames: - executionParams[cmdKey] = cmdArg + executionParams.addArg(cmdKind, cmdKey, cmdArg) found = true if not found: @@ -125,10 +148,6 @@ proc setup(nimbus: var Nimbus) = nimbus.serviceList.add(executionService) nimbus.serviceList.add(consensusService) -# ------------------------------------------------------------------------------ -# Public -# ------------------------------------------------------------------------------ - ## start nimbus client proc run*(nimbus: var Nimbus) = try: From 2e536723f7fe2a707499398f876c64e6a6f28a5e Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Fri, 23 May 2025 16:41:44 +0100 Subject: [PATCH 17/34] - beacon node minimal config setup - corrected nimbus tests --- nimbus/consensus/consensus_layer.nim | 29 +++++++++++++++++++++++++--- nimbus/tests/test_nimbus.nim | 4 ++-- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index 5f96b52894..47043f7eaf 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -7,11 +7,33 @@ {.push raises: [].} -import std/[atomics, os], chronos, chronicles, ../conf, ../common/utils, results +import + std/[atomics, os], + chronos, + chronicles, + ../conf, + ../common/utils, + results, + beacon_chain/[beacon_node_status, nimbus_binary_common] logScope: topics = "Consensus layer" +proc startBeaconNode(configs: seq[string]) = + proc commandLineParams(): seq[string] = + configs + + var config = makeBannerAndConfig( + "clientId", "copyrights", "nimBanner", "SPEC_VERSION", [], BeaconNodeConf + ).valueOr: + error "Error starting consensus", err = error + quit QuitFailure + + setupLogging(config.logLevel, config.logStdout, config.logFile) + + #TODO: create public entry on beacon node + #handleStartUpCmd(config) + ## Consensus Layer handler proc consensusLayerHandler*(channel: ptr Channel[pointer]) = var p: pointer @@ -21,14 +43,15 @@ proc consensusLayerHandler*(channel: ptr Channel[pointer]) = fatal " service unable to receive configuration", err = e.msg quit(QuitFailure) - let configs = deserializeConfigArgs(p).valueOr: + let configList = deserializeConfigArgs(p).valueOr: fatal "unable to parse 
service data", message = error quit(QuitFailure) #signal main thread that data is read isConfigRead.store(true) - info "consensus configs ", configs = configs + {.gcsafe.}: + startBeaconNode(configList) try: while true: diff --git a/nimbus/tests/test_nimbus.nim b/nimbus/tests/test_nimbus.nim index 2add1b6648..3ab0756682 100644 --- a/nimbus/tests/test_nimbus.nim +++ b/nimbus/tests/test_nimbus.nim @@ -32,7 +32,7 @@ proc handlerService_1(channel: ptr Channel[pointer]) = #handles data for a given service proc handlerService_2(channel: ptr Channel[pointer]) = - const expectedConfigTable = {"4": "four", "5": "", "6": "six"}.toTable + const expectedConfigTable = {"4": "four", "5": "five", "6": ""}.toTable let p = channel[].recv() let configs = parseChannelData(p).valueOr: @@ -51,7 +51,7 @@ suite "Nimbus Service Management": nimbus = Nimbus.new const configTable_1 = {"0": "zero", "1": "one", "2": "two"}.toTable - const configTable_2 = {"4": "four", "5": "", "6": "six"}.toTable + const configTable_2 = {"4": "four", "5": "five", "6": ""}.toTable # Test: Creating a new service successfully test "startService successfully adds a service": From ea6d871643fec8c29803390f57c4a1515b72dbf4 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Tue, 27 May 2025 10:48:19 +0100 Subject: [PATCH 18/34] Added nimbus_beacon_node module copy. This module is a copy from nimbus_beacon_node module where handleStartUpCmd visibility is changed to public. --- nimbus/consensus/wrapper_consensus.nim | 2696 ++++++++++++++++++++++++ 1 file changed, 2696 insertions(+) create mode 100644 nimbus/consensus/wrapper_consensus.nim diff --git a/nimbus/consensus/wrapper_consensus.nim b/nimbus/consensus/wrapper_consensus.nim new file mode 100644 index 0000000000..547fe8de84 --- /dev/null +++ b/nimbus/consensus/wrapper_consensus.nim @@ -0,0 +1,2696 @@ +# beacon_chain +# Copyright (c) 2018-2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. 
+ +{.push raises: [].} + +import + std/[os, random, terminal, times, exitprocs], + chronos, + chronicles, + metrics, + metrics/chronos_httpserver, + stew/[byteutils, io2], + eth/p2p/discoveryv5/[enr, random2], + ./consensus_object_pools/[blob_quarantine, data_column_quarantine, blockchain_list], + ./consensus_object_pools/vanity_logs/vanity_logs, + ./networking/[topic_params, network_metadata_downloads], + ./rpc/[rest_api, state_ttl_cache], + ./spec/datatypes/[altair, bellatrix, phase0], + ./spec/[engine_authentication, weak_subjectivity, peerdas_helpers], + ./sync/[sync_protocol, light_client_protocol, sync_overseer], + ./validators/[keystore_management, beacon_validators], + "."/[ + beacon_node, beacon_node_light_client, deposits, nimbus_binary_common, statusbar, + trusted_node_sync, wallets, + ] + +when defined(posix): + import system/ansi_c + +from ./spec/datatypes/deneb import SignedBeaconBlock + +from libp2p/protocols/pubsub/gossipsub import TopicParams, validateParameters, init + +logScope: + topics = "beacnde" + +# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics +declareGauge beacon_slot, "Latest slot of the beacon chain state" +declareGauge beacon_current_epoch, "Current epoch" + +# Finalization tracking +declareGauge finalization_delay, + "Epoch delay between scheduled epoch and finalized epoch" + +declareGauge ticks_delay, "How long does to take to run the onSecond loop" + +declareGauge next_action_wait, "Seconds until the next attestation will be sent" + +declareGauge next_proposal_wait, + "Seconds until the next proposal will be sent, or Inf if not known" + +declareGauge sync_committee_active, + "1 if there are current sync committee duties, 0 otherwise" + +declareCounter db_checkpoint_seconds, + "Time spent checkpointing the database to clear the WAL file" + +proc fetchGenesisState( + metadata: Eth2NetworkMetadata, + genesisState = none(InputFile), + genesisStateUrl = none(Uri), +): Future[ref ForkedHashedBeaconState] {.async: (raises: []).} = + let genesisBytes = + if metadata.genesis.kind != BakedIn and genesisState.isSome: + let res = io2.readAllBytes(genesisState.get.string) + res.valueOr: + error "Failed to read genesis state file", err = res.error.ioErrorMsg + quit 1 + elif metadata.hasGenesis: + try: + if metadata.genesis.kind == BakedInUrl: + info "Obtaining genesis state", + sourceUrl = $genesisStateUrl.get(parseUri metadata.genesis.url) + await metadata.fetchGenesisBytes(genesisStateUrl) + except CatchableError as err: + error "Failed to obtain genesis state", + source = metadata.genesis.sourceDesc, err = err.msg + quit 1 + else: + @[] + + if genesisBytes.len > 0: + try: + newClone readSszForkedHashedBeaconState(metadata.cfg, genesisBytes) + except CatchableError as err: + error "Invalid genesis state", + size = genesisBytes.len, digest = eth2digest(genesisBytes), err = err.msg + quit 1 + else: + nil + +proc doRunTrustedNodeSync( + db: BeaconChainDB, + metadata: Eth2NetworkMetadata, + databaseDir: string, + eraDir: string, + restUrl: string, + stateId: Option[string], + trustedBlockRoot: Option[Eth2Digest], + backfill: bool, + reindex: bool, + genesisState: ref ForkedHashedBeaconState, +) {.async.} = + let syncTarget = + if stateId.isSome: + if trustedBlockRoot.isSome: + warn "Ignoring `trustedBlockRoot`, `stateId` is set", stateId, trustedBlockRoot + TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: stateId.get) + elif trustedBlockRoot.isSome: + TrustedNodeSyncTarget( + kind: TrustedNodeSyncKind.TrustedBlockRoot, + 
trustedBlockRoot: trustedBlockRoot.get, + ) + else: + TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: "finalized") + + await db.doTrustedNodeSync( + metadata.cfg, databaseDir, eraDir, restUrl, syncTarget, backfill, reindex, + genesisState, + ) + +func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = + case stdoutKind + of StdoutLogKind.Auto: + raiseAssert "inadmissable here" + of StdoutLogKind.Colors: + VanityLogs( + onKnownBlsToExecutionChange: capellaBlink, + onUpgradeToDeneb: denebColor, + onUpgradeToElectra: electraColor, + onKnownCompoundingChange: electraBlink, + onUpgradeToFulu: fuluColor, + onBlobParametersUpdate: fuluColor, + ) + of StdoutLogKind.NoColors: + VanityLogs( + onKnownBlsToExecutionChange: capellaMono, + onUpgradeToDeneb: denebMono, + onUpgradeToElectra: electraMono, + onKnownCompoundingChange: electraMono, + onUpgradeToFulu: fuluMono, + onBlobParametersUpdate: fuluMono, + ) + of StdoutLogKind.Json, StdoutLogKind.None: + VanityLogs( + onKnownBlsToExecutionChange: ( + proc() = + notice "🦉 BLS to execution changed 🦉" + ), + onUpgradeToDeneb: ( + proc() = + notice "🐟 Proto-Danksharding is ON 🐟" + ), + onUpgradeToElectra: ( + proc() = + notice "🦒 Compounding is available 🦒" + ), + onKnownCompoundingChange: ( + proc() = + notice "🦒 Compounding is activated 🦒" + ), + onUpgradeToFulu: ( + proc() = + notice "🐅 Blobs columnized 🐅" + ), + onBlobParametersUpdate: ( + proc() = + notice "🐅 Blob parameters updated 🐅" + ), + ) + +func getVanityMascot(consensusFork: ConsensusFork): string = + case consensusFork + of ConsensusFork.Fulu: "🐅" + of ConsensusFork.Electra: "🦒" + of ConsensusFork.Deneb: "🐟" + of ConsensusFork.Capella: "🦉" + of ConsensusFork.Bellatrix: "🐼" + of ConsensusFork.Altair: "✨" + of ConsensusFork.Phase0: "🦏" + +proc loadChainDag( + config: BeaconNodeConf, + cfg: RuntimeConfig, + db: BeaconChainDB, + eventBus: EventBus, + validatorMonitor: ref ValidatorMonitor, + networkGenesisValidatorsRoot: Opt[Eth2Digest], +): ChainDAGRef = + info "Loading block DAG from database", path = config.databaseDir + + var dag: ChainDAGRef + proc onLightClientFinalityUpdate(data: ForkedLightClientFinalityUpdate) = + if dag == nil: + return + withForkyFinalityUpdate(data): + when lcDataFork > LightClientDataFork.None: + let contextFork = dag.cfg.consensusForkAtEpoch(forkyFinalityUpdate.contextEpoch) + eventBus.finUpdateQueue.emit( + RestVersioned[ForkedLightClientFinalityUpdate]( + data: data, + jsonVersion: contextFork, + sszContext: dag.forkDigests[].atConsensusFork(contextFork), + ) + ) + + proc onLightClientOptimisticUpdate(data: ForkedLightClientOptimisticUpdate) = + if dag == nil: + return + withForkyOptimisticUpdate(data): + when lcDataFork > LightClientDataFork.None: + let contextFork = + dag.cfg.consensusForkAtEpoch(forkyOptimisticUpdate.contextEpoch) + eventBus.optUpdateQueue.emit( + RestVersioned[ForkedLightClientOptimisticUpdate]( + data: data, + jsonVersion: contextFork, + sszContext: dag.forkDigests[].atConsensusFork(contextFork), + ) + ) + + let + chainDagFlags = + if config.strictVerification: + {strictVerification} + else: + {} + onLightClientFinalityUpdateCb = + if config.lightClientDataServe: onLightClientFinalityUpdate else: nil + onLightClientOptimisticUpdateCb = + if config.lightClientDataServe: onLightClientOptimisticUpdate else: nil + dag = ChainDAGRef.init( + cfg, + db, + validatorMonitor, + chainDagFlags, + config.eraDir, + vanityLogs = getVanityLogs(detectTTY(config.logStdout)), + lcDataConfig = LightClientDataConfig( + serve: 
config.lightClientDataServe, + importMode: config.lightClientDataImportMode, + maxPeriods: config.lightClientDataMaxPeriods, + onLightClientFinalityUpdate: onLightClientFinalityUpdateCb, + onLightClientOptimisticUpdate: onLightClientOptimisticUpdateCb, + ), + ) + + if networkGenesisValidatorsRoot.isSome: + let databaseGenesisValidatorsRoot = + getStateField(dag.headState, genesis_validators_root) + if networkGenesisValidatorsRoot.get != databaseGenesisValidatorsRoot: + fatal "The specified --data-dir contains data for a different network", + networkGenesisValidatorsRoot = networkGenesisValidatorsRoot.get, + databaseGenesisValidatorsRoot, + dataDir = config.dataDir + quit 1 + + # The first pruning after restart may take a while.. + if config.historyMode == HistoryMode.Prune: + dag.pruneHistory(true) + + dag + +proc checkWeakSubjectivityCheckpoint( + dag: ChainDAGRef, wsCheckpoint: Checkpoint, beaconClock: BeaconClock +) = + let + currentSlot = beaconClock.now.slotOrZero + isCheckpointStale = + not is_within_weak_subjectivity_period( + dag.cfg, currentSlot, dag.headState, wsCheckpoint + ) + + if isCheckpointStale: + error "Weak subjectivity checkpoint is stale", + currentSlot, + checkpoint = wsCheckpoint, + headStateSlot = getStateField(dag.headState, slot) + quit 1 + +from ./spec/state_transition_block import kzg_commitment_to_versioned_hash + +proc isSlotWithinWeakSubjectivityPeriod(dag: ChainDAGRef, slot: Slot): bool = + let checkpoint = Checkpoint( + epoch: epoch(getStateField(dag.headState, slot)), + root: getStateField(dag.headState, latest_block_header).state_root, + ) + is_within_weak_subjectivity_period(dag.cfg, slot, dag.headState, checkpoint) + +proc initFullNode( + node: BeaconNode, + rng: ref HmacDrbgContext, + dag: ChainDAGRef, + clist: ChainListRef, + taskpool: Taskpool, + getBeaconTime: GetBeaconTimeFn, +) {.async.} = + template config(): auto = + node.config + + proc onPhase0AttestationReceived(data: phase0.Attestation) = + node.eventBus.phase0AttestQueue.emit(data) + + proc onSingleAttestationReceived(data: SingleAttestation) = + node.eventBus.singleAttestQueue.emit(data) + + proc onSyncContribution(data: SignedContributionAndProof) = + node.eventBus.contribQueue.emit(data) + + proc onVoluntaryExitAdded(data: SignedVoluntaryExit) = + node.eventBus.exitQueue.emit(data) + + proc onBLSToExecutionChangeAdded(data: SignedBLSToExecutionChange) = + node.eventBus.blsToExecQueue.emit(data) + + proc onProposerSlashingAdded(data: ProposerSlashing) = + node.eventBus.propSlashQueue.emit(data) + + proc onPhase0AttesterSlashingAdded(data: phase0.AttesterSlashing) = + node.eventBus.phase0AttSlashQueue.emit(data) + + proc onElectraAttesterSlashingAdded(data: electra.AttesterSlashing) = + node.eventBus.electraAttSlashQueue.emit(data) + + proc onBlobSidecarAdded(data: BlobSidecarInfoObject) = + node.eventBus.blobSidecarQueue.emit(data) + + proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) = + let optimistic = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + some node.dag.is_optimistic(data.toBlockId()) + else: + none[bool]() + node.eventBus.blocksQueue.emit(EventBeaconBlockObject.init(data, optimistic)) + + proc onBlockGossipAdded(data: ForkedSignedBeaconBlock) = + node.eventBus.blockGossipQueue.emit(EventBeaconBlockGossipObject.init(data)) + + proc onHeadChanged(data: HeadChangeInfoObject) = + let eventData = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + var res = data + res.optimistic = + some node.dag.is_optimistic(BlockId(slot: data.slot, root: 
data.block_root)) + res + else: + data + node.eventBus.headQueue.emit(eventData) + + proc onChainReorg(data: ReorgInfoObject) = + let eventData = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + var res = data + res.optimistic = some node.dag.is_optimistic( + BlockId(slot: data.slot, root: data.new_head_block) + ) + res + else: + data + node.eventBus.reorgQueue.emit(eventData) + + proc makeOnFinalizationCb( + # This `nimcall` functions helps for keeping track of what + # needs to be captured by the onFinalization closure. + eventBus: EventBus, + elManager: ELManager, + ): OnFinalizedCallback {.nimcall.} = + static: + doAssert (elManager is ref) + return proc(dag: ChainDAGRef, data: FinalizationInfoObject) = + node.updateLightClientFromDag() + let eventData = + if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: + var res = data + # `slot` in this `BlockId` may be higher than block's actual slot, + # this is alright for the purpose of calling `is_optimistic`. + res.optimistic = some node.dag.is_optimistic( + BlockId(slot: data.epoch.start_slot, root: data.block_root) + ) + res + else: + data + eventBus.finalQueue.emit(eventData) + + func getLocalHeadSlot(): Slot = + dag.head.slot + + proc getLocalWallSlot(): Slot = + node.beaconClock.now.slotOrZero + + func getFirstSlotAtFinalizedEpoch(): Slot = + dag.finalizedHead.slot + + func getBackfillSlot(): Slot = + if dag.backfill.parent_root != dag.tail.root: dag.backfill.slot else: dag.tail.slot + + func getUntrustedBackfillSlot(): Slot = + if clist.tail.isSome(): + clist.tail.get().blck.slot + else: + dag.tail.slot + + func getFrontfillSlot(): Slot = + max(dag.frontfill.get(BlockId()).slot, dag.horizon) + + proc isWithinWeakSubjectivityPeriod(): bool = + isSlotWithinWeakSubjectivityPeriod(node.dag, node.beaconClock.now().slotOrZero()) + + proc eventWaiter(): Future[void] {.async: (raises: [CancelledError]).} = + await node.shutdownEvent.wait() + bnStatus = BeaconNodeStatus.Stopping + + asyncSpawn eventWaiter() + + let + quarantine = newClone(Quarantine.init()) + attestationPool = newClone( + AttestationPool.init( + dag, quarantine, onPhase0AttestationReceived, onSingleAttestationReceived + ) + ) + syncCommitteeMsgPool = + newClone(SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution)) + lightClientPool = newClone(LightClientPool()) + validatorChangePool = newClone( + ValidatorChangePool.init( + dag, attestationPool, onVoluntaryExitAdded, onBLSToExecutionChangeAdded, + onProposerSlashingAdded, onPhase0AttesterSlashingAdded, + onElectraAttesterSlashingAdded, + ) + ) + blobQuarantine = newClone( + BlobQuarantine.init(dag.cfg, dag.db.getQuarantineDB(), 10, onBlobSidecarAdded) + ) + dataColumnQuarantine = newClone(DataColumnQuarantine.init()) + supernode = node.config.peerdasSupernode + localCustodyGroups = + if supernode: NUMBER_OF_CUSTODY_GROUPS.uint64 else: CUSTODY_REQUIREMENT.uint64 + custody_columns_set = node.network.nodeId.resolve_column_sets_from_custody_groups( + max(SAMPLES_PER_SLOT.uint64, localCustodyGroups) + ) + consensusManager = ConsensusManager.new( + dag, + attestationPool, + quarantine, + node.elManager, + ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets), + node.dynamicFeeRecipientsStore, + config.validatorsDir, + config.defaultFeeRecipient, + config.suggestedGasLimit, + ) + batchVerifier = BatchVerifier.new(rng, taskpool) + blockProcessor = BlockProcessor.new( + config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming, batchVerifier, + consensusManager, 
node.validatorMonitor, blobQuarantine, getBeaconTime, + config.invalidBlockRoots, + ) + + blockVerifier = proc( + signedBlock: ForkedSignedBeaconBlock, + blobs: Opt[BlobSidecars], + maybeFinalized: bool, + ): Future[Result[void, VerifierError]] {. + async: (raises: [CancelledError], raw: true) + .} = + # The design with a callback for block verification is unusual compared + # to the rest of the application, but fits with the general approach + # taken in the sync/request managers - this is an architectural compromise + # that should probably be reimagined more holistically in the future. + blockProcessor[].addBlock( + MsgSource.gossip, signedBlock, blobs, maybeFinalized = maybeFinalized + ) + untrustedBlockVerifier = proc( + signedBlock: ForkedSignedBeaconBlock, + blobs: Opt[BlobSidecars], + maybeFinalized: bool, + ): Future[Result[void, VerifierError]] {. + async: (raises: [CancelledError], raw: true) + .} = + clist.untrustedBackfillVerifier(signedBlock, blobs, maybeFinalized) + rmanBlockVerifier = proc( + signedBlock: ForkedSignedBeaconBlock, maybeFinalized: bool + ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = + withBlck(signedBlock): + when consensusFork >= ConsensusFork.Deneb: + let bres = blobQuarantine[].popSidecars(forkyBlck.root, forkyBlck) + if bres.isSome(): + await blockProcessor[].addBlock( + MsgSource.gossip, signedBlock, bres, maybeFinalized = maybeFinalized + ) + else: + # We don't have all the blobs for this block, so we have + # to put it in blobless quarantine. + if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): + err(VerifierError.UnviableFork) + else: + err(VerifierError.MissingParent) + else: + await blockProcessor[].addBlock( + MsgSource.gossip, + signedBlock, + Opt.none(BlobSidecars), + maybeFinalized = maybeFinalized, + ) + rmanBlockLoader = proc(blockRoot: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = + dag.getForkedBlock(blockRoot) + rmanBlobLoader = proc(blobId: BlobIdentifier): Opt[ref BlobSidecar] = + var blob_sidecar = BlobSidecar.new() + if dag.db.getBlobSidecar(blobId.block_root, blobId.index, blob_sidecar[]): + Opt.some blob_sidecar + else: + Opt.none(ref BlobSidecar) + rmanDataColumnLoader = proc( + columnId: DataColumnIdentifier + ): Opt[ref DataColumnSidecar] = + var data_column_sidecar = DataColumnSidecar.new() + if dag.db.getDataColumnSidecar( + columnId.block_root, columnId.index, data_column_sidecar[] + ): + Opt.some data_column_sidecar + else: + Opt.none(ref DataColumnSidecar) + + processor = Eth2Processor.new( + config.doppelgangerDetection, blockProcessor, node.validatorMonitor, dag, + attestationPool, validatorChangePool, node.attachedValidators, + syncCommitteeMsgPool, lightClientPool, quarantine, blobQuarantine, rng, + getBeaconTime, taskpool, + ) + syncManagerFlags = + if node.config.longRangeSync != LongRangeSyncMode.Lenient: + {SyncManagerFlag.NoGenesisSync} + else: + {} + syncManager = newSyncManager[Peer, PeerId]( + node.network.peerPool, + dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.FULU_FORK_EPOCH, + dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, + SyncQueueKind.Forward, + getLocalHeadSlot, + getLocalWallSlot, + getFirstSlotAtFinalizedEpoch, + getBackfillSlot, + getFrontfillSlot, + isWithinWeakSubjectivityPeriod, + dag.tail.slot, + blockVerifier, + shutdownEvent = node.shutdownEvent, + flags = syncManagerFlags, + ) + backfiller = newSyncManager[Peer, PeerId]( + node.network.peerPool, + dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.FULU_FORK_EPOCH, + 
dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, + SyncQueueKind.Backward, + getLocalHeadSlot, + getLocalWallSlot, + getFirstSlotAtFinalizedEpoch, + getBackfillSlot, + getFrontfillSlot, + isWithinWeakSubjectivityPeriod, + dag.backfill.slot, + blockVerifier, + maxHeadAge = 0, + shutdownEvent = node.shutdownEvent, + flags = syncManagerFlags, + ) + clistPivotSlot = + if clist.tail.isSome(): + clist.tail.get().blck.slot() + else: + getLocalWallSlot() + untrustedManager = newSyncManager[Peer, PeerId]( + node.network.peerPool, + dag.cfg.DENEB_FORK_EPOCH, + dag.cfg.FULU_FORK_EPOCH, + dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, + dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, + SyncQueueKind.Backward, + getLocalHeadSlot, + getLocalWallSlot, + getFirstSlotAtFinalizedEpoch, + getUntrustedBackfillSlot, + getFrontfillSlot, + isWithinWeakSubjectivityPeriod, + clistPivotSlot, + untrustedBlockVerifier, + maxHeadAge = 0, + shutdownEvent = node.shutdownEvent, + flags = syncManagerFlags, + ) + router = (ref MessageRouter)(processor: processor, network: node.network) + requestManager = RequestManager.init( + node.network, + supernode, + custody_columns_set, + dag.cfg.DENEB_FORK_EPOCH, + getBeaconTime, + ( + proc(): bool = + syncManager.inProgress + ), + quarantine, + blobQuarantine, + dataColumnQuarantine, + rmanBlockVerifier, + rmanBlockLoader, + rmanBlobLoader, + rmanDataColumnLoader, + ) + + # As per EIP 7594, the BN is now categorised into a + # `Fullnode` and a `Supernode`, the fullnodes custodies a + # given set of data columns, and hence ONLY subcribes to those + # data column subnet topics, however, the supernodes subscribe + # to all of the topics. This in turn keeps our `data column quarantine` + # really variable. Whenever the BN is a supernode, column quarantine + # essentially means all the NUMBER_OF_COLUMNS, as per mentioned in the + # spec. However, in terms of fullnode, quarantine is really dependent + # on the randomly assigned columns, by `resolve_columns_from_custody_groups`. + + # Hence, in order to keep column quarantine accurate and error proof + # the custody columns are computed once as the BN boots. Then the values + # are used globally around the codebase. + + # `resolve_columns_from_custody_groups` is not a very expensive function, + # but there are multiple instances of computing custody columns, especially + # during peer selection, sync with columns, and so on. That is why, + # the rationale of populating it at boot and using it gloabally. 
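# Aside: a compact, stdlib-only illustration of the "resolve custody columns
# once at boot, then reuse everywhere" pattern described in the comment above.
# resolveColumns below is a hypothetical placeholder, not the real
# resolve_columns_from_custody_groups; only the compute-once caching shape is
# the point.
import std/sets

type
  NodeId = uint64                     # stand-in for the real node identifier
  ColumnCache = object
    supernode: bool
    custodyColumns: HashSet[uint64]   # filled once at boot, read-only afterwards

proc resolveColumns(id: NodeId, groups: uint64): HashSet[uint64] =
  # hypothetical deterministic placeholder for the spec routine
  for g in 0 ..< int(groups):
    result.incl((id + g.uint64 * 7) mod 128)

proc initColumnCache(id: NodeId, supernode: bool,
                     custodyRequirement, allGroups: uint64): ColumnCache =
  # supernodes custody every group, fullnodes only the required subset
  let groups = if supernode: allGroups else: custodyRequirement
  ColumnCache(supernode: supernode, custodyColumns: resolveColumns(id, groups))

when isMainModule:
  # computed exactly once when the node starts ...
  let cache = initColumnCache(0xDEADBEEF'u64, supernode = false,
                              custodyRequirement = 4, allGroups = 128)
  # ... and consulted wherever custody membership is needed (peer selection,
  # sync, gossip subscriptions) instead of re-deriving it each time.
  echo cache.custodyColumns.len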
+ + dataColumnQuarantine[].supernode = supernode + dataColumnQuarantine[].custody_columns = node.network.nodeId.resolve_columns_from_custody_groups( + max(SAMPLES_PER_SLOT.uint64, localCustodyGroups) + ) + + if node.config.peerdasSupernode: + node.network.loadCgcnetMetadataAndEnr(NUMBER_OF_CUSTODY_GROUPS.uint8) + else: + node.network.loadCgcnetMetadataAndEnr(CUSTODY_REQUIREMENT.uint8) + + if node.config.lightClientDataServe: + proc scheduleSendingLightClientUpdates(slot: Slot) = + if node.lightClientPool[].broadcastGossipFut != nil: + return + if slot <= node.lightClientPool[].latestBroadcastedSlot: + return + node.lightClientPool[].latestBroadcastedSlot = slot + + template fut(): auto = + node.lightClientPool[].broadcastGossipFut + + fut = node.handleLightClientUpdates(slot) + fut.addCallback do(p: pointer) {.gcsafe.}: + fut = nil + + router.onSyncCommitteeMessage = scheduleSendingLightClientUpdates + + dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.elManager) + dag.setBlockCb(onBlockAdded) + dag.setBlockGossipCb(onBlockGossipAdded) + dag.setHeadCb(onHeadChanged) + dag.setReorgCb(onChainReorg) + + node.dag = dag + node.list = clist + node.blobQuarantine = blobQuarantine + node.quarantine = quarantine + node.attestationPool = attestationPool + node.syncCommitteeMsgPool = syncCommitteeMsgPool + node.lightClientPool = lightClientPool + node.validatorChangePool = validatorChangePool + node.processor = processor + node.batchVerifier = batchVerifier + node.blockProcessor = blockProcessor + node.consensusManager = consensusManager + node.requestManager = requestManager + node.syncManager = syncManager + node.backfiller = backfiller + node.untrustedManager = untrustedManager + node.syncOverseer = SyncOverseerRef.new( + node.consensusManager, node.validatorMonitor, config, getBeaconTime, node.list, + node.beaconClock, node.eventBus.optFinHeaderUpdateQueue, node.network.peerPool, + node.batchVerifier, syncManager, backfiller, untrustedManager, + ) + node.router = router + + await node.addValidators() + + block: + # Add in-process validators to the list of "known" validators such that + # we start with a reasonable ENR + let wallSlot = node.beaconClock.now().slotOrZero() + for validator in node.attachedValidators[].validators.values(): + if config.validatorMonitorAuto: + node.validatorMonitor[].addMonitor(validator.pubkey, validator.index) + + if validator.index.isSome(): + withState(dag.headState): + let idx = validator.index.get() + if distinctBase(idx) <= forkyState.data.validators.lenu64: + template v(): auto = + forkyState.data.validators.item(idx) + + if is_active_validator(v, wallSlot.epoch) or + is_active_validator(v, wallSlot.epoch + 1): + node.consensusManager[].actionTracker.knownValidators[idx] = wallSlot + elif is_exited_validator(v, wallSlot.epoch): + notice "Ignoring exited validator", + index = idx, pubkey = shortLog(v.pubkey) + let stabilitySubnets = + node.consensusManager[].actionTracker.stabilitySubnets(wallSlot) + # Here, we also set the correct ENR should we be in all subnets mode! 
+ node.network.updateStabilitySubnetMetadata(stabilitySubnets) + + node.network.registerProtocol( + PeerSync, PeerSync.NetworkState.init(node.dag, node.beaconClock.getBeaconTimeFn()) + ) + + node.network.registerProtocol(BeaconSync, BeaconSync.NetworkState.init(node.dag)) + + if node.dag.lcDataStore.serve: + node.network.registerProtocol( + LightClientSync, LightClientSync.NetworkState.init(node.dag) + ) + + node.updateValidatorMetrics() + +const + SlashingDbName = "slashing_protection" + # changing this requires physical file rename as well or history is lost. + +proc init*( + T: type BeaconNode, + rng: ref HmacDrbgContext, + config: BeaconNodeConf, + metadata: Eth2NetworkMetadata, +): Future[BeaconNode] {.async.} = + var genesisState: ref ForkedHashedBeaconState = nil + + template cfg(): auto = + metadata.cfg + + template eth1Network(): auto = + metadata.eth1Network + + if not (isDir(config.databaseDir)): + # If database directory missing, we going to use genesis state to check + # for weak_subjectivity_period. + genesisState = + await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) + let + genesisTime = getStateField(genesisState[], genesis_time) + beaconClock = BeaconClock.init(genesisTime).valueOr: + fatal "Invalid genesis time in genesis state", genesisTime + quit 1 + currentSlot = beaconClock.now().slotOrZero() + checkpoint = Checkpoint( + epoch: epoch(getStateField(genesisState[], slot)), + root: getStateField(genesisState[], latest_block_header).state_root, + ) + + notice "Genesis state information", + genesis_fork = genesisState.kind, + is_post_altair = (cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH) + + if config.longRangeSync == LongRangeSyncMode.Light: + if not is_within_weak_subjectivity_period( + metadata.cfg, currentSlot, genesisState[], checkpoint + ): + # We do support any network which starts from Altair or later fork. + let metadata = config.loadEth2Network() + if metadata.cfg.ALTAIR_FORK_EPOCH != GENESIS_EPOCH: + fatal WeakSubjectivityLogMessage, + current_slot = currentSlot, + altair_fork_epoch = metadata.cfg.ALTAIR_FORK_EPOCH + quit 1 + + let taskpool = + try: + if config.numThreads < 0: + fatal "The number of threads --num-threads cannot be negative." 
+ quit 1 + elif config.numThreads == 0: + Taskpool.new(numThreads = min(countProcessors(), 16)) + else: + Taskpool.new(numThreads = config.numThreads) + except CatchableError as e: + fatal "Cannot start taskpool", err = e.msg + quit 1 + + info "Threadpool started", numThreads = taskpool.numThreads + + if metadata.genesis.kind == BakedIn: + if config.genesisState.isSome: + warn "The --genesis-state option has no effect on networks with built-in genesis state" + + if config.genesisStateUrl.isSome: + warn "The --genesis-state-url option has no effect on networks with built-in genesis state" + + let + eventBus = EventBus( + headQueue: newAsyncEventQueue[HeadChangeInfoObject](), + blocksQueue: newAsyncEventQueue[EventBeaconBlockObject](), + blockGossipQueue: newAsyncEventQueue[EventBeaconBlockGossipObject](), + phase0AttestQueue: newAsyncEventQueue[phase0.Attestation](), + singleAttestQueue: newAsyncEventQueue[SingleAttestation](), + exitQueue: newAsyncEventQueue[SignedVoluntaryExit](), + blsToExecQueue: newAsyncEventQueue[SignedBLSToExecutionChange](), + propSlashQueue: newAsyncEventQueue[ProposerSlashing](), + phase0AttSlashQueue: newAsyncEventQueue[phase0.AttesterSlashing](), + electraAttSlashQueue: newAsyncEventQueue[electra.AttesterSlashing](), + blobSidecarQueue: newAsyncEventQueue[BlobSidecarInfoObject](), + finalQueue: newAsyncEventQueue[FinalizationInfoObject](), + reorgQueue: newAsyncEventQueue[ReorgInfoObject](), + contribQueue: newAsyncEventQueue[SignedContributionAndProof](), + finUpdateQueue: + newAsyncEventQueue[RestVersioned[ForkedLightClientFinalityUpdate]](), + optUpdateQueue: + newAsyncEventQueue[RestVersioned[ForkedLightClientOptimisticUpdate]](), + optFinHeaderUpdateQueue: newAsyncEventQueue[ForkedLightClientHeader](), + ) + db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false) + + if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr: + let trustedBlockRoot = + if config.trustedStateRoot.isSome or config.trustedBlockRoot.isSome: + config.trustedBlockRoot + elif cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH: + # Sync can be bootstrapped from the genesis block root + if genesisState.isNil: + genesisState = await fetchGenesisState( + metadata, config.genesisState, config.genesisStateUrl + ) + if not genesisState.isNil: + let genesisBlockRoot = get_initial_beacon_block(genesisState[]).root + notice "Neither `--trusted-block-root` nor `--trusted-state-root` " & + "provided with `--external-beacon-api-url`, " & + "falling back to genesis block root", + externalBeaconApiUrl = config.externalBeaconApiUrl.get, + trustedBlockRoot = config.trustedBlockRoot, + trustedStateRoot = config.trustedStateRoot, + genesisBlockRoot = $genesisBlockRoot + some genesisBlockRoot + else: + none[Eth2Digest]() + else: + none[Eth2Digest]() + if config.trustedStateRoot.isNone and trustedBlockRoot.isNone: + warn "Ignoring `--external-beacon-api-url`, neither " & + "`--trusted-block-root` nor `--trusted-state-root` provided", + externalBeaconApiUrl = config.externalBeaconApiUrl.get, + trustedBlockRoot = config.trustedBlockRoot, + trustedStateRoot = config.trustedStateRoot + else: + if genesisState.isNil: + genesisState = + await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) + await db.doRunTrustedNodeSync( + metadata, + config.databaseDir, + config.eraDir, + config.externalBeaconApiUrl.get, + config.trustedStateRoot.map do(x: Eth2Digest) -> string: + "0x" & x.data.toHex, + trustedBlockRoot, + backfill = false, + reindex = false, + genesisState, + ) + 
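# Aside: the trusted-root selection just above boils down to a short precedence
# rule; this is a stdlib-only sketch of that rule. Digest is a hypothetical
# stand-in for Eth2Digest, and the genesis fallback corresponds to the
# ALTAIR_FORK_EPOCH == GENESIS_EPOCH case, where sync may start from the
# genesis block root.
import std/options

type Digest = string    # stand-in for Eth2Digest

proc pickTrustedRoot(explicitBlockRoot: Option[Digest],
                     haveExplicitStateRoot: bool,
                     altairFromGenesis: bool,
                     genesisBlockRoot: Option[Digest]): Option[Digest] =
  if haveExplicitStateRoot or explicitBlockRoot.isSome:
    explicitBlockRoot              # operator-supplied root (or state root) wins
  elif altairFromGenesis:
    genesisBlockRoot               # otherwise fall back to the genesis block root
  else:
    none(Digest)                   # nothing usable: warn and skip the sync

when isMainModule:
  echo pickTrustedRoot(none(Digest), false, true, some("0xabc"))  # genesis fallback
  echo pickTrustedRoot(some("0xdef"), false, true, some("0xabc")) # explicit root wins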
+ if config.finalizedCheckpointBlock.isSome: + warn "--finalized-checkpoint-block has been deprecated, ignoring" + + let checkpointState = + if config.finalizedCheckpointState.isSome: + let checkpointStatePath = config.finalizedCheckpointState.get.string + let tmp = + try: + newClone( + readSszForkedHashedBeaconState( + cfg, readAllBytes(checkpointStatePath).tryGet() + ) + ) + except SszError as err: + fatal "Checkpoint state loading failed", + err = formatMsg(err, checkpointStatePath) + quit 1 + except CatchableError as err: + fatal "Failed to read checkpoint state file", err = err.msg + quit 1 + + if not getStateField(tmp[], slot).is_epoch: + fatal "--finalized-checkpoint-state must point to a state for an epoch slot", + slot = getStateField(tmp[], slot) + quit 1 + tmp + else: + nil + + let engineApiUrls = config.engineApiUrls + + if engineApiUrls.len == 0: + notice "Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html)" + + var networkGenesisValidatorsRoot = metadata.bakedGenesisValidatorsRoot + + if not ChainDAGRef.isInitialized(db).isOk(): + genesisState = + if not checkpointState.isNil and getStateField(checkpointState[], slot) == 0: + checkpointState + else: + if genesisState.isNil: + await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) + else: + genesisState + + if genesisState.isNil and checkpointState.isNil: + fatal "No database and no genesis snapshot found. Please supply a genesis.ssz " & + "with the network configuration" + quit 1 + + if not genesisState.isNil and not checkpointState.isNil: + if getStateField(genesisState[], genesis_validators_root) != + getStateField(checkpointState[], genesis_validators_root): + fatal "Checkpoint state does not match genesis - check the --network parameter", + rootFromGenesis = getStateField(genesisState[], genesis_validators_root), + rootFromCheckpoint = getStateField(checkpointState[], genesis_validators_root) + quit 1 + + try: + # Always store genesis state if we have it - this allows reindexing and + # answering genesis queries + if not genesisState.isNil: + ChainDAGRef.preInit(db, genesisState[]) + networkGenesisValidatorsRoot = + Opt.some(getStateField(genesisState[], genesis_validators_root)) + + if not checkpointState.isNil: + if genesisState.isNil or getStateField(checkpointState[], slot) != GENESIS_SLOT: + ChainDAGRef.preInit(db, checkpointState[]) + + doAssert ChainDAGRef.isInitialized(db).isOk(), + "preInit should have initialized db" + except CatchableError as exc: + error "Failed to initialize database", err = exc.msg + quit 1 + else: + if not checkpointState.isNil: + fatal "A database already exists, cannot start from given checkpoint", + dataDir = config.dataDir + quit 1 + + # Doesn't use std/random directly, but dependencies might + randomize(rng[].rand(high(int))) + + # The validatorMonitorTotals flag has been deprecated and should eventually be + # removed - until then, it's given priority if set so as not to needlessly + # break existing setups + let validatorMonitor = newClone( + ValidatorMonitor.init( + config.validatorMonitorAuto, + config.validatorMonitorTotals.get(not config.validatorMonitorDetails), + ) + ) + + for key in config.validatorMonitorPubkeys: + validatorMonitor[].addMonitor(key, Opt.none(ValidatorIndex)) + + let + dag = loadChainDag( + config, cfg, db, eventBus, validatorMonitor, networkGenesisValidatorsRoot + ) + genesisTime = getStateField(dag.headState, genesis_time) + beaconClock = BeaconClock.init(genesisTime).valueOr: + fatal 
"Invalid genesis time in state", genesisTime + quit 1 + + getBeaconTime = beaconClock.getBeaconTimeFn() + + let clist = block: + let res = ChainListRef.init(config.databaseDir()) + + debug "Backfill database has been loaded", + path = config.databaseDir(), head = shortLog(res.head), tail = shortLog(res.tail) + + if res.handle.isSome() and res.tail().isSome(): + if not (isSlotWithinWeakSubjectivityPeriod(dag, res.tail.get().slot())): + notice "Backfill database is outdated " & + "(outside of weak subjectivity period), reseting database", + path = config.databaseDir(), tail = shortLog(res.tail) + res.clear().isOkOr: + fatal "Unable to reset backfill database", + path = config.databaseDir(), reason = error + quit 1 + res + + info "Backfill database initialized", + path = config.databaseDir(), + head = shortLog(clist.head), + tail = shortLog(clist.tail) + + if config.weakSubjectivityCheckpoint.isSome: + dag.checkWeakSubjectivityCheckpoint( + config.weakSubjectivityCheckpoint.get, beaconClock + ) + + let elManager = ELManager.new(engineApiUrls, eth1Network) + + if config.rpcEnabled.isSome: + warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." + + let restServer = + if config.restEnabled: + RestServerRef.init( + config.restAddress, config.restPort, config.restAllowedOrigin, + validateBeaconApiQueries, nimbusAgentStr, config, + ) + else: + nil + + let + netKeys = getPersistentNetKeys(rng[], config) + nickname = + if config.nodeName == "auto": + shortForm(netKeys) + else: + config.nodeName + network = createEth2Node( + rng, + config, + netKeys, + cfg, + dag.forkDigests, + getBeaconTime, + getStateField(dag.headState, genesis_validators_root), + ) + + case config.slashingDbKind + of SlashingDbKind.v2: + discard + of SlashingDbKind.v1: + error "Slashing DB v1 is no longer supported for writing" + quit 1 + of SlashingDbKind.both: + warn "Slashing DB v1 deprecated, writing only v2" + + info "Loading slashing protection database (v2)", path = config.validatorsDir() + + proc getValidatorAndIdx(pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] = + withState(dag.headState): + getValidator(forkyState().data.validators.asSeq(), pubkey) + + func getCapellaForkVersion(): Opt[presets.Version] = + Opt.some(cfg.CAPELLA_FORK_VERSION) + + func getDenebForkEpoch(): Opt[Epoch] = + Opt.some(cfg.DENEB_FORK_EPOCH) + + proc getForkForEpoch(epoch: Epoch): Opt[Fork] = + Opt.some(dag.forkAtEpoch(epoch)) + + proc getGenesisRoot(): Eth2Digest = + getStateField(dag.headState, genesis_validators_root) + + let + keystoreCache = KeystoreCacheRef.init() + slashingProtectionDB = SlashingProtectionDB.init( + getStateField(dag.headState, genesis_validators_root), + config.validatorsDir(), + SlashingDbName, + ) + validatorPool = + newClone(ValidatorPool.init(slashingProtectionDB, config.doppelgangerDetection)) + + keymanagerInitResult = initKeymanagerServer(config, restServer) + keymanagerHost = + if keymanagerInitResult.server != nil: + newClone KeymanagerHost.init( + validatorPool, keystoreCache, rng, keymanagerInitResult.token, + config.validatorsDir, config.secretsDir, config.defaultFeeRecipient, + config.suggestedGasLimit, config.defaultGraffitiBytes, + config.getPayloadBuilderAddress, getValidatorAndIdx, getBeaconTime, + getCapellaForkVersion, getDenebForkEpoch, getForkForEpoch, getGenesisRoot, + ) + else: + nil + + stateTtlCache = + if 
config.restCacheSize > 0: + StateTtlCache.init( + cacheSize = config.restCacheSize, + cacheTtl = chronos.seconds(config.restCacheTtl), + ) + else: + nil + + if config.payloadBuilderEnable: + info "Using external payload builder", payloadBuilderUrl = config.payloadBuilderUrl + + let node = BeaconNode( + nickname: nickname, + graffitiBytes: + if config.graffiti.isSome: + config.graffiti.get + else: + defaultGraffitiBytes(), + network: network, + netKeys: netKeys, + db: db, + config: config, + attachedValidators: validatorPool, + elManager: elManager, + restServer: restServer, + keymanagerHost: keymanagerHost, + keymanagerServer: keymanagerInitResult.server, + keystoreCache: keystoreCache, + eventBus: eventBus, + gossipState: {}, + blocksGossipState: {}, + beaconClock: beaconClock, + validatorMonitor: validatorMonitor, + stateTtlCache: stateTtlCache, + shutdownEvent: newAsyncEvent(), + dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()), + ) + + node.initLightClient( + rng, cfg, dag.forkDigests, getBeaconTime, dag.genesis_validators_root + ) + await node.initFullNode(rng, dag, clist, taskpool, getBeaconTime) + + node.updateLightClientFromDag() + + node + +func verifyFinalization(node: BeaconNode, slot: Slot) = + # Epoch must be >= 4 to check finalization + const SETTLING_TIME_OFFSET = 1'u64 + let epoch = slot.epoch() + + # Don't static-assert this -- if this isn't called, don't require it + doAssert SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET + + # Intentionally, loudly assert. Point is to fail visibly and unignorably + # during testing. + if epoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET: + let finalizedEpoch = node.dag.finalizedHead.slot.epoch() + # Finalization rule 234, that has the most lag slots among the cases, sets + # state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3 + # and then state.slot gets incremented, to increase the maximum offset, if + # finalization occurs every slot, to 4 slots vs scheduledSlot. + doAssert finalizedEpoch + 4 >= epoch + +from std/sequtils import toSeq + +func subnetLog(v: BitArray): string = + $toSeq(v.oneIndices()) + +func forkDigests(node: BeaconNode): auto = + let forkDigestsArray: array[ConsensusFork, auto] = [ + node.dag.forkDigests.phase0, node.dag.forkDigests.altair, + node.dag.forkDigests.bellatrix, node.dag.forkDigests.capella, + node.dag.forkDigests.deneb, node.dag.forkDigests.electra, node.dag.forkDigests.fulu, + ] + forkDigestsArray + +# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription +proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) = + if node.gossipState.card == 0: + # When disconnected, updateBlocksGossipStatus is responsible for all things + # subnets - in particular, it will remove subscriptions on the edge where + # we enter the disconnected state. 
+ return + + let + aggregateSubnets = node.consensusManager[].actionTracker.aggregateSubnets(slot) + stabilitySubnets = node.consensusManager[].actionTracker.stabilitySubnets(slot) + subnets = aggregateSubnets + stabilitySubnets + validatorsCount = withState(node.dag.headState): + forkyState.data.validators.lenu64 + + node.network.updateStabilitySubnetMetadata(stabilitySubnets) + + # Now we know what we should be subscribed to - make it so + let + prevSubnets = node.consensusManager[].actionTracker.subscribedSubnets + unsubscribeSubnets = prevSubnets - subnets + subscribeSubnets = subnets - prevSubnets + + # Remember what we subscribed to, so we can unsubscribe later + node.consensusManager[].actionTracker.subscribedSubnets = subnets + + let forkDigests = node.forkDigests() + + for gossipFork in node.gossipState: + let forkDigest = forkDigests[gossipFork] + node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, forkDigest) + node.network.subscribeAttestationSubnets( + subscribeSubnets, forkDigest, getAttestationSubnetTopicParams(validatorsCount) + ) + + debug "Attestation subnets", + slot, + epoch = slot.epoch, + gossipState = node.gossipState, + stabilitySubnets = subnetLog(stabilitySubnets), + aggregateSubnets = subnetLog(aggregateSubnets), + prevSubnets = subnetLog(prevSubnets), + subscribeSubnets = subnetLog(subscribeSubnets), + unsubscribeSubnets = subnetLog(unsubscribeSubnets), + gossipState = node.gossipState + +proc updateBlocksGossipStatus*(node: BeaconNode, slot: Slot, dagIsBehind: bool) = + template cfg(): auto = + node.dag.cfg + + let + isBehind = + if node.shouldSyncOptimistically(slot): + # If optimistic sync is active, always subscribe to blocks gossip + false + else: + # Use DAG status to determine whether to subscribe for blocks gossip + dagIsBehind + + targetGossipState = getTargetGossipState( + slot.epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, + cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, + cfg.FULU_FORK_EPOCH, isBehind, + ) + + template currentGossipState(): auto = + node.blocksGossipState + + if currentGossipState == targetGossipState: + return + + if currentGossipState.card == 0 and targetGossipState.card > 0: + debug "Enabling blocks topic subscriptions", wallSlot = slot, targetGossipState + elif currentGossipState.card > 0 and targetGossipState.card == 0: + debug "Disabling blocks topic subscriptions", wallSlot = slot + else: + # Individual forks added / removed + discard + + let + newGossipForks = targetGossipState - currentGossipState + oldGossipForks = currentGossipState - targetGossipState + + for gossipFork in oldGossipForks: + let forkDigest = node.dag.forkDigests[].atConsensusFork(gossipFork) + node.network.unsubscribe(getBeaconBlocksTopic(forkDigest)) + + for gossipFork in newGossipForks: + let forkDigest = node.dag.forkDigests[].atConsensusFork(gossipFork) + node.network.subscribe( + getBeaconBlocksTopic(forkDigest), getBlockTopicParams(), enableTopicMetrics = true + ) + + node.blocksGossipState = targetGossipState + +proc addPhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + let validatorsCount = withState(node.dag.headState): + forkyState.data.validators.lenu64 + node.network.subscribe( + getAttesterSlashingsTopic(forkDigest), getAttesterSlashingTopicParams() + ) + node.network.subscribe( + getProposerSlashingsTopic(forkDigest), getProposerSlashingTopicParams() + ) + node.network.subscribe( + getVoluntaryExitsTopic(forkDigest), getVoluntaryExitTopicParams() + ) + 
node.network.subscribe( + getAggregateAndProofsTopic(forkDigest), + getAggregateProofTopicParams(validatorsCount), + enableTopicMetrics = true, + ) + + # updateAttestationSubnetHandlers subscribes attestation subnets + +proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.network.unsubscribe(getVoluntaryExitsTopic(forkDigest)) + node.network.unsubscribe(getProposerSlashingsTopic(forkDigest)) + node.network.unsubscribe(getAttesterSlashingsTopic(forkDigest)) + node.network.unsubscribe(getAggregateAndProofsTopic(forkDigest)) + + for subnet_id in SubnetId: + node.network.unsubscribe(getAttestationTopic(forkDigest, subnet_id)) + + node.consensusManager[].actionTracker.subscribedSubnets = default(AttnetBits) + +func hasSyncPubKey(node: BeaconNode, epoch: Epoch): auto = + # Only used to determine which gossip topics to which to subscribe + if node.config.subscribeAllSubnets: + ( + func (pubkey: ValidatorPubKey): bool {.closure.} = + true + ) + else: + ( + func (pubkey: ValidatorPubKey): bool = + node.consensusManager[].actionTracker.hasSyncDuty(pubkey, epoch) or + pubkey in node.attachedValidators[].validators + ) + +func getCurrentSyncCommiteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = + let syncCommittee = withState(node.dag.headState): + when consensusFork >= ConsensusFork.Altair: + forkyState.data.current_sync_committee + else: + return static(default(SyncnetBits)) + + getSyncSubnets(node.hasSyncPubKey(epoch), syncCommittee) + +func getNextSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = + let syncCommittee = withState(node.dag.headState): + when consensusFork >= ConsensusFork.Altair: + forkyState.data.next_sync_committee + else: + return static(default(SyncnetBits)) + + getSyncSubnets( + node.hasSyncPubKey((epoch.sync_committee_period + 1).start_slot().epoch), + syncCommittee, + ) + +func getSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = + let + subnets = node.getCurrentSyncCommiteeSubnets(epoch) + epochsToSyncPeriod = nearSyncCommitteePeriod(epoch) + + # The end-slot tracker might call this when it's theoretically applicable, + # but more than SYNC_COMMITTEE_SUBNET_COUNT epochs from when the next sync + # committee period begins, in which case `epochsToNextSyncPeriod` is none. + if epochsToSyncPeriod.isNone or + node.dag.cfg.consensusForkAtEpoch(epoch + epochsToSyncPeriod.get) < + ConsensusFork.Altair: + return subnets + + subnets + node.getNextSyncCommitteeSubnets(epoch) + +proc addAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addPhase0MessageHandlers(forkDigest, slot) + + # If this comes online near sync committee period, it'll immediately get + # replaced as usual by trackSyncCommitteeTopics, which runs at slot end. 
+ let + syncnets = node.getSyncCommitteeSubnets(slot.epoch) + validatorsCount = withState(node.dag.headState): + forkyState.data.validators.lenu64 + + for subcommitteeIdx in SyncSubcommitteeIndex: + if syncnets[subcommitteeIdx]: + node.network.subscribe( + getSyncCommitteeTopic(forkDigest, subcommitteeIdx), + getSyncCommitteeSubnetTopicParams(validatorsCount), + ) + + node.network.subscribe( + getSyncCommitteeContributionAndProofTopic(forkDigest), + getSyncContributionTopicParams(), + ) + + node.network.updateSyncnetsMetadata(syncnets) + +proc addCapellaMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addAltairMessageHandlers(forkDigest, slot) + node.network.subscribe( + getBlsToExecutionChangeTopic(forkDigest), getBlsToExecutionChangeTopicParams() + ) + +proc doAddDenebMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, slot: Slot, blobSidecarSubnetCount: uint64 +) = + node.addCapellaMessageHandlers(forkDigest, slot) + for topic in blobSidecarTopics(forkDigest, blobSidecarSubnetCount): + node.network.subscribe(topic, basicParams()) + +proc addDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.doAddDenebMessageHandlers( + forkDigest, slot, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT + ) + +proc addElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.doAddDenebMessageHandlers( + forkDigest, slot, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA + ) + +proc addFuluMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = + node.addElectraMessageHandlers(forkDigest, slot) + +proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removePhase0MessageHandlers(forkDigest) + + for subcommitteeIdx in SyncSubcommitteeIndex: + closureScope: + let idx = subcommitteeIdx + node.network.unsubscribe(getSyncCommitteeTopic(forkDigest, idx)) + + node.network.unsubscribe(getSyncCommitteeContributionAndProofTopic(forkDigest)) + +proc removeCapellaMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removeAltairMessageHandlers(forkDigest) + node.network.unsubscribe(getBlsToExecutionChangeTopic(forkDigest)) + +proc doRemoveDenebMessageHandlers( + node: BeaconNode, forkDigest: ForkDigest, blobSidecarSubnetCount: uint64 +) = + node.removeCapellaMessageHandlers(forkDigest) + for topic in blobSidecarTopics(forkDigest, blobSidecarSubnetCount): + node.network.unsubscribe(topic) + +proc removeDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.doRemoveDenebMessageHandlers(forkDigest, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT) + +proc removeElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.doRemoveDenebMessageHandlers( + forkDigest, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA + ) + +proc removeFuluMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = + node.removeElectraMessageHandlers(forkDigest) + +proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) = + template lastSyncUpdate(): untyped = + node.consensusManager[].actionTracker.lastSyncUpdate + + if lastSyncUpdate == Opt.some(slot.sync_committee_period()) and + nearSyncCommitteePeriod(slot.epoch).isNone(): + # No need to update unless we're close to the next sync committee period or + # new validators were registered with the action tracker + # TODO we _could_ skip running this in some of the "near" slots, but.. 
+ return + + lastSyncUpdate = Opt.some(slot.sync_committee_period()) + + let syncnets = node.getSyncCommitteeSubnets(slot.epoch) + + debug "Updating sync committee subnets", + syncnets, + metadata_syncnets = node.network.metadata.syncnets, + gossipState = node.gossipState + + # Assume that different gossip fork sync committee setups are in sync; this + # only remains relevant, currently, for one gossip transition epoch, so the + # consequences of this not being true aren't exceptionally dire, while this + # allows for bookkeeping simplication. + if syncnets == node.network.metadata.syncnets: + return + + let + newSyncnets = syncnets - node.network.metadata.syncnets + oldSyncnets = node.network.metadata.syncnets - syncnets + forkDigests = node.forkDigests() + validatorsCount = withState(node.dag.headState): + forkyState.data.validators.lenu64 + + for subcommitteeIdx in SyncSubcommitteeIndex: + doAssert not (newSyncnets[subcommitteeIdx] and oldSyncnets[subcommitteeIdx]) + for gossipFork in node.gossipState: + template topic(): auto = + getSyncCommitteeTopic(forkDigests[gossipFork], subcommitteeIdx) + + if oldSyncnets[subcommitteeIdx]: + node.network.unsubscribe(topic) + elif newSyncnets[subcommitteeIdx]: + node.network.subscribe( + topic, getSyncCommitteeSubnetTopicParams(validatorsCount) + ) + + node.network.updateSyncnetsMetadata(syncnets) + +proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) = + if not node.processor[].doppelgangerDetectionEnabled: + return + + # broadcastStartEpoch is set to FAR_FUTURE_EPOCH when we're not monitoring + # gossip - it is only viable to assert liveness in epochs where gossip is + # active + if epoch > node.processor[].doppelgangerDetection.broadcastStartEpoch: + for validator in node.attachedValidators[]: + validator.doppelgangerChecked(epoch - 1) + +proc maybeUpdateActionTrackerNextEpoch( + node: BeaconNode, forkyState: ForkyHashedBeaconState, currentSlot: Slot +) = + let nextEpoch = currentSlot.epoch + 1 + if node.consensusManager[].actionTracker.needsUpdate(forkyState, nextEpoch): + template epochRefFallback() = + let epochRef = node.dag.getEpochRef(node.dag.head, nextEpoch, false).expect( + "Getting head EpochRef should never fail" + ) + node.consensusManager[].actionTracker.updateActions( + epochRef.shufflingRef, epochRef.beacon_proposers + ) + + when forkyState is phase0.HashedBeaconState: + # The previous_epoch_participation-based logic requires Altair or newer + epochRefFallback() + else: + let + shufflingRef = node.dag.getShufflingRef(node.dag.head, nextEpoch, false).valueOr: + # epochRefFallback() won't work in this case either + return + nextEpochProposers = get_beacon_proposer_indices( + forkyState.data, shufflingRef.shuffled_active_validator_indices, nextEpoch + ) + nextEpochFirstProposer = nextEpochProposers[0].valueOr: + # All proposers except the first can be more straightforwardly and + # efficiently (re)computed correctly once in that epoch. + epochRefFallback() + return + + # Has to account for potential epoch transition TIMELY_SOURCE_FLAG_INDEX, + # TIMELY_TARGET_FLAG_INDEX, and inactivity penalties, resulting from spec + # functions get_flag_index_deltas() and get_inactivity_penalty_deltas(). + # + # There are no penalties associated with TIMELY_HEAD_FLAG_INDEX, but a + # reward exists. effective_balance == MAX_EFFECTIVE_BALANCE.Gwei ensures + # if even so, then the effective balance cannot change as a result. 
+ # + # It's not truly necessary to avoid all rewards and penalties, but only + # to bound them to ensure they won't unexpected alter effective balance + # during the upcoming epoch transition. + # + # During genesis epoch, the check for epoch participation is against + # current, not previous, epoch, and therefore there's a possibility of + # checking for if a validator has participated in an epoch before it will + # happen. + # + # Because process_rewards_and_penalties() in epoch processing happens + # before the current/previous participation swap, previous is correct + # even here, and consistent with what the epoch transition uses. + # + # Whilst slashing, proposal, and sync committee rewards and penalties do + # update the balances as they occur, they don't update effective_balance + # until the end of epoch, so detect via effective_balance_might_update. + # + # On EF mainnet epoch 233906, this matches 99.5% of active validators; + # with Holesky epoch 2041, 83% of active validators. + let + participation_flags = + forkyState.data.previous_epoch_participation.item(nextEpochFirstProposer) + effective_balance = + forkyState.data.validators.item(nextEpochFirstProposer).effective_balance + + # Maximal potential accuracy primarily useful during the last slot of + # each epoch to prepare for a possible proposal the first slot of the + # next epoch. Otherwise, epochRefFallback is potentially very slow as + # it can induce a lengthy state replay. + if (not (currentSlot + 1).is_epoch) or ( + participation_flags.has_flag(TIMELY_SOURCE_FLAG_INDEX) and + participation_flags.has_flag(TIMELY_TARGET_FLAG_INDEX) and + effective_balance == MAX_EFFECTIVE_BALANCE.Gwei and + forkyState.data.slot.epoch != GENESIS_EPOCH and + forkyState.data.inactivity_scores.item(nextEpochFirstProposer) == 0 and + not effective_balance_might_update( + forkyState.data.balances.item(nextEpochFirstProposer), effective_balance + ) + ): + node.consensusManager[].actionTracker.updateActions( + shufflingRef, nextEpochProposers + ) + else: + epochRefFallback() + +proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = + ## Subscribe to subnets that we are providing stability for or aggregating + ## and unsubscribe from the ones that are no longer relevant. 
+ + # Let the tracker know what duties are approaching - this will tell us how + # many stability subnets we need to be subscribed to and what subnets we'll + # soon be aggregating - in addition to the in-beacon-node duties, there may + # also be duties coming from the validator client, but we don't control when + # these arrive + await node.registerDuties(slot) + + # We start subscribing to gossip before we're fully synced - this allows time + # to subscribe before the sync end game + const + TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64 + HYSTERESIS_BUFFER = 16 + + static: + doAssert high(ConsensusFork) == ConsensusFork.Fulu + + let + head = node.dag.head + headDistance = + if slot > head.slot: + (slot - head.slot).uint64 + else: + 0'u64 + isBehind = headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER + targetGossipState = getTargetGossipState( + slot.epoch, node.dag.cfg.ALTAIR_FORK_EPOCH, node.dag.cfg.BELLATRIX_FORK_EPOCH, + node.dag.cfg.CAPELLA_FORK_EPOCH, node.dag.cfg.DENEB_FORK_EPOCH, + node.dag.cfg.ELECTRA_FORK_EPOCH, node.dag.cfg.FULU_FORK_EPOCH, isBehind, + ) + + doAssert targetGossipState.card <= 2 + + let + newGossipForks = targetGossipState - node.gossipState + oldGossipForks = node.gossipState - targetGossipState + + doAssert newGossipForks.card <= 2 + doAssert oldGossipForks.card <= 2 + + func maxGossipFork(gossipState: GossipState): int = + var res = -1 + for gossipFork in gossipState: + res = max(res, gossipFork.int) + res + + if maxGossipFork(targetGossipState) < maxGossipFork(node.gossipState) and + targetGossipState != {}: + warn "Unexpected clock regression during transition", + targetGossipState, gossipState = node.gossipState + + if node.gossipState.card == 0 and targetGossipState.card > 0: + # We are synced, so we will connect + debug "Enabling topic subscriptions", + wallSlot = slot, headSlot = head.slot, headDistance, targetGossipState + + node.processor[].setupDoppelgangerDetection(slot) + + # Specially when waiting for genesis, we'll already be synced on startup - + # it might also happen on a sufficiently fast restart + + # We "know" the actions for the current and the next epoch + withState(node.dag.headState): + if node.consensusManager[].actionTracker.needsUpdate(forkyState, slot.epoch): + let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect( + "Getting head EpochRef should never fail" + ) + node.consensusManager[].actionTracker.updateActions( + epochRef.shufflingRef, epochRef.beacon_proposers + ) + + node.maybeUpdateActionTrackerNextEpoch(forkyState, slot) + + if node.gossipState.card > 0 and targetGossipState.card == 0: + debug "Disabling topic subscriptions", + wallSlot = slot, headSlot = head.slot, headDistance + + node.processor[].clearDoppelgangerProtection() + + let forkDigests = node.forkDigests() + + const removeMessageHandlers: array[ConsensusFork, auto] = [ + removePhase0MessageHandlers, + removeAltairMessageHandlers, + removeAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) + removeCapellaMessageHandlers, + removeDenebMessageHandlers, + removeElectraMessageHandlers, + removeFuluMessageHandlers, + ] + + for gossipFork in oldGossipForks: + removeMessageHandlers[gossipFork](node, forkDigests[gossipFork]) + + const addMessageHandlers: array[ConsensusFork, auto] = [ + addPhase0MessageHandlers, + addAltairMessageHandlers, + addAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) + addCapellaMessageHandlers, + addDenebMessageHandlers, + addElectraMessageHandlers, + 
addFuluMessageHandlers, + ] + + for gossipFork in newGossipForks: + addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot) + + node.gossipState = targetGossipState + node.doppelgangerChecked(slot.epoch) + node.updateAttestationSubnetHandlers(slot) + node.updateBlocksGossipStatus(slot, isBehind) + node.updateLightClientGossipStatus(slot, isBehind) + +proc pruneBlobs(node: BeaconNode, slot: Slot) = + let blobPruneEpoch = + (slot.epoch - node.dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1) + if slot.is_epoch() and blobPruneEpoch >= node.dag.cfg.DENEB_FORK_EPOCH: + var blocks: array[SLOTS_PER_EPOCH.int, BlockId] + var count = 0 + let startIndex = node.dag.getBlockRange( + blobPruneEpoch.start_slot, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1) + ) + for i in startIndex ..< SLOTS_PER_EPOCH: + let blck = node.dag.getForkedBlock(blocks[int(i)]).valueOr: + continue + withBlck(blck): + when typeof(forkyBlck).kind < ConsensusFork.Deneb: + continue + else: + for j in 0 .. len(forkyBlck.message.body.blob_kzg_commitments) - 1: + if node.db.delBlobSidecar(blocks[int(i)].root, BlobIndex(j)): + count = count + 1 + debug "pruned blobs", count, blobPruneEpoch + +proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = + # Things we do when slot processing has ended and we're about to wait for the + # next slot + + # By waiting until close before slot end, ensure that preparation for next + # slot does not interfere with propagation of messages and with VC duties. + const endOffset = + aggregateSlotOffset + + nanos((NANOSECONDS_PER_SLOT - aggregateSlotOffset.nanoseconds.uint64).int64 div 2) + let endCutoff = node.beaconClock.fromNow(slot.start_beacon_time + endOffset) + if endCutoff.inFuture: + debug "Waiting for slot end", slot, endCutoff = shortLog(endCutoff.offset) + await sleepAsync(endCutoff.offset) + + if node.dag.needStateCachesAndForkChoicePruning(): + if node.attachedValidators[].validators.len > 0: + node.attachedValidators[].slashingProtection + # pruning is only done if the DB is set to pruning mode. + .pruneAfterFinalization(node.dag.finalizedHead.slot.epoch()) + node.processor.blobQuarantine[].pruneAfterFinalization( + node.dag.finalizedHead.slot.epoch() + ) + + # Delay part of pruning until latency critical duties are done. + # The other part of pruning, `pruneBlocksDAG`, is done eagerly. + # ---- + # This is the last pruning to do as it clears the "needPruning" condition. + node.consensusManager[].pruneStateCachesAndForkChoice() + + if node.config.historyMode == HistoryMode.Prune: + if not (slot + 1).is_epoch(): + # The epoch slot already is "heavy" due to the epoch processing, leave + # the pruning for later + node.dag.pruneHistory() + node.pruneBlobs(slot) + + when declared(GC_fullCollect): + # The slots in the beacon node work as frames in a game: we want to make + # sure that we're ready for the next one and don't get stuck in lengthy + # garbage collection tasks when time is of essence in the middle of a slot - + # while this does not guarantee that we'll never collect during a slot, it + # makes sure that all the scratch space we used during slot tasks (logging, + # temporary buffers etc) gets recycled for the next slot that is likely to + # need similar amounts of memory. 
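+    # GC_fullCollect is not expected to raise; the guard below re-raises any
+    # Defect and converts any other unexpected exception into a Defect with a
+    # clear message (see the TODO about fixing this upstream).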
+ try: + GC_fullCollect() + except Defect as exc: + raise exc # Reraise to maintain call stack + except Exception: + # TODO upstream + raiseAssert "Unexpected exception during GC collection" + let gcCollectionTick = Moment.now() + + # Checkpoint the database to clear the WAL file and make sure changes in + # the database are synced with the filesystem. + node.db.checkpoint() + let + dbCheckpointTick = Moment.now() + dbCheckpointDur = dbCheckpointTick - gcCollectionTick + db_checkpoint_seconds.inc(dbCheckpointDur.toFloatSeconds) + if dbCheckpointDur >= MinSignificantProcessingDuration: + info "Database checkpointed", dur = dbCheckpointDur + else: + debug "Database checkpointed", dur = dbCheckpointDur + + node.syncCommitteeMsgPool[].pruneData(slot) + if slot.is_epoch: + node.dynamicFeeRecipientsStore[].pruneOldMappings(slot.epoch) + + # Update upcoming actions - we do this every slot in case a reorg happens + let head = node.dag.head + if node.isSynced(head) and head.executionValid: + withState(node.dag.headState): + # maybeUpdateActionTrackerNextEpoch might not account for balance changes + # from the process_rewards_and_penalties() epoch transition but only from + # process_block() and other per-slot sources. This mainly matters insofar + # as it might trigger process_effective_balance_updates() changes in that + # same epoch transition, which function is therefore potentially blind to + # but which might then affect beacon proposers. + # + # Because this runs every slot, it can account naturally for slashings, + # which affect balances via slash_validator() when they happen, and any + # missed sync committee participation via process_sync_aggregate(), but + # attestation penalties for example, need, specific handling. + # checked by maybeUpdateActionTrackerNextEpoch. + node.maybeUpdateActionTrackerNextEpoch(forkyState, slot) + + let + nextAttestationSlot = + node.consensusManager[].actionTracker.getNextAttestationSlot(slot) + nextProposalSlot = node.consensusManager[].actionTracker.getNextProposalSlot(slot) + nextActionSlot = min(nextAttestationSlot, nextProposalSlot) + nextActionWaitTime = saturate(fromNow(node.beaconClock, nextActionSlot)) + + # -1 is a more useful output than 18446744073709551615 as an indicator of + # no future attestation/proposal known. 
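+  # (FAR_FUTURE_SLOT is high(uint64).Slot, which is exactly what formatInt64
+  # checks for below.)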
+ template formatInt64(x: Slot): int64 = + if x == high(uint64).Slot: + -1'i64 + else: + toGaugeValue(x) + + let + syncCommitteeSlot = slot + 1 + syncCommitteeEpoch = syncCommitteeSlot.epoch + inCurrentSyncCommittee = + not node.getCurrentSyncCommiteeSubnets(syncCommitteeEpoch).isZeros() + + template formatSyncCommitteeStatus(): string = + if inCurrentSyncCommittee: + "current" + elif not node.getNextSyncCommitteeSubnets(syncCommitteeEpoch).isZeros(): + let slotsToNextSyncCommitteePeriod = + SLOTS_PER_SYNC_COMMITTEE_PERIOD - + since_sync_committee_period_start(syncCommitteeSlot) + # int64 conversion is safe + doAssert slotsToNextSyncCommitteePeriod <= SLOTS_PER_SYNC_COMMITTEE_PERIOD + "in " & + toTimeLeftString( + SECONDS_PER_SLOT.int64.seconds * slotsToNextSyncCommitteePeriod.int64 + ) + else: + "none" + + info "Slot end", + slot = shortLog(slot), + nextActionWait = + if nextActionSlot == FAR_FUTURE_SLOT: + "n/a" + else: + shortLog(nextActionWaitTime), + nextAttestationSlot = formatInt64(nextAttestationSlot), + nextProposalSlot = formatInt64(nextProposalSlot), + syncCommitteeDuties = formatSyncCommitteeStatus(), + head = shortLog(head) + + if nextActionSlot != FAR_FUTURE_SLOT: + next_action_wait.set(nextActionWaitTime.toFloatSeconds) + + next_proposal_wait.set( + if nextProposalSlot != FAR_FUTURE_SLOT: + saturate(fromNow(node.beaconClock, nextProposalSlot)).toFloatSeconds() + else: + Inf + ) + + sync_committee_active.set(if inCurrentSyncCommittee: 1 else: 0) + + let epoch = slot.epoch + if epoch + 1 >= node.network.forkId.next_fork_epoch: + # Update 1 epoch early to block non-fork-ready peers + node.network.updateForkId(epoch, node.dag.genesis_validators_root) + + # When we're not behind schedule, we'll speculatively update the clearance + # state in anticipation of receiving the next block - we do it after + # logging slot end since the nextActionWaitTime can be short + let advanceCutoff = node.beaconClock.fromNow( + slot.start_beacon_time() + chronos.seconds(int(SECONDS_PER_SLOT - 1)) + ) + if advanceCutoff.inFuture: + # We wait until there's only a second left before the next slot begins, then + # we advance the clearance state to the next slot - this gives us a high + # probability of being prepared for the block that will arrive and the + # epoch processing that follows + await sleepAsync(advanceCutoff.offset) + node.dag.advanceClearanceState() + + # Prepare action tracker for the next slot + node.consensusManager[].actionTracker.updateSlot(slot + 1) + + # The last thing we do is to perform the subscriptions and unsubscriptions for + # the next slot, just before that slot starts - because of the advance cuttoff + # above, this will be done just before the next slot starts + node.updateSyncCommitteeTopics(slot + 1) + + await node.updateGossipStatus(slot + 1) + +func formatNextConsensusFork(node: BeaconNode, withVanityArt = false): Opt[string] = + let consensusFork = node.dag.cfg.consensusForkAtEpoch(node.dag.head.slot.epoch) + if consensusFork == ConsensusFork.high: + return Opt.none(string) + let + nextConsensusFork = consensusFork.succ() + nextForkEpoch = node.dag.cfg.consensusForkEpoch(nextConsensusFork) + if nextForkEpoch == FAR_FUTURE_EPOCH: + return Opt.none(string) + Opt.some( + (if withVanityArt: nextConsensusFork.getVanityMascot & " " else: "") & + $nextConsensusFork & ":" & $nextForkEpoch + ) + +proc syncStatus(node: BeaconNode, wallSlot: Slot): string = + node.syncOverseer.syncStatusMessage() + +when defined(windows): + from winservice import establishWindowsService, 
reportServiceStatusSuccess + +proc onSlotStart( + node: BeaconNode, wallTime: BeaconTime, lastSlot: Slot +): Future[bool] {.async.} = + ## Called at the beginning of a slot - usually every slot, but sometimes might + ## skip a few in case we're running late. + ## wallTime: current system time - we will strive to perform all duties up + ## to this point in time + ## lastSlot: the last slot that we successfully processed, so we know where to + ## start work from - there might be jumps if processing is delayed + let + # The slot we should be at, according to the clock + wallSlot = wallTime.slotOrZero + # If everything was working perfectly, the slot that we should be processing + expectedSlot = lastSlot + 1 + finalizedEpoch = node.dag.finalizedHead.blck.slot.epoch() + delay = wallTime - expectedSlot.start_beacon_time() + + node.processingDelay = Opt.some(nanoseconds(delay.nanoseconds)) + + block: + logScope: + slot = shortLog(wallSlot) + epoch = shortLog(wallSlot.epoch) + sync = node.syncStatus(wallSlot) + peers = len(node.network.peerPool) + head = shortLog(node.dag.head) + finalized = shortLog(getStateField(node.dag.headState, finalized_checkpoint)) + delay = shortLog(delay) + let nextConsensusForkDescription = node.formatNextConsensusFork() + if nextConsensusForkDescription.isNone: + info "Slot start" + else: + info "Slot start", nextFork = nextConsensusForkDescription.get + + # Check before any re-scheduling of onSlotStart() + if checkIfShouldStopAtEpoch(wallSlot, node.config.stopAtEpoch): + quit(0) + + when defined(windows): + if node.config.runAsService: + reportServiceStatusSuccess() + + beacon_slot.set wallSlot.toGaugeValue + beacon_current_epoch.set wallSlot.epoch.toGaugeValue + + # both non-negative, so difference can't overflow or underflow int64 + finalization_delay.set(wallSlot.epoch.toGaugeValue - finalizedEpoch.toGaugeValue) + + if node.config.strictVerification: + verifyFinalization(node, wallSlot) + + node.consensusManager[].updateHead(wallSlot) + + await node.handleValidatorDuties(lastSlot, wallSlot) + + await onSlotEnd(node, wallSlot) + + # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#registration-dissemination + # This specification suggests validators re-submit to builder software every + # `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION` epochs. 
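+  # The re-registration is spawned asynchronously so that it does not delay
+  # the rest of slot processing.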
+ if wallSlot.is_epoch and + wallSlot.epoch mod EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION == 0: + asyncSpawn node.registerValidators(wallSlot.epoch) + + return false + +proc onSecond(node: BeaconNode, time: Moment) = + # Nim GC metrics (for the main thread) + updateThreadMetrics() + + if node.config.stopAtSyncedEpoch != 0 and + node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch: + notice "Shutting down after having reached the target synced epoch" + bnStatus = BeaconNodeStatus.Stopping + +proc runOnSecondLoop(node: BeaconNode) {.async.} = + const + sleepTime = chronos.seconds(1) + nanosecondsIn1s = float(sleepTime.nanoseconds) + while true: + let start = chronos.now(chronos.Moment) + await chronos.sleepAsync(sleepTime) + let afterSleep = chronos.now(chronos.Moment) + let sleepTime = afterSleep - start + node.onSecond(start) + let finished = chronos.now(chronos.Moment) + let processingTime = finished - afterSleep + ticks_delay.set(sleepTime.nanoseconds.float / nanosecondsIn1s) + trace "onSecond task completed", sleepTime, processingTime + +func connectedPeersCount(node: BeaconNode): int = + len(node.network.peerPool) + +proc installRestHandlers(restServer: RestServerRef, node: BeaconNode) = + restServer.router.installBeaconApiHandlers(node) + restServer.router.installBuilderApiHandlers(node) + restServer.router.installConfigApiHandlers(node) + restServer.router.installDebugApiHandlers(node) + restServer.router.installEventApiHandlers(node) + restServer.router.installNimbusApiHandlers(node) + restServer.router.installNodeApiHandlers(node) + restServer.router.installValidatorApiHandlers(node) + restServer.router.installRewardsApiHandlers(node) + if node.dag.lcDataStore.serve: + restServer.router.installLightClientApiHandlers(node) + +from ./spec/datatypes/capella import SignedBeaconBlock + +proc installMessageValidators(node: BeaconNode) = + # These validators stay around the whole time, regardless of which specific + # subnets are subscribed to during any given epoch. 
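+  # Message validators are registered per consensus fork, keyed by that fork's
+  # digest, so gossip arriving on any fork's topics can be validated without
+  # reinstalling handlers at fork transitions.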
+ let forkDigests = node.dag.forkDigests + + for fork in ConsensusFork: + withConsensusFork(fork): + let digest = forkDigests[].atConsensusFork(consensusFork) + + # beacon_block + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_block + node.network.addValidator( + getBeaconBlocksTopic(digest), + proc(signedBlock: consensusFork.SignedBeaconBlock): ValidationResult = + if node.shouldSyncOptimistically(node.currentSlot): + toValidationResult( + node.optimisticProcessor.processSignedBeaconBlock(signedBlock) + ) + else: + toValidationResult( + node.processor[].processSignedBeaconBlock(MsgSource.gossip, signedBlock) + ), + ) + + # beacon_attestation_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id + when consensusFork >= ConsensusFork.Electra: + for it in SubnetId: + closureScope: + let subnet_id = it + node.network.addAsyncValidator( + getAttestationTopic(digest, subnet_id), + proc( + attestation: SingleAttestation + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processAttestation( + MsgSource.gossip, + attestation, + subnet_id, + checkSignature = true, + checkValidator = false, + ) + ), + ) + else: + for it in SubnetId: + closureScope: + let subnet_id = it + node.network.addAsyncValidator( + getAttestationTopic(digest, subnet_id), + proc( + attestation: phase0.Attestation + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processAttestation( + MsgSource.gossip, + attestation, + subnet_id, + checkSignature = true, + checkValidator = false, + ) + ), + ) + + # beacon_aggregate_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof + when consensusFork >= ConsensusFork.Electra: + node.network.addAsyncValidator( + getAggregateAndProofsTopic(digest), + proc( + signedAggregateAndProof: electra.SignedAggregateAndProof + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedAggregateAndProof( + MsgSource.gossip, signedAggregateAndProof + ) + ), + ) + else: + node.network.addAsyncValidator( + getAggregateAndProofsTopic(digest), + proc( + signedAggregateAndProof: phase0.SignedAggregateAndProof + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedAggregateAndProof( + MsgSource.gossip, signedAggregateAndProof + ) + ), + ) + + # attester_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/p2p-interface.md#attester_slashing + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/p2p-interface.md#modifications-in-electra + when consensusFork >= ConsensusFork.Electra: + node.network.addValidator( + getAttesterSlashingsTopic(digest), + proc(attesterSlashing: electra.AttesterSlashing): ValidationResult = + toValidationResult( + node.processor[].processAttesterSlashing( + MsgSource.gossip, attesterSlashing + ) + ), + ) + else: + node.network.addValidator( + getAttesterSlashingsTopic(digest), + proc(attesterSlashing: phase0.AttesterSlashing): ValidationResult = + toValidationResult( + node.processor[].processAttesterSlashing( + MsgSource.gossip, attesterSlashing + ) + ), + ) + + # proposer_slashing + # 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#proposer_slashing + node.network.addValidator( + getProposerSlashingsTopic(digest), + proc(proposerSlashing: ProposerSlashing): ValidationResult = + toValidationResult( + node.processor[].processProposerSlashing(MsgSource.gossip, proposerSlashing) + ), + ) + + # voluntary_exit + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#voluntary_exit + node.network.addValidator( + getVoluntaryExitsTopic(digest), + proc(signedVoluntaryExit: SignedVoluntaryExit): ValidationResult = + toValidationResult( + node.processor[].processSignedVoluntaryExit( + MsgSource.gossip, signedVoluntaryExit + ) + ), + ) + + when consensusFork >= ConsensusFork.Altair: + # sync_committee_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_subnet_id + for subcommitteeIdx in SyncSubcommitteeIndex: + closureScope: + let idx = subcommitteeIdx + node.network.addAsyncValidator( + getSyncCommitteeTopic(digest, idx), + proc( + msg: SyncCommitteeMessage + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSyncCommitteeMessage( + MsgSource.gossip, msg, idx + ) + ), + ) + + # sync_committee_contribution_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof + node.network.addAsyncValidator( + getSyncCommitteeContributionAndProofTopic(digest), + proc( + msg: SignedContributionAndProof + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processSignedContributionAndProof( + MsgSource.gossip, msg + ) + ), + ) + + when consensusFork >= ConsensusFork.Capella: + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/capella/p2p-interface.md#bls_to_execution_change + node.network.addAsyncValidator( + getBlsToExecutionChangeTopic(digest), + proc( + msg: SignedBLSToExecutionChange + ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processBlsToExecutionChange(MsgSource.gossip, msg) + ), + ) + + when consensusFork >= ConsensusFork.Deneb: + # blob_sidecar_{subnet_id} + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id + let subnetCount = + when consensusFork >= ConsensusFork.Electra: + node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA + else: + node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT + for it in 0.BlobId ..< subnetCount.BlobId: + closureScope: + let subnet_id = it + node.network.addValidator( + getBlobSidecarTopic(digest, subnet_id), + proc(blobSidecar: deneb.BlobSidecar): ValidationResult = + toValidationResult( + node.processor[].processBlobSidecar( + MsgSource.gossip, blobSidecar, subnet_id + ) + ), + ) + + node.installLightClientMessageValidators() + +proc stop(node: BeaconNode) = + bnStatus = BeaconNodeStatus.Stopping + notice "Graceful shutdown" + if not node.config.inProcessValidators: + try: + node.vcProcess.close() + except Exception as exc: + warn "Couldn't close vc process", msg = exc.msg + try: + waitFor node.network.stop() + except CatchableError as exc: + warn "Couldn't stop network", msg = exc.msg + + waitFor node.metricsServer.stopMetricsServer() + + node.attachedValidators[].slashingProtection.close() + node.attachedValidators[].close() + 
node.db.close() + notice "Databases closed" + +proc run(node: BeaconNode) {.raises: [CatchableError].} = + bnStatus = BeaconNodeStatus.Running + + if not isNil(node.restServer): + node.restServer.installRestHandlers(node) + node.restServer.start() + + if not isNil(node.keymanagerServer): + doAssert not isNil(node.keymanagerHost) + node.keymanagerServer.router.installKeymanagerHandlers(node.keymanagerHost[]) + if node.keymanagerServer != node.restServer: + node.keymanagerServer.start() + + let + wallTime = node.beaconClock.now() + wallSlot = wallTime.slotOrZero() + + node.startLightClient() + node.requestManager.start() + node.syncOverseer.start() + + waitFor node.updateGossipStatus(wallSlot) + + for web3signerUrl in node.config.web3SignerUrls: + # TODO + # The current strategy polls all remote signers independently + # from each other which may lead to some race conditions of + # validators are migrated from one signer to another + # (because the updates to our validator pool are not atomic). + # Consider using different strategies that would detect such + # race conditions. + asyncSpawn node.pollForDynamicValidators( + web3signerUrl, node.config.web3signerUpdateInterval + ) + + asyncSpawn runSlotLoop(node, wallTime, onSlotStart) + asyncSpawn runOnSecondLoop(node) + asyncSpawn runQueueProcessingLoop(node.blockProcessor) + asyncSpawn runKeystoreCachePruningLoop(node.keystoreCache) + + # main event loop + while bnStatus == BeaconNodeStatus.Running: + poll() # if poll fails, the network is broken + + # time to say goodbye + node.stop() + +var gPidFile: string +proc createPidFile(filename: string) {.raises: [IOError].} = + writeFile filename, $os.getCurrentProcessId() + gPidFile = filename + addExitProc proc() {.noconv.} = + discard io2.removeFile(gPidFile) + +proc initializeNetworking(node: BeaconNode) {.async.} = + node.installMessageValidators() + + info "Listening to incoming network requests" + await node.network.startListening() + + let addressFile = node.config.dataDir / "beacon_node.enr" + writeFile(addressFile, node.network.announcedENR.toURI) + + await node.network.start() + +proc start*(node: BeaconNode) {.raises: [CatchableError].} = + let + head = node.dag.head + finalizedHead = node.dag.finalizedHead + genesisTime = node.beaconClock.fromNow(start_beacon_time(Slot 0)) + + notice "Starting beacon node", + version = fullVersionStr, + nimVersion = NimVersion, + enr = node.network.announcedENR.toURI, + peerId = $node.network.switch.peerInfo.peerId, + timeSinceFinalization = + node.beaconClock.now() - finalizedHead.slot.start_beacon_time(), + head = shortLog(head), + justified = + shortLog(getStateField(node.dag.headState, current_justified_checkpoint)), + finalized = shortLog(getStateField(node.dag.headState, finalized_checkpoint)), + finalizedHead = shortLog(finalizedHead), + SLOTS_PER_EPOCH, + SECONDS_PER_SLOT, + SPEC_VERSION, + dataDir = node.config.dataDir.string, + validators = node.attachedValidators[].count + + if genesisTime.inFuture: + notice "Waiting for genesis", genesisIn = genesisTime.offset + + waitFor node.initializeNetworking() + + node.elManager.start() + node.run() + +func formatGwei(amount: Gwei): string = + # TODO This is implemented in a quite a silly way. + # Better routines for formatting decimal numbers + # should exists somewhere else. + let + eth = distinctBase(amount) div 1000000000 + remainder = distinctBase(amount) mod 1000000000 + + result = $eth + if remainder != 0: + result.add '.' 
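+    # Left-pad the sub-ETH remainder to 9 digits and then drop trailing zeros,
+    # so that e.g. 1000000001 Gwei renders as "1.000000001" and
+    # 1500000000 Gwei as "1.5".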
+ let remainderStr = $remainder + for i in remainderStr.len ..< 9: + result.add '0' + result.add remainderStr + while result[^1] == '0': + result.setLen(result.len - 1) + +when not defined(windows): + proc initStatusBar(node: BeaconNode) {.raises: [ValueError].} = + if not isatty(stdout): + return + if not node.config.statusBarEnabled: + return + + try: + enableTrueColors() + except Exception as exc: # TODO Exception + error "Couldn't enable colors", err = exc.msg + + proc dataResolver(expr: string): string {.raises: [].} = + template justified(): untyped = + node.dag.head.atEpochStart( + getStateField(node.dag.headState, current_justified_checkpoint).epoch + ) + + # TODO: + # We should introduce a general API for resolving dot expressions + # such as `db.latest_block.slot` or `metrics.connected_peers`. + # Such an API can be shared between the RPC back-end, CLI tools + # such as ncli, a potential GraphQL back-end and so on. + # The status bar feature would allow the user to specify an + # arbitrary expression that is resolvable through this API. + case expr.toLowerAscii + of "version": + versionAsStr + of "full_version": + fullVersionStr + of "connected_peers": + $(node.connectedPeersCount) + of "head_root": + shortLog(node.dag.head.root) + of "head_epoch": + $(node.dag.head.slot.epoch) + of "head_epoch_slot": + $(node.dag.head.slot.since_epoch_start) + of "head_slot": + $(node.dag.head.slot) + of "justifed_root": + shortLog(justified.blck.root) + of "justifed_epoch": + $(justified.slot.epoch) + of "justifed_epoch_slot": + $(justified.slot.since_epoch_start) + of "justifed_slot": + $(justified.slot) + of "finalized_root": + shortLog(node.dag.finalizedHead.blck.root) + of "finalized_epoch": + $(node.dag.finalizedHead.slot.epoch) + of "finalized_epoch_slot": + $(node.dag.finalizedHead.slot.since_epoch_start) + of "finalized_slot": + $(node.dag.finalizedHead.slot) + of "epoch": + $node.currentSlot.epoch + of "epoch_slot": + $(node.currentSlot.since_epoch_start) + of "slot": + $node.currentSlot + of "slots_per_epoch": + $SLOTS_PER_EPOCH + of "slot_trailing_digits": + var slotStr = $node.currentSlot + if slotStr.len > 3: + slotStr = slotStr[^3 ..^ 1] + slotStr + of "attached_validators_balance": + formatGwei(node.attachedValidatorBalanceTotal) + of "next_consensus_fork": + let nextConsensusForkDescription = + node.formatNextConsensusFork(withVanityArt = true) + if nextConsensusForkDescription.isNone: + "" + else: + " (scheduled " & nextConsensusForkDescription.get & ")" + of "sync_status": + node.syncStatus(node.currentSlot) + else: + # We ignore typos for now and just render the expression + # as it was written. TODO: come up with a good way to show + # an error message to the user. 
+ "$" & expr + + var statusBar = StatusBarView.init(node.config.statusBarContents, dataResolver) + + when compiles(defaultChroniclesStream.outputs[0].writer): + let tmp = defaultChroniclesStream.outputs[0].writer + + defaultChroniclesStream.outputs[0].writer = proc( + logLevel: LogLevel, msg: LogOutputStr + ) {.raises: [].} = + try: + # p.hidePrompt + erase statusBar + # p.writeLine msg + tmp(logLevel, msg) + render statusBar + # p.showPrompt + except Exception as e: # render raises Exception + logLoggingFailure(cstring(msg), e) + + proc statusBarUpdatesPollingLoop() {.async.} = + try: + while true: + update statusBar + erase statusBar + render statusBar + await sleepAsync(chronos.seconds(1)) + except CatchableError as exc: + warn "Failed to update status bar, no further updates", err = exc.msg + + asyncSpawn statusBarUpdatesPollingLoop() + +proc doRunBeaconNode( + config: var BeaconNodeConf, rng: ref HmacDrbgContext +) {.raises: [CatchableError].} = + info "Launching beacon node", + version = fullVersionStr, + bls_backend = $BLS_BACKEND, + const_preset, + cmdParams = commandLineParams(), + config + + template ignoreDeprecatedOption(option: untyped): untyped = + if config.option.isSome: + warn "Config option is deprecated", option = config.option.get + + ignoreDeprecatedOption requireEngineAPI + ignoreDeprecatedOption safeSlotsToImportOptimistically + ignoreDeprecatedOption terminalTotalDifficultyOverride + ignoreDeprecatedOption optimistic + ignoreDeprecatedOption validatorMonitorTotals + ignoreDeprecatedOption web3ForcePolling + ignoreDeprecatedOption finalizedDepositTreeSnapshot + + createPidFile(config.dataDir.string / "beacon_node.pid") + + config.createDumpDirs() + + # There are no managed event loops in here, to do a graceful shutdown, but + # letting the default Ctrl+C handler exit is safe, since we only read from + # the db. + let metadata = config.loadEth2Network() + + # Updating the config based on the metadata certainly is not beautiful but it + # works + for node in metadata.bootstrapNodes: + config.bootstrapNodes.add node + + ## Ctrl+C handling + proc controlCHandler() {.noconv.} = + when defined(windows): + # workaround for https://github.com/nim-lang/Nim/issues/4057 + try: + setupForeignThreadGc() + except Exception as exc: + raiseAssert exc.msg + # shouldn't happen + notice "Shutting down after having received SIGINT" + bnStatus = BeaconNodeStatus.Stopping + + try: + setControlCHook(controlCHandler) + except Exception as exc: # TODO Exception + warn "Cannot set ctrl-c handler", msg = exc.msg + + # equivalent SIGTERM handler + when defined(posix): + proc SIGTERMHandler(signal: cint) {.noconv.} = + notice "Shutting down after having received SIGTERM" + bnStatus = BeaconNodeStatus.Stopping + + c_signal(ansi_c.SIGTERM, SIGTERMHandler) + + block: + let res = + if config.trustedSetupFile.isNone: + conf.loadKzgTrustedSetup() + else: + conf.loadKzgTrustedSetup(config.trustedSetupFile.get) + if res.isErr(): + raiseAssert res.error() + + let node = waitFor BeaconNode.init(rng, config, metadata) + + let metricsServer = (waitFor config.initMetricsServer()).valueOr: + return + + # Nim GC metrics (for the main thread) will be collected in onSecond(), but + # we disable piggy-backing on other metrics here. 
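+  # Thread/GC metrics are instead refreshed explicitly via
+  # updateThreadMetrics() in onSecond().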
+ setSystemMetricsAutomaticUpdate(false) + + node.metricsServer = metricsServer + + if bnStatus == BeaconNodeStatus.Stopping: + return + + when not defined(windows): + # This status bar can lock a Windows terminal emulator, blocking the whole + # event loop (seen on Windows 10, with a default MSYS2 terminal). + initStatusBar(node) + + if node.nickname != "": + dynamicLogScope(node = node.nickname): + node.start() + else: + node.start() + +proc doRecord( + config: BeaconNodeConf, rng: var HmacDrbgContext +) {.raises: [CatchableError].} = + case config.recordCmd + of RecordCmd.create: + let netKeys = getPersistentNetKeys(rng, config) + + var fieldPairs: seq[FieldPair] + for field in config.fields: + let fieldPair = field.split(":") + if fieldPair.len > 1: + fieldPairs.add(toFieldPair(fieldPair[0], hexToSeqByte(fieldPair[1]))) + else: + fatal "Invalid field pair" + quit QuitFailure + + let record = enr.Record + .init( + config.seqNumber, + netKeys.seckey.asEthKey, + Opt.some(config.ipExt), + Opt.some(config.tcpPortExt), + Opt.some(config.udpPortExt), + fieldPairs, + ) + .expect("Record within size limits") + + echo record.toURI() + of RecordCmd.print: + echo $config.recordPrint + +proc doWeb3Cmd( + config: BeaconNodeConf, rng: var HmacDrbgContext +) {.raises: [CatchableError].} = + case config.web3Cmd + of Web3Cmd.test: + waitFor testWeb3Provider( + config.web3TestUrl, rng.loadJwtSecret(config, allowCreate = true) + ) + +proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError].} = + let + dir = conf.validatorsDir() + filetrunc = SlashingDbName + # TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312 + let db = SlashingProtectionDB.loadUnchecked(dir, filetrunc, readOnly = false) + + let interchange = conf.exportedInterchangeFile.string + db.exportSlashingInterchange(interchange, conf.exportedValidators) + echo "Export finished: '", dir / filetrunc & ".sqlite3", "' into '", interchange, "'" + +proc doSlashingImport(conf: BeaconNodeConf) {.raises: [IOError].} = + let + dir = conf.validatorsDir() + filetrunc = SlashingDbName + # TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312 + + let interchange = conf.importedInterchangeFile.string + + var spdir: SPDIR + try: + spdir = Json.loadFile(interchange, SPDIR, requireAllFields = true) + except SerializationError as err: + writeStackTrace() + stderr.write $Json & " load issue for file \"", interchange, "\"\n" + stderr.write err.formatMsg(interchange), "\n" + quit 1 + + # Open DB and handle migration from v1 to v2 if needed + let db = SlashingProtectionDB.init( + genesis_validators_root = Eth2Digest spdir.metadata.genesis_validators_root, + basePath = dir, + dbname = filetrunc, + modes = {kCompleteArchive}, + ) + + # Now import the slashing interchange file + # Failures mode: + # - siError can only happen with invalid genesis_validators_root which would be caught above + # - siPartial can happen for invalid public keys, slashable blocks, slashable votes + let status = db.inclSPDIR(spdir) + doAssert status in {siSuccess, siPartial} + + echo "Import finished: '", interchange, "' into '", dir / filetrunc & ".sqlite3", "'" + +proc doSlashingInterchange(conf: BeaconNodeConf) {.raises: [CatchableError].} = + case conf.slashingdbCmd + of SlashProtCmd.`export`: + conf.doSlashingExport() + of SlashProtCmd.`import`: + conf.doSlashingImport() + +proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [CatchableError].} = + # Single RNG instance for the application - will be seeded on construction + # and avoid 
using system resources (such as urandom) after that + let rng = HmacDrbgContext.new() + + case config.cmd + of BNStartUpCmd.noCommand: + doRunBeaconNode(config, rng) + of BNStartUpCmd.deposits: + doDeposits(config, rng[]) + of BNStartUpCmd.wallets: + doWallets(config, rng[]) + of BNStartUpCmd.record: + doRecord(config, rng[]) + of BNStartUpCmd.web3: + doWeb3Cmd(config, rng[]) + of BNStartUpCmd.slashingdb: + doSlashingInterchange(config) + of BNStartUpCmd.trustedNodeSync: + if config.blockId.isSome(): + error "--blockId option has been removed - use --state-id instead!" + quit 1 + + let + metadata = loadEth2Network(config) + db = BeaconChainDB.new(config.databaseDir, metadata.cfg, inMemory = false) + genesisState = waitFor fetchGenesisState(metadata) + waitFor db.doRunTrustedNodeSync( + metadata, config.databaseDir, config.eraDir, config.trustedNodeUrl, + config.stateId, config.lcTrustedBlockRoot, config.backfillBlocks, config.reindex, + genesisState, + ) + db.close() + +{.pop.} # TODO moduletests exceptions From 5ee4d6fd00ca45dc7534ab1e24e9b65fc28beaca Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Fri, 30 May 2025 13:53:42 +0100 Subject: [PATCH 19/34] Added beacon node: - Makefile support - setup and handlers --- Makefile | 7 ++++--- nimbus/common/utils.nim | 2 +- nimbus/consensus/consensus_layer.nim | 16 ++++++--------- nimbus/consensus/wrapper_consensus.nim | 27 +++++++++++++------------- 4 files changed, 25 insertions(+), 27 deletions(-) diff --git a/Makefile b/Makefile index 0e4704ad48..4954eb784b 100644 --- a/Makefile +++ b/Makefile @@ -386,9 +386,10 @@ ifneq ($(USE_LIBBACKTRACE), 0) endif # Nimbus -nimbus: | build deps - echo -e $(BUILD_MSG) "build/$@" && \ - $(ENV_SCRIPT) nim c $(NIM_PARAMS) --threads:on -d:chronicles_log_level=TRACE -o:build/nimbus_client "nimbus/nimbus.nim" +NIM_PARAMS := -d:release --parallelBuild:1-d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku,grandine $(NIM_PARAMS) +nimbus: | build deps rocksdb + echo -e $(BUILD_MSG) "build/nimbus_client" && \ + $(ENV_SCRIPT) nim c $(NIM_PARAMS) --threads:on -d:disable_libbacktrace -d:libp2p_pki_schemes=secp256k1 -o:build//nimbus_client "nimbus/nimbus.nim" all_tests_nimbus: | build deps echo -e $(BUILD_MSG) "build/$@" && \ diff --git a/nimbus/common/utils.nim b/nimbus/common/utils.nim index c9360fbdcb..d44d9e0025 100644 --- a/nimbus/common/utils.nim +++ b/nimbus/common/utils.nim @@ -7,7 +7,7 @@ {.push raises: [].} -import std/[strutils], results, chronicles, stew/shims/macros, confutils, ../conf +import results, chronicles, stew/shims/macros, ../conf logScope: topics = "utils" diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index 47043f7eaf..8ac8ec2d39 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -11,15 +11,16 @@ import std/[atomics, os], chronos, chronicles, + results, ../conf, ../common/utils, - results, + ./wrapper_consensus, beacon_chain/[beacon_node_status, nimbus_binary_common] logScope: topics = "Consensus layer" -proc startBeaconNode(configs: seq[string]) = +proc startBeaconNode(configs: seq[string]) {.raises: [CatchableError].} = proc commandLineParams(): seq[string] = configs @@ -31,8 +32,7 @@ proc startBeaconNode(configs: seq[string]) = setupLogging(config.logLevel, config.logStdout, config.logFile) - #TODO: create public entry on beacon node - #handleStartUpCmd(config) + handleStartUpCmd(config) ## Consensus Layer handler proc consensusLayerHandler*(channel: ptr Channel[pointer]) = @@ 
-50,13 +50,9 @@ proc consensusLayerHandler*(channel: ptr Channel[pointer]) = #signal main thread that data is read isConfigRead.store(true) - {.gcsafe.}: - startBeaconNode(configList) - try: - while true: - info "consensus ..." - sleep(cNimbusServiceTimeoutMs) + {.gcsafe.}: + startBeaconNode(configList) except CatchableError as e: fatal "error", message = e.msg diff --git a/nimbus/consensus/wrapper_consensus.nim b/nimbus/consensus/wrapper_consensus.nim index 547fe8de84..4f62fe3ecb 100644 --- a/nimbus/consensus/wrapper_consensus.nim +++ b/nimbus/consensus/wrapper_consensus.nim @@ -15,15 +15,16 @@ import metrics/chronos_httpserver, stew/[byteutils, io2], eth/p2p/discoveryv5/[enr, random2], - ./consensus_object_pools/[blob_quarantine, data_column_quarantine, blockchain_list], - ./consensus_object_pools/vanity_logs/vanity_logs, - ./networking/[topic_params, network_metadata_downloads], - ./rpc/[rest_api, state_ttl_cache], - ./spec/datatypes/[altair, bellatrix, phase0], - ./spec/[engine_authentication, weak_subjectivity, peerdas_helpers], - ./sync/[sync_protocol, light_client_protocol, sync_overseer], - ./validators/[keystore_management, beacon_validators], - "."/[ + beacon_chain/consensus_object_pools/ + [blob_quarantine, data_column_quarantine, blockchain_list], + beacon_chain/consensus_object_pools/vanity_logs/vanity_logs, + beacon_chain/networking/[topic_params, network_metadata_downloads], + beacon_chain/rpc/[rest_api, state_ttl_cache], + beacon_chain/spec/datatypes/[altair, bellatrix, phase0], + beacon_chain/spec/[engine_authentication, weak_subjectivity, peerdas_helpers], + beacon_chain/sync/[sync_protocol, light_client_protocol, sync_overseer], + beacon_chain/validators/[keystore_management, beacon_validators], + beacon_chain/[ beacon_node, beacon_node_light_client, deposits, nimbus_binary_common, statusbar, trusted_node_sync, wallets, ] @@ -31,7 +32,7 @@ import when defined(posix): import system/ansi_c -from ./spec/datatypes/deneb import SignedBeaconBlock +from beacon_chain/spec/datatypes/deneb import SignedBeaconBlock from libp2p/protocols/pubsub/gossipsub import TopicParams, validateParameters, init @@ -282,7 +283,7 @@ proc checkWeakSubjectivityCheckpoint( headStateSlot = getStateField(dag.headState, slot) quit 1 -from ./spec/state_transition_block import kzg_commitment_to_versioned_hash +from beacon_chain/spec/state_transition_block import kzg_commitment_to_versioned_hash proc isSlotWithinWeakSubjectivityPeriod(dag: ChainDAGRef, slot: Slot): bool = let checkpoint = Checkpoint( @@ -2014,7 +2015,7 @@ proc installRestHandlers(restServer: RestServerRef, node: BeaconNode) = if node.dag.lcDataStore.serve: restServer.router.installLightClientApiHandlers(node) -from ./spec/datatypes/capella import SignedBeaconBlock +from beacon_chain/spec/datatypes/capella import SignedBeaconBlock proc installMessageValidators(node: BeaconNode) = # These validators stay around the whole time, regardless of which specific @@ -2659,7 +2660,7 @@ proc doSlashingInterchange(conf: BeaconNodeConf) {.raises: [CatchableError].} = of SlashProtCmd.`import`: conf.doSlashingImport() -proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [CatchableError].} = +proc handleStartUpCmd*(config: var BeaconNodeConf) {.raises: [CatchableError].} = # Single RNG instance for the application - will be seeded on construction # and avoid using system resources (such as urandom) after that let rng = HmacDrbgContext.new() From 1abbc58246b420002f8b292493a45d7a09afc80f Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Mon, 2 Jun 2025 
10:36:19 +0100 Subject: [PATCH 20/34] - Added nimbus_execution_client module copy - minor fixes --- nimbus/common/utils.nim | 2 +- nimbus/consensus/wrapper_consensus.nim | 5 + nimbus/execution/wrapper_execution.nim | 293 +++++++++++++++++++++++++ 3 files changed, 299 insertions(+), 1 deletion(-) create mode 100644 nimbus/execution/wrapper_execution.nim diff --git a/nimbus/common/utils.nim b/nimbus/common/utils.nim index d44d9e0025..46ed636fa7 100644 --- a/nimbus/common/utils.nim +++ b/nimbus/common/utils.nim @@ -7,7 +7,7 @@ {.push raises: [].} -import results, chronicles, stew/shims/macros, ../conf +import results, chronicles, stew/shims/macros, confutils logScope: topics = "utils" diff --git a/nimbus/consensus/wrapper_consensus.nim b/nimbus/consensus/wrapper_consensus.nim index 4f62fe3ecb..8e13998465 100644 --- a/nimbus/consensus/wrapper_consensus.nim +++ b/nimbus/consensus/wrapper_consensus.nim @@ -29,6 +29,11 @@ import trusted_node_sync, wallets, ] +# This module is a copy from nimbus_beacon_node modulem where 'handleStartUpCmd' procedure +# visibility is changed to public. +# This file should be removed when 'handleStartUpCmd' is made public or we create a public +# entry point on nimbus_beacon_node module. + when defined(posix): import system/ansi_c diff --git a/nimbus/execution/wrapper_execution.nim b/nimbus/execution/wrapper_execution.nim new file mode 100644 index 0000000000..688803bed5 --- /dev/null +++ b/nimbus/execution/wrapper_execution.nim @@ -0,0 +1,293 @@ +# Nimbus +# Copyright (c) 2018-2025 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +import ../execution_chain/compile_info + +import + std/[os, osproc, strutils, net, options], + chronicles, + eth/net/nat, + metrics, + metrics/chronicles_support, + stew/byteutils, + ./rpc, + ./version, + ./constants, + ./nimbus_desc, + ./nimbus_import, + ./core/block_import, + ./core/lazy_kzg, + ./core/chain/forked_chain/chain_serialize, + ./db/core_db/persistent, + ./db/storage_types, + ./sync/wire_protocol, + ./common/chain_config_hash, + ./portal/portal + +from beacon_chain/nimbus_binary_common import setupFileLimits + +## TODO: +## * No IPv6 support +## * No multiple bind addresses support +## * No database support + +proc basicServices(nimbus: NimbusNode, conf: NimbusConf, com: CommonRef) = + # Setup the chain + let fc = ForkedChainRef.init( + com, + eagerStateRoot = conf.eagerStateRootCheck, + persistBatchSize = conf.persistBatchSize, + enableQueue = true, + ) + fc.deserialize().isOkOr: + warn "Loading block DAG from database", msg = error + + nimbus.fc = fc + # Setup history expiry and portal + nimbus.fc.portal = HistoryExpiryRef.init(conf, com) + # txPool must be informed of active head + # so it can know the latest account state + # e.g. 
sender nonce, etc + nimbus.txPool = TxPoolRef.new(nimbus.fc) + nimbus.beaconEngine = BeaconEngineRef.new(nimbus.txPool) + +proc manageAccounts(nimbus: NimbusNode, conf: NimbusConf) = + if string(conf.keyStore).len > 0: + let res = nimbus.ctx.am.loadKeystores(string conf.keyStore) + if res.isErr: + fatal "Load keystore error", msg = res.error() + quit(QuitFailure) + + if string(conf.importKey).len > 0: + let res = nimbus.ctx.am.importPrivateKey(string conf.importKey) + if res.isErr: + fatal "Import private key error", msg = res.error() + quit(QuitFailure) + +proc setupP2P( + nimbus: NimbusNode, conf: NimbusConf, com: CommonRef +) {.raises: [OSError].} = + ## Creating P2P Server + let kpres = nimbus.ctx.getNetKeys(conf.netKey, conf.dataDir.string) + if kpres.isErr: + fatal "Get network keys error", msg = kpres.error + quit(QuitFailure) + + let keypair = kpres.get() + var address = + enode.Address(ip: conf.listenAddress, tcpPort: conf.tcpPort, udpPort: conf.udpPort) + + if conf.nat.hasExtIp: + # any required port redirection is assumed to be done by hand + address.ip = conf.nat.extIp + else: + # automated NAT traversal + let extIP = getExternalIP(conf.nat.nat) + # This external IP only appears in the logs, so don't worry about dynamic + # IPs. Don't remove it either, because the above call does initialisation + # and discovery for NAT-related objects. + if extIP.isSome: + address.ip = extIP.get() + let extPorts = redirectPorts( + tcpPort = address.tcpPort, + udpPort = address.udpPort, + description = NimbusName & " " & NimbusVersion, + ) + if extPorts.isSome: + (address.tcpPort, address.udpPort) = extPorts.get() + + let bootstrapNodes = conf.getBootNodes() + + nimbus.ethNode = newEthereumNode( + keypair, + address, + conf.networkId, + conf.agentString, + addAllCapabilities = false, + minPeers = conf.maxPeers, + bootstrapNodes = bootstrapNodes, + bindUdpPort = conf.udpPort, + bindTcpPort = conf.tcpPort, + bindIp = conf.listenAddress, + rng = nimbus.ctx.rng, + ) + + # Add protocol capabilities + nimbus.wire = nimbus.ethNode.addEthHandlerCapability(nimbus.txPool) + + # Always initialise beacon syncer + nimbus.beaconSyncRef = BeaconSyncRef.init(nimbus.ethNode, nimbus.fc, conf.maxPeers) + + # Optional for pre-setting the sync target (i.e. 
debugging) + if conf.beaconSyncTargetFile.isSome(): + nimbus.beaconSyncRef.targetInit conf.beaconSyncTargetFile.unsafeGet.string + + # Connect directly to the static nodes + let staticPeers = conf.getStaticPeers() + if staticPeers.len > 0: + nimbus.peerManager = PeerManagerRef.new( + nimbus.ethNode.peerPool, conf.reconnectInterval, conf.reconnectMaxRetry, + staticPeers, + ) + nimbus.peerManager.start() + + # Start Eth node + if conf.maxPeers > 0: + nimbus.networkLoop = nimbus.ethNode.connectToNetwork( + enableDiscovery = conf.discovery != DiscoveryType.None, waitForPeers = true + ) + +proc setupMetrics( + nimbus: NimbusNode, conf: NimbusConf +) {.raises: [CancelledError, MetricsError].} = + # metrics logging + if conf.logMetricsEnabled: + # https://github.com/nim-lang/Nim/issues/17369 + var logMetrics: proc(udata: pointer) {.gcsafe, raises: [].} + logMetrics = proc(udata: pointer) = + {.gcsafe.}: + let registry = defaultRegistry + info "metrics", registry + discard setTimer(Moment.fromNow(conf.logMetricsInterval.seconds), logMetrics) + discard setTimer(Moment.fromNow(conf.logMetricsInterval.seconds), logMetrics) + + # metrics server + if conf.metricsEnabled: + info "Starting metrics HTTP server", + address = conf.metricsAddress, port = conf.metricsPort + let res = MetricsHttpServerRef.new($conf.metricsAddress, conf.metricsPort) + if res.isErr: + fatal "Failed to create metrics server", msg = res.error + quit(QuitFailure) + + nimbus.metricsServer = res.get + waitFor nimbus.metricsServer.start() + +proc preventLoadingDataDirForTheWrongNetwork(db: CoreDbRef, conf: NimbusConf) = + proc writeDataDirId(kvt: CoreDbTxRef, calculatedId: Hash32) = + info "Writing data dir ID", ID = calculatedId + kvt.put(dataDirIdKey().toOpenArray, calculatedId.data).isOkOr: + fatal "Cannot write data dir ID", ID = calculatedId + quit(QuitFailure) + db.persist(kvt, Opt.none(Hash32)) + + let + kvt = db.baseTxFrame() + calculatedId = calcHash(conf.networkId, conf.networkParams) + dataDirIdBytes = kvt.get(dataDirIdKey().toOpenArray).valueOr: + # an empty database + writeDataDirId(kvt, calculatedId) + return + + if conf.rewriteDatadirId: + writeDataDirId(kvt, calculatedId) + return + + if calculatedId.data != dataDirIdBytes: + fatal "Data dir already initialized with other network configuration", + get = dataDirIdBytes.toHex, expected = calculatedId + quit(QuitFailure) + +proc run(nimbus: NimbusNode, conf: NimbusConf) = + info "Launching execution client", version = FullVersionStr, conf + + # Trusted setup is needed for processing Cancun+ blocks + # If user not specify the trusted setup, baked in + # trusted setup will be loaded, lazily. + if conf.trustedSetupFile.isSome: + let fileName = conf.trustedSetupFile.get() + let res = loadTrustedSetup(fileName, 0) + if res.isErr: + fatal "Cannot load Kzg trusted setup from file", msg = res.error + quit(QuitFailure) + + createDir(string conf.dataDir) + let coreDB = + # Resolve statically for database type + AristoDbRocks.newCoreDbRef( + string conf.dataDir, conf.dbOptions(noKeyCache = conf.cmd == NimbusCmd.`import`) + ) + + preventLoadingDataDirForTheWrongNetwork(coreDB, conf) + setupMetrics(nimbus, conf) + + let taskpool = + try: + if conf.numThreads < 0: + fatal "The number of threads --num-threads cannot be negative." 
+ quit QuitFailure + elif conf.numThreads == 0: + Taskpool.new(numThreads = min(countProcessors(), 16)) + else: + Taskpool.new(numThreads = conf.numThreads) + except CatchableError as e: + fatal "Cannot start taskpool", err = e.msg + quit QuitFailure + + info "Threadpool started", numThreads = taskpool.numThreads + + let com = CommonRef.new( + db = coreDB, + taskpool = taskpool, + networkId = conf.networkId, + params = conf.networkParams, + ) + + if conf.extraData.len > 32: + warn "ExtraData exceeds 32 bytes limit, truncate", + extraData = conf.extraData, len = conf.extraData.len + + if conf.gasLimit > GAS_LIMIT_MAXIMUM or conf.gasLimit < GAS_LIMIT_MINIMUM: + warn "GasLimit not in expected range, truncate", + min = GAS_LIMIT_MINIMUM, max = GAS_LIMIT_MAXIMUM, get = conf.gasLimit + + com.extraData = conf.extraData + com.gasLimit = conf.gasLimit + + defer: + if not nimbus.fc.isNil: + let + fc = nimbus.fc + txFrame = fc.baseTxFrame + fc.serialize(txFrame).isOkOr: + error "FC.serialize error: ", msg = error + com.db.persist(txFrame, Opt.none(Hash32)) + com.db.finish() + + case conf.cmd + of NimbusCmd.`import`: + importBlocks(conf, com) + of NimbusCmd.`import - rlp`: + waitFor importRlpBlocks(conf, com) + else: + basicServices(nimbus, conf, com) + manageAccounts(nimbus, conf) + setupP2P(nimbus, conf, com) + setupRpc(nimbus, conf, com) + + if conf.maxPeers > 0 and conf.engineApiServerEnabled(): + # Not starting syncer if there is definitely no way to run it. This + # avoids polling (i.e. waiting for instructions) and some logging. + if not nimbus.beaconSyncRef.start(): + nimbus.beaconSyncRef = BeaconSyncRef(nil) + + if nimbus.state == NimbusState.Starting: + # it might have been set to "Stopping" with Ctrl+C + nimbus.state = NimbusState.Running + + # Main event loop + while nimbus.state == NimbusState.Running: + try: + poll() + except CatchableError as e: + debug "Exception in poll()", exc = e.name, err = e.msg + discard e # silence warning when chronicles not activated + + # Stop loop + waitFor nimbus.stop(conf) From 0c674eac555ff01e3e25010bf1492736034e3196 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Fri, 6 Jun 2025 16:34:34 +0100 Subject: [PATCH 21/34] Added execution chain support: - Makefile support - setup and handlers --- nimbus/consensus/consensus_layer.nim | 5 ++ nimbus/consensus/wrapper_consensus.nim | 109 ++++++++++++++++--------- nimbus/execution/execution_layer.nim | 25 ++++-- nimbus/execution/wrapper_execution.nim | 35 ++++---- nimbus/nimbus.nim | 6 +- 5 files changed, 116 insertions(+), 64 deletions(-) diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index 8ac8ec2d39..9537321a81 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -15,6 +15,7 @@ import ../conf, ../common/utils, ./wrapper_consensus, + beacon_chain/validators/keystore_management, beacon_chain/[beacon_node_status, nimbus_binary_common] logScope: @@ -30,6 +31,10 @@ proc startBeaconNode(configs: seq[string]) {.raises: [CatchableError].} = error "Error starting consensus", err = error quit QuitFailure + # required for db + if not (checkAndCreateDataDir(string(config.dataDir))): + quit QuitFailure + setupLogging(config.logLevel, config.logStdout, config.logFile) handleStartUpCmd(config) diff --git a/nimbus/consensus/wrapper_consensus.nim b/nimbus/consensus/wrapper_consensus.nim index 8e13998465..464e78b5fa 100644 --- a/nimbus/consensus/wrapper_consensus.nim +++ b/nimbus/consensus/wrapper_consensus.nim @@ -21,7 +21,8 @@ import 
beacon_chain/networking/[topic_params, network_metadata_downloads], beacon_chain/rpc/[rest_api, state_ttl_cache], beacon_chain/spec/datatypes/[altair, bellatrix, phase0], - beacon_chain/spec/[engine_authentication, weak_subjectivity, peerdas_helpers], + beacon_chain/spec/ + [deposit_snapshots, engine_authentication, weak_subjectivity, peerdas_helpers], beacon_chain/sync/[sync_protocol, light_client_protocol, sync_overseer], beacon_chain/validators/[keystore_management, beacon_validators], beacon_chain/[ @@ -29,7 +30,7 @@ import trusted_node_sync, wallets, ] -# This module is a copy from nimbus_beacon_node modulem where 'handleStartUpCmd' procedure +# This module is a copy from nimbus_beacon_node module where 'handleStartUpCmd' procedure # visibility is changed to public. # This file should be removed when 'handleStartUpCmd' is made public or we create a public # entry point on nimbus_beacon_node module. @@ -109,6 +110,7 @@ proc doRunTrustedNodeSync( trustedBlockRoot: Option[Eth2Digest], backfill: bool, reindex: bool, + downloadDepositSnapshot: bool, genesisState: ref ForkedHashedBeaconState, ) {.async.} = let syncTarget = @@ -126,7 +128,7 @@ proc doRunTrustedNodeSync( await db.doTrustedNodeSync( metadata.cfg, databaseDir, eraDir, restUrl, syncTarget, backfill, reindex, - genesisState, + downloadDepositSnapshot, genesisState, ) func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = @@ -139,8 +141,6 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = onUpgradeToDeneb: denebColor, onUpgradeToElectra: electraColor, onKnownCompoundingChange: electraBlink, - onUpgradeToFulu: fuluColor, - onBlobParametersUpdate: fuluColor, ) of StdoutLogKind.NoColors: VanityLogs( @@ -148,8 +148,6 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = onUpgradeToDeneb: denebMono, onUpgradeToElectra: electraMono, onKnownCompoundingChange: electraMono, - onUpgradeToFulu: fuluMono, - onBlobParametersUpdate: fuluMono, ) of StdoutLogKind.Json, StdoutLogKind.None: VanityLogs( @@ -169,19 +167,11 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = proc() = notice "🦒 Compounding is activated 🦒" ), - onUpgradeToFulu: ( - proc() = - notice "🐅 Blobs columnized 🐅" - ), - onBlobParametersUpdate: ( - proc() = - notice "🐅 Blob parameters updated 🐅" - ), ) func getVanityMascot(consensusFork: ConsensusFork): string = case consensusFork - of ConsensusFork.Fulu: "🐅" + of ConsensusFork.Fulu: "❓" of ConsensusFork.Electra: "🦒" of ConsensusFork.Deneb: "🐟" of ConsensusFork.Capella: "🦉" @@ -378,6 +368,11 @@ proc initFullNode( static: doAssert (elManager is ref) return proc(dag: ChainDAGRef, data: FinalizationInfoObject) = + if elManager != nil: + let finalizedEpochRef = dag.getFinalizedEpochRef() + discard trackFinalizedState( + elManager, finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_deposit_index + ) node.updateLightClientFromDag() let eventData = if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: @@ -439,9 +434,7 @@ proc initFullNode( onElectraAttesterSlashingAdded, ) ) - blobQuarantine = newClone( - BlobQuarantine.init(dag.cfg, dag.db.getQuarantineDB(), 10, onBlobSidecarAdded) - ) + blobQuarantine = newClone(BlobQuarantine.init(dag.cfg, onBlobSidecarAdded)) dataColumnQuarantine = newClone(DataColumnQuarantine.init()) supernode = node.config.peerdasSupernode localCustodyGroups = @@ -464,7 +457,6 @@ proc initFullNode( blockProcessor = BlockProcessor.new( config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming, batchVerifier, consensusManager, node.validatorMonitor, 
blobQuarantine, getBeaconTime, - config.invalidBlockRoots, ) blockVerifier = proc( @@ -494,18 +486,21 @@ proc initFullNode( ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = withBlck(signedBlock): when consensusFork >= ConsensusFork.Deneb: - let bres = blobQuarantine[].popSidecars(forkyBlck.root, forkyBlck) - if bres.isSome(): - await blockProcessor[].addBlock( - MsgSource.gossip, signedBlock, bres, maybeFinalized = maybeFinalized - ) - else: + if not blobQuarantine[].hasBlobs(forkyBlck): # We don't have all the blobs for this block, so we have # to put it in blobless quarantine. if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): err(VerifierError.UnviableFork) else: err(VerifierError.MissingParent) + else: + let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck) + await blockProcessor[].addBlock( + MsgSource.gossip, + signedBlock, + Opt.some(blobs), + maybeFinalized = maybeFinalized, + ) else: await blockProcessor[].addBlock( MsgSource.gossip, @@ -880,6 +875,7 @@ proc init*( trustedBlockRoot, backfill = false, reindex = false, + downloadDepositSnapshot = false, genesisState, ) @@ -912,6 +908,24 @@ proc init*( else: nil + if config.finalizedDepositTreeSnapshot.isSome: + let + depositTreeSnapshotPath = config.finalizedDepositTreeSnapshot.get.string + snapshot = + try: + SSZ.loadFile(depositTreeSnapshotPath, DepositTreeSnapshot) + except SszError as err: + fatal "Deposit tree snapshot loading failed", + err = formatMsg(err, depositTreeSnapshotPath) + quit 1 + except CatchableError as err: + fatal "Failed to read deposit tree snapshot file", err = err.msg + quit 1 + depositContractSnapshot = DepositContractSnapshot.init(snapshot).valueOr: + fatal "Invalid deposit tree snapshot file" + quit 1 + db.putDepositContractSnapshot(depositContractSnapshot) + let engineApiUrls = config.engineApiUrls if engineApiUrls.len == 0: @@ -1019,7 +1033,10 @@ proc init*( config.weakSubjectivityCheckpoint.get, beaconClock ) - let elManager = ELManager.new(engineApiUrls, eth1Network) + let elManager = ELManager.new( + cfg, metadata.depositContractBlock, metadata.depositContractBlockHash, db, + engineApiUrls, eth1Network, + ) if config.rpcEnabled.isSome: warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." @@ -1736,9 +1753,6 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = node.attachedValidators[].slashingProtection # pruning is only done if the DB is set to pruning mode. .pruneAfterFinalization(node.dag.finalizedHead.slot.epoch()) - node.processor.blobQuarantine[].pruneAfterFinalization( - node.dag.finalizedHead.slot.epoch() - ) # Delay part of pruning until latency critical duties are done. # The other part of pruning, `pruneBlocksDAG`, is done eagerly. 
@@ -1908,8 +1922,26 @@ func formatNextConsensusFork(node: BeaconNode, withVanityArt = false): Opt[strin $nextConsensusFork & ":" & $nextForkEpoch ) -proc syncStatus(node: BeaconNode, wallSlot: Slot): string = - node.syncOverseer.syncStatusMessage() +func syncStatus(node: BeaconNode, wallSlot: Slot): string = + node.syncOverseer.statusMsg.valueOr: + let optimisticHead = not node.dag.head.executionValid + if node.syncManager.inProgress: + let + optimisticSuffix = if optimisticHead: "/opt" else: "" + lightClientSuffix = + if node.consensusManager[].shouldSyncOptimistically(wallSlot): + " - lc: " & $shortLog(node.consensusManager[].optimisticHead) + else: + "" + node.syncManager.syncStatus & optimisticSuffix & lightClientSuffix + elif node.untrustedManager.inProgress: + "untrusted: " & node.untrustedManager.syncStatus + elif node.backfiller.inProgress: + "backfill: " & node.backfiller.syncStatus + elif optimisticHead: + "synced/opt" + else: + "synced" when defined(windows): from winservice import establishWindowsService, reportServiceStatusSuccess @@ -2088,7 +2120,7 @@ proc installMessageValidators(node: BeaconNode) = ) # beacon_aggregate_and_proof - # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof when consensusFork >= ConsensusFork.Electra: node.network.addAsyncValidator( getAggregateAndProofsTopic(digest), @@ -2498,7 +2530,6 @@ proc doRunBeaconNode( ignoreDeprecatedOption optimistic ignoreDeprecatedOption validatorMonitorTotals ignoreDeprecatedOption web3ForcePolling - ignoreDeprecatedOption finalizedDepositTreeSnapshot createPidFile(config.dataDir.string / "beacon_node.pid") @@ -2609,8 +2640,12 @@ proc doWeb3Cmd( ) {.raises: [CatchableError].} = case config.web3Cmd of Web3Cmd.test: + let metadata = config.loadEth2Network() + waitFor testWeb3Provider( - config.web3TestUrl, rng.loadJwtSecret(config, allowCreate = true) + config.web3TestUrl, + metadata.cfg.DEPOSIT_CONTRACT_ADDRESS, + rng.loadJwtSecret(config, allowCreate = true), ) proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError].} = @@ -2624,7 +2659,7 @@ proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError].} = db.exportSlashingInterchange(interchange, conf.exportedValidators) echo "Export finished: '", dir / filetrunc & ".sqlite3", "' into '", interchange, "'" -proc doSlashingImport(conf: BeaconNodeConf) {.raises: [IOError].} = +proc doSlashingImport(conf: BeaconNodeConf) {.raises: [SerializationError, IOError].} = let dir = conf.validatorsDir() filetrunc = SlashingDbName @@ -2695,7 +2730,7 @@ proc handleStartUpCmd*(config: var BeaconNodeConf) {.raises: [CatchableError].} waitFor db.doRunTrustedNodeSync( metadata, config.databaseDir, config.eraDir, config.trustedNodeUrl, config.stateId, config.lcTrustedBlockRoot, config.backfillBlocks, config.reindex, - genesisState, + config.downloadDepositSnapshot, genesisState, ) db.close() diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index 906997351e..05fe3fa62b 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -7,7 +7,15 @@ {.push raises: [].} -import std/[atomics, os], chronicles, ../conf, ../common/utils, results +import + std/[atomics, os], + chronicles, + results, + ../conf, + ../common/utils, + ./wrapper_execution, + ../../execution_chain/config, + ../../execution_chain/nimbus_desc 
logScope: topics = "Execution layer" @@ -21,20 +29,19 @@ proc executionLayerHandler*(channel: ptr Channel[pointer]) = fatal "service unable to receive configuration", err = e.msg quit(QuitFailure) - let configs = deserializeConfigArgs(p).valueOr: + let parametersList = deserializeConfigArgs(p).valueOr: fatal "unable to parse service data", message = error quit(QuitFailure) #signal main thread that data is read isConfigRead.store(true) - info "execution configs ", configs = configs - try: - while true: - info "execution ..." - sleep(cNimbusServiceTimeoutMs) - except CatchableError as e: - fatal "error", message = e.msg + {.gcsafe.}: + var nimbusHandler = NimbusNode(state: NimbusState.Starting, ctx: newEthContext()) + let conf = makeConfig(parametersList) + nimbusHandler.run(conf) + except [CatchableError, OSError, IOError, CancelledError, MetricsError]: + fatal "error", message = getCurrentExceptionMsg() warn "\tExiting execution layer" diff --git a/nimbus/execution/wrapper_execution.nim b/nimbus/execution/wrapper_execution.nim index 688803bed5..a07680abae 100644 --- a/nimbus/execution/wrapper_execution.nim +++ b/nimbus/execution/wrapper_execution.nim @@ -7,7 +7,7 @@ # This file may not be copied, modified, or distributed except according to # those terms. -import ../execution_chain/compile_info +import ../../execution_chain/compile_info import std/[os, osproc, strutils, net, options], @@ -16,22 +16,27 @@ import metrics, metrics/chronicles_support, stew/byteutils, - ./rpc, - ./version, - ./constants, - ./nimbus_desc, - ./nimbus_import, - ./core/block_import, - ./core/lazy_kzg, - ./core/chain/forked_chain/chain_serialize, - ./db/core_db/persistent, - ./db/storage_types, - ./sync/wire_protocol, - ./common/chain_config_hash, - ./portal/portal + ../../execution_chain/rpc, + ../../execution_chain/version, + ../../execution_chain/constants, + ../../execution_chain/nimbus_desc, + ../../execution_chain/nimbus_import, + ../../execution_chain/core/block_import, + ../../execution_chain/core/lazy_kzg, + ../../execution_chain/core/chain/forked_chain/chain_serialize, + ../../execution_chain/db/core_db/persistent, + ../../execution_chain/db/storage_types, + ../../execution_chain/sync/wire_protocol, + ../../execution_chain/common/chain_config_hash, + ../../execution_chain/portal/portal from beacon_chain/nimbus_binary_common import setupFileLimits +# This module is a copy from nimbus_execution_client module where 'run' procedure +# visibility is changed to public. +# This file should be removed when 'run' is made public or we create a public +# entry point on nimbus_execution_client module. 
+ ## TODO: ## * No IPv6 support ## * No multiple bind addresses support @@ -194,7 +199,7 @@ proc preventLoadingDataDirForTheWrongNetwork(db: CoreDbRef, conf: NimbusConf) = get = dataDirIdBytes.toHex, expected = calculatedId quit(QuitFailure) -proc run(nimbus: NimbusNode, conf: NimbusConf) = +proc run*(nimbus: NimbusNode, conf: NimbusConf) = info "Launching execution client", version = FullVersionStr, conf # Trusted setup is needed for processing Cancun+ blocks diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index afaaa7678c..a83720a531 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -8,13 +8,13 @@ import std/[concurrency/atomics, os], chronicles, - consensus/consensus_layer, execution/execution_layer, + consensus/consensus_layer, common/utils, conf, confutils/[cli_parser, toml/defs], - ../execution_chain/config, - beacon_chain/conf + beacon_chain/conf, + ../execution_chain/config # ------------------------------------------------------------------------------ # Private From ee595d954a00a17ccc568c3d2246de96ba514599 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Fri, 13 Jun 2025 10:59:32 +0100 Subject: [PATCH 22/34] Fixed behaviour on consensus layer about reading program options from command line, instead of loading the ones already filtered. --- nimbus/consensus/consensus_layer.nim | 42 ++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index 9537321a81..11f4a6a26f 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -12,6 +12,7 @@ import chronos, chronicles, results, + confutils, ../conf, ../common/utils, ./wrapper_consensus, @@ -21,13 +22,42 @@ import logScope: topics = "Consensus layer" -proc startBeaconNode(configs: seq[string]) {.raises: [CatchableError].} = - proc commandLineParams(): seq[string] = - configs +proc makeConfig*( + cmdCommandList: seq[string], ConfType: type +): Result[ConfType, string] = + {.push warning[ProveInit]: off.} + let config = + try: + ConfType.load( + cmdLine = cmdCommandList, + secondarySources = proc( + config: ConfType, sources: auto + ) {.raises: [ConfigurationError], gcsafe.} = + if config.configFile.isSome: + sources.addConfigFile(Toml, config.configFile.get) + , + ) + except CatchableError as exc: + # We need to log to stderr here, because logging hasn't been configured yet + var msg = "Failure while loading the configuration:\p" & exc.msg & "\p" + if (exc[] of ConfigurationError) and not (isNil(exc.parent)) and + (exc.parent[] of TomlFieldReadingError): + let fieldName = ((ref TomlFieldReadingError)(exc.parent)).field + if fieldName in + [ + "el", "web3-url", "bootstrap-node", "direct-peer", + "validator-monitor-pubkey", + ]: + msg &= + "Since the '" & fieldName & "' option is allowed to " & + "have more than one value, please make sure to supply " & + "a properly formatted TOML array\p" + return err(msg) + {.pop.} + ok(config) - var config = makeBannerAndConfig( - "clientId", "copyrights", "nimBanner", "SPEC_VERSION", [], BeaconNodeConf - ).valueOr: +proc startBeaconNode(paramsList: seq[string]) {.raises: [CatchableError].} = + var config = makeConfig(paramsList, BeaconNodeConf).valueOr: error "Error starting consensus", err = error quit QuitFailure From 107efa51349cbf09b3012dd116dbef0eca9433ba Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Tue, 17 Jun 2025 10:00:23 +0100 Subject: [PATCH 23/34] Added graceful shutdown --- nimbus/consensus/consensus_layer.nim | 3 ++ 
nimbus/execution/execution_layer.nim | 7 ++++- nimbus/nimbus.nim | 46 ++++++++++++++-------------- 3 files changed, 32 insertions(+), 24 deletions(-) diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index 11f4a6a26f..f76dfd70a2 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -22,6 +22,9 @@ import logScope: topics = "Consensus layer" +proc shutdownConsensus*() = + bnStatus = BeaconNodeStatus.Stopping + proc makeConfig*( cmdCommandList: seq[string], ConfType: type ): Result[ConfType, string] = diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index 05fe3fa62b..00d4fccf16 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -20,6 +20,11 @@ import logScope: topics = "Execution layer" +var nimbusHandler = NimbusNode() + +proc shutdownExecution*() = + nimbusHandler.state = NimbusState.Stopping + ## Execution Layer handler proc executionLayerHandler*(channel: ptr Channel[pointer]) = var p: pointer @@ -38,7 +43,7 @@ proc executionLayerHandler*(channel: ptr Channel[pointer]) = try: {.gcsafe.}: - var nimbusHandler = NimbusNode(state: NimbusState.Starting, ctx: newEthContext()) + nimbusHandler = NimbusNode(state: NimbusState.Starting, ctx: newEthContext()) let conf = makeConfig(parametersList) nimbusHandler.run(conf) except [CatchableError, OSError, IOError, CancelledError, MetricsError]: diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index a83720a531..2266d6e7fa 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -81,16 +81,11 @@ proc startService(nimbus: var Nimbus, service: var NimbusService) = ## Gracefully exits all services proc monitorServices(nimbus: Nimbus) = for service in nimbus.serviceList: - if service.serviceHandler.running(): - joinThread(service.serviceHandler) - info "Exited service ", service = service.name + joinThread(service.serviceHandler) + info "Exited service ", service = service.name notice "Exited all services" -# ------------------------------------------------------------------------------ -# Public -# ------------------------------------------------------------------------------ - # aux function to prepare arguments and options for eth1 and eth2 func addArg( paramTable: var NimbusConfigTable, cmdKind: CmdLineKind, key: string, arg: string @@ -110,6 +105,22 @@ func addArg( paramTable[newKey] = newArg +proc controlCHandler() {.noconv.} = + when defined(windows): + # workaround for https://github.com/nim-lang/Nim/issues/4057 + try: + setupForeignThreadGc() + except NimbusServiceError as exc: + raiseAssert exc.msg # shouldn't happen + + notice "\tCtrl+C pressed. Shutting down services ..." + shutdownExecution() + shutdownConsensus() + +# ------------------------------------------------------------------------------ +# Public +# ------------------------------------------------------------------------------ + # Setup services proc setup*(nimbus: var Nimbus) = let @@ -158,6 +169,10 @@ proc run*(nimbus: var Nimbus) = fatal "error starting service:", msg = e.msg quit QuitFailure + # handling Ctrl+C signal + # note: do not move. Both execution and consensus clients create these handlers. 
+ setControlCHook(controlCHandler) + ## wait for shutdown nimbus.monitorServices() @@ -167,22 +182,7 @@ when isMainModule: setupFileLimits() - var nimbus: Nimbus = Nimbus.new - - ## Graceful shutdown by handling of Ctrl+C signal - proc controlCHandler() {.noconv.} = - when defined(windows): - # workaround for https://github.com/nim-lang/Nim/issues/4057 - try: - setupForeignThreadGc() - except NimbusServiceError as exc: - raiseAssert exc.msg # shouldn't happen - - notice "\tCtrl+C pressed. Shutting down services ..." - quit 0 - - setControlCHook(controlCHandler) - + var nimbus = Nimbus() nimbus.setup() nimbus.run() From 7946ea50772cf01898d0eb774c441e3f38025ce4 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Wed, 25 Jun 2025 10:19:17 +0100 Subject: [PATCH 24/34] temporary shutdown wa --- nimbus/nimbus.nim | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index 2266d6e7fa..b2fed4903e 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -114,6 +114,7 @@ proc controlCHandler() {.noconv.} = raiseAssert exc.msg # shouldn't happen notice "\tCtrl+C pressed. Shutting down services ..." + shutdownExecution() shutdownConsensus() @@ -173,9 +174,13 @@ proc run*(nimbus: var Nimbus) = # note: do not move. Both execution and consensus clients create these handlers. setControlCHook(controlCHandler) - ## wait for shutdown + # wait for shutdown nimbus.monitorServices() + # WA to shutdown (exceptions thrown) + # current shutdown procedure hangs on nat.nim from nim-eth + quit 0 + # ------ when isMainModule: notice "Starting Nimbus" From 1f86d37ad68010166aab3ca27a0dab4f0449d621 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Wed, 2 Jul 2025 23:48:47 +0100 Subject: [PATCH 25/34] - Removed execution wrapper and give 'run' procedure public scope. 
- Updated consensus wrapper with latest beacon node changes --- execution_chain/nimbus_execution_client.nim | 2 +- nimbus/consensus/wrapper_consensus.nim | 108 +++---- nimbus/execution/execution_layer.nim | 4 +- nimbus/execution/wrapper_execution.nim | 298 -------------------- 4 files changed, 40 insertions(+), 372 deletions(-) delete mode 100644 nimbus/execution/wrapper_execution.nim diff --git a/execution_chain/nimbus_execution_client.nim b/execution_chain/nimbus_execution_client.nim index 0d9acf2c59..81890fa30b 100644 --- a/execution_chain/nimbus_execution_client.nim +++ b/execution_chain/nimbus_execution_client.nim @@ -192,7 +192,7 @@ proc preventLoadingDataDirForTheWrongNetwork(db: CoreDbRef; conf: NimbusConf) = expected=calculatedId quit(QuitFailure) -proc run(nimbus: NimbusNode, conf: NimbusConf) = +proc run*(nimbus: NimbusNode, conf: NimbusConf) = info "Launching execution client", version = FullVersionStr, conf diff --git a/nimbus/consensus/wrapper_consensus.nim b/nimbus/consensus/wrapper_consensus.nim index 464e78b5fa..24c25c4de8 100644 --- a/nimbus/consensus/wrapper_consensus.nim +++ b/nimbus/consensus/wrapper_consensus.nim @@ -21,8 +21,7 @@ import beacon_chain/networking/[topic_params, network_metadata_downloads], beacon_chain/rpc/[rest_api, state_ttl_cache], beacon_chain/spec/datatypes/[altair, bellatrix, phase0], - beacon_chain/spec/ - [deposit_snapshots, engine_authentication, weak_subjectivity, peerdas_helpers], + beacon_chain/spec/[engine_authentication, weak_subjectivity, peerdas_helpers], beacon_chain/sync/[sync_protocol, light_client_protocol, sync_overseer], beacon_chain/validators/[keystore_management, beacon_validators], beacon_chain/[ @@ -110,7 +109,6 @@ proc doRunTrustedNodeSync( trustedBlockRoot: Option[Eth2Digest], backfill: bool, reindex: bool, - downloadDepositSnapshot: bool, genesisState: ref ForkedHashedBeaconState, ) {.async.} = let syncTarget = @@ -128,7 +126,7 @@ proc doRunTrustedNodeSync( await db.doTrustedNodeSync( metadata.cfg, databaseDir, eraDir, restUrl, syncTarget, backfill, reindex, - downloadDepositSnapshot, genesisState, + genesisState, ) func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = @@ -141,6 +139,8 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = onUpgradeToDeneb: denebColor, onUpgradeToElectra: electraColor, onKnownCompoundingChange: electraBlink, + onUpgradeToFulu: fuluColor, + onBlobParametersUpdate: fuluColor, ) of StdoutLogKind.NoColors: VanityLogs( @@ -148,6 +148,8 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = onUpgradeToDeneb: denebMono, onUpgradeToElectra: electraMono, onKnownCompoundingChange: electraMono, + onUpgradeToFulu: fuluMono, + onBlobParametersUpdate: fuluMono, ) of StdoutLogKind.Json, StdoutLogKind.None: VanityLogs( @@ -167,11 +169,19 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = proc() = notice "🦒 Compounding is activated 🦒" ), + onUpgradeToFulu: ( + proc() = + notice "🐅 Blobs columnized 🐅" + ), + onBlobParametersUpdate: ( + proc() = + notice "🐅 Blob parameters updated 🐅" + ), ) func getVanityMascot(consensusFork: ConsensusFork): string = case consensusFork - of ConsensusFork.Fulu: "❓" + of ConsensusFork.Fulu: "🐅" of ConsensusFork.Electra: "🦒" of ConsensusFork.Deneb: "🐟" of ConsensusFork.Capella: "🦉" @@ -368,11 +378,6 @@ proc initFullNode( static: doAssert (elManager is ref) return proc(dag: ChainDAGRef, data: FinalizationInfoObject) = - if elManager != nil: - let finalizedEpochRef = dag.getFinalizedEpochRef() - discard trackFinalizedState( - elManager, 
finalizedEpochRef.eth1_data, finalizedEpochRef.eth1_deposit_index - ) node.updateLightClientFromDag() let eventData = if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: @@ -434,7 +439,9 @@ proc initFullNode( onElectraAttesterSlashingAdded, ) ) - blobQuarantine = newClone(BlobQuarantine.init(dag.cfg, onBlobSidecarAdded)) + blobQuarantine = newClone( + BlobQuarantine.init(dag.cfg, dag.db.getQuarantineDB(), 10, onBlobSidecarAdded) + ) dataColumnQuarantine = newClone(DataColumnQuarantine.init()) supernode = node.config.peerdasSupernode localCustodyGroups = @@ -457,6 +464,7 @@ proc initFullNode( blockProcessor = BlockProcessor.new( config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming, batchVerifier, consensusManager, node.validatorMonitor, blobQuarantine, getBeaconTime, + config.invalidBlockRoots, ) blockVerifier = proc( @@ -486,21 +494,18 @@ proc initFullNode( ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = withBlck(signedBlock): when consensusFork >= ConsensusFork.Deneb: - if not blobQuarantine[].hasBlobs(forkyBlck): + let bres = blobQuarantine[].popSidecars(forkyBlck.root, forkyBlck) + if bres.isSome(): + await blockProcessor[].addBlock( + MsgSource.gossip, signedBlock, bres, maybeFinalized = maybeFinalized + ) + else: # We don't have all the blobs for this block, so we have # to put it in blobless quarantine. if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): err(VerifierError.UnviableFork) else: err(VerifierError.MissingParent) - else: - let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck) - await blockProcessor[].addBlock( - MsgSource.gossip, - signedBlock, - Opt.some(blobs), - maybeFinalized = maybeFinalized, - ) else: await blockProcessor[].addBlock( MsgSource.gossip, @@ -875,7 +880,6 @@ proc init*( trustedBlockRoot, backfill = false, reindex = false, - downloadDepositSnapshot = false, genesisState, ) @@ -908,24 +912,6 @@ proc init*( else: nil - if config.finalizedDepositTreeSnapshot.isSome: - let - depositTreeSnapshotPath = config.finalizedDepositTreeSnapshot.get.string - snapshot = - try: - SSZ.loadFile(depositTreeSnapshotPath, DepositTreeSnapshot) - except SszError as err: - fatal "Deposit tree snapshot loading failed", - err = formatMsg(err, depositTreeSnapshotPath) - quit 1 - except CatchableError as err: - fatal "Failed to read deposit tree snapshot file", err = err.msg - quit 1 - depositContractSnapshot = DepositContractSnapshot.init(snapshot).valueOr: - fatal "Invalid deposit tree snapshot file" - quit 1 - db.putDepositContractSnapshot(depositContractSnapshot) - let engineApiUrls = config.engineApiUrls if engineApiUrls.len == 0: @@ -1033,10 +1019,7 @@ proc init*( config.weakSubjectivityCheckpoint.get, beaconClock ) - let elManager = ELManager.new( - cfg, metadata.depositContractBlock, metadata.depositContractBlockHash, db, - engineApiUrls, eth1Network, - ) + let elManager = ELManager.new(engineApiUrls, eth1Network) if config.rpcEnabled.isSome: warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." 
@@ -1535,6 +1518,7 @@ proc maybeUpdateActionTrackerNextEpoch( shufflingRef = node.dag.getShufflingRef(node.dag.head, nextEpoch, false).valueOr: # epochRefFallback() won't work in this case either return + # using the separate method of proposer indices calculation in Fulu nextEpochProposers = get_beacon_proposer_indices( forkyState.data, shufflingRef.shuffled_active_validator_indices, nextEpoch ) @@ -1753,6 +1737,9 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = node.attachedValidators[].slashingProtection # pruning is only done if the DB is set to pruning mode. .pruneAfterFinalization(node.dag.finalizedHead.slot.epoch()) + node.processor.blobQuarantine[].pruneAfterFinalization( + node.dag.finalizedHead.slot.epoch() + ) # Delay part of pruning until latency critical duties are done. # The other part of pruning, `pruneBlocksDAG`, is done eagerly. @@ -1922,26 +1909,8 @@ func formatNextConsensusFork(node: BeaconNode, withVanityArt = false): Opt[strin $nextConsensusFork & ":" & $nextForkEpoch ) -func syncStatus(node: BeaconNode, wallSlot: Slot): string = - node.syncOverseer.statusMsg.valueOr: - let optimisticHead = not node.dag.head.executionValid - if node.syncManager.inProgress: - let - optimisticSuffix = if optimisticHead: "/opt" else: "" - lightClientSuffix = - if node.consensusManager[].shouldSyncOptimistically(wallSlot): - " - lc: " & $shortLog(node.consensusManager[].optimisticHead) - else: - "" - node.syncManager.syncStatus & optimisticSuffix & lightClientSuffix - elif node.untrustedManager.inProgress: - "untrusted: " & node.untrustedManager.syncStatus - elif node.backfiller.inProgress: - "backfill: " & node.backfiller.syncStatus - elif optimisticHead: - "synced/opt" - else: - "synced" +proc syncStatus(node: BeaconNode, wallSlot: Slot): string = + node.syncOverseer.syncStatusMessage() when defined(windows): from winservice import establishWindowsService, reportServiceStatusSuccess @@ -2120,7 +2089,7 @@ proc installMessageValidators(node: BeaconNode) = ) # beacon_aggregate_and_proof - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof when consensusFork >= ConsensusFork.Electra: node.network.addAsyncValidator( getAggregateAndProofsTopic(digest), @@ -2530,6 +2499,7 @@ proc doRunBeaconNode( ignoreDeprecatedOption optimistic ignoreDeprecatedOption validatorMonitorTotals ignoreDeprecatedOption web3ForcePolling + ignoreDeprecatedOption finalizedDepositTreeSnapshot createPidFile(config.dataDir.string / "beacon_node.pid") @@ -2640,12 +2610,8 @@ proc doWeb3Cmd( ) {.raises: [CatchableError].} = case config.web3Cmd of Web3Cmd.test: - let metadata = config.loadEth2Network() - waitFor testWeb3Provider( - config.web3TestUrl, - metadata.cfg.DEPOSIT_CONTRACT_ADDRESS, - rng.loadJwtSecret(config, allowCreate = true), + config.web3TestUrl, rng.loadJwtSecret(config, allowCreate = true) ) proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError].} = @@ -2659,7 +2625,7 @@ proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError].} = db.exportSlashingInterchange(interchange, conf.exportedValidators) echo "Export finished: '", dir / filetrunc & ".sqlite3", "' into '", interchange, "'" -proc doSlashingImport(conf: BeaconNodeConf) {.raises: [SerializationError, IOError].} = +proc doSlashingImport(conf: BeaconNodeConf) {.raises: [IOError].} = let dir = conf.validatorsDir() filetrunc = 
SlashingDbName @@ -2730,7 +2696,7 @@ proc handleStartUpCmd*(config: var BeaconNodeConf) {.raises: [CatchableError].} waitFor db.doRunTrustedNodeSync( metadata, config.databaseDir, config.eraDir, config.trustedNodeUrl, config.stateId, config.lcTrustedBlockRoot, config.backfillBlocks, config.reindex, - config.downloadDepositSnapshot, genesisState, + genesisState, ) db.close() diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index 00d4fccf16..2b167cd443 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -8,12 +8,12 @@ {.push raises: [].} import - std/[atomics, os], + std/[atomics], chronicles, results, ../conf, ../common/utils, - ./wrapper_execution, + ../../execution_chain/nimbus_execution_client, ../../execution_chain/config, ../../execution_chain/nimbus_desc diff --git a/nimbus/execution/wrapper_execution.nim b/nimbus/execution/wrapper_execution.nim deleted file mode 100644 index a07680abae..0000000000 --- a/nimbus/execution/wrapper_execution.nim +++ /dev/null @@ -1,298 +0,0 @@ -# Nimbus -# Copyright (c) 2018-2025 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) -# * MIT license ([LICENSE-MIT](LICENSE-MIT)) -# at your option. -# This file may not be copied, modified, or distributed except according to -# those terms. - -import ../../execution_chain/compile_info - -import - std/[os, osproc, strutils, net, options], - chronicles, - eth/net/nat, - metrics, - metrics/chronicles_support, - stew/byteutils, - ../../execution_chain/rpc, - ../../execution_chain/version, - ../../execution_chain/constants, - ../../execution_chain/nimbus_desc, - ../../execution_chain/nimbus_import, - ../../execution_chain/core/block_import, - ../../execution_chain/core/lazy_kzg, - ../../execution_chain/core/chain/forked_chain/chain_serialize, - ../../execution_chain/db/core_db/persistent, - ../../execution_chain/db/storage_types, - ../../execution_chain/sync/wire_protocol, - ../../execution_chain/common/chain_config_hash, - ../../execution_chain/portal/portal - -from beacon_chain/nimbus_binary_common import setupFileLimits - -# This module is a copy from nimbus_execution_client module where 'run' procedure -# visibility is changed to public. -# This file should be removed when 'run' is made public or we create a public -# entry point on nimbus_execution_client module. - -## TODO: -## * No IPv6 support -## * No multiple bind addresses support -## * No database support - -proc basicServices(nimbus: NimbusNode, conf: NimbusConf, com: CommonRef) = - # Setup the chain - let fc = ForkedChainRef.init( - com, - eagerStateRoot = conf.eagerStateRootCheck, - persistBatchSize = conf.persistBatchSize, - enableQueue = true, - ) - fc.deserialize().isOkOr: - warn "Loading block DAG from database", msg = error - - nimbus.fc = fc - # Setup history expiry and portal - nimbus.fc.portal = HistoryExpiryRef.init(conf, com) - # txPool must be informed of active head - # so it can know the latest account state - # e.g. 
sender nonce, etc - nimbus.txPool = TxPoolRef.new(nimbus.fc) - nimbus.beaconEngine = BeaconEngineRef.new(nimbus.txPool) - -proc manageAccounts(nimbus: NimbusNode, conf: NimbusConf) = - if string(conf.keyStore).len > 0: - let res = nimbus.ctx.am.loadKeystores(string conf.keyStore) - if res.isErr: - fatal "Load keystore error", msg = res.error() - quit(QuitFailure) - - if string(conf.importKey).len > 0: - let res = nimbus.ctx.am.importPrivateKey(string conf.importKey) - if res.isErr: - fatal "Import private key error", msg = res.error() - quit(QuitFailure) - -proc setupP2P( - nimbus: NimbusNode, conf: NimbusConf, com: CommonRef -) {.raises: [OSError].} = - ## Creating P2P Server - let kpres = nimbus.ctx.getNetKeys(conf.netKey, conf.dataDir.string) - if kpres.isErr: - fatal "Get network keys error", msg = kpres.error - quit(QuitFailure) - - let keypair = kpres.get() - var address = - enode.Address(ip: conf.listenAddress, tcpPort: conf.tcpPort, udpPort: conf.udpPort) - - if conf.nat.hasExtIp: - # any required port redirection is assumed to be done by hand - address.ip = conf.nat.extIp - else: - # automated NAT traversal - let extIP = getExternalIP(conf.nat.nat) - # This external IP only appears in the logs, so don't worry about dynamic - # IPs. Don't remove it either, because the above call does initialisation - # and discovery for NAT-related objects. - if extIP.isSome: - address.ip = extIP.get() - let extPorts = redirectPorts( - tcpPort = address.tcpPort, - udpPort = address.udpPort, - description = NimbusName & " " & NimbusVersion, - ) - if extPorts.isSome: - (address.tcpPort, address.udpPort) = extPorts.get() - - let bootstrapNodes = conf.getBootNodes() - - nimbus.ethNode = newEthereumNode( - keypair, - address, - conf.networkId, - conf.agentString, - addAllCapabilities = false, - minPeers = conf.maxPeers, - bootstrapNodes = bootstrapNodes, - bindUdpPort = conf.udpPort, - bindTcpPort = conf.tcpPort, - bindIp = conf.listenAddress, - rng = nimbus.ctx.rng, - ) - - # Add protocol capabilities - nimbus.wire = nimbus.ethNode.addEthHandlerCapability(nimbus.txPool) - - # Always initialise beacon syncer - nimbus.beaconSyncRef = BeaconSyncRef.init(nimbus.ethNode, nimbus.fc, conf.maxPeers) - - # Optional for pre-setting the sync target (i.e. 
debugging) - if conf.beaconSyncTargetFile.isSome(): - nimbus.beaconSyncRef.targetInit conf.beaconSyncTargetFile.unsafeGet.string - - # Connect directly to the static nodes - let staticPeers = conf.getStaticPeers() - if staticPeers.len > 0: - nimbus.peerManager = PeerManagerRef.new( - nimbus.ethNode.peerPool, conf.reconnectInterval, conf.reconnectMaxRetry, - staticPeers, - ) - nimbus.peerManager.start() - - # Start Eth node - if conf.maxPeers > 0: - nimbus.networkLoop = nimbus.ethNode.connectToNetwork( - enableDiscovery = conf.discovery != DiscoveryType.None, waitForPeers = true - ) - -proc setupMetrics( - nimbus: NimbusNode, conf: NimbusConf -) {.raises: [CancelledError, MetricsError].} = - # metrics logging - if conf.logMetricsEnabled: - # https://github.com/nim-lang/Nim/issues/17369 - var logMetrics: proc(udata: pointer) {.gcsafe, raises: [].} - logMetrics = proc(udata: pointer) = - {.gcsafe.}: - let registry = defaultRegistry - info "metrics", registry - discard setTimer(Moment.fromNow(conf.logMetricsInterval.seconds), logMetrics) - discard setTimer(Moment.fromNow(conf.logMetricsInterval.seconds), logMetrics) - - # metrics server - if conf.metricsEnabled: - info "Starting metrics HTTP server", - address = conf.metricsAddress, port = conf.metricsPort - let res = MetricsHttpServerRef.new($conf.metricsAddress, conf.metricsPort) - if res.isErr: - fatal "Failed to create metrics server", msg = res.error - quit(QuitFailure) - - nimbus.metricsServer = res.get - waitFor nimbus.metricsServer.start() - -proc preventLoadingDataDirForTheWrongNetwork(db: CoreDbRef, conf: NimbusConf) = - proc writeDataDirId(kvt: CoreDbTxRef, calculatedId: Hash32) = - info "Writing data dir ID", ID = calculatedId - kvt.put(dataDirIdKey().toOpenArray, calculatedId.data).isOkOr: - fatal "Cannot write data dir ID", ID = calculatedId - quit(QuitFailure) - db.persist(kvt, Opt.none(Hash32)) - - let - kvt = db.baseTxFrame() - calculatedId = calcHash(conf.networkId, conf.networkParams) - dataDirIdBytes = kvt.get(dataDirIdKey().toOpenArray).valueOr: - # an empty database - writeDataDirId(kvt, calculatedId) - return - - if conf.rewriteDatadirId: - writeDataDirId(kvt, calculatedId) - return - - if calculatedId.data != dataDirIdBytes: - fatal "Data dir already initialized with other network configuration", - get = dataDirIdBytes.toHex, expected = calculatedId - quit(QuitFailure) - -proc run*(nimbus: NimbusNode, conf: NimbusConf) = - info "Launching execution client", version = FullVersionStr, conf - - # Trusted setup is needed for processing Cancun+ blocks - # If user not specify the trusted setup, baked in - # trusted setup will be loaded, lazily. - if conf.trustedSetupFile.isSome: - let fileName = conf.trustedSetupFile.get() - let res = loadTrustedSetup(fileName, 0) - if res.isErr: - fatal "Cannot load Kzg trusted setup from file", msg = res.error - quit(QuitFailure) - - createDir(string conf.dataDir) - let coreDB = - # Resolve statically for database type - AristoDbRocks.newCoreDbRef( - string conf.dataDir, conf.dbOptions(noKeyCache = conf.cmd == NimbusCmd.`import`) - ) - - preventLoadingDataDirForTheWrongNetwork(coreDB, conf) - setupMetrics(nimbus, conf) - - let taskpool = - try: - if conf.numThreads < 0: - fatal "The number of threads --num-threads cannot be negative." 
- quit QuitFailure - elif conf.numThreads == 0: - Taskpool.new(numThreads = min(countProcessors(), 16)) - else: - Taskpool.new(numThreads = conf.numThreads) - except CatchableError as e: - fatal "Cannot start taskpool", err = e.msg - quit QuitFailure - - info "Threadpool started", numThreads = taskpool.numThreads - - let com = CommonRef.new( - db = coreDB, - taskpool = taskpool, - networkId = conf.networkId, - params = conf.networkParams, - ) - - if conf.extraData.len > 32: - warn "ExtraData exceeds 32 bytes limit, truncate", - extraData = conf.extraData, len = conf.extraData.len - - if conf.gasLimit > GAS_LIMIT_MAXIMUM or conf.gasLimit < GAS_LIMIT_MINIMUM: - warn "GasLimit not in expected range, truncate", - min = GAS_LIMIT_MINIMUM, max = GAS_LIMIT_MAXIMUM, get = conf.gasLimit - - com.extraData = conf.extraData - com.gasLimit = conf.gasLimit - - defer: - if not nimbus.fc.isNil: - let - fc = nimbus.fc - txFrame = fc.baseTxFrame - fc.serialize(txFrame).isOkOr: - error "FC.serialize error: ", msg = error - com.db.persist(txFrame, Opt.none(Hash32)) - com.db.finish() - - case conf.cmd - of NimbusCmd.`import`: - importBlocks(conf, com) - of NimbusCmd.`import - rlp`: - waitFor importRlpBlocks(conf, com) - else: - basicServices(nimbus, conf, com) - manageAccounts(nimbus, conf) - setupP2P(nimbus, conf, com) - setupRpc(nimbus, conf, com) - - if conf.maxPeers > 0 and conf.engineApiServerEnabled(): - # Not starting syncer if there is definitely no way to run it. This - # avoids polling (i.e. waiting for instructions) and some logging. - if not nimbus.beaconSyncRef.start(): - nimbus.beaconSyncRef = BeaconSyncRef(nil) - - if nimbus.state == NimbusState.Starting: - # it might have been set to "Stopping" with Ctrl+C - nimbus.state = NimbusState.Running - - # Main event loop - while nimbus.state == NimbusState.Running: - try: - poll() - except CatchableError as e: - debug "Exception in poll()", exc = e.name, err = e.msg - discard e # silence warning when chronicles not activated - - # Stop loop - waitFor nimbus.stop(conf) From f32c045177066eeb7308acfb75bd479a9933de4c Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Wed, 2 Jul 2025 23:55:18 +0100 Subject: [PATCH 26/34] Fixed tests --- nimbus/tests/test_nimbus.nim | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/nimbus/tests/test_nimbus.nim b/nimbus/tests/test_nimbus.nim index 3ab0756682..b69315e234 100644 --- a/nimbus/tests/test_nimbus.nim +++ b/nimbus/tests/test_nimbus.nim @@ -20,26 +20,31 @@ proc handlerMock(channel: ptr Channel[pointer]) = #handles data for a given service proc handlerService_1(channel: ptr Channel[pointer]) = - const expectedConfigTable = {"0": "zero", "1": "one", "2": "two"}.toTable + const expectedConfigList = + @["-config=a", "--singleconfig", "-abbrev", "-abbrevArg=arg"] let p = channel[].recv() - let configs = parseChannelData(p).valueOr: + let configs = deserializeConfigArgs(p).valueOr: quit(QuitFailure) isConfigRead.store(true) - checkResult[] = configs == expectedConfigTable + + checkResult[] = configs == expectedConfigList #handles data for a given service proc handlerService_2(channel: ptr Channel[pointer]) = - const expectedConfigTable = {"4": "four", "5": "five", "6": ""}.toTable + const expectedConfigList = + @["--singleconfig2", "-config2=a2", "-abbrev2", "-abbrevArg2=arg2"] + let p = channel[].recv() - let configs = parseChannelData(p).valueOr: + let configs = deserializeConfigArgs(p).valueOr: quit(QuitFailure) isConfigRead.store(true) - checkResult[] = configs == 
expectedConfigTable + + checkResult[] = configs == expectedConfigList # ---------------------------------------------------------------------------- # # Unit Tests @@ -50,8 +55,11 @@ suite "Nimbus Service Management": setup: nimbus = Nimbus.new - const configTable_1 = {"0": "zero", "1": "one", "2": "two"}.toTable - const configTable_2 = {"4": "four", "5": "five", "6": ""}.toTable + const configTable_1 = + {"-config": "=a", "--singleconfig": "", "-abbrev": "", "-abbrevArg": "=arg"}.toTable + const configTable_2 = { + "-config2": "=a2", "--singleconfig2": "", "-abbrev2": "", "-abbrevArg2": "=arg2" + }.toTable # Test: Creating a new service successfully test "startService successfully adds a service": From c850a49cfe742ad01ad5672c5576ee982ca074a3 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Thu, 3 Jul 2025 00:05:47 +0100 Subject: [PATCH 27/34] Immediate shutdown workaround --- nimbus/nimbus.nim | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index b2fed4903e..af6247e7dc 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -12,7 +12,7 @@ import consensus/consensus_layer, common/utils, conf, - confutils/[cli_parser, toml/defs], + confutils/cli_parser, beacon_chain/conf, ../execution_chain/config @@ -115,6 +115,10 @@ proc controlCHandler() {.noconv.} = notice "\tCtrl+C pressed. Shutting down services ..." + # WA to shutdown client(exceptions thrown) + # issues related with nat.nim shutdown procedure (nim-eth + quit 0 + shutdownExecution() shutdownConsensus() @@ -177,10 +181,6 @@ proc run*(nimbus: var Nimbus) = # wait for shutdown nimbus.monitorServices() - # WA to shutdown (exceptions thrown) - # current shutdown procedure hangs on nat.nim from nim-eth - quit 0 - # ------ when isMainModule: notice "Starting Nimbus" From 50e013432761124d74dd2cc29b8cbfe9ff67c382 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Wed, 9 Jul 2025 14:34:57 +0100 Subject: [PATCH 28/34] - Added beacon node db file locks - minor fixes --- nimbus/consensus/wrapper_consensus.nim | 15 +++++--- nimbus/execution/execution_layer.nim | 2 +- nimbus/nimbus.nim | 48 ++++++++++++++++++++------ 3 files changed, 48 insertions(+), 17 deletions(-) diff --git a/nimbus/consensus/wrapper_consensus.nim b/nimbus/consensus/wrapper_consensus.nim index 24c25c4de8..c57c2a92fb 100644 --- a/nimbus/consensus/wrapper_consensus.nim +++ b/nimbus/consensus/wrapper_consensus.nim @@ -8,7 +8,7 @@ {.push raises: [].} import - std/[os, random, terminal, times, exitprocs], + std/[os, random, terminal, times, exitprocs, atomics], chronos, chronicles, metrics, @@ -2295,12 +2295,17 @@ proc run(node: BeaconNode) {.raises: [CatchableError].} = # time to say goodbye node.stop() +# db lock +var shouldCreatePid*: Atomic[bool] +shouldCreatePid.store(true) + var gPidFile: string proc createPidFile(filename: string) {.raises: [IOError].} = - writeFile filename, $os.getCurrentProcessId() - gPidFile = filename - addExitProc proc() {.noconv.} = - discard io2.removeFile(gPidFile) + if shouldCreatePid.load(): + writeFile filename, $os.getCurrentProcessId() + gPidFile = filename + addExitProc proc() {.noconv.} = + discard io2.removeFile(gPidFile) proc initializeNetworking(node: BeaconNode) {.async.} = node.installMessageValidators() diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index 2b167cd443..4a330fa9ec 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -20,7 +20,7 @@ import logScope: topics = "Execution layer" 
-var nimbusHandler = NimbusNode() +var nimbusHandler: NimbusNode proc shutdownExecution*() = nimbusHandler.state = NimbusState.Stopping diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index af6247e7dc..ebc4b16d56 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -5,14 +5,17 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. +{.push raises: [].} + import - std/[concurrency/atomics, os], + std/[concurrency/atomics, os, exitprocs], chronicles, execution/execution_layer, - consensus/consensus_layer, + consensus/[consensus_layer, wrapper_consensus], common/utils, conf, confutils/cli_parser, + stew/io2, beacon_chain/conf, ../execution_chain/config @@ -20,6 +23,19 @@ import # Private # ------------------------------------------------------------------------------ +#beacon node db lock +var beaconNodeLock {.global.}: string + +proc createBeaconNodeFileLock(filename: string) {.raises: [IOError].} = + shouldCreatePid.store(false) + + writeFile filename, $os.getCurrentProcessId() + beaconNodeLock = filename + + addExitProc proc() {.noconv.} = + if beaconNodeLock.len > 0: + discard io2.removeFile(beaconNodeLock) + ## create and configure service proc startService(nimbus: var Nimbus, service: var NimbusService) = #channel creation (shared memory) @@ -32,7 +48,10 @@ proc startService(nimbus: var Nimbus, service: var NimbusService) = isConfigRead.store(false) #start thread - createThread(service.serviceHandler, service.serviceFunc, serviceChannel) + try: + createThread(service.serviceHandler, service.serviceFunc, serviceChannel) + except Exception as e: + fatal "error creating thread", err = e.msg let optionsTable = block: case service.layerConfig.kind @@ -64,7 +83,10 @@ proc startService(nimbus: var Nimbus, service: var NimbusService) = writeConfigString(writeOffset, opt) writeConfigString(writeOffset, arg) - serviceChannel[].send(byteArray) + try: + serviceChannel[].send(byteArray) + except Exception as e: + fatal "channel error: ", err = e.msg #wait for service read ack while not isConfigRead.load(): @@ -115,10 +137,6 @@ proc controlCHandler() {.noconv.} = notice "\tCtrl+C pressed. Shutting down services ..." 
- # WA to shutdown client(exceptions thrown) - # issues related with nat.nim shutdown procedure (nim-eth - quit 0 - shutdownExecution() shutdownConsensus() @@ -126,8 +144,8 @@ proc controlCHandler() {.noconv.} = # Public # ------------------------------------------------------------------------------ -# Setup services -proc setup*(nimbus: var Nimbus) = +# Setup nimbus and services +proc setup(nimbus: var Nimbus) {.raises: [CatchableError].} = let executionConfigNames = extractFieldNames(NimbusConf) consensusConfigNames = extractFieldNames(BeaconNodeConf) @@ -164,6 +182,9 @@ proc setup*(nimbus: var Nimbus) = nimbus.serviceList.add(executionService) nimbus.serviceList.add(consensusService) + # todo: replace path with config,datadir when creating Nimbus config + createBeaconNodeFileLock(".beacon_node.pid") + ## start nimbus client proc run*(nimbus: var Nimbus) = try: @@ -181,12 +202,17 @@ proc run*(nimbus: var Nimbus) = # wait for shutdown nimbus.monitorServices() -# ------ +{.pop.} +# ----- + when isMainModule: notice "Starting Nimbus" setupFileLimits() + # todo: replace path with config after creating Nimbus config + # setupLogging(config.logLevel, config.logStdout, config.logFile) + var nimbus = Nimbus() nimbus.setup() nimbus.run() From 3ba61c66bbcd2547c5b09c8dd67a6996090c353b Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Wed, 16 Jul 2025 17:02:43 +0100 Subject: [PATCH 29/34] bump nimbus-eth2 to auxiliary PR --- vendor/nimbus-eth2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/nimbus-eth2 b/vendor/nimbus-eth2 index 8eb4f78569..454ead72f7 160000 --- a/vendor/nimbus-eth2 +++ b/vendor/nimbus-eth2 @@ -1 +1 @@ -Subproject commit 8eb4f785690b4a7b7e203a158632e68d048f4ee8 +Subproject commit 454ead72f7384da33d738fcc23daa74b2b53da9d From 3403c5b313af7b04c45bc9d8eb46f663151a4873 Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Thu, 17 Jul 2025 21:57:27 +0100 Subject: [PATCH 30/34] Removed beacon node wrapper with duplicated code and added necessary changes. --- config.nims | 1 + nimbus/consensus/consensus_layer.nim | 2 +- nimbus/consensus/wrapper_consensus.nim | 2708 ------------------------ nimbus/nimbus.nim | 5 +- 4 files changed, 5 insertions(+), 2711 deletions(-) delete mode 100644 nimbus/consensus/wrapper_consensus.nim diff --git a/config.nims b/config.nims index 7a67d1f935..d298393fa8 100644 --- a/config.nims +++ b/config.nims @@ -132,6 +132,7 @@ if not defined(windows): --mm:refc switch("define", "withoutPCRE") +switch("import", "testutils/moduletests") when not defined(disable_libbacktrace): --define:nimStackTraceOverride diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index f76dfd70a2..877932c324 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -15,7 +15,7 @@ import confutils, ../conf, ../common/utils, - ./wrapper_consensus, + beacon_chain/nimbus_beacon_node, beacon_chain/validators/keystore_management, beacon_chain/[beacon_node_status, nimbus_binary_common] diff --git a/nimbus/consensus/wrapper_consensus.nim b/nimbus/consensus/wrapper_consensus.nim deleted file mode 100644 index c57c2a92fb..0000000000 --- a/nimbus/consensus/wrapper_consensus.nim +++ /dev/null @@ -1,2708 +0,0 @@ -# beacon_chain -# Copyright (c) 2018-2025 Status Research & Development GmbH -# Licensed and distributed under either of -# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). 
-# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). -# at your option. This file may not be copied, modified, or distributed except according to those terms. - -{.push raises: [].} - -import - std/[os, random, terminal, times, exitprocs, atomics], - chronos, - chronicles, - metrics, - metrics/chronos_httpserver, - stew/[byteutils, io2], - eth/p2p/discoveryv5/[enr, random2], - beacon_chain/consensus_object_pools/ - [blob_quarantine, data_column_quarantine, blockchain_list], - beacon_chain/consensus_object_pools/vanity_logs/vanity_logs, - beacon_chain/networking/[topic_params, network_metadata_downloads], - beacon_chain/rpc/[rest_api, state_ttl_cache], - beacon_chain/spec/datatypes/[altair, bellatrix, phase0], - beacon_chain/spec/[engine_authentication, weak_subjectivity, peerdas_helpers], - beacon_chain/sync/[sync_protocol, light_client_protocol, sync_overseer], - beacon_chain/validators/[keystore_management, beacon_validators], - beacon_chain/[ - beacon_node, beacon_node_light_client, deposits, nimbus_binary_common, statusbar, - trusted_node_sync, wallets, - ] - -# This module is a copy from nimbus_beacon_node module where 'handleStartUpCmd' procedure -# visibility is changed to public. -# This file should be removed when 'handleStartUpCmd' is made public or we create a public -# entry point on nimbus_beacon_node module. - -when defined(posix): - import system/ansi_c - -from beacon_chain/spec/datatypes/deneb import SignedBeaconBlock - -from libp2p/protocols/pubsub/gossipsub import TopicParams, validateParameters, init - -logScope: - topics = "beacnde" - -# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics -declareGauge beacon_slot, "Latest slot of the beacon chain state" -declareGauge beacon_current_epoch, "Current epoch" - -# Finalization tracking -declareGauge finalization_delay, - "Epoch delay between scheduled epoch and finalized epoch" - -declareGauge ticks_delay, "How long does to take to run the onSecond loop" - -declareGauge next_action_wait, "Seconds until the next attestation will be sent" - -declareGauge next_proposal_wait, - "Seconds until the next proposal will be sent, or Inf if not known" - -declareGauge sync_committee_active, - "1 if there are current sync committee duties, 0 otherwise" - -declareCounter db_checkpoint_seconds, - "Time spent checkpointing the database to clear the WAL file" - -proc fetchGenesisState( - metadata: Eth2NetworkMetadata, - genesisState = none(InputFile), - genesisStateUrl = none(Uri), -): Future[ref ForkedHashedBeaconState] {.async: (raises: []).} = - let genesisBytes = - if metadata.genesis.kind != BakedIn and genesisState.isSome: - let res = io2.readAllBytes(genesisState.get.string) - res.valueOr: - error "Failed to read genesis state file", err = res.error.ioErrorMsg - quit 1 - elif metadata.hasGenesis: - try: - if metadata.genesis.kind == BakedInUrl: - info "Obtaining genesis state", - sourceUrl = $genesisStateUrl.get(parseUri metadata.genesis.url) - await metadata.fetchGenesisBytes(genesisStateUrl) - except CatchableError as err: - error "Failed to obtain genesis state", - source = metadata.genesis.sourceDesc, err = err.msg - quit 1 - else: - @[] - - if genesisBytes.len > 0: - try: - newClone readSszForkedHashedBeaconState(metadata.cfg, genesisBytes) - except CatchableError as err: - error "Invalid genesis state", - size = genesisBytes.len, digest = eth2digest(genesisBytes), err = err.msg - quit 1 - else: - nil - -proc doRunTrustedNodeSync( - db: 
BeaconChainDB, - metadata: Eth2NetworkMetadata, - databaseDir: string, - eraDir: string, - restUrl: string, - stateId: Option[string], - trustedBlockRoot: Option[Eth2Digest], - backfill: bool, - reindex: bool, - genesisState: ref ForkedHashedBeaconState, -) {.async.} = - let syncTarget = - if stateId.isSome: - if trustedBlockRoot.isSome: - warn "Ignoring `trustedBlockRoot`, `stateId` is set", stateId, trustedBlockRoot - TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: stateId.get) - elif trustedBlockRoot.isSome: - TrustedNodeSyncTarget( - kind: TrustedNodeSyncKind.TrustedBlockRoot, - trustedBlockRoot: trustedBlockRoot.get, - ) - else: - TrustedNodeSyncTarget(kind: TrustedNodeSyncKind.StateId, stateId: "finalized") - - await db.doTrustedNodeSync( - metadata.cfg, databaseDir, eraDir, restUrl, syncTarget, backfill, reindex, - genesisState, - ) - -func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs = - case stdoutKind - of StdoutLogKind.Auto: - raiseAssert "inadmissable here" - of StdoutLogKind.Colors: - VanityLogs( - onKnownBlsToExecutionChange: capellaBlink, - onUpgradeToDeneb: denebColor, - onUpgradeToElectra: electraColor, - onKnownCompoundingChange: electraBlink, - onUpgradeToFulu: fuluColor, - onBlobParametersUpdate: fuluColor, - ) - of StdoutLogKind.NoColors: - VanityLogs( - onKnownBlsToExecutionChange: capellaMono, - onUpgradeToDeneb: denebMono, - onUpgradeToElectra: electraMono, - onKnownCompoundingChange: electraMono, - onUpgradeToFulu: fuluMono, - onBlobParametersUpdate: fuluMono, - ) - of StdoutLogKind.Json, StdoutLogKind.None: - VanityLogs( - onKnownBlsToExecutionChange: ( - proc() = - notice "🦉 BLS to execution changed 🦉" - ), - onUpgradeToDeneb: ( - proc() = - notice "🐟 Proto-Danksharding is ON 🐟" - ), - onUpgradeToElectra: ( - proc() = - notice "🦒 Compounding is available 🦒" - ), - onKnownCompoundingChange: ( - proc() = - notice "🦒 Compounding is activated 🦒" - ), - onUpgradeToFulu: ( - proc() = - notice "🐅 Blobs columnized 🐅" - ), - onBlobParametersUpdate: ( - proc() = - notice "🐅 Blob parameters updated 🐅" - ), - ) - -func getVanityMascot(consensusFork: ConsensusFork): string = - case consensusFork - of ConsensusFork.Fulu: "🐅" - of ConsensusFork.Electra: "🦒" - of ConsensusFork.Deneb: "🐟" - of ConsensusFork.Capella: "🦉" - of ConsensusFork.Bellatrix: "🐼" - of ConsensusFork.Altair: "✨" - of ConsensusFork.Phase0: "🦏" - -proc loadChainDag( - config: BeaconNodeConf, - cfg: RuntimeConfig, - db: BeaconChainDB, - eventBus: EventBus, - validatorMonitor: ref ValidatorMonitor, - networkGenesisValidatorsRoot: Opt[Eth2Digest], -): ChainDAGRef = - info "Loading block DAG from database", path = config.databaseDir - - var dag: ChainDAGRef - proc onLightClientFinalityUpdate(data: ForkedLightClientFinalityUpdate) = - if dag == nil: - return - withForkyFinalityUpdate(data): - when lcDataFork > LightClientDataFork.None: - let contextFork = dag.cfg.consensusForkAtEpoch(forkyFinalityUpdate.contextEpoch) - eventBus.finUpdateQueue.emit( - RestVersioned[ForkedLightClientFinalityUpdate]( - data: data, - jsonVersion: contextFork, - sszContext: dag.forkDigests[].atConsensusFork(contextFork), - ) - ) - - proc onLightClientOptimisticUpdate(data: ForkedLightClientOptimisticUpdate) = - if dag == nil: - return - withForkyOptimisticUpdate(data): - when lcDataFork > LightClientDataFork.None: - let contextFork = - dag.cfg.consensusForkAtEpoch(forkyOptimisticUpdate.contextEpoch) - eventBus.optUpdateQueue.emit( - RestVersioned[ForkedLightClientOptimisticUpdate]( - data: data, - jsonVersion: 
contextFork, - sszContext: dag.forkDigests[].atConsensusFork(contextFork), - ) - ) - - let - chainDagFlags = - if config.strictVerification: - {strictVerification} - else: - {} - onLightClientFinalityUpdateCb = - if config.lightClientDataServe: onLightClientFinalityUpdate else: nil - onLightClientOptimisticUpdateCb = - if config.lightClientDataServe: onLightClientOptimisticUpdate else: nil - dag = ChainDAGRef.init( - cfg, - db, - validatorMonitor, - chainDagFlags, - config.eraDir, - vanityLogs = getVanityLogs(detectTTY(config.logStdout)), - lcDataConfig = LightClientDataConfig( - serve: config.lightClientDataServe, - importMode: config.lightClientDataImportMode, - maxPeriods: config.lightClientDataMaxPeriods, - onLightClientFinalityUpdate: onLightClientFinalityUpdateCb, - onLightClientOptimisticUpdate: onLightClientOptimisticUpdateCb, - ), - ) - - if networkGenesisValidatorsRoot.isSome: - let databaseGenesisValidatorsRoot = - getStateField(dag.headState, genesis_validators_root) - if networkGenesisValidatorsRoot.get != databaseGenesisValidatorsRoot: - fatal "The specified --data-dir contains data for a different network", - networkGenesisValidatorsRoot = networkGenesisValidatorsRoot.get, - databaseGenesisValidatorsRoot, - dataDir = config.dataDir - quit 1 - - # The first pruning after restart may take a while.. - if config.historyMode == HistoryMode.Prune: - dag.pruneHistory(true) - - dag - -proc checkWeakSubjectivityCheckpoint( - dag: ChainDAGRef, wsCheckpoint: Checkpoint, beaconClock: BeaconClock -) = - let - currentSlot = beaconClock.now.slotOrZero - isCheckpointStale = - not is_within_weak_subjectivity_period( - dag.cfg, currentSlot, dag.headState, wsCheckpoint - ) - - if isCheckpointStale: - error "Weak subjectivity checkpoint is stale", - currentSlot, - checkpoint = wsCheckpoint, - headStateSlot = getStateField(dag.headState, slot) - quit 1 - -from beacon_chain/spec/state_transition_block import kzg_commitment_to_versioned_hash - -proc isSlotWithinWeakSubjectivityPeriod(dag: ChainDAGRef, slot: Slot): bool = - let checkpoint = Checkpoint( - epoch: epoch(getStateField(dag.headState, slot)), - root: getStateField(dag.headState, latest_block_header).state_root, - ) - is_within_weak_subjectivity_period(dag.cfg, slot, dag.headState, checkpoint) - -proc initFullNode( - node: BeaconNode, - rng: ref HmacDrbgContext, - dag: ChainDAGRef, - clist: ChainListRef, - taskpool: Taskpool, - getBeaconTime: GetBeaconTimeFn, -) {.async.} = - template config(): auto = - node.config - - proc onPhase0AttestationReceived(data: phase0.Attestation) = - node.eventBus.phase0AttestQueue.emit(data) - - proc onSingleAttestationReceived(data: SingleAttestation) = - node.eventBus.singleAttestQueue.emit(data) - - proc onSyncContribution(data: SignedContributionAndProof) = - node.eventBus.contribQueue.emit(data) - - proc onVoluntaryExitAdded(data: SignedVoluntaryExit) = - node.eventBus.exitQueue.emit(data) - - proc onBLSToExecutionChangeAdded(data: SignedBLSToExecutionChange) = - node.eventBus.blsToExecQueue.emit(data) - - proc onProposerSlashingAdded(data: ProposerSlashing) = - node.eventBus.propSlashQueue.emit(data) - - proc onPhase0AttesterSlashingAdded(data: phase0.AttesterSlashing) = - node.eventBus.phase0AttSlashQueue.emit(data) - - proc onElectraAttesterSlashingAdded(data: electra.AttesterSlashing) = - node.eventBus.electraAttSlashQueue.emit(data) - - proc onBlobSidecarAdded(data: BlobSidecarInfoObject) = - node.eventBus.blobSidecarQueue.emit(data) - - proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) = - 
let optimistic = - if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: - some node.dag.is_optimistic(data.toBlockId()) - else: - none[bool]() - node.eventBus.blocksQueue.emit(EventBeaconBlockObject.init(data, optimistic)) - - proc onBlockGossipAdded(data: ForkedSignedBeaconBlock) = - node.eventBus.blockGossipQueue.emit(EventBeaconBlockGossipObject.init(data)) - - proc onHeadChanged(data: HeadChangeInfoObject) = - let eventData = - if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: - var res = data - res.optimistic = - some node.dag.is_optimistic(BlockId(slot: data.slot, root: data.block_root)) - res - else: - data - node.eventBus.headQueue.emit(eventData) - - proc onChainReorg(data: ReorgInfoObject) = - let eventData = - if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: - var res = data - res.optimistic = some node.dag.is_optimistic( - BlockId(slot: data.slot, root: data.new_head_block) - ) - res - else: - data - node.eventBus.reorgQueue.emit(eventData) - - proc makeOnFinalizationCb( - # This `nimcall` functions helps for keeping track of what - # needs to be captured by the onFinalization closure. - eventBus: EventBus, - elManager: ELManager, - ): OnFinalizedCallback {.nimcall.} = - static: - doAssert (elManager is ref) - return proc(dag: ChainDAGRef, data: FinalizationInfoObject) = - node.updateLightClientFromDag() - let eventData = - if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH: - var res = data - # `slot` in this `BlockId` may be higher than block's actual slot, - # this is alright for the purpose of calling `is_optimistic`. - res.optimistic = some node.dag.is_optimistic( - BlockId(slot: data.epoch.start_slot, root: data.block_root) - ) - res - else: - data - eventBus.finalQueue.emit(eventData) - - func getLocalHeadSlot(): Slot = - dag.head.slot - - proc getLocalWallSlot(): Slot = - node.beaconClock.now.slotOrZero - - func getFirstSlotAtFinalizedEpoch(): Slot = - dag.finalizedHead.slot - - func getBackfillSlot(): Slot = - if dag.backfill.parent_root != dag.tail.root: dag.backfill.slot else: dag.tail.slot - - func getUntrustedBackfillSlot(): Slot = - if clist.tail.isSome(): - clist.tail.get().blck.slot - else: - dag.tail.slot - - func getFrontfillSlot(): Slot = - max(dag.frontfill.get(BlockId()).slot, dag.horizon) - - proc isWithinWeakSubjectivityPeriod(): bool = - isSlotWithinWeakSubjectivityPeriod(node.dag, node.beaconClock.now().slotOrZero()) - - proc eventWaiter(): Future[void] {.async: (raises: [CancelledError]).} = - await node.shutdownEvent.wait() - bnStatus = BeaconNodeStatus.Stopping - - asyncSpawn eventWaiter() - - let - quarantine = newClone(Quarantine.init()) - attestationPool = newClone( - AttestationPool.init( - dag, quarantine, onPhase0AttestationReceived, onSingleAttestationReceived - ) - ) - syncCommitteeMsgPool = - newClone(SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution)) - lightClientPool = newClone(LightClientPool()) - validatorChangePool = newClone( - ValidatorChangePool.init( - dag, attestationPool, onVoluntaryExitAdded, onBLSToExecutionChangeAdded, - onProposerSlashingAdded, onPhase0AttesterSlashingAdded, - onElectraAttesterSlashingAdded, - ) - ) - blobQuarantine = newClone( - BlobQuarantine.init(dag.cfg, dag.db.getQuarantineDB(), 10, onBlobSidecarAdded) - ) - dataColumnQuarantine = newClone(DataColumnQuarantine.init()) - supernode = node.config.peerdasSupernode - localCustodyGroups = - if supernode: NUMBER_OF_CUSTODY_GROUPS.uint64 else: CUSTODY_REQUIREMENT.uint64 - custody_columns_set = 
node.network.nodeId.resolve_column_sets_from_custody_groups( - max(SAMPLES_PER_SLOT.uint64, localCustodyGroups) - ) - consensusManager = ConsensusManager.new( - dag, - attestationPool, - quarantine, - node.elManager, - ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets), - node.dynamicFeeRecipientsStore, - config.validatorsDir, - config.defaultFeeRecipient, - config.suggestedGasLimit, - ) - batchVerifier = BatchVerifier.new(rng, taskpool) - blockProcessor = BlockProcessor.new( - config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming, batchVerifier, - consensusManager, node.validatorMonitor, blobQuarantine, getBeaconTime, - config.invalidBlockRoots, - ) - - blockVerifier = proc( - signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], - maybeFinalized: bool, - ): Future[Result[void, VerifierError]] {. - async: (raises: [CancelledError], raw: true) - .} = - # The design with a callback for block verification is unusual compared - # to the rest of the application, but fits with the general approach - # taken in the sync/request managers - this is an architectural compromise - # that should probably be reimagined more holistically in the future. - blockProcessor[].addBlock( - MsgSource.gossip, signedBlock, blobs, maybeFinalized = maybeFinalized - ) - untrustedBlockVerifier = proc( - signedBlock: ForkedSignedBeaconBlock, - blobs: Opt[BlobSidecars], - maybeFinalized: bool, - ): Future[Result[void, VerifierError]] {. - async: (raises: [CancelledError], raw: true) - .} = - clist.untrustedBackfillVerifier(signedBlock, blobs, maybeFinalized) - rmanBlockVerifier = proc( - signedBlock: ForkedSignedBeaconBlock, maybeFinalized: bool - ): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} = - withBlck(signedBlock): - when consensusFork >= ConsensusFork.Deneb: - let bres = blobQuarantine[].popSidecars(forkyBlck.root, forkyBlck) - if bres.isSome(): - await blockProcessor[].addBlock( - MsgSource.gossip, signedBlock, bres, maybeFinalized = maybeFinalized - ) - else: - # We don't have all the blobs for this block, so we have - # to put it in blobless quarantine. 
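An illustrative, editor-added sketch (not part of this patch): the comment above describes holding a block back until every blob sidecar it commits to has arrived. A minimal self-contained Nim model of that "blobless quarantine" bookkeeping, using a hypothetical simplified table keyed by block root, could look like this:

import std/tables

type
  PendingBlock = object
    expectedBlobs: int   # number of blob KZG commitments in the block body
    receivedBlobs: int   # blob sidecars seen so far

var quarantine = initTable[string, PendingBlock]()  # keyed by block root

proc addBlobless(root: string, expected: int) =
  ## Remember a block whose blob sidecars have not all arrived yet.
  quarantine[root] = PendingBlock(expectedBlobs: expected)

proc onBlobArrived(root: string): bool =
  ## True once every expected blob for `root` has been seen; at that point the
  ## real code would re-queue the block for full processing.
  if root notin quarantine:
    return false
  inc quarantine[root].receivedBlobs
  result = quarantine[root].receivedBlobs >= quarantine[root].expectedBlobs
  if result:
    quarantine.del(root)

when isMainModule:
  addBlobless("0xabc", 2)
  doAssert not onBlobArrived("0xabc")
  doAssert onBlobArrived("0xabc")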
- if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck): - err(VerifierError.UnviableFork) - else: - err(VerifierError.MissingParent) - else: - await blockProcessor[].addBlock( - MsgSource.gossip, - signedBlock, - Opt.none(BlobSidecars), - maybeFinalized = maybeFinalized, - ) - rmanBlockLoader = proc(blockRoot: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] = - dag.getForkedBlock(blockRoot) - rmanBlobLoader = proc(blobId: BlobIdentifier): Opt[ref BlobSidecar] = - var blob_sidecar = BlobSidecar.new() - if dag.db.getBlobSidecar(blobId.block_root, blobId.index, blob_sidecar[]): - Opt.some blob_sidecar - else: - Opt.none(ref BlobSidecar) - rmanDataColumnLoader = proc( - columnId: DataColumnIdentifier - ): Opt[ref DataColumnSidecar] = - var data_column_sidecar = DataColumnSidecar.new() - if dag.db.getDataColumnSidecar( - columnId.block_root, columnId.index, data_column_sidecar[] - ): - Opt.some data_column_sidecar - else: - Opt.none(ref DataColumnSidecar) - - processor = Eth2Processor.new( - config.doppelgangerDetection, blockProcessor, node.validatorMonitor, dag, - attestationPool, validatorChangePool, node.attachedValidators, - syncCommitteeMsgPool, lightClientPool, quarantine, blobQuarantine, rng, - getBeaconTime, taskpool, - ) - syncManagerFlags = - if node.config.longRangeSync != LongRangeSyncMode.Lenient: - {SyncManagerFlag.NoGenesisSync} - else: - {} - syncManager = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, - dag.cfg.FULU_FORK_EPOCH, - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, - dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, - SyncQueueKind.Forward, - getLocalHeadSlot, - getLocalWallSlot, - getFirstSlotAtFinalizedEpoch, - getBackfillSlot, - getFrontfillSlot, - isWithinWeakSubjectivityPeriod, - dag.tail.slot, - blockVerifier, - shutdownEvent = node.shutdownEvent, - flags = syncManagerFlags, - ) - backfiller = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, - dag.cfg.FULU_FORK_EPOCH, - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, - dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, - SyncQueueKind.Backward, - getLocalHeadSlot, - getLocalWallSlot, - getFirstSlotAtFinalizedEpoch, - getBackfillSlot, - getFrontfillSlot, - isWithinWeakSubjectivityPeriod, - dag.backfill.slot, - blockVerifier, - maxHeadAge = 0, - shutdownEvent = node.shutdownEvent, - flags = syncManagerFlags, - ) - clistPivotSlot = - if clist.tail.isSome(): - clist.tail.get().blck.slot() - else: - getLocalWallSlot() - untrustedManager = newSyncManager[Peer, PeerId]( - node.network.peerPool, - dag.cfg.DENEB_FORK_EPOCH, - dag.cfg.FULU_FORK_EPOCH, - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, - dag.cfg.MAX_BLOBS_PER_BLOCK_ELECTRA, - SyncQueueKind.Backward, - getLocalHeadSlot, - getLocalWallSlot, - getFirstSlotAtFinalizedEpoch, - getUntrustedBackfillSlot, - getFrontfillSlot, - isWithinWeakSubjectivityPeriod, - clistPivotSlot, - untrustedBlockVerifier, - maxHeadAge = 0, - shutdownEvent = node.shutdownEvent, - flags = syncManagerFlags, - ) - router = (ref MessageRouter)(processor: processor, network: node.network) - requestManager = RequestManager.init( - node.network, - supernode, - custody_columns_set, - dag.cfg.DENEB_FORK_EPOCH, - getBeaconTime, - ( - proc(): bool = - syncManager.inProgress - ), - quarantine, - blobQuarantine, - dataColumnQuarantine, - rmanBlockVerifier, - rmanBlockLoader, - rmanBlobLoader, - rmanDataColumnLoader, - ) - - # As per EIP 7594, the BN is now categorised into a - # `Fullnode` and a `Supernode`, the fullnodes custodies a - # 
given set of data columns, and hence ONLY subcribes to those - # data column subnet topics, however, the supernodes subscribe - # to all of the topics. This in turn keeps our `data column quarantine` - # really variable. Whenever the BN is a supernode, column quarantine - # essentially means all the NUMBER_OF_COLUMNS, as per mentioned in the - # spec. However, in terms of fullnode, quarantine is really dependent - # on the randomly assigned columns, by `resolve_columns_from_custody_groups`. - - # Hence, in order to keep column quarantine accurate and error proof - # the custody columns are computed once as the BN boots. Then the values - # are used globally around the codebase. - - # `resolve_columns_from_custody_groups` is not a very expensive function, - # but there are multiple instances of computing custody columns, especially - # during peer selection, sync with columns, and so on. That is why, - # the rationale of populating it at boot and using it gloabally. - - dataColumnQuarantine[].supernode = supernode - dataColumnQuarantine[].custody_columns = node.network.nodeId.resolve_columns_from_custody_groups( - max(SAMPLES_PER_SLOT.uint64, localCustodyGroups) - ) - - if node.config.peerdasSupernode: - node.network.loadCgcnetMetadataAndEnr(NUMBER_OF_CUSTODY_GROUPS.uint8) - else: - node.network.loadCgcnetMetadataAndEnr(CUSTODY_REQUIREMENT.uint8) - - if node.config.lightClientDataServe: - proc scheduleSendingLightClientUpdates(slot: Slot) = - if node.lightClientPool[].broadcastGossipFut != nil: - return - if slot <= node.lightClientPool[].latestBroadcastedSlot: - return - node.lightClientPool[].latestBroadcastedSlot = slot - - template fut(): auto = - node.lightClientPool[].broadcastGossipFut - - fut = node.handleLightClientUpdates(slot) - fut.addCallback do(p: pointer) {.gcsafe.}: - fut = nil - - router.onSyncCommitteeMessage = scheduleSendingLightClientUpdates - - dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.elManager) - dag.setBlockCb(onBlockAdded) - dag.setBlockGossipCb(onBlockGossipAdded) - dag.setHeadCb(onHeadChanged) - dag.setReorgCb(onChainReorg) - - node.dag = dag - node.list = clist - node.blobQuarantine = blobQuarantine - node.quarantine = quarantine - node.attestationPool = attestationPool - node.syncCommitteeMsgPool = syncCommitteeMsgPool - node.lightClientPool = lightClientPool - node.validatorChangePool = validatorChangePool - node.processor = processor - node.batchVerifier = batchVerifier - node.blockProcessor = blockProcessor - node.consensusManager = consensusManager - node.requestManager = requestManager - node.syncManager = syncManager - node.backfiller = backfiller - node.untrustedManager = untrustedManager - node.syncOverseer = SyncOverseerRef.new( - node.consensusManager, node.validatorMonitor, config, getBeaconTime, node.list, - node.beaconClock, node.eventBus.optFinHeaderUpdateQueue, node.network.peerPool, - node.batchVerifier, syncManager, backfiller, untrustedManager, - ) - node.router = router - - await node.addValidators() - - block: - # Add in-process validators to the list of "known" validators such that - # we start with a reasonable ENR - let wallSlot = node.beaconClock.now().slotOrZero() - for validator in node.attachedValidators[].validators.values(): - if config.validatorMonitorAuto: - node.validatorMonitor[].addMonitor(validator.pubkey, validator.index) - - if validator.index.isSome(): - withState(dag.headState): - let idx = validator.index.get() - if distinctBase(idx) <= forkyState.data.validators.lenu64: - template v(): auto = - 
forkyState.data.validators.item(idx) - - if is_active_validator(v, wallSlot.epoch) or - is_active_validator(v, wallSlot.epoch + 1): - node.consensusManager[].actionTracker.knownValidators[idx] = wallSlot - elif is_exited_validator(v, wallSlot.epoch): - notice "Ignoring exited validator", - index = idx, pubkey = shortLog(v.pubkey) - let stabilitySubnets = - node.consensusManager[].actionTracker.stabilitySubnets(wallSlot) - # Here, we also set the correct ENR should we be in all subnets mode! - node.network.updateStabilitySubnetMetadata(stabilitySubnets) - - node.network.registerProtocol( - PeerSync, PeerSync.NetworkState.init(node.dag, node.beaconClock.getBeaconTimeFn()) - ) - - node.network.registerProtocol(BeaconSync, BeaconSync.NetworkState.init(node.dag)) - - if node.dag.lcDataStore.serve: - node.network.registerProtocol( - LightClientSync, LightClientSync.NetworkState.init(node.dag) - ) - - node.updateValidatorMetrics() - -const - SlashingDbName = "slashing_protection" - # changing this requires physical file rename as well or history is lost. - -proc init*( - T: type BeaconNode, - rng: ref HmacDrbgContext, - config: BeaconNodeConf, - metadata: Eth2NetworkMetadata, -): Future[BeaconNode] {.async.} = - var genesisState: ref ForkedHashedBeaconState = nil - - template cfg(): auto = - metadata.cfg - - template eth1Network(): auto = - metadata.eth1Network - - if not (isDir(config.databaseDir)): - # If database directory missing, we going to use genesis state to check - # for weak_subjectivity_period. - genesisState = - await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) - let - genesisTime = getStateField(genesisState[], genesis_time) - beaconClock = BeaconClock.init(genesisTime).valueOr: - fatal "Invalid genesis time in genesis state", genesisTime - quit 1 - currentSlot = beaconClock.now().slotOrZero() - checkpoint = Checkpoint( - epoch: epoch(getStateField(genesisState[], slot)), - root: getStateField(genesisState[], latest_block_header).state_root, - ) - - notice "Genesis state information", - genesis_fork = genesisState.kind, - is_post_altair = (cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH) - - if config.longRangeSync == LongRangeSyncMode.Light: - if not is_within_weak_subjectivity_period( - metadata.cfg, currentSlot, genesisState[], checkpoint - ): - # We do support any network which starts from Altair or later fork. - let metadata = config.loadEth2Network() - if metadata.cfg.ALTAIR_FORK_EPOCH != GENESIS_EPOCH: - fatal WeakSubjectivityLogMessage, - current_slot = currentSlot, - altair_fork_epoch = metadata.cfg.ALTAIR_FORK_EPOCH - quit 1 - - let taskpool = - try: - if config.numThreads < 0: - fatal "The number of threads --num-threads cannot be negative." 
- quit 1 - elif config.numThreads == 0: - Taskpool.new(numThreads = min(countProcessors(), 16)) - else: - Taskpool.new(numThreads = config.numThreads) - except CatchableError as e: - fatal "Cannot start taskpool", err = e.msg - quit 1 - - info "Threadpool started", numThreads = taskpool.numThreads - - if metadata.genesis.kind == BakedIn: - if config.genesisState.isSome: - warn "The --genesis-state option has no effect on networks with built-in genesis state" - - if config.genesisStateUrl.isSome: - warn "The --genesis-state-url option has no effect on networks with built-in genesis state" - - let - eventBus = EventBus( - headQueue: newAsyncEventQueue[HeadChangeInfoObject](), - blocksQueue: newAsyncEventQueue[EventBeaconBlockObject](), - blockGossipQueue: newAsyncEventQueue[EventBeaconBlockGossipObject](), - phase0AttestQueue: newAsyncEventQueue[phase0.Attestation](), - singleAttestQueue: newAsyncEventQueue[SingleAttestation](), - exitQueue: newAsyncEventQueue[SignedVoluntaryExit](), - blsToExecQueue: newAsyncEventQueue[SignedBLSToExecutionChange](), - propSlashQueue: newAsyncEventQueue[ProposerSlashing](), - phase0AttSlashQueue: newAsyncEventQueue[phase0.AttesterSlashing](), - electraAttSlashQueue: newAsyncEventQueue[electra.AttesterSlashing](), - blobSidecarQueue: newAsyncEventQueue[BlobSidecarInfoObject](), - finalQueue: newAsyncEventQueue[FinalizationInfoObject](), - reorgQueue: newAsyncEventQueue[ReorgInfoObject](), - contribQueue: newAsyncEventQueue[SignedContributionAndProof](), - finUpdateQueue: - newAsyncEventQueue[RestVersioned[ForkedLightClientFinalityUpdate]](), - optUpdateQueue: - newAsyncEventQueue[RestVersioned[ForkedLightClientOptimisticUpdate]](), - optFinHeaderUpdateQueue: newAsyncEventQueue[ForkedLightClientHeader](), - ) - db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false) - - if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr: - let trustedBlockRoot = - if config.trustedStateRoot.isSome or config.trustedBlockRoot.isSome: - config.trustedBlockRoot - elif cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH: - # Sync can be bootstrapped from the genesis block root - if genesisState.isNil: - genesisState = await fetchGenesisState( - metadata, config.genesisState, config.genesisStateUrl - ) - if not genesisState.isNil: - let genesisBlockRoot = get_initial_beacon_block(genesisState[]).root - notice "Neither `--trusted-block-root` nor `--trusted-state-root` " & - "provided with `--external-beacon-api-url`, " & - "falling back to genesis block root", - externalBeaconApiUrl = config.externalBeaconApiUrl.get, - trustedBlockRoot = config.trustedBlockRoot, - trustedStateRoot = config.trustedStateRoot, - genesisBlockRoot = $genesisBlockRoot - some genesisBlockRoot - else: - none[Eth2Digest]() - else: - none[Eth2Digest]() - if config.trustedStateRoot.isNone and trustedBlockRoot.isNone: - warn "Ignoring `--external-beacon-api-url`, neither " & - "`--trusted-block-root` nor `--trusted-state-root` provided", - externalBeaconApiUrl = config.externalBeaconApiUrl.get, - trustedBlockRoot = config.trustedBlockRoot, - trustedStateRoot = config.trustedStateRoot - else: - if genesisState.isNil: - genesisState = - await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) - await db.doRunTrustedNodeSync( - metadata, - config.databaseDir, - config.eraDir, - config.externalBeaconApiUrl.get, - config.trustedStateRoot.map do(x: Eth2Digest) -> string: - "0x" & x.data.toHex, - trustedBlockRoot, - backfill = false, - reindex = false, - genesisState, - ) - 
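An illustrative, editor-added sketch (not part of this patch): the block above decides which root to trust when `--external-beacon-api-url` is supplied: an explicitly configured root wins; otherwise the genesis block root is a safe fallback only when the network starts at Altair or later; otherwise the option is effectively ignored. A simplified Nim model of that decision, with hypothetical string roots:

import std/options

proc pickTrustedRoot(explicitRoot: Option[string],
                     altairAtGenesis: bool,
                     genesisBlockRoot: string): Option[string] =
  if explicitRoot.isSome:
    explicitRoot                  # user-supplied trusted root wins
  elif altairAtGenesis:
    some(genesisBlockRoot)        # light-client sync can start from genesis
  else:
    none(string)                  # nothing safe to bootstrap from

when isMainModule:
  doAssert pickTrustedRoot(none(string), true, "0xgen").get == "0xgen"
  doAssert pickTrustedRoot(some("0xcafe"), false, "0xgen").get == "0xcafe"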
- if config.finalizedCheckpointBlock.isSome: - warn "--finalized-checkpoint-block has been deprecated, ignoring" - - let checkpointState = - if config.finalizedCheckpointState.isSome: - let checkpointStatePath = config.finalizedCheckpointState.get.string - let tmp = - try: - newClone( - readSszForkedHashedBeaconState( - cfg, readAllBytes(checkpointStatePath).tryGet() - ) - ) - except SszError as err: - fatal "Checkpoint state loading failed", - err = formatMsg(err, checkpointStatePath) - quit 1 - except CatchableError as err: - fatal "Failed to read checkpoint state file", err = err.msg - quit 1 - - if not getStateField(tmp[], slot).is_epoch: - fatal "--finalized-checkpoint-state must point to a state for an epoch slot", - slot = getStateField(tmp[], slot) - quit 1 - tmp - else: - nil - - let engineApiUrls = config.engineApiUrls - - if engineApiUrls.len == 0: - notice "Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html)" - - var networkGenesisValidatorsRoot = metadata.bakedGenesisValidatorsRoot - - if not ChainDAGRef.isInitialized(db).isOk(): - genesisState = - if not checkpointState.isNil and getStateField(checkpointState[], slot) == 0: - checkpointState - else: - if genesisState.isNil: - await fetchGenesisState(metadata, config.genesisState, config.genesisStateUrl) - else: - genesisState - - if genesisState.isNil and checkpointState.isNil: - fatal "No database and no genesis snapshot found. Please supply a genesis.ssz " & - "with the network configuration" - quit 1 - - if not genesisState.isNil and not checkpointState.isNil: - if getStateField(genesisState[], genesis_validators_root) != - getStateField(checkpointState[], genesis_validators_root): - fatal "Checkpoint state does not match genesis - check the --network parameter", - rootFromGenesis = getStateField(genesisState[], genesis_validators_root), - rootFromCheckpoint = getStateField(checkpointState[], genesis_validators_root) - quit 1 - - try: - # Always store genesis state if we have it - this allows reindexing and - # answering genesis queries - if not genesisState.isNil: - ChainDAGRef.preInit(db, genesisState[]) - networkGenesisValidatorsRoot = - Opt.some(getStateField(genesisState[], genesis_validators_root)) - - if not checkpointState.isNil: - if genesisState.isNil or getStateField(checkpointState[], slot) != GENESIS_SLOT: - ChainDAGRef.preInit(db, checkpointState[]) - - doAssert ChainDAGRef.isInitialized(db).isOk(), - "preInit should have initialized db" - except CatchableError as exc: - error "Failed to initialize database", err = exc.msg - quit 1 - else: - if not checkpointState.isNil: - fatal "A database already exists, cannot start from given checkpoint", - dataDir = config.dataDir - quit 1 - - # Doesn't use std/random directly, but dependencies might - randomize(rng[].rand(high(int))) - - # The validatorMonitorTotals flag has been deprecated and should eventually be - # removed - until then, it's given priority if set so as not to needlessly - # break existing setups - let validatorMonitor = newClone( - ValidatorMonitor.init( - config.validatorMonitorAuto, - config.validatorMonitorTotals.get(not config.validatorMonitorDetails), - ) - ) - - for key in config.validatorMonitorPubkeys: - validatorMonitor[].addMonitor(key, Opt.none(ValidatorIndex)) - - let - dag = loadChainDag( - config, cfg, db, eventBus, validatorMonitor, networkGenesisValidatorsRoot - ) - genesisTime = getStateField(dag.headState, genesis_time) - beaconClock = BeaconClock.init(genesisTime).valueOr: - fatal 
"Invalid genesis time in state", genesisTime - quit 1 - - getBeaconTime = beaconClock.getBeaconTimeFn() - - let clist = block: - let res = ChainListRef.init(config.databaseDir()) - - debug "Backfill database has been loaded", - path = config.databaseDir(), head = shortLog(res.head), tail = shortLog(res.tail) - - if res.handle.isSome() and res.tail().isSome(): - if not (isSlotWithinWeakSubjectivityPeriod(dag, res.tail.get().slot())): - notice "Backfill database is outdated " & - "(outside of weak subjectivity period), reseting database", - path = config.databaseDir(), tail = shortLog(res.tail) - res.clear().isOkOr: - fatal "Unable to reset backfill database", - path = config.databaseDir(), reason = error - quit 1 - res - - info "Backfill database initialized", - path = config.databaseDir(), - head = shortLog(clist.head), - tail = shortLog(clist.tail) - - if config.weakSubjectivityCheckpoint.isSome: - dag.checkWeakSubjectivityCheckpoint( - config.weakSubjectivityCheckpoint.get, beaconClock - ) - - let elManager = ELManager.new(engineApiUrls, eth1Network) - - if config.rpcEnabled.isSome: - warn "Nimbus's JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it." - - let restServer = - if config.restEnabled: - RestServerRef.init( - config.restAddress, config.restPort, config.restAllowedOrigin, - validateBeaconApiQueries, nimbusAgentStr, config, - ) - else: - nil - - let - netKeys = getPersistentNetKeys(rng[], config) - nickname = - if config.nodeName == "auto": - shortForm(netKeys) - else: - config.nodeName - network = createEth2Node( - rng, - config, - netKeys, - cfg, - dag.forkDigests, - getBeaconTime, - getStateField(dag.headState, genesis_validators_root), - ) - - case config.slashingDbKind - of SlashingDbKind.v2: - discard - of SlashingDbKind.v1: - error "Slashing DB v1 is no longer supported for writing" - quit 1 - of SlashingDbKind.both: - warn "Slashing DB v1 deprecated, writing only v2" - - info "Loading slashing protection database (v2)", path = config.validatorsDir() - - proc getValidatorAndIdx(pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] = - withState(dag.headState): - getValidator(forkyState().data.validators.asSeq(), pubkey) - - func getCapellaForkVersion(): Opt[presets.Version] = - Opt.some(cfg.CAPELLA_FORK_VERSION) - - func getDenebForkEpoch(): Opt[Epoch] = - Opt.some(cfg.DENEB_FORK_EPOCH) - - proc getForkForEpoch(epoch: Epoch): Opt[Fork] = - Opt.some(dag.forkAtEpoch(epoch)) - - proc getGenesisRoot(): Eth2Digest = - getStateField(dag.headState, genesis_validators_root) - - let - keystoreCache = KeystoreCacheRef.init() - slashingProtectionDB = SlashingProtectionDB.init( - getStateField(dag.headState, genesis_validators_root), - config.validatorsDir(), - SlashingDbName, - ) - validatorPool = - newClone(ValidatorPool.init(slashingProtectionDB, config.doppelgangerDetection)) - - keymanagerInitResult = initKeymanagerServer(config, restServer) - keymanagerHost = - if keymanagerInitResult.server != nil: - newClone KeymanagerHost.init( - validatorPool, keystoreCache, rng, keymanagerInitResult.token, - config.validatorsDir, config.secretsDir, config.defaultFeeRecipient, - config.suggestedGasLimit, config.defaultGraffitiBytes, - config.getPayloadBuilderAddress, getValidatorAndIdx, getBeaconTime, - getCapellaForkVersion, getDenebForkEpoch, getForkForEpoch, getGenesisRoot, - ) - else: - nil - - stateTtlCache = - if 
config.restCacheSize > 0: - StateTtlCache.init( - cacheSize = config.restCacheSize, - cacheTtl = chronos.seconds(config.restCacheTtl), - ) - else: - nil - - if config.payloadBuilderEnable: - info "Using external payload builder", payloadBuilderUrl = config.payloadBuilderUrl - - let node = BeaconNode( - nickname: nickname, - graffitiBytes: - if config.graffiti.isSome: - config.graffiti.get - else: - defaultGraffitiBytes(), - network: network, - netKeys: netKeys, - db: db, - config: config, - attachedValidators: validatorPool, - elManager: elManager, - restServer: restServer, - keymanagerHost: keymanagerHost, - keymanagerServer: keymanagerInitResult.server, - keystoreCache: keystoreCache, - eventBus: eventBus, - gossipState: {}, - blocksGossipState: {}, - beaconClock: beaconClock, - validatorMonitor: validatorMonitor, - stateTtlCache: stateTtlCache, - shutdownEvent: newAsyncEvent(), - dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()), - ) - - node.initLightClient( - rng, cfg, dag.forkDigests, getBeaconTime, dag.genesis_validators_root - ) - await node.initFullNode(rng, dag, clist, taskpool, getBeaconTime) - - node.updateLightClientFromDag() - - node - -func verifyFinalization(node: BeaconNode, slot: Slot) = - # Epoch must be >= 4 to check finalization - const SETTLING_TIME_OFFSET = 1'u64 - let epoch = slot.epoch() - - # Don't static-assert this -- if this isn't called, don't require it - doAssert SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET - - # Intentionally, loudly assert. Point is to fail visibly and unignorably - # during testing. - if epoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET: - let finalizedEpoch = node.dag.finalizedHead.slot.epoch() - # Finalization rule 234, that has the most lag slots among the cases, sets - # state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3 - # and then state.slot gets incremented, to increase the maximum offset, if - # finalization occurs every slot, to 4 slots vs scheduledSlot. - doAssert finalizedEpoch + 4 >= epoch - -from std/sequtils import toSeq - -func subnetLog(v: BitArray): string = - $toSeq(v.oneIndices()) - -func forkDigests(node: BeaconNode): auto = - let forkDigestsArray: array[ConsensusFork, auto] = [ - node.dag.forkDigests.phase0, node.dag.forkDigests.altair, - node.dag.forkDigests.bellatrix, node.dag.forkDigests.capella, - node.dag.forkDigests.deneb, node.dag.forkDigests.electra, node.dag.forkDigests.fulu, - ] - forkDigestsArray - -# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription -proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) = - if node.gossipState.card == 0: - # When disconnected, updateBlocksGossipStatus is responsible for all things - # subnets - in particular, it will remove subscriptions on the edge where - # we enter the disconnected state. 
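An illustrative, editor-added sketch (not part of this patch): the attestation subnet handler that begins above (and continues below) keeps a record of the previously subscribed subnets and takes two set differences, unsubscribing from subnets that dropped out and subscribing to the new ones. A minimal self-contained Nim version of that diffing, with subnet ids modelled as plain integers:

import std/sets

proc diffSubnets(prev, wanted: HashSet[uint8]): tuple[subscribe, unsubscribe: HashSet[uint8]] =
  ## `subscribe` is what we newly join, `unsubscribe` is what we leave.
  (subscribe: wanted - prev, unsubscribe: prev - wanted)

when isMainModule:
  let
    prev = toHashSet([1'u8, 2, 3])
    wanted = toHashSet([2'u8, 3, 4])
    (subscribe, unsubscribe) = diffSubnets(prev, wanted)
  doAssert subscribe == toHashSet([4'u8])
  doAssert unsubscribe == toHashSet([1'u8])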
- return - - let - aggregateSubnets = node.consensusManager[].actionTracker.aggregateSubnets(slot) - stabilitySubnets = node.consensusManager[].actionTracker.stabilitySubnets(slot) - subnets = aggregateSubnets + stabilitySubnets - validatorsCount = withState(node.dag.headState): - forkyState.data.validators.lenu64 - - node.network.updateStabilitySubnetMetadata(stabilitySubnets) - - # Now we know what we should be subscribed to - make it so - let - prevSubnets = node.consensusManager[].actionTracker.subscribedSubnets - unsubscribeSubnets = prevSubnets - subnets - subscribeSubnets = subnets - prevSubnets - - # Remember what we subscribed to, so we can unsubscribe later - node.consensusManager[].actionTracker.subscribedSubnets = subnets - - let forkDigests = node.forkDigests() - - for gossipFork in node.gossipState: - let forkDigest = forkDigests[gossipFork] - node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, forkDigest) - node.network.subscribeAttestationSubnets( - subscribeSubnets, forkDigest, getAttestationSubnetTopicParams(validatorsCount) - ) - - debug "Attestation subnets", - slot, - epoch = slot.epoch, - gossipState = node.gossipState, - stabilitySubnets = subnetLog(stabilitySubnets), - aggregateSubnets = subnetLog(aggregateSubnets), - prevSubnets = subnetLog(prevSubnets), - subscribeSubnets = subnetLog(subscribeSubnets), - unsubscribeSubnets = subnetLog(unsubscribeSubnets), - gossipState = node.gossipState - -proc updateBlocksGossipStatus*(node: BeaconNode, slot: Slot, dagIsBehind: bool) = - template cfg(): auto = - node.dag.cfg - - let - isBehind = - if node.shouldSyncOptimistically(slot): - # If optimistic sync is active, always subscribe to blocks gossip - false - else: - # Use DAG status to determine whether to subscribe for blocks gossip - dagIsBehind - - targetGossipState = getTargetGossipState( - slot.epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH, - cfg.CAPELLA_FORK_EPOCH, cfg.DENEB_FORK_EPOCH, cfg.ELECTRA_FORK_EPOCH, - cfg.FULU_FORK_EPOCH, isBehind, - ) - - template currentGossipState(): auto = - node.blocksGossipState - - if currentGossipState == targetGossipState: - return - - if currentGossipState.card == 0 and targetGossipState.card > 0: - debug "Enabling blocks topic subscriptions", wallSlot = slot, targetGossipState - elif currentGossipState.card > 0 and targetGossipState.card == 0: - debug "Disabling blocks topic subscriptions", wallSlot = slot - else: - # Individual forks added / removed - discard - - let - newGossipForks = targetGossipState - currentGossipState - oldGossipForks = currentGossipState - targetGossipState - - for gossipFork in oldGossipForks: - let forkDigest = node.dag.forkDigests[].atConsensusFork(gossipFork) - node.network.unsubscribe(getBeaconBlocksTopic(forkDigest)) - - for gossipFork in newGossipForks: - let forkDigest = node.dag.forkDigests[].atConsensusFork(gossipFork) - node.network.subscribe( - getBeaconBlocksTopic(forkDigest), getBlockTopicParams(), enableTopicMetrics = true - ) - - node.blocksGossipState = targetGossipState - -proc addPhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - let validatorsCount = withState(node.dag.headState): - forkyState.data.validators.lenu64 - node.network.subscribe( - getAttesterSlashingsTopic(forkDigest), getAttesterSlashingTopicParams() - ) - node.network.subscribe( - getProposerSlashingsTopic(forkDigest), getProposerSlashingTopicParams() - ) - node.network.subscribe( - getVoluntaryExitsTopic(forkDigest), getVoluntaryExitTopicParams() - ) - 
node.network.subscribe( - getAggregateAndProofsTopic(forkDigest), - getAggregateProofTopicParams(validatorsCount), - enableTopicMetrics = true, - ) - - # updateAttestationSubnetHandlers subscribes attestation subnets - -proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.network.unsubscribe(getVoluntaryExitsTopic(forkDigest)) - node.network.unsubscribe(getProposerSlashingsTopic(forkDigest)) - node.network.unsubscribe(getAttesterSlashingsTopic(forkDigest)) - node.network.unsubscribe(getAggregateAndProofsTopic(forkDigest)) - - for subnet_id in SubnetId: - node.network.unsubscribe(getAttestationTopic(forkDigest, subnet_id)) - - node.consensusManager[].actionTracker.subscribedSubnets = default(AttnetBits) - -func hasSyncPubKey(node: BeaconNode, epoch: Epoch): auto = - # Only used to determine which gossip topics to which to subscribe - if node.config.subscribeAllSubnets: - ( - func (pubkey: ValidatorPubKey): bool {.closure.} = - true - ) - else: - ( - func (pubkey: ValidatorPubKey): bool = - node.consensusManager[].actionTracker.hasSyncDuty(pubkey, epoch) or - pubkey in node.attachedValidators[].validators - ) - -func getCurrentSyncCommiteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = - let syncCommittee = withState(node.dag.headState): - when consensusFork >= ConsensusFork.Altair: - forkyState.data.current_sync_committee - else: - return static(default(SyncnetBits)) - - getSyncSubnets(node.hasSyncPubKey(epoch), syncCommittee) - -func getNextSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = - let syncCommittee = withState(node.dag.headState): - when consensusFork >= ConsensusFork.Altair: - forkyState.data.next_sync_committee - else: - return static(default(SyncnetBits)) - - getSyncSubnets( - node.hasSyncPubKey((epoch.sync_committee_period + 1).start_slot().epoch), - syncCommittee, - ) - -func getSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits = - let - subnets = node.getCurrentSyncCommiteeSubnets(epoch) - epochsToSyncPeriod = nearSyncCommitteePeriod(epoch) - - # The end-slot tracker might call this when it's theoretically applicable, - # but more than SYNC_COMMITTEE_SUBNET_COUNT epochs from when the next sync - # committee period begins, in which case `epochsToNextSyncPeriod` is none. - if epochsToSyncPeriod.isNone or - node.dag.cfg.consensusForkAtEpoch(epoch + epochsToSyncPeriod.get) < - ConsensusFork.Altair: - return subnets - - subnets + node.getNextSyncCommitteeSubnets(epoch) - -proc addAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.addPhase0MessageHandlers(forkDigest, slot) - - # If this comes online near sync committee period, it'll immediately get - # replaced as usual by trackSyncCommitteeTopics, which runs at slot end. 
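An illustrative, editor-added sketch (not part of this patch): `getSyncCommitteeSubnets` above unions the next period's subnets into the result only when the next sync committee period is close enough to matter. A simplified Nim model of that rule; `LookaheadEpochs` is a hypothetical stand-in for the real lookahead constant:

const LookaheadEpochs = 4  # hypothetical stand-in for the spec lookahead

proc wantedSyncnets(current, next: set[uint8], epochsToNextPeriod: int): set[uint8] =
  ## Subscribe to next-period subnets only when the boundary is close, so the
  ## node is already on the right topics when its next-period duties begin.
  if epochsToNextPeriod > LookaheadEpochs:
    current
  else:
    current + next

when isMainModule:
  doAssert wantedSyncnets({0'u8, 1}, {2'u8}, 10) == {0'u8, 1}
  doAssert wantedSyncnets({0'u8, 1}, {2'u8}, 2) == {0'u8, 1, 2}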
- let - syncnets = node.getSyncCommitteeSubnets(slot.epoch) - validatorsCount = withState(node.dag.headState): - forkyState.data.validators.lenu64 - - for subcommitteeIdx in SyncSubcommitteeIndex: - if syncnets[subcommitteeIdx]: - node.network.subscribe( - getSyncCommitteeTopic(forkDigest, subcommitteeIdx), - getSyncCommitteeSubnetTopicParams(validatorsCount), - ) - - node.network.subscribe( - getSyncCommitteeContributionAndProofTopic(forkDigest), - getSyncContributionTopicParams(), - ) - - node.network.updateSyncnetsMetadata(syncnets) - -proc addCapellaMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.addAltairMessageHandlers(forkDigest, slot) - node.network.subscribe( - getBlsToExecutionChangeTopic(forkDigest), getBlsToExecutionChangeTopicParams() - ) - -proc doAddDenebMessageHandlers( - node: BeaconNode, forkDigest: ForkDigest, slot: Slot, blobSidecarSubnetCount: uint64 -) = - node.addCapellaMessageHandlers(forkDigest, slot) - for topic in blobSidecarTopics(forkDigest, blobSidecarSubnetCount): - node.network.subscribe(topic, basicParams()) - -proc addDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.doAddDenebMessageHandlers( - forkDigest, slot, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT - ) - -proc addElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.doAddDenebMessageHandlers( - forkDigest, slot, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA - ) - -proc addFuluMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) = - node.addElectraMessageHandlers(forkDigest, slot) - -proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.removePhase0MessageHandlers(forkDigest) - - for subcommitteeIdx in SyncSubcommitteeIndex: - closureScope: - let idx = subcommitteeIdx - node.network.unsubscribe(getSyncCommitteeTopic(forkDigest, idx)) - - node.network.unsubscribe(getSyncCommitteeContributionAndProofTopic(forkDigest)) - -proc removeCapellaMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.removeAltairMessageHandlers(forkDigest) - node.network.unsubscribe(getBlsToExecutionChangeTopic(forkDigest)) - -proc doRemoveDenebMessageHandlers( - node: BeaconNode, forkDigest: ForkDigest, blobSidecarSubnetCount: uint64 -) = - node.removeCapellaMessageHandlers(forkDigest) - for topic in blobSidecarTopics(forkDigest, blobSidecarSubnetCount): - node.network.unsubscribe(topic) - -proc removeDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.doRemoveDenebMessageHandlers(forkDigest, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT) - -proc removeElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.doRemoveDenebMessageHandlers( - forkDigest, node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA - ) - -proc removeFuluMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) = - node.removeElectraMessageHandlers(forkDigest) - -proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) = - template lastSyncUpdate(): untyped = - node.consensusManager[].actionTracker.lastSyncUpdate - - if lastSyncUpdate == Opt.some(slot.sync_committee_period()) and - nearSyncCommitteePeriod(slot.epoch).isNone(): - # No need to update unless we're close to the next sync committee period or - # new validators were registered with the action tracker - # TODO we _could_ skip running this in some of the "near" slots, but.. 
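An illustrative, editor-added sketch (not part of this patch): the add*/remove* message handlers above are layered, with each fork's handler first delegating to the previous fork's handler and then touching only the topics that fork introduced. A toy Nim version of the same layering that merely collects topic name strings instead of subscribing:

proc phase0Topics(): seq[string] =
  @["beacon_block", "beacon_aggregate_and_proof", "voluntary_exit"]

proc altairTopics(): seq[string] =
  result = phase0Topics()          # start from everything the previous fork had
  result.add "sync_committee_contribution_and_proof"

proc capellaTopics(): seq[string] =
  result = altairTopics()
  result.add "bls_to_execution_change"

when isMainModule:
  doAssert "beacon_block" in capellaTopics()
  doAssert "bls_to_execution_change" notin phase0Topics()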
- return - - lastSyncUpdate = Opt.some(slot.sync_committee_period()) - - let syncnets = node.getSyncCommitteeSubnets(slot.epoch) - - debug "Updating sync committee subnets", - syncnets, - metadata_syncnets = node.network.metadata.syncnets, - gossipState = node.gossipState - - # Assume that different gossip fork sync committee setups are in sync; this - # only remains relevant, currently, for one gossip transition epoch, so the - # consequences of this not being true aren't exceptionally dire, while this - # allows for bookkeeping simplication. - if syncnets == node.network.metadata.syncnets: - return - - let - newSyncnets = syncnets - node.network.metadata.syncnets - oldSyncnets = node.network.metadata.syncnets - syncnets - forkDigests = node.forkDigests() - validatorsCount = withState(node.dag.headState): - forkyState.data.validators.lenu64 - - for subcommitteeIdx in SyncSubcommitteeIndex: - doAssert not (newSyncnets[subcommitteeIdx] and oldSyncnets[subcommitteeIdx]) - for gossipFork in node.gossipState: - template topic(): auto = - getSyncCommitteeTopic(forkDigests[gossipFork], subcommitteeIdx) - - if oldSyncnets[subcommitteeIdx]: - node.network.unsubscribe(topic) - elif newSyncnets[subcommitteeIdx]: - node.network.subscribe( - topic, getSyncCommitteeSubnetTopicParams(validatorsCount) - ) - - node.network.updateSyncnetsMetadata(syncnets) - -proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) = - if not node.processor[].doppelgangerDetectionEnabled: - return - - # broadcastStartEpoch is set to FAR_FUTURE_EPOCH when we're not monitoring - # gossip - it is only viable to assert liveness in epochs where gossip is - # active - if epoch > node.processor[].doppelgangerDetection.broadcastStartEpoch: - for validator in node.attachedValidators[]: - validator.doppelgangerChecked(epoch - 1) - -proc maybeUpdateActionTrackerNextEpoch( - node: BeaconNode, forkyState: ForkyHashedBeaconState, currentSlot: Slot -) = - let nextEpoch = currentSlot.epoch + 1 - if node.consensusManager[].actionTracker.needsUpdate(forkyState, nextEpoch): - template epochRefFallback() = - let epochRef = node.dag.getEpochRef(node.dag.head, nextEpoch, false).expect( - "Getting head EpochRef should never fail" - ) - node.consensusManager[].actionTracker.updateActions( - epochRef.shufflingRef, epochRef.beacon_proposers - ) - - when forkyState is phase0.HashedBeaconState: - # The previous_epoch_participation-based logic requires Altair or newer - epochRefFallback() - else: - let - shufflingRef = node.dag.getShufflingRef(node.dag.head, nextEpoch, false).valueOr: - # epochRefFallback() won't work in this case either - return - # using the separate method of proposer indices calculation in Fulu - nextEpochProposers = get_beacon_proposer_indices( - forkyState.data, shufflingRef.shuffled_active_validator_indices, nextEpoch - ) - nextEpochFirstProposer = nextEpochProposers[0].valueOr: - # All proposers except the first can be more straightforwardly and - # efficiently (re)computed correctly once in that epoch. - epochRefFallback() - return - - # Has to account for potential epoch transition TIMELY_SOURCE_FLAG_INDEX, - # TIMELY_TARGET_FLAG_INDEX, and inactivity penalties, resulting from spec - # functions get_flag_index_deltas() and get_inactivity_penalty_deltas(). - # - # There are no penalties associated with TIMELY_HEAD_FLAG_INDEX, but a - # reward exists. effective_balance == MAX_EFFECTIVE_BALANCE.Gwei ensures - # if even so, then the effective balance cannot change as a result. 
- # - # It's not truly necessary to avoid all rewards and penalties, but only - # to bound them to ensure they won't unexpected alter effective balance - # during the upcoming epoch transition. - # - # During genesis epoch, the check for epoch participation is against - # current, not previous, epoch, and therefore there's a possibility of - # checking for if a validator has participated in an epoch before it will - # happen. - # - # Because process_rewards_and_penalties() in epoch processing happens - # before the current/previous participation swap, previous is correct - # even here, and consistent with what the epoch transition uses. - # - # Whilst slashing, proposal, and sync committee rewards and penalties do - # update the balances as they occur, they don't update effective_balance - # until the end of epoch, so detect via effective_balance_might_update. - # - # On EF mainnet epoch 233906, this matches 99.5% of active validators; - # with Holesky epoch 2041, 83% of active validators. - let - participation_flags = - forkyState.data.previous_epoch_participation.item(nextEpochFirstProposer) - effective_balance = - forkyState.data.validators.item(nextEpochFirstProposer).effective_balance - - # Maximal potential accuracy primarily useful during the last slot of - # each epoch to prepare for a possible proposal the first slot of the - # next epoch. Otherwise, epochRefFallback is potentially very slow as - # it can induce a lengthy state replay. - if (not (currentSlot + 1).is_epoch) or ( - participation_flags.has_flag(TIMELY_SOURCE_FLAG_INDEX) and - participation_flags.has_flag(TIMELY_TARGET_FLAG_INDEX) and - effective_balance == MAX_EFFECTIVE_BALANCE.Gwei and - forkyState.data.slot.epoch != GENESIS_EPOCH and - forkyState.data.inactivity_scores.item(nextEpochFirstProposer) == 0 and - not effective_balance_might_update( - forkyState.data.balances.item(nextEpochFirstProposer), effective_balance - ) - ): - node.consensusManager[].actionTracker.updateActions( - shufflingRef, nextEpochProposers - ) - else: - epochRefFallback() - -proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} = - ## Subscribe to subnets that we are providing stability for or aggregating - ## and unsubscribe from the ones that are no longer relevant. 
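An illustrative, editor-added sketch (not part of this patch): the long comment and condition above gate a fast path that pre-computes next-epoch proposers from the shuffling alone, falling back to a full `EpochRef` whenever the first proposer's effective balance could still change in the upcoming epoch transition. A simplified, hypothetical Nim version of that guard:

type ProposerSnapshot = object
  hasTimelySource, hasTimelyTarget: bool
  effectiveBalanceIsMax: bool
  inactivityScore: uint64
  balanceMightUpdateEffective: bool

proc canUseFastPath(p: ProposerSnapshot, isGenesisEpoch: bool): bool =
  ## True only when rewards/penalties cannot move the proposer's effective
  ## balance in the upcoming epoch transition, i.e. the cheap shuffling-based
  ## proposer computation is guaranteed to stay correct.
  p.hasTimelySource and p.hasTimelyTarget and
    p.effectiveBalanceIsMax and
    not isGenesisEpoch and
    p.inactivityScore == 0 and
    not p.balanceMightUpdateEffective

when isMainModule:
  let ok = ProposerSnapshot(
    hasTimelySource: true, hasTimelyTarget: true,
    effectiveBalanceIsMax: true, inactivityScore: 0,
    balanceMightUpdateEffective: false)
  doAssert canUseFastPath(ok, isGenesisEpoch = false)
  doAssert not canUseFastPath(ok, isGenesisEpoch = true)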
- - # Let the tracker know what duties are approaching - this will tell us how - # many stability subnets we need to be subscribed to and what subnets we'll - # soon be aggregating - in addition to the in-beacon-node duties, there may - # also be duties coming from the validator client, but we don't control when - # these arrive - await node.registerDuties(slot) - - # We start subscribing to gossip before we're fully synced - this allows time - # to subscribe before the sync end game - const - TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64 - HYSTERESIS_BUFFER = 16 - - static: - doAssert high(ConsensusFork) == ConsensusFork.Fulu - - let - head = node.dag.head - headDistance = - if slot > head.slot: - (slot - head.slot).uint64 - else: - 0'u64 - isBehind = headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER - targetGossipState = getTargetGossipState( - slot.epoch, node.dag.cfg.ALTAIR_FORK_EPOCH, node.dag.cfg.BELLATRIX_FORK_EPOCH, - node.dag.cfg.CAPELLA_FORK_EPOCH, node.dag.cfg.DENEB_FORK_EPOCH, - node.dag.cfg.ELECTRA_FORK_EPOCH, node.dag.cfg.FULU_FORK_EPOCH, isBehind, - ) - - doAssert targetGossipState.card <= 2 - - let - newGossipForks = targetGossipState - node.gossipState - oldGossipForks = node.gossipState - targetGossipState - - doAssert newGossipForks.card <= 2 - doAssert oldGossipForks.card <= 2 - - func maxGossipFork(gossipState: GossipState): int = - var res = -1 - for gossipFork in gossipState: - res = max(res, gossipFork.int) - res - - if maxGossipFork(targetGossipState) < maxGossipFork(node.gossipState) and - targetGossipState != {}: - warn "Unexpected clock regression during transition", - targetGossipState, gossipState = node.gossipState - - if node.gossipState.card == 0 and targetGossipState.card > 0: - # We are synced, so we will connect - debug "Enabling topic subscriptions", - wallSlot = slot, headSlot = head.slot, headDistance, targetGossipState - - node.processor[].setupDoppelgangerDetection(slot) - - # Specially when waiting for genesis, we'll already be synced on startup - - # it might also happen on a sufficiently fast restart - - # We "know" the actions for the current and the next epoch - withState(node.dag.headState): - if node.consensusManager[].actionTracker.needsUpdate(forkyState, slot.epoch): - let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect( - "Getting head EpochRef should never fail" - ) - node.consensusManager[].actionTracker.updateActions( - epochRef.shufflingRef, epochRef.beacon_proposers - ) - - node.maybeUpdateActionTrackerNextEpoch(forkyState, slot) - - if node.gossipState.card > 0 and targetGossipState.card == 0: - debug "Disabling topic subscriptions", - wallSlot = slot, headSlot = head.slot, headDistance - - node.processor[].clearDoppelgangerProtection() - - let forkDigests = node.forkDigests() - - const removeMessageHandlers: array[ConsensusFork, auto] = [ - removePhase0MessageHandlers, - removeAltairMessageHandlers, - removeAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) - removeCapellaMessageHandlers, - removeDenebMessageHandlers, - removeElectraMessageHandlers, - removeFuluMessageHandlers, - ] - - for gossipFork in oldGossipForks: - removeMessageHandlers[gossipFork](node, forkDigests[gossipFork]) - - const addMessageHandlers: array[ConsensusFork, auto] = [ - addPhase0MessageHandlers, - addAltairMessageHandlers, - addAltairMessageHandlers, # bellatrix (altair handlers, different forkDigest) - addCapellaMessageHandlers, - addDenebMessageHandlers, - addElectraMessageHandlers, - 
addFuluMessageHandlers, - ] - - for gossipFork in newGossipForks: - addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot) - - node.gossipState = targetGossipState - node.doppelgangerChecked(slot.epoch) - node.updateAttestationSubnetHandlers(slot) - node.updateBlocksGossipStatus(slot, isBehind) - node.updateLightClientGossipStatus(slot, isBehind) - -proc pruneBlobs(node: BeaconNode, slot: Slot) = - let blobPruneEpoch = - (slot.epoch - node.dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1) - if slot.is_epoch() and blobPruneEpoch >= node.dag.cfg.DENEB_FORK_EPOCH: - var blocks: array[SLOTS_PER_EPOCH.int, BlockId] - var count = 0 - let startIndex = node.dag.getBlockRange( - blobPruneEpoch.start_slot, blocks.toOpenArray(0, SLOTS_PER_EPOCH - 1) - ) - for i in startIndex ..< SLOTS_PER_EPOCH: - let blck = node.dag.getForkedBlock(blocks[int(i)]).valueOr: - continue - withBlck(blck): - when typeof(forkyBlck).kind < ConsensusFork.Deneb: - continue - else: - for j in 0 .. len(forkyBlck.message.body.blob_kzg_commitments) - 1: - if node.db.delBlobSidecar(blocks[int(i)].root, BlobIndex(j)): - count = count + 1 - debug "pruned blobs", count, blobPruneEpoch - -proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} = - # Things we do when slot processing has ended and we're about to wait for the - # next slot - - # By waiting until close before slot end, ensure that preparation for next - # slot does not interfere with propagation of messages and with VC duties. - const endOffset = - aggregateSlotOffset + - nanos((NANOSECONDS_PER_SLOT - aggregateSlotOffset.nanoseconds.uint64).int64 div 2) - let endCutoff = node.beaconClock.fromNow(slot.start_beacon_time + endOffset) - if endCutoff.inFuture: - debug "Waiting for slot end", slot, endCutoff = shortLog(endCutoff.offset) - await sleepAsync(endCutoff.offset) - - if node.dag.needStateCachesAndForkChoicePruning(): - if node.attachedValidators[].validators.len > 0: - node.attachedValidators[].slashingProtection - # pruning is only done if the DB is set to pruning mode. - .pruneAfterFinalization(node.dag.finalizedHead.slot.epoch()) - node.processor.blobQuarantine[].pruneAfterFinalization( - node.dag.finalizedHead.slot.epoch() - ) - - # Delay part of pruning until latency critical duties are done. - # The other part of pruning, `pruneBlocksDAG`, is done eagerly. - # ---- - # This is the last pruning to do as it clears the "needPruning" condition. - node.consensusManager[].pruneStateCachesAndForkChoice() - - if node.config.historyMode == HistoryMode.Prune: - if not (slot + 1).is_epoch(): - # The epoch slot already is "heavy" due to the epoch processing, leave - # the pruning for later - node.dag.pruneHistory() - node.pruneBlobs(slot) - - when declared(GC_fullCollect): - # The slots in the beacon node work as frames in a game: we want to make - # sure that we're ready for the next one and don't get stuck in lengthy - # garbage collection tasks when time is of essence in the middle of a slot - - # while this does not guarantee that we'll never collect during a slot, it - # makes sure that all the scratch space we used during slot tasks (logging, - # temporary buffers etc) gets recycled for the next slot that is likely to - # need similar amounts of memory. 
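An illustrative, editor-added sketch (not part of this patch): `pruneBlobs` above runs only on epoch boundaries and deletes the sidecars of the single epoch that just fell out of the blob retention window, and only for epochs at or after Deneb. A simplified Nim model of picking that prune target, with hypothetical placeholder constants:

const
  MinEpochsForBlobRequests = 4096'u64  # hypothetical retention window
  DenebForkEpoch = 100'u64             # hypothetical fork epoch

proc blobPruneTarget(currentEpoch: uint64): (bool, uint64) =
  ## Returns (shouldPrune, epochToPrune); pruning targets the single epoch
  ## that just dropped out of the retention window.
  result = (false, 0'u64)
  if currentEpoch < MinEpochsForBlobRequests + 1:
    return                              # window not exceeded yet
  let target = currentEpoch - MinEpochsForBlobRequests - 1
  if target >= DenebForkEpoch:          # no blobs exist before Deneb
    result = (true, target)

when isMainModule:
  doAssert blobPruneTarget(50) == (false, 0'u64)
  doAssert blobPruneTarget(4200) == (true, 103'u64)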
- try: - GC_fullCollect() - except Defect as exc: - raise exc # Reraise to maintain call stack - except Exception: - # TODO upstream - raiseAssert "Unexpected exception during GC collection" - let gcCollectionTick = Moment.now() - - # Checkpoint the database to clear the WAL file and make sure changes in - # the database are synced with the filesystem. - node.db.checkpoint() - let - dbCheckpointTick = Moment.now() - dbCheckpointDur = dbCheckpointTick - gcCollectionTick - db_checkpoint_seconds.inc(dbCheckpointDur.toFloatSeconds) - if dbCheckpointDur >= MinSignificantProcessingDuration: - info "Database checkpointed", dur = dbCheckpointDur - else: - debug "Database checkpointed", dur = dbCheckpointDur - - node.syncCommitteeMsgPool[].pruneData(slot) - if slot.is_epoch: - node.dynamicFeeRecipientsStore[].pruneOldMappings(slot.epoch) - - # Update upcoming actions - we do this every slot in case a reorg happens - let head = node.dag.head - if node.isSynced(head) and head.executionValid: - withState(node.dag.headState): - # maybeUpdateActionTrackerNextEpoch might not account for balance changes - # from the process_rewards_and_penalties() epoch transition but only from - # process_block() and other per-slot sources. This mainly matters insofar - # as it might trigger process_effective_balance_updates() changes in that - # same epoch transition, which function is therefore potentially blind to - # but which might then affect beacon proposers. - # - # Because this runs every slot, it can account naturally for slashings, - # which affect balances via slash_validator() when they happen, and any - # missed sync committee participation via process_sync_aggregate(), but - # attestation penalties for example, need, specific handling. - # checked by maybeUpdateActionTrackerNextEpoch. - node.maybeUpdateActionTrackerNextEpoch(forkyState, slot) - - let - nextAttestationSlot = - node.consensusManager[].actionTracker.getNextAttestationSlot(slot) - nextProposalSlot = node.consensusManager[].actionTracker.getNextProposalSlot(slot) - nextActionSlot = min(nextAttestationSlot, nextProposalSlot) - nextActionWaitTime = saturate(fromNow(node.beaconClock, nextActionSlot)) - - # -1 is a more useful output than 18446744073709551615 as an indicator of - # no future attestation/proposal known. 
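An illustrative, editor-added sketch (not part of this patch): the database checkpoint above is timed, and the result is logged at info level only when it took a significant amount of time, otherwise at debug. A minimal synchronous Nim stand-in for that pattern; `SignificantMillis` and the `sleep` call are placeholders for the real threshold and the real checkpoint work:

import std/[monotimes, times, os]

const SignificantMillis = 250  # hypothetical threshold

let start = getMonoTime()
sleep(10)                      # stand-in for the real database checkpoint call
let elapsed = getMonoTime() - start
if elapsed.inMilliseconds >= SignificantMillis:
  echo "Database checkpointed (slow): ", elapsed
else:
  echo "Database checkpointed: ", elapsed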
- template formatInt64(x: Slot): int64 = - if x == high(uint64).Slot: - -1'i64 - else: - toGaugeValue(x) - - let - syncCommitteeSlot = slot + 1 - syncCommitteeEpoch = syncCommitteeSlot.epoch - inCurrentSyncCommittee = - not node.getCurrentSyncCommiteeSubnets(syncCommitteeEpoch).isZeros() - - template formatSyncCommitteeStatus(): string = - if inCurrentSyncCommittee: - "current" - elif not node.getNextSyncCommitteeSubnets(syncCommitteeEpoch).isZeros(): - let slotsToNextSyncCommitteePeriod = - SLOTS_PER_SYNC_COMMITTEE_PERIOD - - since_sync_committee_period_start(syncCommitteeSlot) - # int64 conversion is safe - doAssert slotsToNextSyncCommitteePeriod <= SLOTS_PER_SYNC_COMMITTEE_PERIOD - "in " & - toTimeLeftString( - SECONDS_PER_SLOT.int64.seconds * slotsToNextSyncCommitteePeriod.int64 - ) - else: - "none" - - info "Slot end", - slot = shortLog(slot), - nextActionWait = - if nextActionSlot == FAR_FUTURE_SLOT: - "n/a" - else: - shortLog(nextActionWaitTime), - nextAttestationSlot = formatInt64(nextAttestationSlot), - nextProposalSlot = formatInt64(nextProposalSlot), - syncCommitteeDuties = formatSyncCommitteeStatus(), - head = shortLog(head) - - if nextActionSlot != FAR_FUTURE_SLOT: - next_action_wait.set(nextActionWaitTime.toFloatSeconds) - - next_proposal_wait.set( - if nextProposalSlot != FAR_FUTURE_SLOT: - saturate(fromNow(node.beaconClock, nextProposalSlot)).toFloatSeconds() - else: - Inf - ) - - sync_committee_active.set(if inCurrentSyncCommittee: 1 else: 0) - - let epoch = slot.epoch - if epoch + 1 >= node.network.forkId.next_fork_epoch: - # Update 1 epoch early to block non-fork-ready peers - node.network.updateForkId(epoch, node.dag.genesis_validators_root) - - # When we're not behind schedule, we'll speculatively update the clearance - # state in anticipation of receiving the next block - we do it after - # logging slot end since the nextActionWaitTime can be short - let advanceCutoff = node.beaconClock.fromNow( - slot.start_beacon_time() + chronos.seconds(int(SECONDS_PER_SLOT - 1)) - ) - if advanceCutoff.inFuture: - # We wait until there's only a second left before the next slot begins, then - # we advance the clearance state to the next slot - this gives us a high - # probability of being prepared for the block that will arrive and the - # epoch processing that follows - await sleepAsync(advanceCutoff.offset) - node.dag.advanceClearanceState() - - # Prepare action tracker for the next slot - node.consensusManager[].actionTracker.updateSlot(slot + 1) - - # The last thing we do is to perform the subscriptions and unsubscriptions for - # the next slot, just before that slot starts - because of the advance cuttoff - # above, this will be done just before the next slot starts - node.updateSyncCommitteeTopics(slot + 1) - - await node.updateGossipStatus(slot + 1) - -func formatNextConsensusFork(node: BeaconNode, withVanityArt = false): Opt[string] = - let consensusFork = node.dag.cfg.consensusForkAtEpoch(node.dag.head.slot.epoch) - if consensusFork == ConsensusFork.high: - return Opt.none(string) - let - nextConsensusFork = consensusFork.succ() - nextForkEpoch = node.dag.cfg.consensusForkEpoch(nextConsensusFork) - if nextForkEpoch == FAR_FUTURE_EPOCH: - return Opt.none(string) - Opt.some( - (if withVanityArt: nextConsensusFork.getVanityMascot & " " else: "") & - $nextConsensusFork & ":" & $nextForkEpoch - ) - -proc syncStatus(node: BeaconNode, wallSlot: Slot): string = - node.syncOverseer.syncStatusMessage() - -when defined(windows): - from winservice import establishWindowsService, 
reportServiceStatusSuccess - -proc onSlotStart( - node: BeaconNode, wallTime: BeaconTime, lastSlot: Slot -): Future[bool] {.async.} = - ## Called at the beginning of a slot - usually every slot, but sometimes might - ## skip a few in case we're running late. - ## wallTime: current system time - we will strive to perform all duties up - ## to this point in time - ## lastSlot: the last slot that we successfully processed, so we know where to - ## start work from - there might be jumps if processing is delayed - let - # The slot we should be at, according to the clock - wallSlot = wallTime.slotOrZero - # If everything was working perfectly, the slot that we should be processing - expectedSlot = lastSlot + 1 - finalizedEpoch = node.dag.finalizedHead.blck.slot.epoch() - delay = wallTime - expectedSlot.start_beacon_time() - - node.processingDelay = Opt.some(nanoseconds(delay.nanoseconds)) - - block: - logScope: - slot = shortLog(wallSlot) - epoch = shortLog(wallSlot.epoch) - sync = node.syncStatus(wallSlot) - peers = len(node.network.peerPool) - head = shortLog(node.dag.head) - finalized = shortLog(getStateField(node.dag.headState, finalized_checkpoint)) - delay = shortLog(delay) - let nextConsensusForkDescription = node.formatNextConsensusFork() - if nextConsensusForkDescription.isNone: - info "Slot start" - else: - info "Slot start", nextFork = nextConsensusForkDescription.get - - # Check before any re-scheduling of onSlotStart() - if checkIfShouldStopAtEpoch(wallSlot, node.config.stopAtEpoch): - quit(0) - - when defined(windows): - if node.config.runAsService: - reportServiceStatusSuccess() - - beacon_slot.set wallSlot.toGaugeValue - beacon_current_epoch.set wallSlot.epoch.toGaugeValue - - # both non-negative, so difference can't overflow or underflow int64 - finalization_delay.set(wallSlot.epoch.toGaugeValue - finalizedEpoch.toGaugeValue) - - if node.config.strictVerification: - verifyFinalization(node, wallSlot) - - node.consensusManager[].updateHead(wallSlot) - - await node.handleValidatorDuties(lastSlot, wallSlot) - - await onSlotEnd(node, wallSlot) - - # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#registration-dissemination - # This specification suggests validators re-submit to builder software every - # `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION` epochs. 
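The registration below is launched with chronos's `asyncSpawn`, i.e. fire-and-forget, as are the run loops further down. A minimal sketch of that pattern, assuming only chronos and a made-up `periodicWork` proc:

```nim
# Minimal chronos fire-and-forget sketch; `periodicWork` is a made-up name.
# asyncSpawn discards the returned future instead of awaiting it, so the
# caller continues immediately; an unhandled failure in the spawned task is
# treated as unrecoverable by chronos rather than being silently dropped.
import chronos

proc periodicWork() {.async.} =
  await sleepAsync(10.milliseconds)
  echo "work submitted"

when isMainModule:
  asyncSpawn periodicWork()            # not awaited
  waitFor sleepAsync(50.milliseconds)  # keep the dispatcher running briefly
```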
- if wallSlot.is_epoch and - wallSlot.epoch mod EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION == 0: - asyncSpawn node.registerValidators(wallSlot.epoch) - - return false - -proc onSecond(node: BeaconNode, time: Moment) = - # Nim GC metrics (for the main thread) - updateThreadMetrics() - - if node.config.stopAtSyncedEpoch != 0 and - node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch: - notice "Shutting down after having reached the target synced epoch" - bnStatus = BeaconNodeStatus.Stopping - -proc runOnSecondLoop(node: BeaconNode) {.async.} = - const - sleepTime = chronos.seconds(1) - nanosecondsIn1s = float(sleepTime.nanoseconds) - while true: - let start = chronos.now(chronos.Moment) - await chronos.sleepAsync(sleepTime) - let afterSleep = chronos.now(chronos.Moment) - let sleepTime = afterSleep - start - node.onSecond(start) - let finished = chronos.now(chronos.Moment) - let processingTime = finished - afterSleep - ticks_delay.set(sleepTime.nanoseconds.float / nanosecondsIn1s) - trace "onSecond task completed", sleepTime, processingTime - -func connectedPeersCount(node: BeaconNode): int = - len(node.network.peerPool) - -proc installRestHandlers(restServer: RestServerRef, node: BeaconNode) = - restServer.router.installBeaconApiHandlers(node) - restServer.router.installBuilderApiHandlers(node) - restServer.router.installConfigApiHandlers(node) - restServer.router.installDebugApiHandlers(node) - restServer.router.installEventApiHandlers(node) - restServer.router.installNimbusApiHandlers(node) - restServer.router.installNodeApiHandlers(node) - restServer.router.installValidatorApiHandlers(node) - restServer.router.installRewardsApiHandlers(node) - if node.dag.lcDataStore.serve: - restServer.router.installLightClientApiHandlers(node) - -from beacon_chain/spec/datatypes/capella import SignedBeaconBlock - -proc installMessageValidators(node: BeaconNode) = - # These validators stay around the whole time, regardless of which specific - # subnets are subscribed to during any given epoch. 
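The per-subnet registrations below create closures inside `for` loops and consistently wrap each iteration in `closureScope` with a local copy such as `let subnet_id = it`. A toy example of why that copy matters (plain stdlib, nothing from this codebase):

```nim
# Toy illustration of the `closureScope: let subnet_id = it` idiom used in the
# registrations below: the per-iteration `let` gives every closure its own
# copy of the loop value instead of a shared capture of the loop variable.
import std/[sugar, sequtils]

proc makeCallbacks(): seq[proc(): int {.closure.}] =
  for it in 0 .. 2:
    closureScope:
      let captured = it                    # fresh binding per iteration
      result.add(proc(): int = captured)

when isMainModule:
  doAssert makeCallbacks().mapIt(it()) == @[0, 1, 2]
```

Without the per-iteration `let`, the callbacks would share one captured loop variable (where the compiler accepts such a capture at all) and could all observe its final value.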
- let forkDigests = node.dag.forkDigests - - for fork in ConsensusFork: - withConsensusFork(fork): - let digest = forkDigests[].atConsensusFork(consensusFork) - - # beacon_block - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#beacon_block - node.network.addValidator( - getBeaconBlocksTopic(digest), - proc(signedBlock: consensusFork.SignedBeaconBlock): ValidationResult = - if node.shouldSyncOptimistically(node.currentSlot): - toValidationResult( - node.optimisticProcessor.processSignedBeaconBlock(signedBlock) - ) - else: - toValidationResult( - node.processor[].processSignedBeaconBlock(MsgSource.gossip, signedBlock) - ), - ) - - # beacon_attestation_{subnet_id} - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id - when consensusFork >= ConsensusFork.Electra: - for it in SubnetId: - closureScope: - let subnet_id = it - node.network.addAsyncValidator( - getAttestationTopic(digest, subnet_id), - proc( - attestation: SingleAttestation - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processAttestation( - MsgSource.gossip, - attestation, - subnet_id, - checkSignature = true, - checkValidator = false, - ) - ), - ) - else: - for it in SubnetId: - closureScope: - let subnet_id = it - node.network.addAsyncValidator( - getAttestationTopic(digest, subnet_id), - proc( - attestation: phase0.Attestation - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processAttestation( - MsgSource.gossip, - attestation, - subnet_id, - checkSignature = true, - checkValidator = false, - ) - ), - ) - - # beacon_aggregate_and_proof - # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof - when consensusFork >= ConsensusFork.Electra: - node.network.addAsyncValidator( - getAggregateAndProofsTopic(digest), - proc( - signedAggregateAndProof: electra.SignedAggregateAndProof - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSignedAggregateAndProof( - MsgSource.gossip, signedAggregateAndProof - ) - ), - ) - else: - node.network.addAsyncValidator( - getAggregateAndProofsTopic(digest), - proc( - signedAggregateAndProof: phase0.SignedAggregateAndProof - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSignedAggregateAndProof( - MsgSource.gossip, signedAggregateAndProof - ) - ), - ) - - # attester_slashing - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/p2p-interface.md#attester_slashing - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.6/specs/electra/p2p-interface.md#modifications-in-electra - when consensusFork >= ConsensusFork.Electra: - node.network.addValidator( - getAttesterSlashingsTopic(digest), - proc(attesterSlashing: electra.AttesterSlashing): ValidationResult = - toValidationResult( - node.processor[].processAttesterSlashing( - MsgSource.gossip, attesterSlashing - ) - ), - ) - else: - node.network.addValidator( - getAttesterSlashingsTopic(digest), - proc(attesterSlashing: phase0.AttesterSlashing): ValidationResult = - toValidationResult( - node.processor[].processAttesterSlashing( - MsgSource.gossip, attesterSlashing - ) - ), - ) - - # proposer_slashing - # 
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/p2p-interface.md#proposer_slashing - node.network.addValidator( - getProposerSlashingsTopic(digest), - proc(proposerSlashing: ProposerSlashing): ValidationResult = - toValidationResult( - node.processor[].processProposerSlashing(MsgSource.gossip, proposerSlashing) - ), - ) - - # voluntary_exit - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#voluntary_exit - node.network.addValidator( - getVoluntaryExitsTopic(digest), - proc(signedVoluntaryExit: SignedVoluntaryExit): ValidationResult = - toValidationResult( - node.processor[].processSignedVoluntaryExit( - MsgSource.gossip, signedVoluntaryExit - ) - ), - ) - - when consensusFork >= ConsensusFork.Altair: - # sync_committee_{subnet_id} - # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_subnet_id - for subcommitteeIdx in SyncSubcommitteeIndex: - closureScope: - let idx = subcommitteeIdx - node.network.addAsyncValidator( - getSyncCommitteeTopic(digest, idx), - proc( - msg: SyncCommitteeMessage - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSyncCommitteeMessage( - MsgSource.gossip, msg, idx - ) - ), - ) - - # sync_committee_contribution_and_proof - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof - node.network.addAsyncValidator( - getSyncCommitteeContributionAndProofTopic(digest), - proc( - msg: SignedContributionAndProof - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processSignedContributionAndProof( - MsgSource.gossip, msg - ) - ), - ) - - when consensusFork >= ConsensusFork.Capella: - # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.4/specs/capella/p2p-interface.md#bls_to_execution_change - node.network.addAsyncValidator( - getBlsToExecutionChangeTopic(digest), - proc( - msg: SignedBLSToExecutionChange - ): Future[ValidationResult] {.async: (raises: [CancelledError]).} = - return toValidationResult( - await node.processor.processBlsToExecutionChange(MsgSource.gossip, msg) - ), - ) - - when consensusFork >= ConsensusFork.Deneb: - # blob_sidecar_{subnet_id} - # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id - let subnetCount = - when consensusFork >= ConsensusFork.Electra: - node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA - else: - node.dag.cfg.BLOB_SIDECAR_SUBNET_COUNT - for it in 0.BlobId ..< subnetCount.BlobId: - closureScope: - let subnet_id = it - node.network.addValidator( - getBlobSidecarTopic(digest, subnet_id), - proc(blobSidecar: deneb.BlobSidecar): ValidationResult = - toValidationResult( - node.processor[].processBlobSidecar( - MsgSource.gossip, blobSidecar, subnet_id - ) - ), - ) - - node.installLightClientMessageValidators() - -proc stop(node: BeaconNode) = - bnStatus = BeaconNodeStatus.Stopping - notice "Graceful shutdown" - if not node.config.inProcessValidators: - try: - node.vcProcess.close() - except Exception as exc: - warn "Couldn't close vc process", msg = exc.msg - try: - waitFor node.network.stop() - except CatchableError as exc: - warn "Couldn't stop network", msg = exc.msg - - waitFor node.metricsServer.stopMetricsServer() - - node.attachedValidators[].slashingProtection.close() - node.attachedValidators[].close() - 
node.db.close() - notice "Databases closed" - -proc run(node: BeaconNode) {.raises: [CatchableError].} = - bnStatus = BeaconNodeStatus.Running - - if not isNil(node.restServer): - node.restServer.installRestHandlers(node) - node.restServer.start() - - if not isNil(node.keymanagerServer): - doAssert not isNil(node.keymanagerHost) - node.keymanagerServer.router.installKeymanagerHandlers(node.keymanagerHost[]) - if node.keymanagerServer != node.restServer: - node.keymanagerServer.start() - - let - wallTime = node.beaconClock.now() - wallSlot = wallTime.slotOrZero() - - node.startLightClient() - node.requestManager.start() - node.syncOverseer.start() - - waitFor node.updateGossipStatus(wallSlot) - - for web3signerUrl in node.config.web3SignerUrls: - # TODO - # The current strategy polls all remote signers independently - # from each other which may lead to some race conditions of - # validators are migrated from one signer to another - # (because the updates to our validator pool are not atomic). - # Consider using different strategies that would detect such - # race conditions. - asyncSpawn node.pollForDynamicValidators( - web3signerUrl, node.config.web3signerUpdateInterval - ) - - asyncSpawn runSlotLoop(node, wallTime, onSlotStart) - asyncSpawn runOnSecondLoop(node) - asyncSpawn runQueueProcessingLoop(node.blockProcessor) - asyncSpawn runKeystoreCachePruningLoop(node.keystoreCache) - - # main event loop - while bnStatus == BeaconNodeStatus.Running: - poll() # if poll fails, the network is broken - - # time to say goodbye - node.stop() - -# db lock -var shouldCreatePid*: Atomic[bool] -shouldCreatePid.store(true) - -var gPidFile: string -proc createPidFile(filename: string) {.raises: [IOError].} = - if shouldCreatePid.load(): - writeFile filename, $os.getCurrentProcessId() - gPidFile = filename - addExitProc proc() {.noconv.} = - discard io2.removeFile(gPidFile) - -proc initializeNetworking(node: BeaconNode) {.async.} = - node.installMessageValidators() - - info "Listening to incoming network requests" - await node.network.startListening() - - let addressFile = node.config.dataDir / "beacon_node.enr" - writeFile(addressFile, node.network.announcedENR.toURI) - - await node.network.start() - -proc start*(node: BeaconNode) {.raises: [CatchableError].} = - let - head = node.dag.head - finalizedHead = node.dag.finalizedHead - genesisTime = node.beaconClock.fromNow(start_beacon_time(Slot 0)) - - notice "Starting beacon node", - version = fullVersionStr, - nimVersion = NimVersion, - enr = node.network.announcedENR.toURI, - peerId = $node.network.switch.peerInfo.peerId, - timeSinceFinalization = - node.beaconClock.now() - finalizedHead.slot.start_beacon_time(), - head = shortLog(head), - justified = - shortLog(getStateField(node.dag.headState, current_justified_checkpoint)), - finalized = shortLog(getStateField(node.dag.headState, finalized_checkpoint)), - finalizedHead = shortLog(finalizedHead), - SLOTS_PER_EPOCH, - SECONDS_PER_SLOT, - SPEC_VERSION, - dataDir = node.config.dataDir.string, - validators = node.attachedValidators[].count - - if genesisTime.inFuture: - notice "Waiting for genesis", genesisIn = genesisTime.offset - - waitFor node.initializeNetworking() - - node.elManager.start() - node.run() - -func formatGwei(amount: Gwei): string = - # TODO This is implemented in a quite a silly way. - # Better routines for formatting decimal numbers - # should exists somewhere else. 
- let - eth = distinctBase(amount) div 1000000000 - remainder = distinctBase(amount) mod 1000000000 - - result = $eth - if remainder != 0: - result.add '.' - let remainderStr = $remainder - for i in remainderStr.len ..< 9: - result.add '0' - result.add remainderStr - while result[^1] == '0': - result.setLen(result.len - 1) - -when not defined(windows): - proc initStatusBar(node: BeaconNode) {.raises: [ValueError].} = - if not isatty(stdout): - return - if not node.config.statusBarEnabled: - return - - try: - enableTrueColors() - except Exception as exc: # TODO Exception - error "Couldn't enable colors", err = exc.msg - - proc dataResolver(expr: string): string {.raises: [].} = - template justified(): untyped = - node.dag.head.atEpochStart( - getStateField(node.dag.headState, current_justified_checkpoint).epoch - ) - - # TODO: - # We should introduce a general API for resolving dot expressions - # such as `db.latest_block.slot` or `metrics.connected_peers`. - # Such an API can be shared between the RPC back-end, CLI tools - # such as ncli, a potential GraphQL back-end and so on. - # The status bar feature would allow the user to specify an - # arbitrary expression that is resolvable through this API. - case expr.toLowerAscii - of "version": - versionAsStr - of "full_version": - fullVersionStr - of "connected_peers": - $(node.connectedPeersCount) - of "head_root": - shortLog(node.dag.head.root) - of "head_epoch": - $(node.dag.head.slot.epoch) - of "head_epoch_slot": - $(node.dag.head.slot.since_epoch_start) - of "head_slot": - $(node.dag.head.slot) - of "justifed_root": - shortLog(justified.blck.root) - of "justifed_epoch": - $(justified.slot.epoch) - of "justifed_epoch_slot": - $(justified.slot.since_epoch_start) - of "justifed_slot": - $(justified.slot) - of "finalized_root": - shortLog(node.dag.finalizedHead.blck.root) - of "finalized_epoch": - $(node.dag.finalizedHead.slot.epoch) - of "finalized_epoch_slot": - $(node.dag.finalizedHead.slot.since_epoch_start) - of "finalized_slot": - $(node.dag.finalizedHead.slot) - of "epoch": - $node.currentSlot.epoch - of "epoch_slot": - $(node.currentSlot.since_epoch_start) - of "slot": - $node.currentSlot - of "slots_per_epoch": - $SLOTS_PER_EPOCH - of "slot_trailing_digits": - var slotStr = $node.currentSlot - if slotStr.len > 3: - slotStr = slotStr[^3 ..^ 1] - slotStr - of "attached_validators_balance": - formatGwei(node.attachedValidatorBalanceTotal) - of "next_consensus_fork": - let nextConsensusForkDescription = - node.formatNextConsensusFork(withVanityArt = true) - if nextConsensusForkDescription.isNone: - "" - else: - " (scheduled " & nextConsensusForkDescription.get & ")" - of "sync_status": - node.syncStatus(node.currentSlot) - else: - # We ignore typos for now and just render the expression - # as it was written. TODO: come up with a good way to show - # an error message to the user. 
- "$" & expr - - var statusBar = StatusBarView.init(node.config.statusBarContents, dataResolver) - - when compiles(defaultChroniclesStream.outputs[0].writer): - let tmp = defaultChroniclesStream.outputs[0].writer - - defaultChroniclesStream.outputs[0].writer = proc( - logLevel: LogLevel, msg: LogOutputStr - ) {.raises: [].} = - try: - # p.hidePrompt - erase statusBar - # p.writeLine msg - tmp(logLevel, msg) - render statusBar - # p.showPrompt - except Exception as e: # render raises Exception - logLoggingFailure(cstring(msg), e) - - proc statusBarUpdatesPollingLoop() {.async.} = - try: - while true: - update statusBar - erase statusBar - render statusBar - await sleepAsync(chronos.seconds(1)) - except CatchableError as exc: - warn "Failed to update status bar, no further updates", err = exc.msg - - asyncSpawn statusBarUpdatesPollingLoop() - -proc doRunBeaconNode( - config: var BeaconNodeConf, rng: ref HmacDrbgContext -) {.raises: [CatchableError].} = - info "Launching beacon node", - version = fullVersionStr, - bls_backend = $BLS_BACKEND, - const_preset, - cmdParams = commandLineParams(), - config - - template ignoreDeprecatedOption(option: untyped): untyped = - if config.option.isSome: - warn "Config option is deprecated", option = config.option.get - - ignoreDeprecatedOption requireEngineAPI - ignoreDeprecatedOption safeSlotsToImportOptimistically - ignoreDeprecatedOption terminalTotalDifficultyOverride - ignoreDeprecatedOption optimistic - ignoreDeprecatedOption validatorMonitorTotals - ignoreDeprecatedOption web3ForcePolling - ignoreDeprecatedOption finalizedDepositTreeSnapshot - - createPidFile(config.dataDir.string / "beacon_node.pid") - - config.createDumpDirs() - - # There are no managed event loops in here, to do a graceful shutdown, but - # letting the default Ctrl+C handler exit is safe, since we only read from - # the db. - let metadata = config.loadEth2Network() - - # Updating the config based on the metadata certainly is not beautiful but it - # works - for node in metadata.bootstrapNodes: - config.bootstrapNodes.add node - - ## Ctrl+C handling - proc controlCHandler() {.noconv.} = - when defined(windows): - # workaround for https://github.com/nim-lang/Nim/issues/4057 - try: - setupForeignThreadGc() - except Exception as exc: - raiseAssert exc.msg - # shouldn't happen - notice "Shutting down after having received SIGINT" - bnStatus = BeaconNodeStatus.Stopping - - try: - setControlCHook(controlCHandler) - except Exception as exc: # TODO Exception - warn "Cannot set ctrl-c handler", msg = exc.msg - - # equivalent SIGTERM handler - when defined(posix): - proc SIGTERMHandler(signal: cint) {.noconv.} = - notice "Shutting down after having received SIGTERM" - bnStatus = BeaconNodeStatus.Stopping - - c_signal(ansi_c.SIGTERM, SIGTERMHandler) - - block: - let res = - if config.trustedSetupFile.isNone: - conf.loadKzgTrustedSetup() - else: - conf.loadKzgTrustedSetup(config.trustedSetupFile.get) - if res.isErr(): - raiseAssert res.error() - - let node = waitFor BeaconNode.init(rng, config, metadata) - - let metricsServer = (waitFor config.initMetricsServer()).valueOr: - return - - # Nim GC metrics (for the main thread) will be collected in onSecond(), but - # we disable piggy-backing on other metrics here. 
- setSystemMetricsAutomaticUpdate(false) - - node.metricsServer = metricsServer - - if bnStatus == BeaconNodeStatus.Stopping: - return - - when not defined(windows): - # This status bar can lock a Windows terminal emulator, blocking the whole - # event loop (seen on Windows 10, with a default MSYS2 terminal). - initStatusBar(node) - - if node.nickname != "": - dynamicLogScope(node = node.nickname): - node.start() - else: - node.start() - -proc doRecord( - config: BeaconNodeConf, rng: var HmacDrbgContext -) {.raises: [CatchableError].} = - case config.recordCmd - of RecordCmd.create: - let netKeys = getPersistentNetKeys(rng, config) - - var fieldPairs: seq[FieldPair] - for field in config.fields: - let fieldPair = field.split(":") - if fieldPair.len > 1: - fieldPairs.add(toFieldPair(fieldPair[0], hexToSeqByte(fieldPair[1]))) - else: - fatal "Invalid field pair" - quit QuitFailure - - let record = enr.Record - .init( - config.seqNumber, - netKeys.seckey.asEthKey, - Opt.some(config.ipExt), - Opt.some(config.tcpPortExt), - Opt.some(config.udpPortExt), - fieldPairs, - ) - .expect("Record within size limits") - - echo record.toURI() - of RecordCmd.print: - echo $config.recordPrint - -proc doWeb3Cmd( - config: BeaconNodeConf, rng: var HmacDrbgContext -) {.raises: [CatchableError].} = - case config.web3Cmd - of Web3Cmd.test: - waitFor testWeb3Provider( - config.web3TestUrl, rng.loadJwtSecret(config, allowCreate = true) - ) - -proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError].} = - let - dir = conf.validatorsDir() - filetrunc = SlashingDbName - # TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312 - let db = SlashingProtectionDB.loadUnchecked(dir, filetrunc, readOnly = false) - - let interchange = conf.exportedInterchangeFile.string - db.exportSlashingInterchange(interchange, conf.exportedValidators) - echo "Export finished: '", dir / filetrunc & ".sqlite3", "' into '", interchange, "'" - -proc doSlashingImport(conf: BeaconNodeConf) {.raises: [IOError].} = - let - dir = conf.validatorsDir() - filetrunc = SlashingDbName - # TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312 - - let interchange = conf.importedInterchangeFile.string - - var spdir: SPDIR - try: - spdir = Json.loadFile(interchange, SPDIR, requireAllFields = true) - except SerializationError as err: - writeStackTrace() - stderr.write $Json & " load issue for file \"", interchange, "\"\n" - stderr.write err.formatMsg(interchange), "\n" - quit 1 - - # Open DB and handle migration from v1 to v2 if needed - let db = SlashingProtectionDB.init( - genesis_validators_root = Eth2Digest spdir.metadata.genesis_validators_root, - basePath = dir, - dbname = filetrunc, - modes = {kCompleteArchive}, - ) - - # Now import the slashing interchange file - # Failures mode: - # - siError can only happen with invalid genesis_validators_root which would be caught above - # - siPartial can happen for invalid public keys, slashable blocks, slashable votes - let status = db.inclSPDIR(spdir) - doAssert status in {siSuccess, siPartial} - - echo "Import finished: '", interchange, "' into '", dir / filetrunc & ".sqlite3", "'" - -proc doSlashingInterchange(conf: BeaconNodeConf) {.raises: [CatchableError].} = - case conf.slashingdbCmd - of SlashProtCmd.`export`: - conf.doSlashingExport() - of SlashProtCmd.`import`: - conf.doSlashingImport() - -proc handleStartUpCmd*(config: var BeaconNodeConf) {.raises: [CatchableError].} = - # Single RNG instance for the application - will be seeded on construction - # and avoid 
using system resources (such as urandom) after that - let rng = HmacDrbgContext.new() - - case config.cmd - of BNStartUpCmd.noCommand: - doRunBeaconNode(config, rng) - of BNStartUpCmd.deposits: - doDeposits(config, rng[]) - of BNStartUpCmd.wallets: - doWallets(config, rng[]) - of BNStartUpCmd.record: - doRecord(config, rng[]) - of BNStartUpCmd.web3: - doWeb3Cmd(config, rng[]) - of BNStartUpCmd.slashingdb: - doSlashingInterchange(config) - of BNStartUpCmd.trustedNodeSync: - if config.blockId.isSome(): - error "--blockId option has been removed - use --state-id instead!" - quit 1 - - let - metadata = loadEth2Network(config) - db = BeaconChainDB.new(config.databaseDir, metadata.cfg, inMemory = false) - genesisState = waitFor fetchGenesisState(metadata) - waitFor db.doRunTrustedNodeSync( - metadata, config.databaseDir, config.eraDir, config.trustedNodeUrl, - config.stateId, config.lcTrustedBlockRoot, config.backfillBlocks, config.reindex, - genesisState, - ) - db.close() - -{.pop.} # TODO moduletests exceptions diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index ebc4b16d56..09415ba250 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -11,7 +11,7 @@ import std/[concurrency/atomics, os, exitprocs], chronicles, execution/execution_layer, - consensus/[consensus_layer, wrapper_consensus], + consensus/consensus_layer, common/utils, conf, confutils/cli_parser, @@ -27,7 +27,8 @@ import var beaconNodeLock {.global.}: string proc createBeaconNodeFileLock(filename: string) {.raises: [IOError].} = - shouldCreatePid.store(false) + var shouldCreatePidFile: Atomic[bool] # REMOVE THIS + shouldCreatePidFile.store(false) writeFile filename, $os.getCurrentProcessId() beaconNodeLock = filename From ca4d2bdf900f49ebe14d40afab7fb993f73a5c0f Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Fri, 18 Jul 2025 14:06:47 +0100 Subject: [PATCH 31/34] Changed submodule nimbus-eth2 branch to a feature branch with all the required changes in nimbus-eth2. 
--- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 5478a8f64a..1ea525e0e2 100644 --- a/.gitmodules +++ b/.gitmodules @@ -161,7 +161,7 @@ [submodule "vendor/nimbus-eth2"] path = vendor/nimbus-eth2 url = https://github.com/status-im/nimbus-eth2.git - branch = unstable + branch = dev/pedro/pubBN [submodule "vendor/nim-taskpools"] path = vendor/nim-taskpools url = https://github.com/status-im/nim-taskpools.git From 4e69a00ad87d87642cfb6b31e7dd4bc4e0e3906e Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Fri, 18 Jul 2025 14:09:46 +0100 Subject: [PATCH 32/34] bump nimbus-eth2 to 24ec4576d3be2847dfca0fab5f0ef2e574659ea3 --- vendor/nimbus-eth2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/nimbus-eth2 b/vendor/nimbus-eth2 index 454ead72f7..24ec4576d3 160000 --- a/vendor/nimbus-eth2 +++ b/vendor/nimbus-eth2 @@ -1 +1 @@ -Subproject commit 454ead72f7384da33d738fcc23daa74b2b53da9d +Subproject commit 24ec4576d3be2847dfca0fab5f0ef2e574659ea3 From 2686ae14d8691230ee3466585bdfaf0b1349163f Mon Sep 17 00:00:00 2001 From: pmmiranda Date: Mon, 21 Jul 2025 15:24:50 +0100 Subject: [PATCH 33/34] optimized imports and code adaptations for changes in eth2 --- Makefile | 2 +- nimbus/conf.nim | 5 +---- nimbus/consensus/consensus_layer.nim | 13 ++++++++----- nimbus/execution/execution_layer.nim | 18 ++++++++---------- nimbus/nimbus.nim | 19 ++++++------------- 5 files changed, 24 insertions(+), 33 deletions(-) diff --git a/Makefile b/Makefile index 4954eb784b..67a1b98e42 100644 --- a/Makefile +++ b/Makefile @@ -386,7 +386,7 @@ ifneq ($(USE_LIBBACKTRACE), 0) endif # Nimbus -NIM_PARAMS := -d:release --parallelBuild:1-d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku,grandine $(NIM_PARAMS) +NIM_PARAMS := -d:release --parallelBuild:1 -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku,grandine $(NIM_PARAMS) nimbus: | build deps rocksdb echo -e $(BUILD_MSG) "build/nimbus_client" && \ $(ENV_SCRIPT) nim c $(NIM_PARAMS) --threads:on -d:disable_libbacktrace -d:libp2p_pki_schemes=secp256k1 -o:build//nimbus_client "nimbus/nimbus.nim" diff --git a/nimbus/conf.nim b/nimbus/conf.nim index 808e02bd04..1a950b72f9 100644 --- a/nimbus/conf.nim +++ b/nimbus/conf.nim @@ -9,11 +9,8 @@ import std/[atomics, tables], - chronicles, - #eth2 - beacon_chain/nimbus_binary_common + chronicles -export setupFileLimits ## log logScope: diff --git a/nimbus/consensus/consensus_layer.nim b/nimbus/consensus/consensus_layer.nim index 877932c324..9c1e217586 100644 --- a/nimbus/consensus/consensus_layer.nim +++ b/nimbus/consensus/consensus_layer.nim @@ -9,25 +9,27 @@ import std/[atomics, os], - chronos, chronicles, results, confutils, ../conf, ../common/utils, - beacon_chain/nimbus_beacon_node, beacon_chain/validators/keystore_management, beacon_chain/[beacon_node_status, nimbus_binary_common] +from beacon_chain/nimbus_beacon_node import handleStartUpCmd +from beacon_chain/conf import + BeaconNodeConf, SlashingDbKind, BNStartUpCmd, defaultDataDir + logScope: topics = "Consensus layer" +## Request to shutdown Consensus layer proc shutdownConsensus*() = bnStatus = BeaconNodeStatus.Stopping -proc makeConfig*( - cmdCommandList: seq[string], ConfType: type -): Result[ConfType, string] = +## Creates required beacon node configuration and possibility of additional sources +proc makeConfig(cmdCommandList: seq[string], ConfType: type): Result[ConfType, string] = {.push warning[ProveInit]: off.} let config = try: @@ 
-59,6 +61,7 @@ proc makeConfig*( {.pop.} ok(config) +## starts beacon node proc startBeaconNode(paramsList: seq[string]) {.raises: [CatchableError].} = var config = makeConfig(paramsList, BeaconNodeConf).valueOr: error "Error starting consensus", err = error diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index 4a330fa9ec..c47d86cb8d 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -7,21 +7,19 @@ {.push raises: [].} -import - std/[atomics], - chronicles, - results, - ../conf, - ../common/utils, - ../../execution_chain/nimbus_execution_client, - ../../execution_chain/config, - ../../execution_chain/nimbus_desc +import std/[atomics], chronos, chronicles, results, ../conf, ../common/utils + +from metrics/chronos_httpserver import MetricsError +from ../../execution_chain/nimbus_execution_client import run +from ../../execution_chain/nimbus_desc import NimbusNode, NimbusState +from ../../execution_chain/config import makeConfig +from ../../execution_chain/common import newEthContext logScope: topics = "Execution layer" +## Request to shutdown execution layer var nimbusHandler: NimbusNode - proc shutdownExecution*() = nimbusHandler.state = NimbusState.Stopping diff --git a/nimbus/nimbus.nim b/nimbus/nimbus.nim index 09415ba250..cf020c6fdf 100644 --- a/nimbus/nimbus.nim +++ b/nimbus/nimbus.nim @@ -15,9 +15,11 @@ import common/utils, conf, confutils/cli_parser, - stew/io2, - beacon_chain/conf, - ../execution_chain/config + stew/io2 + +from beacon_chain/conf import BeaconNodeConf +from ../execution_chain/config import NimbusConf +from beacon_chain/nimbus_binary_common import setupFileLimits, shouldCreatePidFile # ------------------------------------------------------------------------------ # Private @@ -27,7 +29,6 @@ import var beaconNodeLock {.global.}: string proc createBeaconNodeFileLock(filename: string) {.raises: [IOError].} = - var shouldCreatePidFile: Atomic[bool] # REMOVE THIS shouldCreatePidFile.store(false) writeFile filename, $os.getCurrentProcessId() @@ -39,16 +40,13 @@ proc createBeaconNodeFileLock(filename: string) {.raises: [IOError].} = ## create and configure service proc startService(nimbus: var Nimbus, service: var NimbusService) = - #channel creation (shared memory) var serviceChannel = cast[ptr Channel[pointer]](allocShared0(sizeof(Channel[pointer]))) serviceChannel[].open() - #thread read ack isConfigRead.store(false) - #start thread try: createThread(service.serviceHandler, service.serviceFunc, serviceChannel) except Exception as e: @@ -73,10 +71,7 @@ proc startService(nimbus: var Nimbus, service: var NimbusService) = fatal "Memory allocation failed" quit QuitFailure - # Writing to shared memory var writeOffset = cast[uint](byteArray) - - #write total size of array copyMem(cast[pointer](writeOffset), addr totalSize, sizeof(uint)) writeOffset += uint(sizeof(uint)) @@ -94,9 +89,7 @@ proc startService(nimbus: var Nimbus, service: var NimbusService) = sleep(cThreadTimeAck) isConfigRead.store(true) - #close channel serviceChannel[].close() - #dealloc shared data deallocShared(byteArray) deallocShared(serviceChannel) @@ -109,7 +102,7 @@ proc monitorServices(nimbus: Nimbus) = notice "Exited all services" -# aux function to prepare arguments and options for eth1 and eth2 +## Auxiliary function to prepare arguments and options for eth1 and eth2 func addArg( paramTable: var NimbusConfigTable, cmdKind: CmdLineKind, key: string, arg: string ) = From cdff96914c042f2ac4c739cc4f4cc6581cb11e95 Mon Sep 17 
00:00:00 2001 From: pmmiranda Date: Wed, 23 Jul 2025 13:18:10 +0100 Subject: [PATCH 34/34] Workaround for Nim issue https://github.com/nim-lang/Nim/issues/24844 --- nimbus/execution/execution_layer.nim | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nimbus/execution/execution_layer.nim b/nimbus/execution/execution_layer.nim index c47d86cb8d..fae7ccb496 100644 --- a/nimbus/execution/execution_layer.nim +++ b/nimbus/execution/execution_layer.nim @@ -15,6 +15,10 @@ from ../../execution_chain/nimbus_desc import NimbusNode, NimbusState from ../../execution_chain/config import makeConfig from ../../execution_chain/common import newEthContext +# Workaround for https://github.com/nim-lang/Nim/issues/24844 +from web3 import Quantity +discard newFuture[Quantity]() + logScope: topics = "Execution layer"
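The two added lines appear to force the `Future[Quantity]` generic instantiation once at module scope so that later uses do not trip the compiler bug referenced above. A rough, self-contained sketch of the same idiom, with a stand-in type instead of web3's `Quantity`:

```nim
# Hedged sketch of the "force the instantiation early" idiom used by the
# workaround above; `StandInType` replaces web3's Quantity and the module
# layout here is invented for illustration.
import chronos

type StandInType = distinct uint64

# Creating (and discarding) a Future[StandInType] at module scope makes the
# compiler emit the generic instantiation up front, before any later code
# path that would otherwise hit the referenced compiler issue.
discard newFuture[StandInType]("forced instantiation")
```

Presumably the workaround lines can be dropped again once the upstream compiler issue is fixed.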