diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 8da46ed8eea..65663e0cf49 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -363,6 +363,8 @@ jobs: run: CARGO_HOME=$(readlink -f $HOME) make vendor - name: Markdown-linter run: make mdlint + - name: Spell-check + uses: rojopolis/spellcheck-github-actions@v0 check-msrv: name: check-msrv runs-on: ubuntu-latest @@ -426,6 +428,21 @@ jobs: cache-target: release - name: Run Makefile to trigger the bash script run: make cli-local + cargo-sort: + name: cargo-sort + needs: [check-labels] + if: needs.check-labels.outputs.skip_ci != 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-sort + - name: Run cargo sort to check if Cargo.toml files are sorted + run: cargo sort --check --workspace # This job succeeds ONLY IF all others succeed. It is used by the merge queue to determine whether # a PR is safe to merge. New jobs should be added here. 
test-suite-success: @@ -453,6 +470,7 @@ jobs: 'compile-with-beta-compiler', 'cli-check', 'lockbud', + 'cargo-sort', ] steps: - uses: actions/checkout@v4 diff --git a/.spellcheck.yml b/.spellcheck.yml new file mode 100644 index 00000000000..692bc4d176c --- /dev/null +++ b/.spellcheck.yml @@ -0,0 +1,35 @@ +matrix: +- name: Markdown + sources: + - './book/**/*.md' + - 'README.md' + - 'CONTRIBUTING.md' + - 'SECURITY.md' + - './scripts/local_testnet/README.md' + default_encoding: utf-8 + aspell: + lang: en + dictionary: + wordlists: + - wordlist.txt + encoding: utf-8 + pipeline: + - pyspelling.filters.url: + - pyspelling.filters.markdown: + markdown_extensions: + - pymdownx.superfences: + - pymdownx.highlight: + - pymdownx.striphtml: + - pymdownx.magiclink: + - pyspelling.filters.html: + comments: false + ignores: + - code + - pre + - pyspelling.filters.context: + context_visible_first: true + delimiters: + # Ignore hex strings + - open: '0x[a-fA-F0-9]' + close: '[^a-fA-F0-9]' + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3c53558a100..4cad219c89f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -85,7 +85,7 @@ steps: 5. Commit your changes and push them to your fork with `$ git push origin your_feature_name`. 6. Go to your fork on github.com and use the web interface to create a pull - request into the sigp/lighthouse repo. + request into the sigp/lighthouse repository. From there, the repository maintainers will review the PR and either accept it or provide some constructive feedback. 
diff --git a/Cargo.lock b/Cargo.lock index ea992e989ca..60d564c2d82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2235,6 +2235,23 @@ dependencies = [ "syn 2.0.89", ] +[[package]] +name = "doppelganger_service" +version = "0.1.0" +dependencies = [ + "beacon_node_fallback", + "environment", + "eth2", + "futures", + "logging", + "parking_lot 0.12.3", + "slog", + "slot_clock", + "task_executor", + "tokio", + "types", +] + [[package]] name = "dsl_auto_type" version = "0.1.2" @@ -2559,6 +2576,7 @@ dependencies = [ "proto_array", "psutil", "reqwest", + "reqwest-eventsource", "sensitive_url", "serde", "serde_json", @@ -2960,6 +2978,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "eventsource-stream" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74fef4569247a5f429d9156b9d0a2599914385dd189c539334c625d8099d90ab" +dependencies = [ + "futures-core", + "nom", + "pin-project-lite", +] + [[package]] name = "execution_engine_integration" version = "0.1.0" @@ -7162,6 +7191,22 @@ dependencies = [ "winreg", ] +[[package]] +name = "reqwest-eventsource" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f529a5ff327743addc322af460761dff5b50e0c826b9e6ac44c3195c50bb2026" +dependencies = [ + "eventsource-stream", + "futures-core", + "futures-timer", + "mime", + "nom", + "pin-project-lite", + "reqwest", + "thiserror 1.0.69", +] + [[package]] name = "resolv-conf" version = "0.7.0" @@ -9484,6 +9529,7 @@ dependencies = [ "clap_utils", "directory", "dirs", + "doppelganger_service", "environment", "eth2", "fdlimit", @@ -9536,6 +9582,7 @@ dependencies = [ "deposit_contract", "directory", "dirs", + "doppelganger_service", "eth2", "eth2_keystore", "ethereum_serde_utils", @@ -9627,6 +9674,7 @@ version = "0.1.0" dependencies = [ "beacon_node_fallback", "bls", + "doppelganger_service", "environment", "eth2", "futures", @@ -9648,6 +9696,7 @@ version = "0.1.0" dependencies = [ 
"account_utils", "beacon_node_fallback", + "doppelganger_service", "environment", "eth2", "futures", diff --git a/Cargo.toml b/Cargo.toml index bb2e96ac891..23e52a306b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,11 +8,11 @@ members = [ "beacon_node/builder_client", "beacon_node/client", "beacon_node/eth1", - "beacon_node/lighthouse_network", - "beacon_node/lighthouse_network/gossipsub", "beacon_node/execution_layer", "beacon_node/http_api", "beacon_node/http_metrics", + "beacon_node/lighthouse_network", + "beacon_node/lighthouse_network/gossipsub", "beacon_node/network", "beacon_node/store", "beacon_node/timer", @@ -30,40 +30,40 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", - "common/metrics", "common/lighthouse_version", "common/lockfile", "common/logging", "common/lru_cache", "common/malloc_utils", + "common/metrics", + "common/monitoring_api", "common/oneshot_broadcast", "common/pretty_reqwest_error", "common/sensitive_url", "common/slot_clock", "common/system_health", - "common/task_executor", "common/target_check", + "common/task_executor", "common/test_random_derive", "common/unused_port", "common/validator_dir", "common/warp_utils", - "common/monitoring_api", - - "database_manager", - - "consensus/int_to_bytes", "consensus/fixed_bytes", "consensus/fork_choice", + + "consensus/int_to_bytes", "consensus/proto_array", "consensus/safe_arith", "consensus/state_processing", "consensus/swap_or_not_shuffle", "crypto/bls", - "crypto/kzg", "crypto/eth2_key_derivation", "crypto/eth2_keystore", "crypto/eth2_wallet", + "crypto/kzg", + + "database_manager", "lcli", @@ -78,12 +78,13 @@ members = [ "testing/execution_engine_integration", "testing/node_test_rig", "testing/simulator", - "testing/test-test_logger", "testing/state_transition_vectors", + "testing/test-test_logger", "testing/web3signer_tests", "validator_client", "validator_client/beacon_node_fallback", + "validator_client/doppelganger_service", 
"validator_client/graffiti_file", "validator_client/http_api", "validator_client/http_metrics", @@ -125,8 +126,8 @@ delay_map = "0.4" derivative = "2" dirs = "3" either = "1.9" - # TODO: rust_eth_kzg is pinned for now while a perf regression is investigated - # The crate_crypto_* dependencies can be removed from this file completely once we update +# TODO: rust_eth_kzg is pinned for now while a perf regression is investigated +# The crate_crypto_* dependencies can be removed from this file completely once we update rust_eth_kzg = "=0.5.1" crate_crypto_internal_eth_kzg_bls12_381 = "=0.5.1" crate_crypto_internal_eth_kzg_erasure_codes = "=0.5.1" @@ -166,7 +167,13 @@ r2d2 = "0.8" rand = "0.8" rayon = "1.7" regex = "1" -reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "stream", "rustls-tls", "native-tls-vendored"] } +reqwest = { version = "0.11", default-features = false, features = [ + "blocking", + "json", + "stream", + "rustls-tls", + "native-tls-vendored", +] } ring = "0.16" rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } @@ -175,7 +182,11 @@ serde_json = "1" serde_repr = "0.1" serde_yaml = "0.9" sha2 = "0.9" -slog = { version = "2", features = ["max_level_debug", "release_max_level_debug", "nested-values"] } +slog = { version = "2", features = [ + "max_level_debug", + "release_max_level_debug", + "nested-values", +] } slog-async = "2" slog-term = "2" sloggers = { version = "2", features = ["json"] } @@ -187,7 +198,12 @@ superstruct = "0.8" syn = "1" sysinfo = "0.26" tempfile = "3" -tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal", "macros"] } +tokio = { version = "1", features = [ + "rt-multi-thread", + "sync", + "signal", + "macros", +] } tokio-stream = { version = "0.1", features = ["sync"] } tokio-util = { version = "0.7", features = ["codec", "compat", "time"] } tracing = "0.1.40" @@ -214,6 +230,7 @@ clap_utils = { path = "common/clap_utils" } compare_fields = { path = 
"common/compare_fields" } deposit_contract = { path = "common/deposit_contract" } directory = { path = "common/directory" } +doppelganger_service = { path = "validator_client/doppelganger_service" } validator_services = { path = "validator_client/validator_services" } environment = { path = "lighthouse/environment" } eth1 = { path = "beacon_node/eth1" } @@ -265,7 +282,7 @@ validator_dir = { path = "common/validator_dir" } validator_http_api = { path = "validator_client/http_api" } validator_http_metrics = { path = "validator_client/http_metrics" } validator_metrics = { path = "validator_client/validator_metrics" } -validator_store= { path = "validator_client/validator_store" } +validator_store = { path = "validator_client/validator_store" } warp_utils = { path = "common/warp_utils" } xdelta3 = { git = "http://github.com/sigp/xdelta3-rs", rev = "50d63cdf1878e5cf3538e9aae5eed34a22c64e4a" } zstd = "0.13" diff --git a/Makefile b/Makefile index ab239c94d33..958abf87058 100644 --- a/Makefile +++ b/Makefile @@ -204,7 +204,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. lint: - cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ + RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ -D clippy::fn_to_numeric_cast_any \ -D clippy::manual_let_else \ -D clippy::large_stack_frames \ diff --git a/README.md b/README.md index 4b22087bcdc..147a06e5040 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Lighthouse is: - Built in [Rust](https://www.rust-lang.org), a modern language providing unique safety guarantees and excellent performance (comparable to C++). 
- Funded by various organisations, including Sigma Prime, the - Ethereum Foundation, ConsenSys, the Decentralization Foundation and private individuals. + Ethereum Foundation, Consensys, the Decentralization Foundation and private individuals. - Actively involved in the specification and security analysis of the Ethereum proof-of-stake consensus specification. diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 48230bb2812..a7752d621ff 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -8,25 +8,25 @@ authors = [ edition = { workspace = true } [dependencies] +account_utils = { workspace = true } bls = { workspace = true } clap = { workspace = true } -types = { workspace = true } -environment = { workspace = true } -eth2_network_config = { workspace = true } clap_utils = { workspace = true } directory = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +eth2_keystore = { workspace = true } +eth2_network_config = { workspace = true } eth2_wallet = { workspace = true } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } -validator_dir = { workspace = true } -tokio = { workspace = true } -eth2_keystore = { workspace = true } -account_utils = { workspace = true } -slashing_protection = { workspace = true } -eth2 = { workspace = true } -safe_arith = { workspace = true } -slot_clock = { workspace = true } filesystem = { workspace = true } +safe_arith = { workspace = true } sensitive_url = { workspace = true } +slashing_protection = { workspace = true } +slot_clock = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +validator_dir = { workspace = true } zeroize = { workspace = true } [dev-dependencies] diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 3fb0e50d225..ea1a24da1ff 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -409,6 +409,6 @@ mod tests { ) 
.unwrap(); - assert_eq!(expected_pk, kp.pk.into()); + assert_eq!(expected_pk, kp.pk); } } diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 15cdf15dc5d..7da65ad7426 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -20,28 +20,28 @@ write_ssz_files = [ ] # Writes debugging .ssz files to /tmp during block processing. [dependencies] -eth2_config = { workspace = true } +account_utils = { workspace = true } beacon_chain = { workspace = true } -types = { workspace = true } -store = { workspace = true } -client = { path = "client" } clap = { workspace = true } -slog = { workspace = true } -dirs = { workspace = true } +clap_utils = { workspace = true } +client = { path = "client" } directory = { workspace = true } +dirs = { workspace = true } environment = { workspace = true } -task_executor = { workspace = true } -genesis = { workspace = true } +eth2_config = { workspace = true } execution_layer = { workspace = true } -lighthouse_network = { workspace = true } -serde_json = { workspace = true } -clap_utils = { workspace = true } -hyper = { workspace = true } +genesis = { workspace = true } hex = { workspace = true } -slasher = { workspace = true } +http_api = { workspace = true } +hyper = { workspace = true } +lighthouse_network = { workspace = true } monitoring_api = { workspace = true } sensitive_url = { workspace = true } -http_api = { workspace = true } -unused_port = { workspace = true } +serde_json = { workspace = true } +slasher = { workspace = true } +slog = { workspace = true } +store = { workspace = true } strum = { workspace = true } -account_utils = { workspace = true } +task_executor = { workspace = true } +types = { workspace = true } +unused_port = { workspace = true } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index b0fa0131808..7b725d35197 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -18,9 +18,9 @@ portable = 
["bls/supranational-portable"] test_backfill = [] [dev-dependencies] +criterion = { workspace = true } maplit = { workspace = true } serde_json = { workspace = true } -criterion = { workspace = true } [dependencies] alloy-primitives = { workspace = true } @@ -42,11 +42,11 @@ hex = { workspace = true } int_to_bytes = { workspace = true } itertools = { workspace = true } kzg = { workspace = true } -metrics = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } lru = { workspace = true } merkle_proof = { workspace = true } +metrics = { workspace = true } oneshot_broadcast = { path = "../../common/oneshot_broadcast/" } operation_pool = { workspace = true } parking_lot = { workspace = true } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 5e13f0624da..92d24c53c00 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -7,7 +7,6 @@ //! So, this module contains functions that one might expect to find in other crates, but they live //! here for good reason. 
-use crate::otb_verification_service::OptimisticTransitionBlock; use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, BlockProductionError, ExecutionPayloadError, @@ -284,9 +283,6 @@ pub async fn validate_merge_block<'a, T: BeaconChainTypes>( "block_hash" => ?execution_payload.parent_hash(), "msg" => "the terminal block/parent was unavailable" ); - // Store Optimistic Transition Block in Database for later Verification - OptimisticTransitionBlock::from_block(block) - .persist_in_store::(&chain.store)?; Ok(()) } else { Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into()) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 2953516fb1a..d9728b9fd41 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -47,7 +47,6 @@ pub mod observed_block_producers; pub mod observed_data_sidecars; pub mod observed_operations; mod observed_slashable; -pub mod otb_verification_service; mod persisted_beacon_chain; mod persisted_fork_choice; mod pre_finalization_cache; diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs deleted file mode 100644 index 31034a7d59b..00000000000 --- a/beacon_node/beacon_chain/src/otb_verification_service.rs +++ /dev/null @@ -1,381 +0,0 @@ -use crate::execution_payload::{validate_merge_block, AllowOptimisticImport}; -use crate::{ - BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, ExecutionPayloadError, - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, -}; -use itertools::process_results; -use proto_array::InvalidationOperation; -use slog::{crit, debug, error, info, warn}; -use slot_clock::SlotClock; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_block_processing::is_merge_transition_complete; -use std::sync::Arc; -use store::{DBColumn, Error as StoreError, HotColdDB, KeyValueStore, StoreItem}; -use 
task_executor::{ShutdownReason, TaskExecutor}; -use tokio::time::sleep; -use tree_hash::TreeHash; -use types::{BeaconBlockRef, EthSpec, Hash256, Slot}; -use DBColumn::OptimisticTransitionBlock as OTBColumn; - -#[derive(Clone, Debug, Decode, Encode, PartialEq)] -pub struct OptimisticTransitionBlock { - root: Hash256, - slot: Slot, -} - -impl OptimisticTransitionBlock { - // types::BeaconBlockRef<'_, ::EthSpec> - pub fn from_block(block: BeaconBlockRef) -> Self { - Self { - root: block.tree_hash_root(), - slot: block.slot(), - } - } - - pub fn root(&self) -> &Hash256 { - &self.root - } - - pub fn slot(&self) -> &Slot { - &self.slot - } - - pub fn persist_in_store(&self, store: A) -> Result<(), StoreError> - where - T: BeaconChainTypes, - A: AsRef>, - { - if store - .as_ref() - .item_exists::(&self.root)? - { - Ok(()) - } else { - store.as_ref().put_item(&self.root, self) - } - } - - pub fn remove_from_store(&self, store: A) -> Result<(), StoreError> - where - T: BeaconChainTypes, - A: AsRef>, - { - store - .as_ref() - .hot_db - .key_delete(OTBColumn.into(), self.root.as_slice()) - } - - fn is_canonical( - &self, - chain: &BeaconChain, - ) -> Result { - Ok(chain - .forwards_iter_block_roots_until(self.slot, self.slot)? - .next() - .transpose()? - .map(|(root, _)| root) - == Some(self.root)) - } -} - -impl StoreItem for OptimisticTransitionBlock { - fn db_column() -> DBColumn { - OTBColumn - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) - } -} - -/// The routine is expected to run once per epoch, 1/4th through the epoch. -pub const EPOCH_DELAY_FACTOR: u32 = 4; - -/// Spawns a routine which checks the validity of any optimistically imported transition blocks -/// -/// This routine will run once per epoch, at `epoch_duration / EPOCH_DELAY_FACTOR` after -/// the start of each epoch. 
-/// -/// The service will not be started if there is no `execution_layer` on the `chain`. -pub fn start_otb_verification_service( - executor: TaskExecutor, - chain: Arc>, -) { - // Avoid spawning the service if there's no EL, it'll just error anyway. - if chain.execution_layer.is_some() { - executor.spawn( - async move { otb_verification_service(chain).await }, - "otb_verification_service", - ); - } -} - -pub fn load_optimistic_transition_blocks( - chain: &BeaconChain, -) -> Result, StoreError> { - process_results( - chain.store.hot_db.iter_column::(OTBColumn), - |iter| { - iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) - .collect() - }, - )? -} - -#[derive(Debug)] -pub enum Error { - ForkChoice(String), - BeaconChain(BeaconChainError), - StoreError(StoreError), - NoBlockFound(OptimisticTransitionBlock), -} - -pub async fn validate_optimistic_transition_blocks( - chain: &Arc>, - otbs: Vec, -) -> Result<(), Error> { - let finalized_slot = chain - .canonical_head - .fork_choice_read_lock() - .get_finalized_block() - .map_err(|e| Error::ForkChoice(format!("{:?}", e)))? 
- .slot; - - // separate otbs into - // non-canonical - // finalized canonical - // unfinalized canonical - let mut non_canonical_otbs = vec![]; - let (finalized_canonical_otbs, unfinalized_canonical_otbs) = process_results( - otbs.into_iter().map(|otb| { - otb.is_canonical(chain) - .map(|is_canonical| (otb, is_canonical)) - }), - |pair_iter| { - pair_iter - .filter_map(|(otb, is_canonical)| { - if is_canonical { - Some(otb) - } else { - non_canonical_otbs.push(otb); - None - } - }) - .partition::, _>(|otb| *otb.slot() <= finalized_slot) - }, - ) - .map_err(Error::BeaconChain)?; - - // remove non-canonical blocks that conflict with finalized checkpoint from the database - for otb in non_canonical_otbs { - if *otb.slot() <= finalized_slot { - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - } - } - - // ensure finalized canonical otb are valid, otherwise kill client - for otb in finalized_canonical_otbs { - match chain.get_block(otb.root()).await { - Ok(Some(block)) => { - match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await - { - Ok(()) => { - // merge transition block is valid, remove it from OTB - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - info!( - chain.log, - "Validated merge transition block"; - "block_root" => ?otb.root(), - "type" => "finalized" - ); - } - // The block was not able to be verified by the EL. Leave the OTB in the - // database since the EL is likely still syncing and may verify the block - // later. - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::UnverifiedNonOptimisticCandidate, - )) => (), - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::InvalidTerminalPoWBlock { .. }, - )) => { - // Finalized Merge Transition Block is Invalid! Kill the Client! - crit!( - chain.log, - "Finalized merge transition block is invalid!"; - "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. 
\ - You may be on a hostile network.", - "block_hash" => ?block.canonical_root() - ); - let mut shutdown_sender = chain.shutdown_sender(); - if let Err(e) = shutdown_sender.try_send(ShutdownReason::Failure( - INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, - )) { - crit!( - chain.log, - "Failed to shut down client"; - "error" => ?e, - "shutdown_reason" => INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON - ); - } - } - _ => {} - } - } - Ok(None) => return Err(Error::NoBlockFound(otb)), - // Our database has pruned the payload and the payload was unavailable on the EL since - // the EL is still syncing or the payload is non-canonical. - Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), - Err(e) => return Err(Error::BeaconChain(e)), - } - } - - // attempt to validate any non-finalized canonical otb blocks - for otb in unfinalized_canonical_otbs { - match chain.get_block(otb.root()).await { - Ok(Some(block)) => { - match validate_merge_block(chain, block.message(), AllowOptimisticImport::No).await - { - Ok(()) => { - // merge transition block is valid, remove it from OTB - otb.remove_from_store::(&chain.store) - .map_err(Error::StoreError)?; - info!( - chain.log, - "Validated merge transition block"; - "block_root" => ?otb.root(), - "type" => "not finalized" - ); - } - // The block was not able to be verified by the EL. Leave the OTB in the - // database since the EL is likely still syncing and may verify the block - // later. - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::UnverifiedNonOptimisticCandidate, - )) => (), - Err(BlockError::ExecutionPayloadError( - ExecutionPayloadError::InvalidTerminalPoWBlock { .. 
}, - )) => { - // Unfinalized Merge Transition Block is Invalid -> Run process_invalid_execution_payload - warn!( - chain.log, - "Merge transition block invalid"; - "block_root" => ?otb.root() - ); - chain - .process_invalid_execution_payload( - &InvalidationOperation::InvalidateOne { - block_root: *otb.root(), - }, - ) - .await - .map_err(|e| { - warn!( - chain.log, - "Error checking merge transition block"; - "error" => ?e, - "location" => "process_invalid_execution_payload" - ); - Error::BeaconChain(e) - })?; - } - _ => {} - } - } - Ok(None) => return Err(Error::NoBlockFound(otb)), - // Our database has pruned the payload and the payload was unavailable on the EL since - // the EL is still syncing or the payload is non-canonical. - Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => (), - Err(e) => return Err(Error::BeaconChain(e)), - } - } - - Ok(()) -} - -/// Loop until any optimistically imported merge transition blocks have been verified and -/// the merge has been finalized. -async fn otb_verification_service(chain: Arc>) { - let epoch_duration = chain.slot_clock.slot_duration() * T::EthSpec::slots_per_epoch() as u32; - loop { - match chain - .slot_clock - .duration_to_next_epoch(T::EthSpec::slots_per_epoch()) - { - Some(duration) => { - let additional_delay = epoch_duration / EPOCH_DELAY_FACTOR; - sleep(duration + additional_delay).await; - - debug!( - chain.log, - "OTB verification service firing"; - ); - - if !is_merge_transition_complete( - &chain.canonical_head.cached_head().snapshot.beacon_state, - ) { - // We are pre-merge. Nothing to do yet. 
- continue; - } - - // load all optimistically imported transition blocks from the database - match load_optimistic_transition_blocks(chain.as_ref()) { - Ok(otbs) => { - if otbs.is_empty() { - if chain - .canonical_head - .fork_choice_read_lock() - .get_finalized_block() - .map_or(false, |block| { - block.execution_status.is_execution_enabled() - }) - { - // there are no optimistic blocks in the database, we can exit - // the service since the merge transition is finalized and we'll - // never see another transition block - break; - } else { - debug!( - chain.log, - "No optimistic transition blocks"; - "info" => "waiting for the merge transition to finalize" - ) - } - } - if let Err(e) = validate_optimistic_transition_blocks(&chain, otbs).await { - warn!( - chain.log, - "Error while validating optimistic transition blocks"; - "error" => ?e - ); - } - } - Err(e) => { - error!( - chain.log, - "Error loading optimistic transition blocks"; - "error" => ?e - ); - } - }; - } - None => { - error!(chain.log, "Failed to read slot clock"); - // If we can't read the slot clock, just wait another slot. 
- sleep(chain.slot_clock.slot_duration()).await; - } - }; - } - debug!( - chain.log, - "No optimistic transition blocks in database"; - "msg" => "shutting down OTB verification service" - ); -} diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index a662cc49c9d..da1d60db17d 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -512,7 +512,7 @@ mod test { } assert!( - !cache.contains(&shuffling_id_and_committee_caches.get(0).unwrap().0), + !cache.contains(&shuffling_id_and_committee_caches.first().unwrap().0), "should not contain oldest epoch shuffling id" ); assert_eq!( diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 0b121356b9d..87fefe71146 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -70,12 +70,12 @@ async fn produces_attestations_from_attestation_simulator_service() { } // Compare the prometheus metrics that evaluates the performance of the unaggregated attestations - let hit_prometheus_metrics = vec![ + let hit_prometheus_metrics = [ metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_HIT_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_HIT_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_HIT_TOTAL, ]; - let miss_prometheus_metrics = vec![ + let miss_prometheus_metrics = [ metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_HEAD_ATTESTER_MISS_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_TARGET_ATTESTER_MISS_TOTAL, metrics::VALIDATOR_MONITOR_ATTESTATION_SIMULATOR_SOURCE_ATTESTER_MISS_TOTAL, diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index e168cbb6f4d..dcc63ddf620 100644 --- 
a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -431,10 +431,12 @@ impl GossipTester { .chain .verify_aggregated_attestation_for_gossip(&aggregate) .err() - .expect(&format!( - "{} should error during verify_aggregated_attestation_for_gossip", - desc - )); + .unwrap_or_else(|| { + panic!( + "{} should error during verify_aggregated_attestation_for_gossip", + desc + ) + }); inspect_err(&self, err); /* @@ -449,10 +451,12 @@ impl GossipTester { .unwrap(); assert_eq!(results.len(), 2); - let batch_err = results.pop().unwrap().err().expect(&format!( - "{} should error during batch_verify_aggregated_attestations_for_gossip", - desc - )); + let batch_err = results.pop().unwrap().err().unwrap_or_else(|| { + panic!( + "{} should error during batch_verify_aggregated_attestations_for_gossip", + desc + ) + }); inspect_err(&self, batch_err); self @@ -475,10 +479,12 @@ impl GossipTester { .chain .verify_unaggregated_attestation_for_gossip(&attn, Some(subnet_id)) .err() - .expect(&format!( - "{} should error during verify_unaggregated_attestation_for_gossip", - desc - )); + .unwrap_or_else(|| { + panic!( + "{} should error during verify_unaggregated_attestation_for_gossip", + desc + ) + }); inspect_err(&self, err); /* @@ -496,10 +502,12 @@ impl GossipTester { ) .unwrap(); assert_eq!(results.len(), 2); - let batch_err = results.pop().unwrap().err().expect(&format!( - "{} should error during batch_verify_unaggregated_attestations_for_gossip", - desc - )); + let batch_err = results.pop().unwrap().err().unwrap_or_else(|| { + panic!( + "{} should error during batch_verify_unaggregated_attestations_for_gossip", + desc + ) + }); inspect_err(&self, batch_err); self @@ -816,7 +824,7 @@ async fn aggregated_gossip_verification() { let (index, sk) = tester.non_aggregator(); *a = SignedAggregateAndProof::from_aggregate( index as u64, - tester.valid_aggregate.message().aggregate().clone(), + 
tester.valid_aggregate.message().aggregate(), None, &sk, &chain.canonical_head.cached_head().head_fork(), diff --git a/beacon_node/beacon_chain/tests/bellatrix.rs b/beacon_node/beacon_chain/tests/bellatrix.rs index 5bd3452623a..5080b0890bd 100644 --- a/beacon_node/beacon_chain/tests/bellatrix.rs +++ b/beacon_node/beacon_chain/tests/bellatrix.rs @@ -82,7 +82,7 @@ async fn merge_with_terminal_block_hash_override() { let block = &harness.chain.head_snapshot().beacon_block; - let execution_payload = block.message().body().execution_payload().unwrap().clone(); + let execution_payload = block.message().body().execution_payload().unwrap(); if i == 0 { assert_eq!(execution_payload.block_hash(), genesis_pow_block_hash); } @@ -133,7 +133,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { * Do the Bellatrix fork, without a terminal PoW block. */ - harness.extend_to_slot(bellatrix_fork_slot).await; + Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await; let bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!(bellatrix_head.as_bellatrix().is_ok()); @@ -207,15 +207,7 @@ async fn base_altair_bellatrix_with_terminal_block_after_fork() { harness.extend_slots(1).await; let block = &harness.chain.head_snapshot().beacon_block; - execution_payloads.push( - block - .message() - .body() - .execution_payload() - .unwrap() - .clone() - .into(), - ); + execution_payloads.push(block.message().body().execution_payload().unwrap().into()); } verify_execution_payload_chain(execution_payloads.as_slice()); diff --git a/beacon_node/beacon_chain/tests/capella.rs b/beacon_node/beacon_chain/tests/capella.rs index ac97a95721d..3ce5702f2ea 100644 --- a/beacon_node/beacon_chain/tests/capella.rs +++ b/beacon_node/beacon_chain/tests/capella.rs @@ -54,7 +54,7 @@ async fn base_altair_bellatrix_capella() { /* * Do the Altair fork. 
*/ - harness.extend_to_slot(altair_fork_slot).await; + Box::pin(harness.extend_to_slot(altair_fork_slot)).await; let altair_head = &harness.chain.head_snapshot().beacon_block; assert!(altair_head.as_altair().is_ok()); @@ -63,7 +63,7 @@ async fn base_altair_bellatrix_capella() { /* * Do the Bellatrix fork, without a terminal PoW block. */ - harness.extend_to_slot(bellatrix_fork_slot).await; + Box::pin(harness.extend_to_slot(bellatrix_fork_slot)).await; let bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!(bellatrix_head.as_bellatrix().is_ok()); @@ -81,7 +81,7 @@ async fn base_altair_bellatrix_capella() { /* * Next Bellatrix block shouldn't include an exec payload. */ - harness.extend_slots(1).await; + Box::pin(harness.extend_slots(1)).await; let one_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( @@ -112,7 +112,7 @@ async fn base_altair_bellatrix_capella() { terminal_block.timestamp = timestamp; } }); - harness.extend_slots(1).await; + Box::pin(harness.extend_slots(1)).await; let two_after_bellatrix_head = &harness.chain.head_snapshot().beacon_block; assert!( diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index 729d88450f4..01b790bb25b 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -413,7 +413,7 @@ async fn invalid_payload_invalidates_parent() { rig.import_block(Payload::Valid).await; // Import a valid transition block. rig.move_to_first_justification(Payload::Syncing).await; - let roots = vec![ + let roots = [ rig.import_block(Payload::Syncing).await, rig.import_block(Payload::Syncing).await, rig.import_block(Payload::Syncing).await, @@ -1052,7 +1052,7 @@ async fn invalid_parent() { // Ensure the block built atop an invalid payload is invalid for gossip. 
assert!(matches!( - rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await, + rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await, Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) if invalid_root == parent_root )); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 522020e476d..e1258ccdea7 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -330,7 +330,7 @@ async fn long_skip() { final_blocks as usize, BlockStrategy::ForkCanonicalChainAt { previous_slot: Slot::new(initial_blocks), - first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1), + first_slot: Slot::new(initial_blocks + skip_slots + 1), }, AttestationStrategy::AllValidators, ) @@ -381,8 +381,7 @@ async fn randao_genesis_storage() { .beacon_state .randao_mixes() .iter() - .find(|x| **x == genesis_value) - .is_some()); + .any(|x| *x == genesis_value)); // Then upon adding one more block, it isn't harness.advance_slot(); @@ -393,14 +392,13 @@ async fn randao_genesis_storage() { AttestationStrategy::AllValidators, ) .await; - assert!(harness + assert!(!harness .chain .head_snapshot() .beacon_state .randao_mixes() .iter() - .find(|x| **x == genesis_value) - .is_none()); + .any(|x| *x == genesis_value)); check_finalization(&harness, num_slots); check_split_slot(&harness, store); @@ -1062,7 +1060,7 @@ fn check_shuffling_compatible( let current_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( &block_root, head_state.current_epoch(), - &head_state, + head_state, ); // Check for consistency with the more expensive shuffling lookup. 
@@ -1102,7 +1100,7 @@ fn check_shuffling_compatible( let previous_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible( &block_root, head_state.previous_epoch(), - &head_state, + head_state, ); harness .chain @@ -1130,14 +1128,11 @@ fn check_shuffling_compatible( // Targeting two epochs before the current epoch should always return false if head_state.current_epoch() >= 2 { - assert_eq!( - harness.chain.shuffling_is_compatible( - &block_root, - head_state.current_epoch() - 2, - &head_state - ), - false - ); + assert!(!harness.chain.shuffling_is_compatible( + &block_root, + head_state.current_epoch() - 2, + head_state + )); } } } @@ -1559,14 +1554,13 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() { .map(Into::into) .collect(); let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap(); - let (canonical_blocks, _, _, _) = rig - .add_attested_blocks_at_slots( - canonical_state, - canonical_state_root, - &canonical_slots, - &honest_validators, - ) - .await; + let (canonical_blocks, _, _, _) = Box::pin(rig.add_attested_blocks_at_slots( + canonical_state, + canonical_state_root, + &canonical_slots, + &honest_validators, + )) + .await; // Postconditions let canonical_blocks: HashMap = canonical_blocks_zeroth_epoch @@ -1939,7 +1933,7 @@ async fn prune_single_block_long_skip() { 2 * slots_per_epoch, 1, 2 * slots_per_epoch, - 2 * slots_per_epoch as u64, + 2 * slots_per_epoch, 1, ) .await; @@ -1961,31 +1955,45 @@ async fn prune_shared_skip_states_mid_epoch() { #[tokio::test] async fn prune_shared_skip_states_epoch_boundaries() { let slots_per_epoch = E::slots_per_epoch(); - pruning_test(slots_per_epoch - 1, 1, slots_per_epoch, 2, slots_per_epoch).await; - pruning_test(slots_per_epoch - 1, 2, slots_per_epoch, 1, slots_per_epoch).await; - pruning_test( + Box::pin(pruning_test( + slots_per_epoch - 1, + 1, + slots_per_epoch, + 2, + slots_per_epoch, + )) + .await; + Box::pin(pruning_test( + slots_per_epoch - 1, + 2, + 
slots_per_epoch, + 1, + slots_per_epoch, + )) + .await; + Box::pin(pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, - slots_per_epoch as u64 / 2, + slots_per_epoch / 2, slots_per_epoch, - slots_per_epoch as u64 / 2 + 1, + slots_per_epoch / 2 + 1, slots_per_epoch, - ) + )) .await; - pruning_test( + Box::pin(pruning_test( 2 * slots_per_epoch + slots_per_epoch / 2, - slots_per_epoch as u64 / 2, + slots_per_epoch / 2, slots_per_epoch, - slots_per_epoch as u64 / 2 + 1, + slots_per_epoch / 2 + 1, slots_per_epoch, - ) + )) .await; - pruning_test( + Box::pin(pruning_test( 2 * slots_per_epoch - 1, - slots_per_epoch as u64, + slots_per_epoch, 1, 0, 2 * slots_per_epoch, - ) + )) .await; } @@ -2094,7 +2102,7 @@ async fn pruning_test( ); check_chain_dump( &harness, - (num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1) as u64, + num_initial_blocks + num_canonical_middle_blocks + num_finalization_blocks + 1, ); let all_canonical_states = harness @@ -2613,8 +2621,7 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { harness.advance_slot(); } harness.extend_to_slot(finalizing_slot - 1).await; - harness - .add_block_at_slot(finalizing_slot, harness.get_current_state()) + Box::pin(harness.add_block_at_slot(finalizing_slot, harness.get_current_state())) .await .unwrap(); @@ -2789,6 +2796,7 @@ async fn finalizes_after_resuming_from_db() { ); } +#[allow(clippy::large_stack_frames)] #[tokio::test] async fn revert_minority_fork_on_resume() { let validator_count = 16; diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index d1b3139d42c..6d30b8a4e32 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -73,7 +73,7 @@ fn get_valid_sync_committee_message_for_block( let head_state = harness.chain.head_beacon_state_cloned(); let (signature, _) = harness 
.make_sync_committee_messages(&head_state, block_root, slot, relative_sync_committee) - .get(0) + .first() .expect("sync messages should exist") .get(message_index) .expect("first sync message should exist") @@ -104,7 +104,7 @@ fn get_valid_sync_contribution( ); let (_, contribution_opt) = sync_contributions - .get(0) + .first() .expect("sync contributions should exist"); let contribution = contribution_opt .as_ref() diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 7ae34ccf387..c641f32b820 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -170,7 +170,7 @@ async fn find_reorgs() { harness .extend_chain( - num_blocks_produced as usize, + num_blocks_produced, BlockStrategy::OnCanonicalHead, // No need to produce attestations for this test. AttestationStrategy::SomeValidators(vec![]), @@ -203,7 +203,7 @@ async fn find_reorgs() { assert_eq!( find_reorg_slot( &harness.chain, - &head_state, + head_state, harness.chain.head_beacon_block().canonical_root() ), head_slot @@ -503,7 +503,6 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() { .unwrap(); let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) - .into_iter() .map(|validator_index| { let slot = state .get_attestation_duties(validator_index, RelativeEpoch::Current) diff --git a/beacon_node/beacon_processor/Cargo.toml b/beacon_node/beacon_processor/Cargo.toml index 9273137bf6d..c96e0868d73 100644 --- a/beacon_node/beacon_processor/Cargo.toml +++ b/beacon_node/beacon_processor/Cargo.toml @@ -4,22 +4,22 @@ version = "0.1.0" edition = { workspace = true } [dependencies] -slog = { workspace = true } -itertools = { workspace = true } -logging = { workspace = true } -tokio = { workspace = true } -tokio-util = { workspace = true } -futures = { workspace = true } fnv = { workspace = true } -strum = { workspace = true } -task_executor = { workspace = true } -slot_clock = { workspace = true } 
+futures = { workspace = true } +itertools = { workspace = true } lighthouse_network = { workspace = true } -types = { workspace = true } +logging = { workspace = true } metrics = { workspace = true } -parking_lot = { workspace = true } num_cpus = { workspace = true } +parking_lot = { workspace = true } serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +strum = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +tokio-util = { workspace = true } +types = { workspace = true } [dev-dependencies] tokio = { workspace = true, features = ["test-util"] } diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index c3658f45c73..3531e81c847 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -5,8 +5,8 @@ edition = { workspace = true } authors = ["Sean Anderson "] [dependencies] +eth2 = { workspace = true } +lighthouse_version = { workspace = true } reqwest = { workspace = true } sensitive_url = { workspace = true } -eth2 = { workspace = true } serde = { workspace = true } -lighthouse_version = { workspace = true } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 4df13eb3d4f..614115eb588 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -5,41 +5,41 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dev-dependencies] +operation_pool = { workspace = true } serde_yaml = { workspace = true } state_processing = { workspace = true } -operation_pool = { workspace = true } tokio = { workspace = true } [dependencies] beacon_chain = { workspace = true } -store = { workspace = true } -network = { workspace = true } -timer = { path = "../timer" } -lighthouse_network = { workspace = true } -types = { workspace = true } -eth2_config = { workspace = true } -slot_clock = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } 
-slog = { workspace = true } -tokio = { workspace = true } -futures = { workspace = true } +beacon_processor = { workspace = true } +directory = { workspace = true } dirs = { workspace = true } +environment = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } -kzg = { workspace = true } -sensitive_url = { workspace = true } +eth2_config = { workspace = true } +ethereum_ssz = { workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } genesis = { workspace = true } -task_executor = { workspace = true } -environment = { workspace = true } -metrics = { workspace = true } -time = "0.3.5" -directory = { workspace = true } http_api = { workspace = true } http_metrics = { path = "../http_metrics" } +kzg = { workspace = true } +lighthouse_network = { workspace = true } +metrics = { workspace = true } +monitoring_api = { workspace = true } +network = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } slasher = { workspace = true } slasher_service = { path = "../../slasher/service" } -monitoring_api = { workspace = true } -execution_layer = { workspace = true } -beacon_processor = { workspace = true } -ethereum_ssz = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +store = { workspace = true } +task_executor = { workspace = true } +time = "0.3.5" +timer = { path = "../timer" } +tokio = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 961f5140f92..7c6a253aca4 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -7,7 +7,6 @@ use crate::Client; use beacon_chain::attestation_simulator::start_attestation_simulator_service; use beacon_chain::data_availability_checker::start_availability_cache_maintenance_service; use 
beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_service; -use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; use beacon_chain::{ @@ -970,7 +969,6 @@ where } start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone()); - start_otb_verification_service(runtime_context.executor.clone(), beacon_chain.clone()); start_availability_cache_maintenance_service( runtime_context.executor.clone(), beacon_chain.clone(), diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 50400a77e06..8ccd50aad8d 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -5,27 +5,27 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dev-dependencies] +environment = { workspace = true } eth1_test_rig = { workspace = true } serde_yaml = { workspace = true } sloggers = { workspace = true } -environment = { workspace = true } [dependencies] +eth2 = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } execution_layer = { workspace = true } futures = { workspace = true } -serde = { workspace = true } -types = { workspace = true } +logging = { workspace = true } merkle_proof = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } slog = { workspace = true } -logging = { workspace = true } -superstruct = { workspace = true } -tokio = { workspace = true } state_processing = { workspace = true } -metrics = { workspace = true } +superstruct = { workspace = true } task_executor = { workspace = true } -eth2 = { workspace = true } -sensitive_url = { workspace = true } +tokio = { workspace = true } +tree_hash 
= { workspace = true } +types = { workspace = true } diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 0ef101fae7c..7eb7b4a15e1 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -2,54 +2,53 @@ name = "execution_layer" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +alloy-consensus = { workspace = true } alloy-primitives = { workspace = true } -types = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } -logging = { workspace = true } -sensitive_url = { workspace = true } -reqwest = { workspace = true } -ethereum_serde_utils = { workspace = true } -serde_json = { workspace = true } -serde = { workspace = true } -warp = { workspace = true } -jsonwebtoken = "9" +alloy-rlp = { workspace = true } +arc-swap = "1.6.0" +builder_client = { path = "../builder_client" } bytes = { workspace = true } -task_executor = { workspace = true } -hex = { workspace = true } -ethereum_ssz = { workspace = true } -ssz_types = { workspace = true } eth2 = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } +ethers-core = { workspace = true } +fixed_bytes = { workspace = true } +fork_choice = { workspace = true } +hash-db = "0.15.2" +hash256-std-hasher = "0.15.2" +hex = { workspace = true } +jsonwebtoken = "9" +keccak-hash = "0.10.0" kzg = { workspace = true } -state_processing = { workspace = true } -superstruct = { workspace = true } +lighthouse_version = { workspace = true } +logging = { workspace = true } lru = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } +pretty_reqwest_error = { workspace = true } +rand = { workspace = true } +reqwest = { 
workspace = true } +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +sha2 = { workspace = true } +slog = { workspace = true } slot_clock = { workspace = true } +ssz_types = { workspace = true } +state_processing = { workspace = true } +strum = { workspace = true } +superstruct = { workspace = true } +task_executor = { workspace = true } tempfile = { workspace = true } -rand = { workspace = true } -zeroize = { workspace = true } -metrics = { workspace = true } -ethers-core = { workspace = true } -builder_client = { path = "../builder_client" } -fork_choice = { workspace = true } +tokio = { workspace = true } tokio-stream = { workspace = true } -strum = { workspace = true } -keccak-hash = "0.10.0" -hash256-std-hasher = "0.15.2" +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } triehash = "0.8.4" -hash-db = "0.15.2" -pretty_reqwest_error = { workspace = true } -arc-swap = "1.6.0" -eth2_network_config = { workspace = true } -alloy-rlp = { workspace = true } -alloy-consensus = { workspace = true } -lighthouse_version = { workspace = true } -fixed_bytes = { workspace = true } -sha2 = { workspace = true } +types = { workspace = true } +warp = { workspace = true } +zeroize = { workspace = true } diff --git a/beacon_node/execution_layer/src/block_hash.rs b/beacon_node/execution_layer/src/block_hash.rs index cdc172cff47..d3a32c7929b 100644 --- a/beacon_node/execution_layer/src/block_hash.rs +++ b/beacon_node/execution_layer/src/block_hash.rs @@ -7,7 +7,7 @@ use keccak_hash::KECCAK_EMPTY_LIST_RLP; use triehash::ordered_trie_root; use types::{ EncodableExecutionBlockHeader, EthSpec, ExecutionBlockHash, ExecutionBlockHeader, - ExecutionPayloadRef, Hash256, + ExecutionPayloadRef, ExecutionRequests, Hash256, }; /// Calculate the block hash of an execution block. 
@@ -17,6 +17,7 @@ use types::{ pub fn calculate_execution_block_hash( payload: ExecutionPayloadRef, parent_beacon_block_root: Option, + execution_requests: Option<&ExecutionRequests>, ) -> (ExecutionBlockHash, Hash256) { // Calculate the transactions root. // We're currently using a deprecated Parity library for this. We should move to a @@ -38,6 +39,7 @@ pub fn calculate_execution_block_hash( let rlp_blob_gas_used = payload.blob_gas_used().ok(); let rlp_excess_blob_gas = payload.excess_blob_gas().ok(); + let requests_root = execution_requests.map(|requests| requests.requests_hash()); // Construct the block header. let exec_block_header = ExecutionBlockHeader::from_payload( @@ -48,6 +50,7 @@ pub fn calculate_execution_block_hash( rlp_blob_gas_used, rlp_excess_blob_gas, parent_beacon_block_root, + requests_root, ); // Hash the RLP encoding of the block header. @@ -118,6 +121,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_rlp = 
"f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b"; let expected_hash = @@ -149,6 +153,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_rlp = 
"f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b"; let expected_hash = @@ -181,6 +186,7 @@ mod test { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, + requests_root: None, }; let expected_hash = Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351") @@ -211,6 +217,7 @@ mod test { blob_gas_used: Some(0x0u64), excess_blob_gas: Some(0x0u64), parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()), + requests_root: None, }; let expected_hash = Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408") @@ -221,29 +228,30 @@ mod test { #[test] fn test_rlp_encode_block_electra() { let header = ExecutionBlockHeader { - parent_hash: Hash256::from_str("172864416698b842f4c92f7b476be294b4ef720202779df194cd225f531053ab").unwrap(), + parent_hash: Hash256::from_str("a628f146df398a339768bd101f7dc41d828be79aca5dd02cc878a51bdbadd761").unwrap(), ommers_hash: 
Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(), - beneficiary: Address::from_str("878705ba3f8bc32fcf7f4caa1a35e72af65cf766").unwrap(), - state_root: Hash256::from_str("c6457d0df85c84c62d1c68f68138b6e796e8a44fb44de221386fb2d5611c41e0").unwrap(), - transactions_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - receipts_root: Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - logs_bloom:<[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(), + beneficiary: Address::from_str("f97e180c050e5ab072211ad2c213eb5aee4df134").unwrap(), + state_root: Hash256::from_str("fdff009f8280bd113ebb4df8ce4e2dcc9322d43184a0b506e70b7f4823ca1253").unwrap(), + transactions_root: Hash256::from_str("452806578b4fa881cafb019c47e767e37e2249accf859159f00cddefb2579bb5").unwrap(), + receipts_root: Hash256::from_str("72ceac0f16a32041c881b3220d39ca506a286bef163c01a4d0821cd4027d31c7").unwrap(), + logs_bloom:<[u8; 
256]>::from_hex("10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000").unwrap().into(), difficulty: Uint256::ZERO, - number: Uint256::from(97), - gas_limit: Uint256::from(27482534), - gas_used: Uint256::ZERO, - timestamp: 1692132829u64, - extra_data: hex::decode("d883010d00846765746888676f312e32302e37856c696e7578").unwrap(), - mix_hash: Hash256::from_str("0b493c22d2ad4ca76c77ae6ad916af429b42b1dc98fdcb8e5ddbd049bbc5d623").unwrap(), + number: Uint256::from(8230), + gas_limit: Uint256::from(30000000), + gas_used: Uint256::from(3716848), + timestamp: 1730921268, + extra_data: hex::decode("d883010e0c846765746888676f312e32332e32856c696e7578").unwrap(), + mix_hash: Hash256::from_str("e87ca9a45b2e61bbe9080d897db1d584b5d2367d22e898af901091883b7b96ec").unwrap(), nonce: Hash64::ZERO, - base_fee_per_gas: Uint256::from(2374u64), + base_fee_per_gas: Uint256::from(7u64), withdrawals_root: Some(Hash256::from_str("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap()), - blob_gas_used: Some(0x0u64), - excess_blob_gas: Some(0x0u64), - parent_beacon_block_root: Some(Hash256::from_str("f7d327d2c04e4f12e9cdd492e53d39a1d390f8b1571e3b2a22ac6e1e170e5b1a").unwrap()), + blob_gas_used: Some(786432), + excess_blob_gas: Some(44695552), + parent_beacon_block_root: Some(Hash256::from_str("f3a888fee010ebb1ae083547004e96c254b240437823326fdff8354b1fc25629").unwrap()), + requests_root: Some(Hash256::from_str("9440d3365f07573919e1e9ac5178c20ec6fe267357ee4baf8b6409901f331b62").unwrap()), }; let expected_hash = - 
Hash256::from_str("a7448e600ead0a23d16f96aa46e8dea9eef8a7c5669a5f0a5ff32709afe9c408") + Hash256::from_str("61e67afc96bf21be6aab52c1ace1db48de7b83f03119b0644deb4b69e87e09e1") .unwrap(); test_rlp_encoding(&header, None, expected_hash); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index d4734be448d..33dc60d0378 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -812,7 +812,7 @@ impl HttpJsonRpc { new_payload_request_electra.versioned_hashes, new_payload_request_electra.parent_beacon_block_root, new_payload_request_electra - .execution_requests_list + .execution_requests .get_execution_requests_list(), ]); diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index efd68f1023d..1c6639804e3 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -6,7 +6,9 @@ use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobsList; -use types::execution_requests::{ConsolidationRequests, DepositRequests, WithdrawalRequests}; +use types::execution_requests::{ + ConsolidationRequests, DepositRequests, RequestPrefix, WithdrawalRequests, +}; use types::{Blob, FixedVector, KzgProof, Unsigned}; #[derive(Debug, PartialEq, Serialize, Deserialize)] @@ -339,25 +341,6 @@ impl From> for ExecutionPayload { } } -/// This is used to index into the `execution_requests` array. 
-#[derive(Debug, Copy, Clone)] -enum RequestPrefix { - Deposit, - Withdrawal, - Consolidation, -} - -impl RequestPrefix { - pub fn from_prefix(prefix: u8) -> Option { - match prefix { - 0 => Some(Self::Deposit), - 1 => Some(Self::Withdrawal), - 2 => Some(Self::Consolidation), - _ => None, - } - } -} - /// Format of `ExecutionRequests` received over the engine api. /// /// Array of ssz-encoded requests list encoded as hex bytes. @@ -379,7 +362,8 @@ impl TryFrom for ExecutionRequests { for (i, request) in value.0.into_iter().enumerate() { // hex string - let decoded_bytes = hex::decode(request).map_err(|e| format!("Invalid hex {:?}", e))?; + let decoded_bytes = hex::decode(request.strip_prefix("0x").unwrap_or(&request)) + .map_err(|e| format!("Invalid hex {:?}", e))?; match RequestPrefix::from_prefix(i as u8) { Some(RequestPrefix::Deposit) => { requests.deposits = DepositRequests::::from_ssz_bytes(&decoded_bytes) @@ -431,7 +415,7 @@ pub struct JsonGetPayloadResponse { #[superstruct(only(V3, V4))] pub should_override_builder: bool, #[superstruct(only(V4))] - pub requests: JsonExecutionRequests, + pub execution_requests: JsonExecutionRequests, } impl TryFrom> for GetPayloadResponse { @@ -464,7 +448,7 @@ impl TryFrom> for GetPayloadResponse { block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - requests: response.requests.try_into()?, + requests: response.execution_requests.try_into()?, })) } } diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 318779b7f3e..60bc8489744 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -44,7 +44,7 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { #[superstruct(only(Deneb, Electra))] pub parent_beacon_block_root: Hash256, 
#[superstruct(only(Electra))] - pub execution_requests_list: &'block ExecutionRequests, + pub execution_requests: &'block ExecutionRequests, } impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { @@ -121,8 +121,11 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); - let (header_hash, rlp_transactions_root) = - calculate_execution_block_hash(payload, parent_beacon_block_root); + let (header_hash, rlp_transactions_root) = calculate_execution_block_hash( + payload, + parent_beacon_block_root, + self.execution_requests().ok().copied(), + ); if header_hash != self.block_hash() { return Err(Error::BlockHashMismatch { @@ -185,7 +188,7 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> .map(kzg_commitment_to_versioned_hash) .collect(), parent_beacon_block_root: block_ref.parent_root, - execution_requests_list: &block_ref.body.execution_requests, + execution_requests: &block_ref.body.execution_requests, })), } } diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 786ac9ad9c9..9365024ffb7 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -374,7 +374,7 @@ pub async fn handle_rpc( .into(), should_override_builder: false, // TODO(electra): add EL requests in mock el - requests: Default::default(), + execution_requests: Default::default(), }) .unwrap() } diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 638fe0f2192..5d601008bc0 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -6,49 +6,49 @@ edition = { workspace = true } autotests = false # using a single test binary compiles faster [dependencies] -warp = { workspace = true } -serde = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -types = { workspace 
= true } -hex = { workspace = true } beacon_chain = { workspace = true } +beacon_processor = { workspace = true } +bs58 = "0.4.0" +bytes = { workspace = true } +directory = { workspace = true } +eth1 = { workspace = true } eth2 = { workspace = true } -slog = { workspace = true } -network = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } +execution_layer = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } lighthouse_network = { workspace = true } -eth1 = { workspace = true } -state_processing = { workspace = true } lighthouse_version = { workspace = true } +logging = { workspace = true } +lru = { workspace = true } metrics = { workspace = true } -warp_utils = { workspace = true } -slot_clock = { workspace = true } -ethereum_ssz = { workspace = true } -bs58 = "0.4.0" -futures = { workspace = true } -execution_layer = { workspace = true } +network = { workspace = true } +operation_pool = { workspace = true } parking_lot = { workspace = true } +rand = { workspace = true } safe_arith = { workspace = true } -task_executor = { workspace = true } -lru = { workspace = true } -tree_hash = { workspace = true } -sysinfo = { workspace = true } -system_health = { path = "../../common/system_health" } -directory = { workspace = true } -logging = { workspace = true } -ethereum_serde_utils = { workspace = true } -operation_pool = { workspace = true } sensitive_url = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +state_processing = { workspace = true } store = { workspace = true } -bytes = { workspace = true } -beacon_processor = { workspace = true } -rand = { workspace = true } +sysinfo = { workspace = true } +system_health = { path = "../../common/system_health" } +task_executor = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true 
} +warp = { workspace = true } +warp_utils = { workspace = true } [dev-dependencies] -serde_json = { workspace = true } -proto_array = { workspace = true } genesis = { workspace = true } logging = { workspace = true } +proto_array = { workspace = true } +serde_json = { workspace = true } [[test]] name = "bn_http_api_tests" diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 1338f4f1802..e1ecf2d4fc3 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -322,7 +322,7 @@ pub async fn consensus_gossip() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn consensus_partial_pass_only_consensus() { /* this test targets gossip-level validation */ - let validation_level: Option = Some(BroadcastValidation::Consensus); + let validation_level = BroadcastValidation::Consensus; // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. @@ -378,7 +378,7 @@ pub async fn consensus_partial_pass_only_consensus() { tester.harness.chain.clone(), &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -615,8 +615,7 @@ pub async fn equivocation_gossip() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ - let validation_level: Option = - Some(BroadcastValidation::ConsensusAndEquivocation); + let validation_level = BroadcastValidation::ConsensusAndEquivocation; // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. 
@@ -671,7 +670,7 @@ pub async fn equivocation_consensus_late_equivocation() { tester.harness.chain, &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -1228,8 +1227,7 @@ pub async fn blinded_equivocation_gossip() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] pub async fn blinded_equivocation_consensus_late_equivocation() { /* this test targets gossip-level validation */ - let validation_level: Option = - Some(BroadcastValidation::ConsensusAndEquivocation); + let validation_level = BroadcastValidation::ConsensusAndEquivocation; // Validator count needs to be at least 32 or proposer boost gets set to 0 when computing // `validator_count // 32`. @@ -1311,7 +1309,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { tester.harness.chain, &channel.0, test_logger, - validation_level.unwrap(), + validation_level, StatusCode::ACCEPTED, network_globals, ) @@ -1465,8 +1463,8 @@ pub async fn block_seen_on_gossip_with_some_blobs() { "need at least 2 blobs for partial reveal" ); - let partial_kzg_proofs = vec![blobs.0.get(0).unwrap().clone()]; - let partial_blobs = vec![blobs.1.get(0).unwrap().clone()]; + let partial_kzg_proofs = vec![*blobs.0.first().unwrap()]; + let partial_blobs = vec![blobs.1.first().unwrap().clone()]; // Simulate the block being seen on gossip. 
block diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 627b0d0b179..e45dcf221cc 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -139,7 +139,7 @@ impl ForkChoiceUpdates { fn insert(&mut self, update: ForkChoiceUpdateMetadata) { self.updates .entry(update.state.head_block_hash) - .or_insert_with(Vec::new) + .or_default() .push(update); } diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index 01731530d36..dd481f23bae 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -57,18 +57,18 @@ async fn el_syncing_then_synced() { mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); // EL synced mock_el.server.set_syncing_response(Ok(false)); mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `syncing` endpoint when the EL is offline (errors on upcheck). 
@@ -85,9 +85,9 @@ async fn el_offline() { mock_el.el.upcheck().await; let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, true); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `syncing` endpoint when the EL errors on newPaylod but is not fully offline. @@ -128,9 +128,9 @@ async fn el_error_on_new_payload() { // The EL should now be *offline* according to the API. let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, true); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); // Processing a block successfully should remove the status. mock_el.server.set_new_payload_status( @@ -144,9 +144,9 @@ async fn el_error_on_new_payload() { harness.process_block_result((block, blobs)).await.unwrap(); let api_response = tester.client.get_node_syncing().await.unwrap().data; - assert_eq!(api_response.el_offline, false); - assert_eq!(api_response.is_optimistic, false); - assert_eq!(api_response.is_syncing, false); + assert!(!api_response.el_offline); + assert!(!api_response.is_optimistic); + assert!(!api_response.is_syncing); } /// Check `node health` endpoint when the EL is offline. diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 080a393b4d0..7007a14466c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -274,10 +274,10 @@ impl ApiTester { let mock_builder_server = harness.set_mock_builder(beacon_url.clone()); // Start the mock builder service prior to building the chain out. 
- harness.runtime.task_executor.spawn( - async move { mock_builder_server.await }, - "mock_builder_server", - ); + harness + .runtime + .task_executor + .spawn(mock_builder_server, "mock_builder_server"); let mock_builder = harness.mock_builder.clone(); @@ -641,7 +641,7 @@ impl ApiTester { self } - pub async fn test_beacon_blocks_finalized(self) -> Self { + pub async fn test_beacon_blocks_finalized(self) -> Self { for block_id in self.interesting_block_ids() { let block_root = block_id.root(&self.chain); let block = block_id.full_block(&self.chain).await; @@ -678,7 +678,7 @@ impl ApiTester { self } - pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { + pub async fn test_beacon_blinded_blocks_finalized(self) -> Self { for block_id in self.interesting_block_ids() { let block_root = block_id.root(&self.chain); let block = block_id.full_block(&self.chain).await; @@ -819,7 +819,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let unsupported_media_response = self @@ -859,7 +859,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let validator_pubkey_ids = validator_indices .iter() @@ -910,7 +910,7 @@ impl ApiTester { for i in validator_indices { if i < state.balances().len() as u64 { validators.push(ValidatorBalanceData { - index: i as u64, + index: i, balance: *state.balances().get(i as usize).unwrap(), }); } @@ -944,7 +944,7 @@ impl ApiTester { let validator_index_ids = validator_indices .iter() .cloned() - .map(|i| ValidatorId::Index(i)) + .map(ValidatorId::Index) .collect::>(); let validator_pubkey_ids = validator_indices .iter() @@ -1012,7 +1012,7 @@ impl ApiTester { || statuses.contains(&status.superstatus()) { validators.push(ValidatorData { - index: i as u64, + index: i, balance: *state.balances().get(i as usize).unwrap(), status, 
validator, @@ -1641,11 +1641,7 @@ impl ApiTester { let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); let num_blobs = block.num_expected_blobs(); let blob_indices = if use_indices { - Some( - (0..num_blobs.saturating_sub(1) as u64) - .into_iter() - .collect::>(), - ) + Some((0..num_blobs.saturating_sub(1) as u64).collect::>()) } else { None }; @@ -1663,7 +1659,7 @@ impl ApiTester { blob_indices.map_or(num_blobs, |indices| indices.len()) ); let expected = block.slot(); - assert_eq!(result.get(0).unwrap().slot(), expected); + assert_eq!(result.first().unwrap().slot(), expected); self } @@ -1701,9 +1697,9 @@ impl ApiTester { break; } } - let test_slot = test_slot.expect(&format!( - "should be able to find a block matching zero_blobs={zero_blobs}" - )); + let test_slot = test_slot.unwrap_or_else(|| { + panic!("should be able to find a block matching zero_blobs={zero_blobs}") + }); match self .client @@ -1772,7 +1768,6 @@ impl ApiTester { .attestations() .map(|att| att.clone_as_attestation()) .collect::>() - .into() }, ); @@ -1909,7 +1904,7 @@ impl ApiTester { let result = match self .client - .get_beacon_light_client_updates::(current_sync_committee_period as u64, 1) + .get_beacon_light_client_updates::(current_sync_committee_period, 1) .await { Ok(result) => result, @@ -1921,7 +1916,7 @@ impl ApiTester { .light_client_server_cache .get_light_client_updates( &self.chain.store, - current_sync_committee_period as u64, + current_sync_committee_period, 1, &self.chain.spec, ) @@ -2314,7 +2309,7 @@ impl ApiTester { .unwrap() .data .is_syncing; - assert_eq!(is_syncing, true); + assert!(is_syncing); // Reset sync state. 
*self @@ -2364,7 +2359,7 @@ impl ApiTester { pub async fn test_get_node_peers_by_id(self) -> Self { let result = self .client - .get_node_peers_by_id(self.external_peer_id.clone()) + .get_node_peers_by_id(self.external_peer_id) .await .unwrap() .data; @@ -3514,6 +3509,7 @@ impl ApiTester { self } + #[allow(clippy::await_holding_lock)] // This is a test, so it should be fine. pub async fn test_get_validator_aggregate_attestation(self) -> Self { if self .chain @@ -4058,7 +4054,7 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); @@ -4085,7 +4081,7 @@ impl ApiTester { ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); // This is the graffiti of the mock execution layer, not the builder. 
assert_eq!(payload.extra_data(), mock_el_extra_data::()); @@ -4113,7 +4109,7 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); @@ -4137,7 +4133,7 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), DEFAULT_GAS_LIMIT); @@ -4183,7 +4179,7 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), builder_limit); @@ -4267,7 +4263,7 @@ impl ApiTester { ProduceBlockV3Response::Full(_) => panic!("Expecting a blinded payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); assert_eq!(payload.gas_limit(), 30_000_000); @@ -5140,9 +5136,8 @@ impl ApiTester { pub async fn test_builder_chain_health_optimistic_head(self) -> Self { // Make sure the next payload verification will return optimistic before advancing the chain. 
- self.harness.mock_execution_layer.as_ref().map(|el| { + self.harness.mock_execution_layer.as_ref().inspect(|el| { el.server.all_payloads_syncing(true); - el }); self.harness .extend_chain( @@ -5169,7 +5164,7 @@ impl ApiTester { .unwrap() .into(); - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); // If this cache is populated, it indicates fallback to the local EE was correctly used. @@ -5188,9 +5183,8 @@ impl ApiTester { pub async fn test_builder_v3_chain_health_optimistic_head(self) -> Self { // Make sure the next payload verification will return optimistic before advancing the chain. - self.harness.mock_execution_layer.as_ref().map(|el| { + self.harness.mock_execution_layer.as_ref().inspect(|el| { el.server.all_payloads_syncing(true); - el }); self.harness .extend_chain( @@ -5220,7 +5214,7 @@ impl ApiTester { ProduceBlockV3Response::Blinded(_) => panic!("Expecting a full payload"), }; - let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + let expected_fee_recipient = Address::from_low_u64_be(proposer_index); assert_eq!(payload.fee_recipient(), expected_fee_recipient); self @@ -6101,16 +6095,17 @@ impl ApiTester { assert_eq!(result.execution_optimistic, Some(false)); // Change head to be optimistic. - self.chain + if let Some(head_node) = self + .chain .canonical_head .fork_choice_write_lock() .proto_array_mut() .core_proto_array_mut() .nodes .last_mut() - .map(|head_node| { - head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) - }); + { + head_node.execution_status = ExecutionStatus::Optimistic(ExecutionBlockHash::zero()) + } // Check responses are now optimistic. let result = self @@ -6143,8 +6138,8 @@ async fn poll_events, eth2::Error>> + Unpin }; tokio::select! 
{ - _ = collect_stream_fut => {events} - _ = tokio::time::sleep(timeout) => { return events; } + _ = collect_stream_fut => { events } + _ = tokio::time::sleep(timeout) => { events } } } @@ -6180,31 +6175,31 @@ async fn test_unsupported_media_response() { } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -async fn beacon_get() { +async fn beacon_get_state_hashes() { ApiTester::new() - .await - .test_beacon_genesis() .await .test_beacon_states_root_finalized() .await - .test_beacon_states_fork_finalized() - .await .test_beacon_states_finality_checkpoints_finalized() .await - .test_beacon_headers_block_id_finalized() + .test_beacon_states_root() .await - .test_beacon_blocks_finalized::() + .test_beacon_states_finality_checkpoints() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_state_info() { + ApiTester::new() .await - .test_beacon_blinded_blocks_finalized::() + .test_beacon_genesis() .await - .test_debug_beacon_states_finalized() + .test_beacon_states_fork_finalized() .await - .test_beacon_states_root() + .test_debug_beacon_states_finalized() .await .test_beacon_states_fork() .await - .test_beacon_states_finality_checkpoints() - .await .test_beacon_states_validators() .await .test_beacon_states_validator_balances() @@ -6214,6 +6209,18 @@ async fn beacon_get() { .test_beacon_states_validator_id() .await .test_beacon_states_randao() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_blocks() { + ApiTester::new() + .await + .test_beacon_headers_block_id_finalized() + .await + .test_beacon_blocks_finalized() + .await + .test_beacon_blinded_blocks_finalized() .await .test_beacon_headers_all_slots() .await @@ -6228,6 +6235,12 @@ async fn beacon_get() { .test_beacon_blocks_attestations() .await .test_beacon_blocks_root() + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn beacon_get_pools() { + ApiTester::new() .await 
.test_get_beacon_pool_attestations() .await diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index 97ba72a2ac6..d92f986440c 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -3,24 +3,23 @@ name = "http_metrics" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = { workspace = true } -serde = { workspace = true } -slog = { workspace = true } beacon_chain = { workspace = true } -store = { workspace = true } lighthouse_network = { workspace = true } -slot_clock = { workspace = true } -metrics = { workspace = true } lighthouse_version = { workspace = true } -warp_utils = { workspace = true } malloc_utils = { workspace = true } +metrics = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +store = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } [dev-dependencies] -tokio = { workspace = true } +logging = { workspace = true } reqwest = { workspace = true } +tokio = { workspace = true } types = { workspace = true } -logging = { workspace = true } diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index eccc244d597..485f32b37a7 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -5,49 +5,49 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dependencies] -alloy-primitives = { workspace = true} +alloy-primitives = { workspace = true } +alloy-rlp = { workspace = true } +bytes = { workspace = true } +delay_map = { workspace = true } +directory = { workspace = true } +dirs = { workspace = true } discv5 = { workspace = true } -gossipsub = { workspace = true } -unsigned-varint = { version = "0.8", features = ["codec"] } -ssz_types = { workspace = 
true } -types = { workspace = true } -serde = { workspace = true } +either = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -slog = { workspace = true } -lighthouse_version = { workspace = true } -tokio = { workspace = true } -futures = { workspace = true } -dirs = { workspace = true } fnv = { workspace = true } -metrics = { workspace = true } -smallvec = { workspace = true } -tokio-io-timeout = "1" +futures = { workspace = true } +gossipsub = { workspace = true } +hex = { workspace = true } +itertools = { workspace = true } +libp2p-mplex = "0.42" +lighthouse_version = { workspace = true } lru = { workspace = true } lru_cache = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } -sha2 = { workspace = true } -snap = { workspace = true } -hex = { workspace = true } -tokio-util = { workspace = true } -tiny-keccak = "2" -task_executor = { workspace = true } +prometheus-client = "0.22.0" rand = { workspace = true } -directory = { workspace = true } regex = { workspace = true } +serde = { workspace = true } +sha2 = { workspace = true } +slog = { workspace = true } +smallvec = { workspace = true } +snap = { workspace = true } +ssz_types = { workspace = true } strum = { workspace = true } superstruct = { workspace = true } -prometheus-client = "0.22.0" +task_executor = { workspace = true } +tiny-keccak = "2" +tokio = { workspace = true } +tokio-io-timeout = "1" +tokio-util = { workspace = true } +types = { workspace = true } +unsigned-varint = { version = "0.8", features = ["codec"] } unused_port = { workspace = true } -delay_map = { workspace = true } -bytes = { workspace = true } -either = { workspace = true } -itertools = { workspace = true } -alloy-rlp = { workspace = true } # Local dependencies void = "1.0.2" -libp2p-mplex = "0.42" [dependencies.libp2p] version = "0.54" @@ -55,13 +55,13 @@ default-features = false features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", 
"plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"] [dev-dependencies] -slog-term = { workspace = true } -slog-async = { workspace = true } -tempfile = { workspace = true } -quickcheck = { workspace = true } -quickcheck_macros = { workspace = true } async-channel = { workspace = true } logging = { workspace = true } +quickcheck = { workspace = true } +quickcheck_macros = { workspace = true } +slog-async = { workspace = true } +slog-term = { workspace = true } +tempfile = { workspace = true } [features] libp2p-websocket = [] diff --git a/beacon_node/lighthouse_network/gossipsub/Cargo.toml b/beacon_node/lighthouse_network/gossipsub/Cargo.toml index 6cbe6d3a1cc..61f5730c08f 100644 --- a/beacon_node/lighthouse_network/gossipsub/Cargo.toml +++ b/beacon_node/lighthouse_network/gossipsub/Cargo.toml @@ -24,9 +24,10 @@ fnv = "1.0.7" futures = "0.3.30" futures-timer = "3.0.2" getrandom = "0.2.12" -hashlink.workspace = true +hashlink = { workspace = true } hex_fmt = "0.3.0" libp2p = { version = "0.54", default-features = false } +prometheus-client = "0.22.0" quick-protobuf = "0.8" quick-protobuf-codec = "0.3" rand = "0.8" @@ -35,7 +36,6 @@ serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" tracing = "0.1.37" void = "1.0.2" -prometheus-client = "0.22.0" web-time = "1.1.0" [dev-dependencies] diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index e76d6d27866..0a0a6ca754f 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -964,6 +964,9 @@ where request_info: (Id, RequestType), error: StreamUpgradeError, ) { + // This dialing is now considered failed + self.dial_negotiated -= 1; + let (id, req) = request_info; // map the error @@ -989,9 +992,6 @@ where StreamUpgradeError::Apply(other) => other, }; - // This dialing is now considered failed - self.dial_negotiated -= 1; - 
self.outbound_io_error_retries = 0; self.events_out .push(HandlerEvent::Err(HandlerErr::Outbound { diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs index 0ad31ff2e80..e46c69dc716 100644 --- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs +++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs @@ -250,18 +250,17 @@ impl futures::stream::Stream for GossipCache { Poll::Ready(Some(expired)) => { let expected_key = expired.key(); let (topic, data) = expired.into_inner(); - match self.topic_msgs.get_mut(&topic) { - Some(msgs) => { - let key = msgs.remove(&data); - debug_assert_eq!(key, Some(expected_key)); - if msgs.is_empty() { - // no more messages for this topic. - self.topic_msgs.remove(&topic); - } - } - None => { - #[cfg(debug_assertions)] - panic!("Topic for registered message is not present.") + let topic_msg = self.topic_msgs.get_mut(&topic); + debug_assert!( + topic_msg.is_some(), + "Topic for registered message is not present." + ); + if let Some(msgs) = topic_msg { + let key = msgs.remove(&data); + debug_assert_eq!(key, Some(expected_key)); + if msgs.is_empty() { + // no more messages for this topic. 
+ self.topic_msgs.remove(&topic); } } Poll::Ready(Some(Ok(topic))) diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 6fc818e9c96..44f6c54bbc4 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -5,51 +5,51 @@ authors = ["Sigma Prime "] edition = { workspace = true } [dev-dependencies] -sloggers = { workspace = true } +bls = { workspace = true } +eth2 = { workspace = true } +eth2_network_config = { workspace = true } genesis = { workspace = true } +gossipsub = { workspace = true } +kzg = { workspace = true } matches = "0.1.8" serde_json = { workspace = true } -slog-term = { workspace = true } slog-async = { workspace = true } -eth2 = { workspace = true } -gossipsub = { workspace = true } -eth2_network_config = { workspace = true } -kzg = { workspace = true } -bls = { workspace = true } +slog-term = { workspace = true } +sloggers = { workspace = true } [dependencies] alloy-primitives = { workspace = true } -async-channel = { workspace = true } +alloy-rlp = { workspace = true } anyhow = { workspace = true } +async-channel = { workspace = true } beacon_chain = { workspace = true } -store = { workspace = true } -lighthouse_network = { workspace = true } -types = { workspace = true } -slot_clock = { workspace = true } -slog = { workspace = true } -hex = { workspace = true } +beacon_processor = { workspace = true } +delay_map = { workspace = true } +derivative = { workspace = true } ethereum_ssz = { workspace = true } -ssz_types = { workspace = true } -futures = { workspace = true } -tokio = { workspace = true } -tokio-stream = { workspace = true } -smallvec = { workspace = true } -rand = { workspace = true } +execution_layer = { workspace = true } fnv = { workspace = true } -alloy-rlp = { workspace = true } -metrics = { workspace = true } -logging = { workspace = true } -task_executor = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } igd-next = "0.14" itertools = { workspace 
= true } +lighthouse_network = { workspace = true } +logging = { workspace = true } lru_cache = { workspace = true } -strum = { workspace = true } -derivative = { workspace = true } -delay_map = { workspace = true } +metrics = { workspace = true } operation_pool = { workspace = true } -execution_layer = { workspace = true } -beacon_processor = { workspace = true } parking_lot = { workspace = true } +rand = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +smallvec = { workspace = true } +ssz_types = { workspace = true } +store = { workspace = true } +strum = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } +types = { workspace = true } [features] # NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 9d774d97c15..7e27a91bd6b 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -527,7 +527,7 @@ impl TestRig { self.assert_event_journal( &expected .iter() - .map(|ev| Into::<&'static str>::into(ev)) + .map(Into::<&'static str>::into) .chain(std::iter::once(WORKER_FREED)) .chain(std::iter::once(NOTHING_TO_DO)) .collect::>(), diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index c46e46e0fae..32bbfcbcaa1 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -1,235 +1,229 @@ -#[cfg(not(debug_assertions))] -#[cfg(test)] -mod tests { - use crate::persisted_dht::load_dht; - use crate::{NetworkConfig, NetworkService}; - use beacon_chain::test_utils::BeaconChainHarness; - use beacon_chain::BeaconChainTypes; - use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; - use futures::StreamExt; - use 
lighthouse_network::types::{GossipEncoding, GossipKind}; - use lighthouse_network::{Enr, GossipTopic}; - use slog::{o, Drain, Level, Logger}; - use sloggers::{null::NullLoggerBuilder, Build}; - use std::str::FromStr; - use std::sync::Arc; - use tokio::runtime::Runtime; - use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; - - impl NetworkService { - fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> { - self.libp2p.get_topic_params(topic) - } +#![cfg(not(debug_assertions))] +#![cfg(test)] +use crate::persisted_dht::load_dht; +use crate::{NetworkConfig, NetworkService}; +use beacon_chain::test_utils::BeaconChainHarness; +use beacon_chain::BeaconChainTypes; +use beacon_processor::{BeaconProcessorChannels, BeaconProcessorConfig}; +use futures::StreamExt; +use lighthouse_network::types::{GossipEncoding, GossipKind}; +use lighthouse_network::{Enr, GossipTopic}; +use slog::{o, Drain, Level, Logger}; +use sloggers::{null::NullLoggerBuilder, Build}; +use std::str::FromStr; +use std::sync::Arc; +use tokio::runtime::Runtime; +use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId}; + +impl NetworkService { + fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> { + self.libp2p.get_topic_params(topic) } +} - fn get_logger(actual_log: bool) -> Logger { - if actual_log { - let drain = { - let decorator = slog_term::TermDecorator::new().build(); - let decorator = - logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); - let drain = slog_term::FullFormat::new(decorator).build().fuse(); - let drain = slog_async::Async::new(drain).chan_size(2048).build(); - drain.filter_level(Level::Debug) - }; - - Logger::root(drain.fuse(), o!()) - } else { - let builder = NullLoggerBuilder; - builder.build().expect("should build logger") - } - } - - #[test] - fn test_dht_persistence() { - let log = get_logger(false); - - let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) - 
.default_spec() - .deterministic_keypairs(8) - .fresh_ephemeral_store() - .build() - .chain; - - let store = beacon_chain.store.clone(); +fn get_logger(actual_log: bool) -> Logger { + if actual_log { + let drain = { + let decorator = slog_term::TermDecorator::new().build(); + let decorator = + logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).chan_size(2048).build(); + drain.filter_level(Level::Debug) + }; - let enr1 = Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap(); - let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap(); - let enrs = vec![enr1, enr2]; + Logger::root(drain.fuse(), o!()) + } else { + let builder = NullLoggerBuilder; + builder.build().expect("should build logger") + } +} - let runtime = Arc::new(Runtime::new().unwrap()); +#[test] +fn test_dht_persistence() { + let log = get_logger(false); + + let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .default_spec() + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .build() + .chain; + + let store = beacon_chain.store.clone(); + + let enr1 = Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap(); + let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap(); + let enrs = vec![enr1, enr2]; + + let runtime = 
Arc::new(Runtime::new().unwrap()); + + let (signal, exit) = async_channel::bounded(1); + let (shutdown_tx, _) = futures::channel::mpsc::channel(1); + let executor = + task_executor::TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx); + + let mut config = NetworkConfig::default(); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213); + config.discv5_config.table_filter = |_| true; // Do not ignore local IPs + config.upnp_enabled = false; + config.boot_nodes_enr = enrs.clone(); + let config = Arc::new(config); + runtime.block_on(async move { + // Create a new network service which implicitly gets dropped at the + // end of the block. + + let BeaconProcessorChannels { + beacon_processor_tx, + beacon_processor_rx: _beacon_processor_rx, + work_reprocessing_tx, + work_reprocessing_rx: _work_reprocessing_rx, + } = <_>::default(); + + let _network_service = NetworkService::start( + beacon_chain.clone(), + config, + executor, + None, + beacon_processor_tx, + work_reprocessing_tx, + ) + .await + .unwrap(); + drop(signal); + }); + + let raw_runtime = Arc::try_unwrap(runtime).unwrap(); + raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(300)); + + // Load the persisted dht from the store + let persisted_enrs = load_dht(store); + assert!( + persisted_enrs.contains(&enrs[0]), + "should have persisted the first ENR to store" + ); + assert!( + persisted_enrs.contains(&enrs[1]), + "should have persisted the second ENR to store" + ); +} - let (signal, exit) = async_channel::bounded(1); +// Test removing topic weight on old topics when a fork happens. +#[test] +fn test_removing_topic_weight_on_old_topics() { + let runtime = Arc::new(Runtime::new().unwrap()); + + // Capella spec + let mut spec = MinimalEthSpec::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(1)); + + // Build beacon chain. 
+ let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) + .spec(spec.clone().into()) + .deterministic_keypairs(8) + .fresh_ephemeral_store() + .mock_execution_layer() + .build() + .chain; + let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); + assert_eq!(next_fork_name, ForkName::Capella); + + // Build network service. + let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { + let (_, exit) = async_channel::bounded(1); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); let executor = task_executor::TaskExecutor::new( Arc::downgrade(&runtime), exit, - log.clone(), + get_logger(false), shutdown_tx, ); let mut config = NetworkConfig::default(); - config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21212, 21212, 21213); + config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); config.discv5_config.table_filter = |_| true; // Do not ignore local IPs config.upnp_enabled = false; - config.boot_nodes_enr = enrs.clone(); let config = Arc::new(config); - runtime.block_on(async move { - // Create a new network service which implicitly gets dropped at the - // end of the block. 
- - let BeaconProcessorChannels { - beacon_processor_tx, - beacon_processor_rx: _beacon_processor_rx, - work_reprocessing_tx, - work_reprocessing_rx: _work_reprocessing_rx, - } = <_>::default(); - - let _network_service = NetworkService::start( - beacon_chain.clone(), - config, - executor, - None, - beacon_processor_tx, - work_reprocessing_tx, - ) - .await - .unwrap(); - drop(signal); - }); - - let raw_runtime = Arc::try_unwrap(runtime).unwrap(); - raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(300)); - - // Load the persisted dht from the store - let persisted_enrs = load_dht(store); - assert!( - persisted_enrs.contains(&enrs[0]), - "should have persisted the first ENR to store" - ); - assert!( - persisted_enrs.contains(&enrs[1]), - "should have persisted the second ENR to store" - ); - } - // Test removing topic weight on old topics when a fork happens. - #[test] - fn test_removing_topic_weight_on_old_topics() { - let runtime = Arc::new(Runtime::new().unwrap()); - - // Capella spec - let mut spec = MinimalEthSpec::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.capella_fork_epoch = Some(Epoch::new(1)); - - // Build beacon chain. - let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec) - .spec(spec.clone().into()) - .deterministic_keypairs(8) - .fresh_ephemeral_store() - .mock_execution_layer() - .build() - .chain; - let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork"); - assert_eq!(next_fork_name, ForkName::Capella); - - // Build network service. 
- let (mut network_service, network_globals, _network_senders) = runtime.block_on(async { - let (_, exit) = async_channel::bounded(1); - let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = task_executor::TaskExecutor::new( - Arc::downgrade(&runtime), - exit, - get_logger(false), - shutdown_tx, - ); - - let mut config = NetworkConfig::default(); - config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, 21214, 21214, 21215); - config.discv5_config.table_filter = |_| true; // Do not ignore local IPs - config.upnp_enabled = false; - let config = Arc::new(config); - - let beacon_processor_channels = - BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); - NetworkService::build( - beacon_chain.clone(), - config, - executor.clone(), - None, - beacon_processor_channels.beacon_processor_tx, - beacon_processor_channels.work_reprocessing_tx, - ) - .await - .unwrap() - }); - - // Subscribe to the topics. - runtime.block_on(async { - while network_globals.gossipsub_subscriptions.read().len() < 2 { - if let Some(msg) = network_service.subnet_service.next().await { - network_service.on_subnet_service_msg(msg); - } + let beacon_processor_channels = + BeaconProcessorChannels::new(&BeaconProcessorConfig::default()); + NetworkService::build( + beacon_chain.clone(), + config, + executor.clone(), + None, + beacon_processor_channels.beacon_processor_tx, + beacon_processor_channels.work_reprocessing_tx, + ) + .await + .unwrap() + }); + + // Subscribe to the topics. + runtime.block_on(async { + while network_globals.gossipsub_subscriptions.read().len() < 2 { + if let Some(msg) = network_service.subnet_service.next().await { + network_service.on_subnet_service_msg(msg); } - }); - - // Make sure the service is subscribed to the topics. 
- let (old_topic1, old_topic2) = { - let mut subnets = SubnetId::compute_attestation_subnets( - network_globals.local_enr().node_id().raw(), - &spec, - ) - .collect::>(); - assert_eq!(2, subnets.len()); - - let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; - let old_topic1 = GossipTopic::new( - GossipKind::Attestation(subnets.pop().unwrap()), - GossipEncoding::SSZSnappy, - old_fork_digest, - ); - let old_topic2 = GossipTopic::new( - GossipKind::Attestation(subnets.pop().unwrap()), - GossipEncoding::SSZSnappy, - old_fork_digest, - ); - - (old_topic1, old_topic2) - }; - let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); - assert_eq!(2, subscriptions.len()); - assert!(subscriptions.contains(&old_topic1)); - assert!(subscriptions.contains(&old_topic2)); - let old_topic_params1 = network_service - .get_topic_params(old_topic1.clone()) - .expect("topic score params"); - assert!(old_topic_params1.topic_weight > 0.0); - let old_topic_params2 = network_service - .get_topic_params(old_topic2.clone()) - .expect("topic score params"); - assert!(old_topic_params2.topic_weight > 0.0); - - // Advance slot to the next fork - for _ in 0..MinimalEthSpec::slots_per_epoch() { - beacon_chain.slot_clock.advance_slot(); } + }); + + // Make sure the service is subscribed to the topics. + let (old_topic1, old_topic2) = { + let mut subnets = SubnetId::compute_attestation_subnets( + network_globals.local_enr().node_id().raw(), + &spec, + ) + .collect::>(); + assert_eq!(2, subnets.len()); + + let old_fork_digest = beacon_chain.enr_fork_id().fork_digest; + let old_topic1 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); + let old_topic2 = GossipTopic::new( + GossipKind::Attestation(subnets.pop().unwrap()), + GossipEncoding::SSZSnappy, + old_fork_digest, + ); - // Run `NetworkService::update_next_fork()`. 
- runtime.block_on(async { - network_service.update_next_fork(); - }); - - // Check that topic_weight on the old topics has been zeroed. - let old_topic_params1 = network_service - .get_topic_params(old_topic1) - .expect("topic score params"); - assert_eq!(0.0, old_topic_params1.topic_weight); - - let old_topic_params2 = network_service - .get_topic_params(old_topic2) - .expect("topic score params"); - assert_eq!(0.0, old_topic_params2.topic_weight); + (old_topic1, old_topic2) + }; + let subscriptions = network_globals.gossipsub_subscriptions.read().clone(); + assert_eq!(2, subscriptions.len()); + assert!(subscriptions.contains(&old_topic1)); + assert!(subscriptions.contains(&old_topic2)); + let old_topic_params1 = network_service + .get_topic_params(old_topic1.clone()) + .expect("topic score params"); + assert!(old_topic_params1.topic_weight > 0.0); + let old_topic_params2 = network_service + .get_topic_params(old_topic2.clone()) + .expect("topic score params"); + assert!(old_topic_params2.topic_weight > 0.0); + + // Advance slot to the next fork + for _ in 0..MinimalEthSpec::slots_per_epoch() { + beacon_chain.slot_clock.advance_slot(); } + + // Run `NetworkService::update_next_fork()`. + runtime.block_on(async { + network_service.update_next_fork(); + }); + + // Check that topic_weight on the old topics has been zeroed. 
+ let old_topic_params1 = network_service + .get_topic_params(old_topic1) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params1.topic_weight); + + let old_topic_params2 = network_service + .get_topic_params(old_topic2) + .expect("topic score params"); + assert_eq!(0.0, old_topic_params2.topic_weight); } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 3a002bf8703..d01c73118c6 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -877,11 +877,11 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(1); // Only run this test on the phase0 hard-fork. - if spec.altair_fork_epoch != None { + if spec.altair_fork_epoch.is_some() { return; } - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let slot = state.slot(); let committees = state .get_beacon_committees_at_slot(slot) @@ -902,10 +902,10 @@ mod release_tests { ); for (atts, aggregate) in &attestations { - let att2 = aggregate.as_ref().unwrap().message().aggregate().clone(); + let att2 = aggregate.as_ref().unwrap().message().aggregate(); let att1 = atts - .into_iter() + .iter() .map(|(att, _)| att) .take(2) .fold::>, _>(None, |att, new_att| { @@ -946,7 +946,7 @@ mod release_tests { .unwrap(); assert_eq!( - committees.get(0).unwrap().committee.len() - 2, + committees.first().unwrap().committee.len() - 2, earliest_attestation_validators( &att2_split.as_ref(), &state, @@ -963,7 +963,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(1); let op_pool = OperationPool::::new(); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let slot = state.slot(); let committees = state @@ -1020,7 +1020,7 @@ mod release_tests { let agg_att = &block_attestations[0]; assert_eq!( 
agg_att.num_set_aggregation_bits(), - spec.target_committee_size as usize + spec.target_committee_size ); // Prune attestations shouldn't do anything at this point. @@ -1039,7 +1039,7 @@ mod release_tests { fn attestation_duplicate() { let (harness, ref spec) = attestation_test_state::(1); - let state = get_current_state_initialize_epoch_cache(&harness, &spec); + let state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1082,7 +1082,7 @@ mod release_tests { fn attestation_pairwise_overlapping() { let (harness, ref spec) = attestation_test_state::(1); - let state = get_current_state_initialize_epoch_cache(&harness, &spec); + let state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1113,19 +1113,17 @@ mod release_tests { let aggs1 = atts1 .chunks_exact(step_size * 2) .map(|chunk| { - let agg = chunk.into_iter().map(|(att, _)| att).fold::, - >, _>( - None, - |att, new_att| { + let agg = chunk + .iter() + .map(|(att, _)| att) + .fold::>, _>(None, |att, new_att| { if let Some(mut a) = att { a.aggregate(new_att.to_ref()); Some(a) } else { Some(new_att.clone()) } - }, - ); + }); agg.unwrap() }) .collect::>(); @@ -1136,19 +1134,17 @@ mod release_tests { .as_slice() .chunks_exact(step_size * 2) .map(|chunk| { - let agg = chunk.into_iter().map(|(att, _)| att).fold::, - >, _>( - None, - |att, new_att| { + let agg = chunk + .iter() + .map(|(att, _)| att) + .fold::>, _>(None, |att, new_att| { if let Some(mut a) = att { a.aggregate(new_att.to_ref()); Some(a) } else { Some(new_att.clone()) } - }, - ); + }); agg.unwrap() }) .collect::>(); @@ -1181,7 +1177,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(num_committees); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); @@ -1194,7 +1190,7 @@ mod release_tests 
{ .collect::>(); let max_attestations = ::MaxAttestations::to_usize(); - let target_committee_size = spec.target_committee_size as usize; + let target_committee_size = spec.target_committee_size; let num_validators = num_committees * MainnetEthSpec::slots_per_epoch() as usize * spec.target_committee_size; @@ -1209,12 +1205,12 @@ mod release_tests { let insert_attestations = |attestations: Vec<(Attestation, SubnetId)>, step_size| { - let att_0 = attestations.get(0).unwrap().0.clone(); + let att_0 = attestations.first().unwrap().0.clone(); let aggs = attestations .chunks_exact(step_size) .map(|chunk| { chunk - .into_iter() + .iter() .map(|(att, _)| att) .fold::, _>( att_0.clone(), @@ -1296,7 +1292,7 @@ mod release_tests { let (harness, ref spec) = attestation_test_state::(num_committees); - let mut state = get_current_state_initialize_epoch_cache(&harness, &spec); + let mut state = get_current_state_initialize_epoch_cache(&harness, spec); let op_pool = OperationPool::::new(); let slot = state.slot(); @@ -1308,7 +1304,7 @@ mod release_tests { .collect::>(); let max_attestations = ::MaxAttestations::to_usize(); - let target_committee_size = spec.target_committee_size as usize; + let target_committee_size = spec.target_committee_size; // Each validator will have a multiple of 1_000_000_000 wei. // Safe from overflow unless there are about 18B validators (2^64 / 1_000_000_000). 
@@ -1329,12 +1325,12 @@ mod release_tests { let insert_attestations = |attestations: Vec<(Attestation, SubnetId)>, step_size| { - let att_0 = attestations.get(0).unwrap().0.clone(); + let att_0 = attestations.first().unwrap().0.clone(); let aggs = attestations .chunks_exact(step_size) .map(|chunk| { chunk - .into_iter() + .iter() .map(|(att, _)| att) .fold::, _>( att_0.clone(), @@ -1615,7 +1611,6 @@ mod release_tests { let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1674,7 +1669,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1711,7 +1705,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, @@ -1791,7 +1784,6 @@ mod release_tests { let state = harness.get_current_state(); let block_root = *state .get_block_root(state.slot() - Slot::new(1)) - .ok() .expect("block root should exist at slot"); let contributions = harness.make_sync_contributions( &state, diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 7cee16c3535..21d0cf8dec8 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -5,34 +5,34 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } beacon_chain = { workspace = true } criterion = { workspace = true } rand = { workspace = true, features = ["small_rng"] } +tempfile = { workspace = true } [dependencies] +bls = { workspace = true } db-key = "0.0.5" -leveldb = { version = "0.8" } -parking_lot = { workspace = true } 
-itertools = { workspace = true } +directory = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -superstruct = { workspace = true } -types = { workspace = true } +itertools = { workspace = true } +leveldb = { version = "0.8" } +logging = { workspace = true } +lru = { workspace = true } +metrics = { workspace = true } +parking_lot = { workspace = true } safe_arith = { workspace = true } -state_processing = { workspace = true } -slog = { workspace = true } serde = { workspace = true } -metrics = { workspace = true } -lru = { workspace = true } +slog = { workspace = true } sloggers = { workspace = true } -directory = { workspace = true } +smallvec = { workspace = true } +state_processing = { workspace = true } strum = { workspace = true } +superstruct = { workspace = true } +types = { workspace = true } xdelta3 = { workspace = true } zstd = { workspace = true } -bls = { workspace = true } -smallvec = { workspace = true } -logging = { workspace = true } [[bench]] name = "hdiff" diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 0498c7c1e2c..09ae9a32dd0 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -332,7 +332,7 @@ pub enum DBColumn { BeaconRandaoMixes, #[strum(serialize = "dht")] DhtEnrs, - /// For Optimistically Imported Merge Transition Blocks + /// DEPRECATED. For Optimistically Imported Merge Transition Blocks #[strum(serialize = "otb")] OptimisticTransitionBlock, /// DEPRECATED. Can be removed once schema v22 is buried by a hard fork. 
diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 2eb40f47b18..22eecdcc605 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ -136,7 +136,7 @@ where pub earliest_consolidation_epoch: Epoch, #[superstruct(only(Electra))] - pub pending_balance_deposits: List, + pub pending_deposits: List, #[superstruct(only(Electra))] pub pending_partial_withdrawals: List, @@ -403,7 +403,7 @@ impl TryInto> for PartialBeaconState { earliest_exit_epoch, consolidation_balance_to_consume, earliest_consolidation_epoch, - pending_balance_deposits, + pending_deposits, pending_partial_withdrawals, pending_consolidations ], diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index afb93f3657d..546cc2ed41c 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } [dependencies] beacon_chain = { workspace = true } -slot_clock = { workspace = true } -tokio = { workspace = true } slog = { workspace = true } +slot_clock = { workspace = true } task_executor = { workspace = true } +tokio = { workspace = true } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 02683a11727..44d7702e5ff 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -33,9 +33,8 @@ * [Signature Header](./api-vc-sig-header.md) * [Prometheus Metrics](./advanced_metrics.md) * [Lighthouse UI (Siren)](./lighthouse-ui.md) - * [Installation](./ui-installation.md) - * [Authentication](./ui-authentication.md) * [Configuration](./ui-configuration.md) + * [Authentication](./ui-authentication.md) * [Usage](./ui-usage.md) * [FAQs](./ui-faqs.md) * [Advanced Usage](./advanced.md) diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index d8d6ea61a18..b558279730e 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -56,7 +56,7 @@ that we have observed are: _a lot_ of 
space. It's even possible to push beyond that with `--hierarchy-exponents 0` which would store a full state every single slot (NOT RECOMMENDED). - **Less diff layers are not necessarily faster**. One might expect that the fewer diff layers there - are, the less work Lighthouse would have to do to reconstruct any particular state. In practise + are, the less work Lighthouse would have to do to reconstruct any particular state. In practice this seems to be offset by the increased size of diffs in each layer making the diffs take longer to apply. We observed no significant performance benefit from `--hierarchy-exponents 5,7,11`, and a substantial increase in space consumed. diff --git a/book/src/advanced_networking.md b/book/src/advanced_networking.md index 732b4f51e65..c0f6b5485ef 100644 --- a/book/src/advanced_networking.md +++ b/book/src/advanced_networking.md @@ -68,7 +68,7 @@ The steps to do port forwarding depends on the router, but the general steps are 1. Determine the default gateway IP: - On Linux: open a terminal and run `ip route | grep default`, the result should look something similar to `default via 192.168.50.1 dev wlp2s0 proto dhcp metric 600`. The `192.168.50.1` is your router management default gateway IP. - - On MacOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. + - On macOS: open a terminal and run `netstat -nr|grep default` and it should return the default gateway IP. - On Windows: open a command prompt and run `ipconfig` and look for the `Default Gateway` which will show you the gateway IP. The default gateway IP usually looks like 192.168.X.X. Once you obtain the IP, enter it to a web browser and it will lead you to the router management page. @@ -91,7 +91,7 @@ The steps to do port forwarding depends on the router, but the general steps are - Internal port: `9001` - IP address: Choose the device that is running Lighthouse. -1. 
To check that you have successfully opened the ports, go to [yougetsignal](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly setup port forwarding for the UDP ports (`9000` and `9001` by default). +1. To check that you have successfully opened the ports, go to [`yougetsignal`](https://www.yougetsignal.com/tools/open-ports/) and enter `9000` in the `port number`. If it shows "open", then you have successfully set up port forwarding. If it shows "closed", double check your settings, and also check that you have allowed firewall rules on port 9000. Note: this will only confirm if port 9000/TCP is open. You will need to ensure you have correctly set up port forwarding for the UDP ports (`9000` and `9001` by default). ## ENR Configuration @@ -141,7 +141,7 @@ To listen over both IPv4 and IPv6: - Set two listening addresses using the `--listen-address` flag twice ensuring the two addresses are one IPv4, and the other IPv6. When doing so, the `--port` and `--discovery-port` flags will apply exclusively to IPv4. Note - that this behaviour differs from the Ipv6 only case described above. + that this behaviour differs from the IPv6 only case described above. - If necessary, set the `--port6` flag to configure the port used for TCP and UDP over IPv6. This flag has no effect when listening over IPv6 only.
- If necessary, set the `--discovery-port6` flag to configure the IPv6 UDP diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index b63505c4901..5428ab8f9ae 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -508,23 +508,31 @@ curl "http://localhost:5052/lighthouse/database/info" | jq ```json { - "schema_version": 18, + "schema_version": 22, "config": { - "slots_per_restore_point": 8192, - "slots_per_restore_point_set_explicitly": false, "block_cache_size": 5, + "state_cache_size": 128, + "compression_level": 1, "historic_state_cache_size": 1, + "hdiff_buffer_cache_size": 16, "compact_on_init": false, "compact_on_prune": true, "prune_payloads": true, + "hierarchy_config": { + "exponents": [ + 5, + 7, + 11 + ] + }, "prune_blobs": true, "epochs_per_blob_prune": 1, "blob_prune_margin_epochs": 0 }, "split": { - "slot": "7454656", - "state_root": "0xbecfb1c8ee209854c611ebc967daa77da25b27f1a8ef51402fdbe060587d7653", - "block_root": "0x8730e946901b0a406313d36b3363a1b7091604e1346a3410c1a7edce93239a68" + "slot": "10530592", + "state_root": "0xd27e6ce699637cf9b5c7ca632118b7ce12c2f5070bb25a27ac353ff2799d4466", + "block_root": "0x71509a1cb374773d680cd77148c73ab3563526dacb0ab837bb0c87e686962eae" }, "anchor": { "anchor_slot": "7451168", @@ -543,8 +551,19 @@ curl "http://localhost:5052/lighthouse/database/info" | jq For more information about the split point, see the [Database Configuration](./advanced_database.md) docs. -The `anchor` will be `null` unless the node has been synced with checkpoint sync and state -reconstruction has yet to be completed. For more information +For archive nodes, the `anchor` will be: + +```json +"anchor": { + "anchor_slot": "0", + "oldest_block_slot": "0", + "oldest_block_parent": "0x0000000000000000000000000000000000000000000000000000000000000000", + "state_upper_limit": "0", + "state_lower_limit": "0" + }, +``` + +indicating that all states with slots `>= 0` are available, i.e., full state history. 
For more information on the specific meanings of these fields see the docs on [Checkpoint Sync](./checkpoint-sync.md#reconstructing-states). diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md index feb93724c06..f792ee870e0 100644 --- a/book/src/api-vc-auth-header.md +++ b/book/src/api-vc-auth-header.md @@ -20,11 +20,11 @@ Authorization: Bearer hGut6B8uEujufDXSmZsT0thnxvdvKFBvh The API token is stored as a file in the `validators` directory. For most users this is `~/.lighthouse/{network}/validators/api-token.txt`, unless overridden using the `--http-token-path` CLI parameter. Here's an -example using the `cat` command to print the token to the terminal, but any +example using the `cat` command to print the token for mainnet to the terminal, but any text editor will suffice: ```bash -cat api-token.txt +cat ~/.lighthouse/mainnet/validators/api-token.txt hGut6B8uEujufDXSmZsT0thnxvdvKFBvh ``` diff --git a/book/src/faq.md b/book/src/faq.md index 04e5ce5bc8f..d23951c8c77 100644 --- a/book/src/faq.md +++ b/book/src/faq.md @@ -92,7 +92,7 @@ If the reason for the error message is caused by no. 1 above, you may want to lo - Power outage. If power outages are an issue at your place, consider getting a UPS to avoid ungraceful shutdown of services. - The service file is not stopped properly. To overcome this, make sure that the process is stopped properly, e.g., during client updates. -- Out of memory (oom) error. This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. To confirm that the error is due to oom, run `sudo dmesg -T | grep killed` to look for killed processes. If you are using geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer. +- Out of memory (oom) error. 
This can happen when the system memory usage has reached its maximum and causes the execution engine to be killed. To confirm that the error is due to oom, run `sudo dmesg -T | grep killed` to look for killed processes. If you are using Geth as the execution client, a short term solution is to reduce the resources used. For example, you can reduce the cache by adding the flag `--cache 2048`. If the oom occurs rather frequently, a long term solution is to increase the memory capacity of the computer. ### I see beacon logs showing `Error during execution engine upcheck`, what should I do? @@ -302,7 +302,7 @@ An example of the log: (debug logs can be found under `$datadir/beacon/logs`): Delayed head block, set_as_head_time_ms: 27, imported_time_ms: 168, attestable_delay_ms: 4209, available_delay_ms: 4186, execution_time_ms: 201, blob_delay_ms: 3815, observed_delay_ms: 3984, total_delay_ms: 4381, slot: 1886014, proposer_index: 733, block_root: 0xa7390baac88d50f1cbb5ad81691915f6402385a12521a670bbbd4cd5f8bf3934, service: beacon, module: beacon_chain::canonical_head:1441 ``` -The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s which has past the window of attestation, the attestation wil fail. In the above example, the delay is mostly caused by late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest the block due to the block arriving late. +The field to look for is `attestable_delay`, which defines the time when a block is ready for the validator to attest. If the `attestable_delay` is greater than 4s which has passed the window of attestation, the attestation will fail.
In the above example, the delay is mostly caused by a late block observed by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest the block due to the block arriving late. Another example of log: @@ -315,7 +315,7 @@ In this example, we see that the `execution_time_ms` is 4694ms. The `execution_t ### Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? -In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone elses performance. +In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch and if the proposer of that slot releases the block late, then you will get penalised for missing the target and head votes. Your attestation performance does not only depend on your own setup, but also on everyone else's performance. You could also check for the sync aggregate participation percentage on block explorers such as [beaconcha.in](https://beaconcha.in/). A low sync aggregate participation percentage (e.g., 60-70%) indicates that the block that you are assigned to attest to may be published late. As a result, your validator fails to correctly attest to the block. diff --git a/book/src/graffiti.md b/book/src/graffiti.md index ba9c7d05d70..7b402ea866f 100644 --- a/book/src/graffiti.md +++ b/book/src/graffiti.md @@ -4,7 +4,7 @@ Lighthouse provides four options for setting validator graffiti. ## 1.
Using the "--graffiti-file" flag on the validator client -Users can specify a file with the `--graffiti-file` flag. This option is useful for dynamically changing graffitis for various use cases (e.g. drawing on the beaconcha.in graffiti wall). This file is loaded once on startup and reloaded everytime a validator is chosen to propose a block. +Users can specify a file with the `--graffiti-file` flag. This option is useful for dynamically changing graffitis for various use cases (e.g. drawing on the beaconcha.in graffiti wall). This file is loaded once on startup and reloaded every time a validator is chosen to propose a block. Usage: `lighthouse vc --graffiti-file graffiti_file.txt` diff --git a/book/src/homebrew.md b/book/src/homebrew.md index da92dcb26ce..f94764889e6 100644 --- a/book/src/homebrew.md +++ b/book/src/homebrew.md @@ -31,6 +31,6 @@ Alternatively, you can find the `lighthouse` binary at: The [formula][] is kept up-to-date by the Homebrew community and a bot that lists for new releases. -The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repo. +The package source can be found in the [homebrew-core](https://github.com/Homebrew/homebrew-core/blob/master/Formula/l/lighthouse.rb) repository. 
[formula]: https://formulae.brew.sh/formula/lighthouse diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index 4a00f33aa44..fca156bda3f 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -46,24 +46,31 @@ You can track the reasons for re-orgs being attempted (or not) via Lighthouse's A pair of messages at `INFO` level will be logged if a re-org opportunity is detected: -> INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 - -> INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 +```text +INFO Attempting re-org due to weak head threshold_weight: 45455983852725, head_weight: 0, parent: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, weak_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 +``` This should be followed shortly after by a `INFO` log indicating that a re-org occurred. 
This is expected and normal: -> INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +```text +INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +``` In case a re-org is not viable (which should be most of the time), Lighthouse will just propose a block as normal and log the reason the re-org was not attempted at debug level: -> DEBG Not attempting re-org reason: head not late +```text +DEBG Not attempting re-org reason: head not late +``` If you are interested in digging into the timing of `forkchoiceUpdated` messages sent to the execution layer, there is also a debug log for the suppression of `forkchoiceUpdated` messages when Lighthouse thinks that a re-org is likely: -> DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +```text +DEBG Fork choice update overridden slot: 1105320, override: 0x09d953b69041f280758400c671130d174113bbf57c2d26553a77fb514cad4890, canonical_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +``` [the spec]: https://github.com/ethereum/consensus-specs/pull/3034 diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md index 106a5e89472..f2662f4a69a 100644 --- a/book/src/lighthouse-ui.md +++ b/book/src/lighthouse-ui.md @@ -21,7 +21,6 @@ The UI is currently in active development. 
It resides in the See the following Siren specific topics for more context-specific information: -- [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. - [Configuration Guide](./ui-configuration.md) - Explanation of how to setup and configure Siren. - [Authentication Guide](./ui-authentication.md) - Explanation of how Siren authentication works and protects validator actions. diff --git a/book/src/ui-authentication.md b/book/src/ui-authentication.md index 9e3a94db78d..81b867bae26 100644 --- a/book/src/ui-authentication.md +++ b/book/src/ui-authentication.md @@ -2,12 +2,12 @@ ## Siren Session -For enhanced security, Siren will require users to authenticate with their session password to access the dashboard. This is crucial because Siren now includes features that can permanently alter the status of user validators. The session password must be set during the [installation](./ui-installation.md) process before running the Docker or local build, either in an `.env` file or via Docker flags. +For enhanced security, Siren will require users to authenticate with their session password to access the dashboard. This is crucial because Siren now includes features that can permanently alter the status of the user's validators. The session password must be set during the [configuration](./ui-configuration.md) process before running the Docker or local build, either in an `.env` file or via Docker flags. ![exit](imgs/ui-session.png) ## Protected Actions -Prior to executing any sensitive validator action, Siren will request authentication of the session password. If you wish to update your password please refer to the Siren [installation process](./ui-installation.md). +Prior to executing any sensitive validator action, Siren will request authentication of the session password. If you wish to update your password please refer to the Siren [configuration process](./ui-configuration.md). 
![exit](imgs/ui-auth.png) diff --git a/book/src/ui-configuration.md b/book/src/ui-configuration.md index eeb2c9a51cd..34cc9fe7ca6 100644 --- a/book/src/ui-configuration.md +++ b/book/src/ui-configuration.md @@ -1,37 +1,116 @@ -# Configuration +# 📦 Installation -Siren requires a connection to both a Lighthouse Validator Client and a Lighthouse Beacon Node. -To enable connection, you must generate .env file based on the provided .env.example +Siren supports any operating system that supports containers and/or NodeJS 18, this includes Linux, MacOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren). + +## Version Requirement + +To ensure proper functionality, the Siren app requires Lighthouse v4.3.0 or higher. You can find these versions on the [releases](https://github.com/sigp/lighthouse/releases) page of the Lighthouse repository. -## Connecting to the Clients +## Configuration + +Siren requires a connection to both a Lighthouse Validator Client and a Lighthouse Beacon Node. Both the Beacon node and the Validator client need to have their HTTP APIs enabled. -These ports should be accessible from Siren. +These ports should be accessible from Siren. This means adding the flag `--http` on both beacon node and validator client. To enable the HTTP API for the beacon node, utilize the `--gui` CLI flag. This action ensures that the HTTP API can be accessed by other software on the same machine. > The Beacon Node must be run with the `--gui` flag set. -If you require accessibility from another machine within the network, configure the `--http-address` to match the local LAN IP of the system running the Beacon Node and Validator Client. +## Running the Docker container (Recommended) + +We recommend running Siren's container next to your beacon node (on the same server), as it's essentially a webapp that you can access with any browser. + + 1. 
Create a directory to run Siren: + + ```bash + cd ~ + mkdir Siren + cd Siren + ``` + + 1. Create a configuration file in the `Siren` directory: `nano .env` and insert the following fields to the `.env` file. The field values are given here as an example, modify the fields as necessary. For example, the `API_TOKEN` can be obtained from [`Validator Client Authorization Header`](./api-vc-auth-header.md) + + A full example with all possible configuration options can be found [here](https://github.com/sigp/siren/blob/stable/.env.example). + + ``` + BEACON_URL=http://localhost:5052 + VALIDATOR_URL=http://localhost:5062 + API_TOKEN=R6YhbDO6gKjNMydtZHcaCovFbQ0izq5Hk + SESSION_PASSWORD=your_password + ``` + + 1. You can now start Siren with: + + ```bash + docker run --rm -ti --name siren --env-file $PWD/.env --net host sigp/siren + ``` + + Note that, due to the `--net=host` flag, this will expose Siren on ports 3000, 80, and 443. Preferably, only the latter should be accessible. Adjust your firewall and/or skip the flag wherever possible. + + If it fails to start, an error message will be shown. For example, the error + + ``` + http://localhost:5062 unreachable, check settings and connection + ``` + + means that the validator client is not running, or the `--http` flag is not provided, or otherwise inaccessible from within the container. Another common error is: + + ``` + validator api issue, server response: 403 + ``` + + which means that the API token is incorrect. Check that you have provided the correct token in the field `API_TOKEN` in `.env`. + + When Siren has successfully started, you should see the log `LOG [NestApplication] Nest application successfully started +118ms`, indicating that Siren has started. + + 1. Siren is now accessible at `https://` (when used with `--net=host`). You will get a warning about an invalid certificate, this can be safely ignored. 
+ + > Note: We recommend setting a strong password when running Siren to protect it from unauthorized access. + +Advanced users can mount their own certificates or disable SSL altogether, see the `SSL Certificates` section below. + +## Building From Source + +### Docker + +The docker image can be built with the following command: +`docker build -f Dockerfile -t siren .` + +### Building locally + +To build from source, ensure that your system has `Node v18.18` and `yarn` installed. + +#### Build and run the backend + +Navigate to the backend directory `cd backend`. Install all required Node packages by running `yarn`. Once the installation is complete, compile the backend with `yarn build`. Deploy the backend in a production environment, `yarn start:production`. This ensures optimal performance. + +#### Build and run the frontend + +After initializing the backend, return to the root directory. Install all frontend dependencies by executing `yarn`. Build the frontend using `yarn build`. Start the frontend production server with `yarn start`. + +This will allow you to access siren at `http://localhost:3000` by default. + +## Advanced configuration + +### About self-signed SSL certificates + +By default, internally, Siren is running on port 80 (plain, behind nginx), port 3000 (plain, direct) and port 443 (with SSL, behind nginx). Siren will generate and use a self-signed certificate on startup. This will generate a security warning when you try to access the interface. We recommend only disabling SSL if you access Siren over a local LAN or otherwise highly trusted or encrypted network (i.e. VPN). + +#### Generating persistent SSL certificates and installing them to your system -> To access from another machine on the same network (192.168.0.200) set the Beacon Node and Validator Client `--http-address` as `192.168.0.200`. When this is set, the validator client requires the flag `--beacon-nodes http://192.168.0.200:5052` to connect to the beacon node.
+ [mkcert](https://github.com/FiloSottile/mkcert) is a tool that makes it super easy to generate a self-signed certificate that is trusted by your browser. -In a similar manner, the validator client requires activation of the `--http` flag, along with the optional consideration of configuring the `--http-address` flag. If `--http-address` flag is set on the Validator Client, then the `--unencrypted-http-transport` flag is required as well. These settings will ensure compatibility with Siren's connectivity requirements. +To use it for `siren`, install it following the instructions. Then, run `mkdir certs; mkcert -cert-file certs/cert.pem -key-file certs/key.pem 127.0.0.1 localhost` (add or replace any IP or hostname that you would use to access it at the end of this command). +To use these generated certificates, add this to your `docker run` command: `-v $PWD/certs:/certs` -If you run the Docker container, it will fail to startup if your BN/VC are not accessible, or if you provided a wrong API token. +The nginx SSL config inside Siren's container expects 3 files: `/certs/cert.pem` `/certs/key.pem` `/certs/key.pass`. If `/certs/cert.pem` does not exist, it will generate a self-signed certificate as mentioned above. If `/certs/cert.pem` does exist, it will attempt to use your provided or persisted certificates. -## API Token +### Configuration through environment variables -The API Token is a secret key that allows you to connect to the validator -client. The validator client's HTTP API is guarded by this key because it -contains sensitive validator information and the ability to modify -validators. Please see [`Validator Authorization`](./api-vc-auth-header.md) -for further details. +For those who prefer to use environment variables to configure Siren instead of using an `.env` file, this is fully supported. In some cases this may even be preferred. -Siren requires this token in order to connect to the Validator client.
-The token is located in the default data directory of the validator -client. The default path is -`~/.lighthouse//validators/api-token.txt`. +#### Docker installed through `snap` -The contents of this file for the desired validator client needs to be -entered. +If you installed Docker through a snap (i.e. on Ubuntu), Docker will have trouble accessing the `.env` file. In this case it is highly recommended to pass the config to the container with environment variables. +Note that the defaults in `.env.example` will be used as fallback, if no other value is provided. diff --git a/book/src/ui-faqs.md b/book/src/ui-faqs.md index efa6d3d4ab2..29de889e5fc 100644 --- a/book/src/ui-faqs.md +++ b/book/src/ui-faqs.md @@ -6,19 +6,20 @@ Yes, the most current Siren version requires Lighthouse v4.3.0 or higher to func ## 2. Where can I find my API token? -The required Api token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). +The required API token may be found in the default data directory of the validator client. For more information please refer to the lighthouse ui configuration [`api token section`](./api-vc-auth-header.md). ## 3. How do I fix the Node Network Errors? -If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui configuration and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). +If you receive a red notification with a BEACON or VALIDATOR NODE NETWORK ERROR you can refer to the lighthouse ui [`configuration`](./ui-configuration.md#configuration). ## 4. How do I connect Siren to Lighthouse from a different computer on the same network? -Siren is a webapp, you can access it like any other website. We don't recommend exposing it to the internet; if you require remote access a VPN or (authenticated) reverse proxy is highly recommended. 
+Siren is a webapp, you can access it like any other website. We don't recommend exposing it to the internet; if you require remote access a VPN or (authenticated) reverse proxy is highly recommended. +That being said, it is entirely possible to have it published over the internet, how to do that goes well beyond the scope of this document but we want to emphasize once more the need for *at least* SSL encryption if you choose to do so. ## 5. How can I use Siren to monitor my validators remotely when I am not at home? -Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`connecting to clients section`](./ui-configuration.md#connecting-to-the-clients). +Most contemporary home routers provide options for VPN access in various ways. A VPN permits a remote computer to establish a connection with internal computers within a home network. With a VPN configuration in place, connecting to the VPN enables you to treat your computer as if it is part of your local home network. The connection process involves following the setup steps for connecting via another machine on the same network on the Siren configuration page and [`configuration`](./ui-configuration.md#configuration). ## 6. Does Siren support reverse proxy or DNS named addresses? 
diff --git a/book/src/ui-installation.md b/book/src/ui-installation.md index 1444c0d6331..9cd84e5160b 100644 --- a/book/src/ui-installation.md +++ b/book/src/ui-installation.md @@ -1,6 +1,6 @@ # 📦 Installation -Siren supports any operating system that supports container runtimes and/or NodeJS 18, this includes Linux, MacOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren) , but running the application directly is also possible. +Siren supports any operating system that supports containers and/or NodeJS 18, this includes Linux, macOS, and Windows. The recommended way of running Siren is by launching the [docker container](https://hub.docker.com/r/sigp/siren) , but running the application directly is also possible. ## Version Requirement diff --git a/book/src/validator-inclusion.md b/book/src/validator-inclusion.md index 092c813a1ea..eef563dcdb7 100644 --- a/book/src/validator-inclusion.md +++ b/book/src/validator-inclusion.md @@ -56,7 +56,6 @@ The following fields are returned: able to vote) during the current epoch. - `current_epoch_target_attesting_gwei`: the total staked gwei that attested to the majority-elected Casper FFG target epoch during the current epoch. -- `previous_epoch_active_gwei`: as per `current_epoch_active_gwei`, but during the previous epoch. - `previous_epoch_target_attesting_gwei`: see `current_epoch_target_attesting_gwei`. - `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a head beacon block that is in the canonical chain. 
diff --git a/book/src/validator-manager.md b/book/src/validator-manager.md index a71fab1e3ad..11df2af0378 100644 --- a/book/src/validator-manager.md +++ b/book/src/validator-manager.md @@ -32,3 +32,4 @@ The `validator-manager` boasts the following features: - [Creating and importing validators using the `create` and `import` commands.](./validator-manager-create.md) - [Moving validators between two VCs using the `move` command.](./validator-manager-move.md) +- [Managing validators such as delete, import and list validators.](./validator-manager-api.md) diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index 6439ea83a32..bbc95460ec9 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -134,7 +134,7 @@ validator_monitor_attestation_simulator_source_attester_hit_total validator_monitor_attestation_simulator_source_attester_miss_total ``` -A grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). +A Grafana dashboard to view the metrics for attestation simulator is available [here](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/AttestationSimulator.json). The attestation simulator provides an insight into the attestation performance of a beacon node. It can be used as an indication of how expediently the beacon node has completed importing blocks within the 4s time frame for an attestation to be made. 
diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index c60d308cbbf..7c8d2b16fd4 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -6,19 +6,19 @@ edition = { workspace = true } [dependencies] beacon_node = { workspace = true } +bytes = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } -lighthouse_network = { workspace = true } -types = { workspace = true } +eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } -slog = { workspace = true } -tokio = { workspace = true } +hex = { workspace = true } +lighthouse_network = { workspace = true } log = { workspace = true } -slog-term = { workspace = true } logging = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } slog-async = { workspace = true } slog-scope = "4.3.0" -hex = { workspace = true } -serde = { workspace = true } -eth2_network_config = { workspace = true } -bytes = { workspace = true } +slog-term = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index e66bf14233a..dece975d37e 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -3,20 +3,19 @@ name = "account_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = { workspace = true } -eth2_wallet = { workspace = true } +directory = { workspace = true } eth2_keystore = { workspace = true } +eth2_wallet = { workspace = true } filesystem = { workspace = true } -zeroize = { workspace = true } +rand = { workspace = true } +regex = { workspace = true } +rpassword = "5.0.0" serde = { workspace = true } serde_yaml = { workspace = true } slog = { workspace = true } types = { workspace = true } validator_dir = { workspace = true } -regex = { workspace = true } -rpassword = 
"5.0.0" -directory = { workspace = true } +zeroize = { workspace = true } diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 73823ae24e9..f3c166bda9e 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -3,16 +3,15 @@ name = "clap_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] alloy-primitives = { workspace = true } clap = { workspace = true } -hex = { workspace = true } dirs = { workspace = true } eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } +hex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } diff --git a/common/compare_fields_derive/Cargo.toml b/common/compare_fields_derive/Cargo.toml index b4bbbaa4369..19682bf3673 100644 --- a/common/compare_fields_derive/Cargo.toml +++ b/common/compare_fields_derive/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } proc-macro = true [dependencies] -syn = { workspace = true } quote = { workspace = true } +syn = { workspace = true } diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index a03ac2178f8..953fde1af72 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -7,13 +7,13 @@ edition = { workspace = true } build = "build.rs" [build-dependencies] +hex = { workspace = true } reqwest = { workspace = true } serde_json = { workspace = true } sha2 = { workspace = true } -hex = { workspace = true } [dependencies] -types = { workspace = true } +ethabi = "16.0.0" ethereum_ssz = { workspace = true } tree_hash = { workspace = true } -ethabi = "16.0.0" +types = { workspace = true } diff --git a/common/directory/Cargo.toml b/common/directory/Cargo.toml index f7243372618..9c3ced90977 100644 --- a/common/directory/Cargo.toml +++ 
b/common/directory/Cargo.toml @@ -3,7 +3,6 @@ name = "directory" version = "0.1.0" authors = ["pawan "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index f735b4c6888..9d6dea100d4 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -3,30 +3,30 @@ name = "eth2" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -serde = { workspace = true } -serde_json = { workspace = true } -ssz_types = { workspace = true } -types = { workspace = true } -reqwest = { workspace = true } -lighthouse_network = { workspace = true } -proto_array = { workspace = true } -ethereum_serde_utils = { workspace = true } +derivative = { workspace = true } eth2_keystore = { workspace = true } -zeroize = { workspace = true } -sensitive_url = { workspace = true } +ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -futures-util = "0.3.8" futures = { workspace = true } -store = { workspace = true } -slashing_protection = { workspace = true } +futures-util = "0.3.8" +lighthouse_network = { workspace = true } mediatype = "0.19.13" pretty_reqwest_error = { workspace = true } -derivative = { workspace = true } +proto_array = { workspace = true } +reqwest = { workspace = true } +reqwest-eventsource = "0.5.0" +sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +slashing_protection = { workspace = true } +ssz_types = { workspace = true } +store = { workspace = true } +types = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] tokio = { workspace = true } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 522c6414eae..12b1538984e 100644 --- 
a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -27,6 +27,7 @@ use reqwest::{ Body, IntoUrl, RequestBuilder, Response, }; pub use reqwest::{StatusCode, Url}; +use reqwest_eventsource::{Event, EventSource}; pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; use ssz::Encode; @@ -52,6 +53,8 @@ pub const SSZ_CONTENT_TYPE_HEADER: &str = "application/octet-stream"; pub enum Error { /// The `reqwest` client raised an error. HttpClient(PrettyReqwestError), + /// The `reqwest_eventsource` client raised an error. + SseClient(reqwest_eventsource::Error), /// The server returned an error message where the body was able to be parsed. ServerMessage(ErrorMessage), /// The server returned an error message with an array of errors. @@ -93,6 +96,13 @@ impl Error { pub fn status(&self) -> Option { match self { Error::HttpClient(error) => error.inner().status(), + Error::SseClient(error) => { + if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error { + Some(*status) + } else { + None + } + } Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), Error::StatusCode(status) => Some(*status), @@ -2592,16 +2602,29 @@ impl BeaconNodeHttpClient { .join(","); path.query_pairs_mut().append_pair("topics", &topic_string); - Ok(self - .client - .get(path) - .send() - .await? - .bytes_stream() - .map(|next| match next { - Ok(bytes) => EventKind::from_sse_bytes(bytes.as_ref()), - Err(e) => Err(Error::HttpClient(e.into())), - })) + let mut es = EventSource::get(path); + // If we don't await `Event::Open` here, then the consumer + // will not get any Message events until they start awaiting the stream. + // This is a way to register the stream with the sse server before + // message events start getting emitted. 
+ while let Some(event) = es.next().await { + match event { + Ok(Event::Open) => break, + Err(err) => return Err(Error::SseClient(err)), + // This should never happen as we are guaranteed to get the + // Open event before any message starts coming through. + Ok(Event::Message(_)) => continue, + } + } + Ok(Box::pin(es.filter_map(|event| async move { + match event { + Ok(Event::Open) => None, + Ok(Event::Message(message)) => { + Some(EventKind::from_sse_bytes(&message.event, &message.data)) + } + Err(err) => Some(Err(Error::SseClient(err))), + } + }))) } /// `POST validator/duties/sync/{epoch}` diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index c187399ebd7..a303953a863 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -13,7 +13,7 @@ use serde_json::Value; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; use std::fmt::{self, Display}; -use std::str::{from_utf8, FromStr}; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use types::beacon_block_body::KzgCommitments; @@ -1153,24 +1153,7 @@ impl EventKind { } } - pub fn from_sse_bytes(message: &[u8]) -> Result { - let s = from_utf8(message) - .map_err(|e| ServerError::InvalidServerSentEvent(format!("{:?}", e)))?; - - let mut split = s.split('\n'); - let event = split - .next() - .ok_or_else(|| { - ServerError::InvalidServerSentEvent("Could not parse event tag".to_string()) - })? - .trim_start_matches("event:"); - let data = split - .next() - .ok_or_else(|| { - ServerError::InvalidServerSentEvent("Could not parse data tag".to_string()) - })? 
- .trim_start_matches("data:"); - + pub fn from_sse_bytes(event: &str, data: &str) -> Result { match event { "attestation" => Ok(EventKind::Attestation(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Attestation: {:?}", e)), diff --git a/common/eth2_config/Cargo.toml b/common/eth2_config/Cargo.toml index 20c3b0b6f26..509f5ff87e2 100644 --- a/common/eth2_config/Cargo.toml +++ b/common/eth2_config/Cargo.toml @@ -5,5 +5,5 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -types = { workspace = true } paste = { workspace = true } +types = { workspace = true } diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 5971b934e0c..c19b32014e1 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -3,16 +3,15 @@ name = "eth2_interop_keypairs" version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -num-bigint = "0.4.2" +bls = { workspace = true } ethereum_hashing = { workspace = true } hex = { workspace = true } -serde_yaml = { workspace = true } +num-bigint = "0.4.2" serde = { workspace = true } -bls = { workspace = true } +serde_yaml = { workspace = true } [dev-dependencies] base64 = "0.13.0" diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 09cf2072d2f..a255e042291 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -7,25 +7,25 @@ edition = { workspace = true } build = "build.rs" [build-dependencies] -zip = { workspace = true } eth2_config = { workspace = true } +zip = { workspace = true } [dev-dependencies] +ethereum_ssz = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } -ethereum_ssz = { workspace = true } [dependencies] -serde_yaml = { workspace = true } -types 
= { workspace = true } -eth2_config = { workspace = true } +bytes = { workspace = true } discv5 = { workspace = true } -reqwest = { workspace = true } +eth2_config = { workspace = true } +kzg = { workspace = true } +logging = { workspace = true } pretty_reqwest_error = { workspace = true } -sha2 = { workspace = true } -url = { workspace = true } +reqwest = { workspace = true } sensitive_url = { workspace = true } +serde_yaml = { workspace = true } +sha2 = { workspace = true } slog = { workspace = true } -logging = { workspace = true } -bytes = { workspace = true } -kzg = { workspace = true } +types = { workspace = true } +url = { workspace = true } diff --git a/common/eth2_wallet_manager/Cargo.toml b/common/eth2_wallet_manager/Cargo.toml index f4717570653..a6eb24c78c2 100644 --- a/common/eth2_wallet_manager/Cargo.toml +++ b/common/eth2_wallet_manager/Cargo.toml @@ -3,7 +3,6 @@ name = "eth2_wallet_manager" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/eth2_wallet_manager/src/wallet_manager.rs b/common/eth2_wallet_manager/src/wallet_manager.rs index 3dd419a48b5..c988ca4135e 100644 --- a/common/eth2_wallet_manager/src/wallet_manager.rs +++ b/common/eth2_wallet_manager/src/wallet_manager.rs @@ -296,10 +296,10 @@ mod tests { ) .expect("should create first wallet"); - let uuid = w.wallet().uuid().clone(); + let uuid = *w.wallet().uuid(); assert_eq!( - load_wallet_raw(&base_dir, &uuid).nextaccount(), + load_wallet_raw(base_dir, &uuid).nextaccount(), 0, "should start wallet with nextaccount 0" ); @@ -308,7 +308,7 @@ mod tests { w.next_validator(WALLET_PASSWORD, &[50; 32], &[51; 32]) .expect("should create validator"); assert_eq!( - load_wallet_raw(&base_dir, &uuid).nextaccount(), + load_wallet_raw(base_dir, &uuid).nextaccount(), i, "should update wallet with nextaccount {}", i @@ -333,54 +333,54 @@ mod tests { 
let base_dir = dir.path(); let mgr = WalletManager::open(base_dir).unwrap(); - let uuid_a = create_wallet(&mgr, 0).wallet().uuid().clone(); - let uuid_b = create_wallet(&mgr, 1).wallet().uuid().clone(); + let uuid_a = *create_wallet(&mgr, 0).wallet().uuid(); + let uuid_b = *create_wallet(&mgr, 1).wallet().uuid(); - let locked_a = LockedWallet::open(&base_dir, &uuid_a).expect("should open wallet a"); + let locked_a = LockedWallet::open(base_dir, &uuid_a).expect("should open wallet a"); assert!( - lockfile_path(&base_dir, &uuid_a).exists(), + lockfile_path(base_dir, &uuid_a).exists(), "lockfile should exist" ); drop(locked_a); assert!( - !lockfile_path(&base_dir, &uuid_a).exists(), + !lockfile_path(base_dir, &uuid_a).exists(), "lockfile have been cleaned up" ); - let locked_a = LockedWallet::open(&base_dir, &uuid_a).expect("should open wallet a"); - let locked_b = LockedWallet::open(&base_dir, &uuid_b).expect("should open wallet b"); + let locked_a = LockedWallet::open(base_dir, &uuid_a).expect("should open wallet a"); + let locked_b = LockedWallet::open(base_dir, &uuid_b).expect("should open wallet b"); assert!( - lockfile_path(&base_dir, &uuid_a).exists(), + lockfile_path(base_dir, &uuid_a).exists(), "lockfile a should exist" ); assert!( - lockfile_path(&base_dir, &uuid_b).exists(), + lockfile_path(base_dir, &uuid_b).exists(), "lockfile b should exist" ); - match LockedWallet::open(&base_dir, &uuid_a) { + match LockedWallet::open(base_dir, &uuid_a) { Err(Error::LockfileError(_)) => {} _ => panic!("did not get locked error"), }; drop(locked_a); - LockedWallet::open(&base_dir, &uuid_a) + LockedWallet::open(base_dir, &uuid_a) .expect("should open wallet a after previous instance is dropped"); - match LockedWallet::open(&base_dir, &uuid_b) { + match LockedWallet::open(base_dir, &uuid_b) { Err(Error::LockfileError(_)) => {} _ => panic!("did not get locked error"), }; drop(locked_b); - LockedWallet::open(&base_dir, &uuid_b) + LockedWallet::open(base_dir, &uuid_b) 
.expect("should open wallet a after previous instance is dropped"); } } diff --git a/common/lighthouse_version/Cargo.toml b/common/lighthouse_version/Cargo.toml index 3c4f9fe50ce..164e3e47a7a 100644 --- a/common/lighthouse_version/Cargo.toml +++ b/common/lighthouse_version/Cargo.toml @@ -3,7 +3,6 @@ name = "lighthouse_version" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index 73cbdf44d42..b2829a48d8f 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -19,7 +19,7 @@ sloggers = { workspace = true } take_mut = "0.2.2" tokio = { workspace = true, features = [ "time" ] } tracing = "0.1" +tracing-appender = { workspace = true } tracing-core = { workspace = true } tracing-log = { workspace = true } tracing-subscriber = { workspace = true } -tracing-appender = { workspace = true } diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 79a07eed166..64fb7b9aadd 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -metrics = { workspace = true } libc = "0.2.79" +metrics = { workspace = true } parking_lot = { workspace = true } tikv-jemalloc-ctl = { version = "0.6.0", optional = true, features = ["stats"] } diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 2da32c307ee..5008c86e858 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -3,19 +3,18 @@ name = "monitoring_api" version = "0.1.0" authors = ["pawan "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -reqwest = { workspace = true } -task_executor = { workspace = true } -tokio = { 
workspace = true } eth2 = { workspace = true } -serde_json = { workspace = true } -serde = { workspace = true } lighthouse_version = { workspace = true } metrics = { workspace = true } -slog = { workspace = true } -store = { workspace = true } regex = { workspace = true } +reqwest = { workspace = true } sensitive_url = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +slog = { workspace = true } +store = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } diff --git a/common/oneshot_broadcast/Cargo.toml b/common/oneshot_broadcast/Cargo.toml index 12c9b40bc85..8a358ef8510 100644 --- a/common/oneshot_broadcast/Cargo.toml +++ b/common/oneshot_broadcast/Cargo.toml @@ -2,7 +2,6 @@ name = "oneshot_broadcast" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/pretty_reqwest_error/Cargo.toml b/common/pretty_reqwest_error/Cargo.toml index dc79832cd3d..4311601bcdd 100644 --- a/common/pretty_reqwest_error/Cargo.toml +++ b/common/pretty_reqwest_error/Cargo.toml @@ -2,7 +2,6 @@ name = "pretty_reqwest_error" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/sensitive_url/Cargo.toml b/common/sensitive_url/Cargo.toml index d218c8d93a1..ff562097225 100644 --- a/common/sensitive_url/Cargo.toml +++ b/common/sensitive_url/Cargo.toml @@ -3,9 +3,8 @@ name = "sensitive_url" version = "0.1.0" authors = ["Mac L "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -url = { workspace = true } serde = { workspace = true } +url = { workspace = true } diff --git a/common/slot_clock/Cargo.toml b/common/slot_clock/Cargo.toml index c2f330cd507..2e1982efb1a 100644 --- 
a/common/slot_clock/Cargo.toml +++ b/common/slot_clock/Cargo.toml @@ -5,6 +5,6 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -types = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } +types = { workspace = true } diff --git a/common/system_health/Cargo.toml b/common/system_health/Cargo.toml index be339f27792..034683f72e0 100644 --- a/common/system_health/Cargo.toml +++ b/common/system_health/Cargo.toml @@ -5,7 +5,7 @@ edition = { workspace = true } [dependencies] lighthouse_network = { workspace = true } -types = { workspace = true } -sysinfo = { workspace = true } -serde = { workspace = true } parking_lot = { workspace = true } +serde = { workspace = true } +sysinfo = { workspace = true } +types = { workspace = true } diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 26bcd7b339c..c1ac4b55a91 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -11,10 +11,10 @@ tracing = ["dep:tracing"] [dependencies] async-channel = { workspace = true } -tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } -slog = { workspace = true, optional = true } futures = { workspace = true } +logging = { workspace = true, optional = true } metrics = { workspace = true } +slog = { workspace = true, optional = true } sloggers = { workspace = true, optional = true } -logging = { workspace = true, optional = true } +tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } tracing = { workspace = true, optional = true } diff --git a/common/test_random_derive/Cargo.toml b/common/test_random_derive/Cargo.toml index 79308797a4b..b38d5ef63a5 100644 --- a/common/test_random_derive/Cargo.toml +++ b/common/test_random_derive/Cargo.toml @@ -9,5 +9,5 @@ description = "Procedural derive macros for implementation of TestRandom trait" proc-macro = true [dependencies] -syn = { workspace = true } quote = { workspace = true } +syn = { workspace 
= true } diff --git a/common/unused_port/Cargo.toml b/common/unused_port/Cargo.toml index 95dbf591861..2d771cd6008 100644 --- a/common/unused_port/Cargo.toml +++ b/common/unused_port/Cargo.toml @@ -2,7 +2,6 @@ name = "unused_port" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index ae8742fe07b..773431c93c6 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -6,21 +6,20 @@ edition = { workspace = true } [features] insecure_keys = [] - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] bls = { workspace = true } +deposit_contract = { workspace = true } +derivative = { workspace = true } +directory = { workspace = true } eth2_keystore = { workspace = true } filesystem = { workspace = true } -types = { workspace = true } -rand = { workspace = true } -deposit_contract = { workspace = true } -tree_hash = { workspace = true } hex = { workspace = true } -derivative = { workspace = true } lockfile = { workspace = true } -directory = { workspace = true } +rand = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml index a9407c392d9..4a3cde54a9a 100644 --- a/common/warp_utils/Cargo.toml +++ b/common/warp_utils/Cargo.toml @@ -3,20 +3,19 @@ name = "warp_utils" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -warp = { workspace = true } -eth2 = { workspace = true } -types = { workspace = true } beacon_chain = { workspace = true } -state_processing = { workspace = true } +bytes = { workspace = true } 
+eth2 = { workspace = true } +headers = "0.3.2" +metrics = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } +serde_array_query = "0.1.0" serde_json = { workspace = true } +state_processing = { workspace = true } tokio = { workspace = true } -headers = "0.3.2" -metrics = { workspace = true } -serde_array_query = "0.1.0" -bytes = { workspace = true } +types = { workspace = true } +warp = { workspace = true } diff --git a/consensus/fixed_bytes/Cargo.toml b/consensus/fixed_bytes/Cargo.toml index e5201a04551..ab29adfb1b9 100644 --- a/consensus/fixed_bytes/Cargo.toml +++ b/consensus/fixed_bytes/Cargo.toml @@ -3,7 +3,6 @@ name = "fixed_bytes" version = "0.1.0" authors = ["Eitan Seri-Levi "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index b32e0aa6656..3bd18e922aa 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -3,17 +3,16 @@ name = "fork_choice" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -types = { workspace = true } -state_processing = { workspace = true } -proto_array = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } metrics = { workspace = true } +proto_array = { workspace = true } slog = { workspace = true } +state_processing = { workspace = true } +types = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 29265e34e4d..ef017159a02 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1156,18 +1156,20 @@ async fn 
weak_subjectivity_check_epoch_boundary_is_skip_slot() { }; // recreate the chain exactly - ForkChoiceTest::new_with_chain_config(chain_config.clone()) - .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) - .await - .unwrap() - .skip_slots(E::slots_per_epoch() as usize) - .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) - .await - .unwrap() - .apply_blocks(1) - .await - .assert_finalized_epoch(5) - .assert_shutdown_signal_not_sent(); + Box::pin( + ForkChoiceTest::new_with_chain_config(chain_config.clone()) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) + .await + .unwrap() + .skip_slots(E::slots_per_epoch() as usize) + .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch < 5) + .await + .unwrap() + .apply_blocks(1), + ) + .await + .assert_finalized_epoch(5) + .assert_shutdown_signal_not_sent(); } #[tokio::test] diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index e99d1af8e56..c639dfce8d6 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -8,5 +8,5 @@ edition = { workspace = true } bytes = { workspace = true } [dev-dependencies] -yaml-rust2 = "0.8" hex = { workspace = true } +yaml-rust2 = "0.8" diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 99f98cf545f..bd6757c0fad 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -9,10 +9,10 @@ name = "proto_array" path = "src/bin.rs" [dependencies] -types = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +safe_arith = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } -safe_arith = { workspace = true } superstruct = { workspace = true } +types = { workspace = true } diff --git a/consensus/safe_arith/Cargo.toml b/consensus/safe_arith/Cargo.toml index 6f2e4b811c7..9ac9fe28d3a 100644 --- 
a/consensus/safe_arith/Cargo.toml +++ b/consensus/safe_arith/Cargo.toml @@ -3,7 +3,6 @@ name = "safe_arith" version = "0.1.0" authors = ["Michael Sproul "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index b7f6ef7b2a9..502ffe3cf65 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -5,30 +5,30 @@ authors = ["Paul Hauner ", "Michael Sproul = committee_count_per_slot { - return Err(BeaconStateError::InvalidCommitteeIndex(index)); + if committee_index >= committee_count_per_slot { + return Err(BeaconStateError::InvalidCommitteeIndex(committee_index)); } participant_count.safe_add_assign(beacon_committee.committee.len() as u64)?; let committee_attesters = beacon_committee @@ -127,6 +127,12 @@ pub mod attesting_indices_electra { }) .collect::>(); + // Require at least a single non-zero bit for each attesting committee bitfield. + // This check is new to the spec's `process_attestation` in Electra. 
+ if committee_attesters.is_empty() { + return Err(BeaconStateError::EmptyCommittee); + } + attesting_indices.extend(committee_attesters); committee_offset.safe_add_assign(beacon_committee.committee.len())?; } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index f289b6e0817..436f4934b90 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -514,6 +514,7 @@ pub fn get_expected_withdrawals( // Consume pending partial withdrawals let partial_withdrawals_count = if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() { + let mut partial_withdrawals_count = 0; for withdrawal in partial_withdrawals { if withdrawal.withdrawable_epoch > epoch || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize @@ -546,8 +547,9 @@ pub fn get_expected_withdrawals( }); withdrawal_index.safe_add_assign(1)?; } + partial_withdrawals_count.safe_add_assign(1)?; } - Some(withdrawals.len()) + Some(partial_withdrawals_count) } else { None }; diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index a53dc15126f..22d8592364c 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -7,7 +7,6 @@ use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; use crate::VerifySignatures; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; use types::typenum::U33; -use types::validator::is_compounding_withdrawal_credential; pub fn process_operations>( state: &mut BeaconState, @@ -378,7 +377,7 @@ pub fn process_deposits( if state.eth1_deposit_index() < eth1_deposit_index_limit { let expected_deposit_len = std::cmp::min( 
E::MaxDeposits::to_u64(), - state.get_outstanding_deposit_len()?, + eth1_deposit_index_limit.safe_sub(state.eth1_deposit_index())?, ); block_verify!( deposits.len() as u64 == expected_deposit_len, @@ -450,39 +449,46 @@ pub fn apply_deposit( if let Some(index) = validator_index { // [Modified in Electra:EIP7251] - if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { - pending_balance_deposits.push(PendingBalanceDeposit { index, amount })?; - - let validator = state - .validators() - .get(index as usize) - .ok_or(BeaconStateError::UnknownValidator(index as usize))?; - - if is_compounding_withdrawal_credential(deposit_data.withdrawal_credentials, spec) - && validator.has_eth1_withdrawal_credential(spec) - && is_valid_deposit_signature(&deposit_data, spec).is_ok() - { - state.switch_to_compounding_validator(index as usize, spec)?; - } + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, + amount, + signature: deposit_data.signature, + slot: spec.genesis_slot, // Use `genesis_slot` to distinguish from a pending deposit request + })?; } else { // Update the existing validator balance. increase_balance(state, index as usize, amount)?; } - } else { + } + // New validator + else { // The signature should be checked for new validators. Return early for a bad // signature. if is_valid_deposit_signature(&deposit_data, spec).is_err() { return Ok(()); } - state.add_validator_to_registry(&deposit_data, spec)?; - let new_validator_index = state.validators().len().safe_sub(1)? 
as u64; + state.add_validator_to_registry( + deposit_data.pubkey, + deposit_data.withdrawal_credentials, + if state.fork_name_unchecked() >= ForkName::Electra { + 0 + } else { + amount + }, + spec, + )?; // [New in Electra:EIP7251] - if let Ok(pending_balance_deposits) = state.pending_balance_deposits_mut() { - pending_balance_deposits.push(PendingBalanceDeposit { - index: new_validator_index, + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: deposit_data.pubkey, + withdrawal_credentials: deposit_data.withdrawal_credentials, amount, + signature: deposit_data.signature, + slot: spec.genesis_slot, // Use `genesis_slot` to distinguish from a pending deposit request })?; } } @@ -596,13 +602,18 @@ pub fn process_deposit_requests( if state.deposit_requests_start_index()? == spec.unset_deposit_requests_start_index { *state.deposit_requests_start_index_mut()? = request.index } - let deposit_data = DepositData { - pubkey: request.pubkey, - withdrawal_credentials: request.withdrawal_credentials, - amount: request.amount, - signature: request.signature.clone().into(), - }; - apply_deposit(state, deposit_data, None, false, spec)? 
+ let slot = state.slot(); + + // [New in Electra:EIP7251] + if let Ok(pending_deposits) = state.pending_deposits_mut() { + pending_deposits.push(PendingDeposit { + pubkey: request.pubkey, + withdrawal_credentials: request.withdrawal_credentials, + amount: request.amount, + signature: request.signature.clone(), + slot, + })?; + } } Ok(()) @@ -621,11 +632,84 @@ pub fn process_consolidation_requests( Ok(()) } +fn is_valid_switch_to_compounding_request( + state: &BeaconState, + consolidation_request: &ConsolidationRequest, + spec: &ChainSpec, +) -> Result { + // Switch to compounding requires source and target be equal + if consolidation_request.source_pubkey != consolidation_request.target_pubkey { + return Ok(false); + } + + // Verify pubkey exists + let Some(source_index) = state + .pubkey_cache() + .get(&consolidation_request.source_pubkey) + else { + // source validator doesn't exist + return Ok(false); + }; + + let source_validator = state.get_validator(source_index)?; + // Verify the source withdrawal credentials + // Note: We need to specifically check for eth1 withdrawal credentials here + // If the validator is already compounding, the compounding request is not valid. + if let Some(withdrawal_address) = source_validator + .has_eth1_withdrawal_credential(spec) + .then(|| { + source_validator + .withdrawal_credentials + .as_slice() + .get(12..) 
+ .map(Address::from_slice) + }) + .flatten() + { + if withdrawal_address != consolidation_request.source_address { + return Ok(false); + } + } else { + // Source doesn't have eth1 withdrawal credentials + return Ok(false); + } + + // Verify the source is active + let current_epoch = state.current_epoch(); + if !source_validator.is_active_at(current_epoch) { + return Ok(false); + } + // Verify exits for source has not been initiated + if source_validator.exit_epoch != spec.far_future_epoch { + return Ok(false); + } + + Ok(true) +} + pub fn process_consolidation_request( state: &mut BeaconState, consolidation_request: &ConsolidationRequest, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { + if is_valid_switch_to_compounding_request(state, consolidation_request, spec)? { + let Some(source_index) = state + .pubkey_cache() + .get(&consolidation_request.source_pubkey) + else { + // source validator doesn't exist. This is unreachable as `is_valid_switch_to_compounding_request` + // will return false in that case. + return Ok(()); + }; + state.switch_to_compounding_validator(source_index, spec)?; + return Ok(()); + } + + // Verify that source != target, so a consolidation cannot be used as an exit. + if consolidation_request.source_pubkey == consolidation_request.target_pubkey { + return Ok(()); + } + // If the pending consolidations queue is full, consolidation requests are ignored if state.pending_consolidations()?.len() == E::PendingConsolidationsLimit::to_usize() { return Ok(()); @@ -649,10 +733,6 @@ pub fn process_consolidation_request( // target validator doesn't exist return Ok(()); }; - // Verify that source != target, so a consolidation cannot be used as an exit. 
- if source_index == target_index { - return Ok(()); - } let source_validator = state.get_validator(source_index)?; // Verify the source withdrawal credentials @@ -699,5 +779,10 @@ pub fn process_consolidation_request( target_index: target_index as u64, })?; + let target_validator = state.get_validator(target_index)?; + // Churn any target excess active balance of target and raise its max + if target_validator.has_eth1_withdrawal_credential(spec) { + state.switch_to_compounding_validator(target_index, spec)?; + } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index b6c9dbea521..f45c55a7acf 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -28,6 +28,7 @@ pub enum EpochProcessingError { SinglePassMissingActivationQueue, MissingEarliestExitEpoch, MissingExitBalanceToConsume, + PendingDepositsLogicError, } impl From for EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index fcb480a37cf..904e68e3686 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -4,6 +4,7 @@ use crate::{ update_progressive_balances_cache::initialize_progressive_balances_cache, }, epoch_cache::{initialize_epoch_cache, PreEpochCache}, + per_block_processing::is_valid_deposit_signature, per_epoch_processing::{Delta, Error, ParticipationEpochSummary}, }; use itertools::izip; @@ -16,9 +17,9 @@ use types::{ TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, milhouse::Cow, - ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, - ExitCache, ForkName, List, ParticipationFlags, PendingBalanceDeposit, ProgressiveBalancesCache, - RelativeEpoch, Unsigned, 
Validator, + ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, + EthSpec, ExitCache, ForkName, List, ParticipationFlags, PendingDeposit, + ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, }; pub struct SinglePassConfig { @@ -26,7 +27,7 @@ pub struct SinglePassConfig { pub rewards_and_penalties: bool, pub registry_updates: bool, pub slashings: bool, - pub pending_balance_deposits: bool, + pub pending_deposits: bool, pub pending_consolidations: bool, pub effective_balance_updates: bool, } @@ -44,7 +45,7 @@ impl SinglePassConfig { rewards_and_penalties: true, registry_updates: true, slashings: true, - pending_balance_deposits: true, + pending_deposits: true, pending_consolidations: true, effective_balance_updates: true, } @@ -56,7 +57,7 @@ impl SinglePassConfig { rewards_and_penalties: false, registry_updates: false, slashings: false, - pending_balance_deposits: false, + pending_deposits: false, pending_consolidations: false, effective_balance_updates: false, } @@ -85,15 +86,17 @@ struct SlashingsContext { penalty_per_effective_balance_increment: u64, } -struct PendingBalanceDepositsContext { +struct PendingDepositsContext { /// The value to set `next_deposit_index` to *after* processing completes. next_deposit_index: usize, /// The value to set `deposit_balance_to_consume` to *after* processing completes. deposit_balance_to_consume: u64, /// Total balance increases for each validator due to pending balance deposits. validator_deposits_to_process: HashMap, - /// The deposits to append to `pending_balance_deposits` after processing all applicable deposits. - deposits_to_postpone: Vec, + /// The deposits to append to `pending_deposits` after processing all applicable deposits. + deposits_to_postpone: Vec, + /// New validators to be added to the state *after* processing completes. 
+ new_validator_deposits: Vec, } struct EffectiveBalancesContext { @@ -138,6 +141,7 @@ pub fn process_epoch_single_pass( state.build_exit_cache(spec)?; state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.update_pubkey_cache()?; let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); @@ -163,12 +167,11 @@ pub fn process_epoch_single_pass( let slashings_ctxt = &SlashingsContext::new(state, state_ctxt, spec)?; let mut next_epoch_cache = PreEpochCache::new_for_next_epoch(state)?; - let pending_balance_deposits_ctxt = - if fork_name.electra_enabled() && conf.pending_balance_deposits { - Some(PendingBalanceDepositsContext::new(state, spec)?) - } else { - None - }; + let pending_deposits_ctxt = if fork_name.electra_enabled() && conf.pending_deposits { + Some(PendingDepositsContext::new(state, spec, &conf)?) + } else { + None + }; let mut earliest_exit_epoch = state.earliest_exit_epoch().ok(); let mut exit_balance_to_consume = state.exit_balance_to_consume().ok(); @@ -303,9 +306,9 @@ pub fn process_epoch_single_pass( process_single_slashing(&mut balance, &validator, slashings_ctxt, state_ctxt, spec)?; } - // `process_pending_balance_deposits` - if let Some(pending_balance_deposits_ctxt) = &pending_balance_deposits_ctxt { - process_pending_balance_deposits_for_validator( + // `process_pending_deposits` + if let Some(pending_balance_deposits_ctxt) = &pending_deposits_ctxt { + process_pending_deposits_for_validator( &mut balance, validator_info, pending_balance_deposits_ctxt, @@ -342,20 +345,84 @@ pub fn process_epoch_single_pass( // Finish processing pending balance deposits if relevant. // // This *could* be reordered after `process_pending_consolidations` which pushes only to the end - // of the `pending_balance_deposits` list. But we may as well preserve the write ordering used + // of the `pending_deposits` list. 
But we may as well preserve the write ordering used // by the spec and do this first. - if let Some(ctxt) = pending_balance_deposits_ctxt { - let mut new_pending_balance_deposits = List::try_from_iter( + if let Some(ctxt) = pending_deposits_ctxt { + let mut new_balance_deposits = List::try_from_iter( state - .pending_balance_deposits()? + .pending_deposits()? .iter_from(ctxt.next_deposit_index)? .cloned(), )?; for deposit in ctxt.deposits_to_postpone { - new_pending_balance_deposits.push(deposit)?; + new_balance_deposits.push(deposit)?; } - *state.pending_balance_deposits_mut()? = new_pending_balance_deposits; + *state.pending_deposits_mut()? = new_balance_deposits; *state.deposit_balance_to_consume_mut()? = ctxt.deposit_balance_to_consume; + + // `new_validator_deposits` may contain multiple deposits with the same pubkey where + // the first deposit creates the new validator and the others are topups. + // Each item in the vec is a (pubkey, validator_index) + let mut added_validators = Vec::new(); + for deposit in ctxt.new_validator_deposits { + let deposit_data = DepositData { + pubkey: deposit.pubkey, + withdrawal_credentials: deposit.withdrawal_credentials, + amount: deposit.amount, + signature: deposit.signature, + }; + // Only check the signature if this is the first deposit for the validator, + // following the logic from `apply_pending_deposit` in the spec. + if let Some(validator_index) = state.get_validator_index(&deposit_data.pubkey)? { + state + .get_balance_mut(validator_index)? 
+ .safe_add_assign(deposit_data.amount)?; + } else if is_valid_deposit_signature(&deposit_data, spec).is_ok() { + // Apply the new deposit to the state + let validator_index = state.add_validator_to_registry( + deposit_data.pubkey, + deposit_data.withdrawal_credentials, + deposit_data.amount, + spec, + )?; + added_validators.push((deposit_data.pubkey, validator_index)); + } + } + if conf.effective_balance_updates { + // Re-process effective balance updates for validators affected by top-up of new validators. + let ( + validators, + balances, + _, + current_epoch_participation, + _, + progressive_balances, + _, + _, + ) = state.mutable_validator_fields()?; + for (_, validator_index) in added_validators.iter() { + let balance = *balances + .get(*validator_index) + .ok_or(BeaconStateError::UnknownValidator(*validator_index))?; + let mut validator = validators + .get_cow(*validator_index) + .ok_or(BeaconStateError::UnknownValidator(*validator_index))?; + let validator_current_epoch_participation = *current_epoch_participation + .get(*validator_index) + .ok_or(BeaconStateError::UnknownValidator(*validator_index))?; + process_single_effective_balance_update( + *validator_index, + balance, + &mut validator, + validator_current_epoch_participation, + &mut next_epoch_cache, + progressive_balances, + effective_balances_ctxt, + state_ctxt, + spec, + )?; + } + } } // Process consolidations outside the single-pass loop, as they depend on balances for multiple @@ -819,8 +886,12 @@ fn process_single_slashing( Ok(()) } -impl PendingBalanceDepositsContext { - fn new(state: &BeaconState, spec: &ChainSpec) -> Result { +impl PendingDepositsContext { + fn new( + state: &BeaconState, + spec: &ChainSpec, + config: &SinglePassConfig, + ) -> Result { let available_for_processing = state .deposit_balance_to_consume()? 
.safe_add(state.get_activation_exit_churn_limit(spec)?)?; @@ -830,10 +901,31 @@ impl PendingBalanceDepositsContext { let mut next_deposit_index = 0; let mut validator_deposits_to_process = HashMap::new(); let mut deposits_to_postpone = vec![]; - - let pending_balance_deposits = state.pending_balance_deposits()?; - - for deposit in pending_balance_deposits.iter() { + let mut new_validator_deposits = vec![]; + let mut is_churn_limit_reached = false; + let finalized_slot = state + .finalized_checkpoint() + .epoch + .start_slot(E::slots_per_epoch()); + + let pending_deposits = state.pending_deposits()?; + + for deposit in pending_deposits.iter() { + // Do not process deposit requests if the Eth1 bridge deposits are not yet applied. + if deposit.slot > spec.genesis_slot + && state.eth1_deposit_index() < state.deposit_requests_start_index()? + { + break; + } + // Do not process if the deposit slot has not been finalized. + if deposit.slot > finalized_slot { + break; + } + // Do not process if we have reached the limit for the number of deposits + // processed in an epoch. + if next_deposit_index >= E::max_pending_deposits_per_epoch() { + break; + } // We have to do a bit of indexing into `validators` here, but I can't see any way // around that without changing the spec. // @@ -844,48 +936,70 @@ impl PendingBalanceDepositsContext { // take, just whether it is non-default. Nor do we need to know the value of // `withdrawable_epoch`, because `next_epoch <= withdrawable_epoch` will evaluate to // `true` both for the actual value & the default placeholder value (`FAR_FUTURE_EPOCH`). - let validator = state.get_validator(deposit.index as usize)?; - let already_exited = validator.exit_epoch < spec.far_future_epoch; - // In the spec process_registry_updates is called before process_pending_balance_deposits - so we must account for process_registry_updates ejecting the validator for low balance - and setting the exit_epoch to < far_future_epoch.
Note that in the spec the effective - // balance update does not happen until *after* the registry update, so we don't need to - // account for changes to the effective balance that would push it below the ejection - // balance here. - let will_be_exited = validator.is_active_at(current_epoch) - && validator.effective_balance <= spec.ejection_balance; - if already_exited || will_be_exited { - if next_epoch <= validator.withdrawable_epoch { - deposits_to_postpone.push(deposit.clone()); - } else { - // Deposited balance will never become active. Increase balance but do not - // consume churn. - validator_deposits_to_process - .entry(deposit.index as usize) - .or_insert(0) - .safe_add_assign(deposit.amount)?; - } - } else { - // Deposit does not fit in the churn, no more deposit processing in this epoch. - if processed_amount.safe_add(deposit.amount)? > available_for_processing { - break; - } - // Deposit fits in the churn, process it. Increase balance and consume churn. + let mut is_validator_exited = false; + let mut is_validator_withdrawn = false; + let opt_validator_index = state.pubkey_cache().get(&deposit.pubkey); + if let Some(validator_index) = opt_validator_index { + let validator = state.get_validator(validator_index)?; + let already_exited = validator.exit_epoch < spec.far_future_epoch; + // In the spec process_registry_updates is called before process_pending_deposits + // so we must account for process_registry_updates ejecting the validator for low balance + // and setting the exit_epoch to < far_future_epoch. Note that in the spec the effective + // balance update does not happen until *after* the registry update, so we don't need to + // account for changes to the effective balance that would push it below the ejection + // balance here. + // Note: we only consider this if registry_updates are enabled in the config. + // EF tests require us to run epoch_processing functions in isolation. 
+ let will_be_exited = config.registry_updates + && (validator.is_active_at(current_epoch) + && validator.effective_balance <= spec.ejection_balance); + is_validator_exited = already_exited || will_be_exited; + is_validator_withdrawn = validator.withdrawable_epoch < next_epoch; + } + + if is_validator_withdrawn { + // Deposited balance will never become active. Queue a balance increase but do not + // consume churn. Validator index must be known if the validator is known to be + // withdrawn (see calculation of `is_validator_withdrawn` above). + let validator_index = + opt_validator_index.ok_or(Error::PendingDepositsLogicError)?; validator_deposits_to_process - .entry(deposit.index as usize) + .entry(validator_index) .or_insert(0) .safe_add_assign(deposit.amount)?; + } else if is_validator_exited { + // Validator is exiting, postpone the deposit until after withdrawable epoch + deposits_to_postpone.push(deposit.clone()); + } else { + // Check if deposit fits in the churn, otherwise, do no more deposit processing in this epoch. + is_churn_limit_reached = + processed_amount.safe_add(deposit.amount)? > available_for_processing; + if is_churn_limit_reached { + break; + } processed_amount.safe_add_assign(deposit.amount)?; + + // Deposit fits in the churn, process it. Increase balance and consume churn. + if let Some(validator_index) = state.pubkey_cache().get(&deposit.pubkey) { + validator_deposits_to_process + .entry(validator_index) + .or_insert(0) + .safe_add_assign(deposit.amount)?; + } else { + // The `PendingDeposit` is for a new validator + new_validator_deposits.push(deposit.clone()); + } } // Regardless of how the deposit was handled, we move on in the queue. next_deposit_index.safe_add_assign(1)?; } - let deposit_balance_to_consume = if next_deposit_index == pending_balance_deposits.len() { - 0 - } else { + // Accumulate churn only if the churn limit has been hit. 
+ let deposit_balance_to_consume = if is_churn_limit_reached { available_for_processing.safe_sub(processed_amount)? + } else { + 0 }; Ok(Self { @@ -893,14 +1007,15 @@ impl PendingBalanceDepositsContext { deposit_balance_to_consume, validator_deposits_to_process, deposits_to_postpone, + new_validator_deposits, }) } } -fn process_pending_balance_deposits_for_validator( +fn process_pending_deposits_for_validator( balance: &mut Cow, validator_info: &ValidatorInfo, - pending_balance_deposits_ctxt: &PendingBalanceDepositsContext, + pending_balance_deposits_ctxt: &PendingDepositsContext, ) -> Result<(), Error> { if let Some(deposit_amount) = pending_balance_deposits_ctxt .validator_deposits_to_process @@ -941,21 +1056,20 @@ fn process_pending_consolidations( break; } - // Calculate the active balance while we have the source validator loaded. This is a safe - // reordering. - let source_balance = *state - .balances() - .get(source_index) - .ok_or(BeaconStateError::UnknownValidator(source_index))?; - let active_balance = - source_validator.get_active_balance(source_balance, spec, state_ctxt.fork_name); - - // Churn any target excess active balance of target and raise its max. - state.switch_to_compounding_validator(target_index, spec)?; + // Calculate the consolidated balance + let max_effective_balance = + source_validator.get_max_effective_balance(spec, state_ctxt.fork_name); + let source_effective_balance = std::cmp::min( + *state + .balances() + .get(source_index) + .ok_or(BeaconStateError::UnknownValidator(source_index))?, + max_effective_balance, + ); // Move active balance to target. Excess balance is withdrawable. 
- decrease_balance(state, source_index, active_balance)?; - increase_balance(state, target_index, active_balance)?; + decrease_balance(state, source_index, source_effective_balance)?; + increase_balance(state, target_index, source_effective_balance)?; affected_validators.insert(source_index); affected_validators.insert(target_index); diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index 1e532d9f107..1e64ef28978 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -1,8 +1,10 @@ +use bls::Signature; +use itertools::Itertools; use safe_arith::SafeArith; use std::mem; use types::{ BeaconState, BeaconStateElectra, BeaconStateError as Error, ChainSpec, Epoch, EpochCache, - EthSpec, Fork, + EthSpec, Fork, PendingDeposit, }; /// Transform a `Deneb` state into an `Electra` state. @@ -38,29 +40,44 @@ pub fn upgrade_to_electra( // Add validators that are not yet active to pending balance deposits let validators = post.validators().clone(); - let mut pre_activation = validators + let pre_activation = validators .iter() .enumerate() .filter(|(_, validator)| validator.activation_epoch == spec.far_future_epoch) + .sorted_by_key(|(index, validator)| (validator.activation_eligibility_epoch, *index)) .collect::>(); - // Sort the indices by activation_eligibility_epoch and then by index - pre_activation.sort_by(|(index_a, val_a), (index_b, val_b)| { - if val_a.activation_eligibility_epoch == val_b.activation_eligibility_epoch { - index_a.cmp(index_b) - } else { - val_a - .activation_eligibility_epoch - .cmp(&val_b.activation_eligibility_epoch) - } - }); - // Process validators to queue entire balance and reset them for (index, _) in pre_activation { - post.queue_entire_balance_and_reset_validator(index, spec)?; + let balance = post + .balances_mut() + .get_mut(index) + .ok_or(Error::UnknownValidator(index))?; + let balance_copy = *balance; + *balance 
= 0_u64; + + let validator = post + .validators_mut() + .get_mut(index) + .ok_or(Error::UnknownValidator(index))?; + validator.effective_balance = 0; + validator.activation_eligibility_epoch = spec.far_future_epoch; + let pubkey = validator.pubkey; + let withdrawal_credentials = validator.withdrawal_credentials; + + post.pending_deposits_mut()? + .push(PendingDeposit { + pubkey, + withdrawal_credentials, + amount: balance_copy, + signature: Signature::infinity()?.into(), + slot: spec.genesis_slot, + }) + .map_err(Error::MilhouseError)?; } // Ensure early adopters of compounding credentials go through the activation churn + let validators = post.validators().clone(); for (index, validator) in validators.iter().enumerate() { if validator.has_compounding_withdrawal_credential(spec) { post.queue_excess_active_balance(index, spec)?; @@ -137,7 +154,7 @@ pub fn upgrade_state_to_electra( earliest_exit_epoch, consolidation_balance_to_consume: 0, earliest_consolidation_epoch, - pending_balance_deposits: Default::default(), + pending_deposits: Default::default(), pending_partial_withdrawals: Default::default(), pending_consolidations: Default::default(), // Caches diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 833231dca39..ad4484b86ae 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -59,6 +59,7 @@ pub enum Error { UnknownValidator(usize), UnableToDetermineProducer, InvalidBitfield, + EmptyCommittee, ValidatorIsWithdrawable, ValidatorIsInactive { val_index: usize, @@ -509,7 +510,7 @@ where #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Electra))] - pub pending_balance_deposits: List, + pub pending_deposits: List, #[compare_fields(as_iter)] #[test_random(default)] #[superstruct(only(Electra))] @@ -1547,19 +1548,23 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + /// Add a validator to the registry and return the validator index that was 
allocated for it. pub fn add_validator_to_registry( &mut self, - deposit_data: &DepositData, + pubkey: PublicKeyBytes, + withdrawal_credentials: Hash256, + amount: u64, spec: &ChainSpec, - ) -> Result<(), Error> { - let fork = self.fork_name_unchecked(); - let amount = if fork.electra_enabled() { - 0 - } else { - deposit_data.amount - }; - self.validators_mut() - .push(Validator::from_deposit(deposit_data, amount, fork, spec))?; + ) -> Result { + let index = self.validators().len(); + let fork_name = self.fork_name_unchecked(); + self.validators_mut().push(Validator::from_deposit( + pubkey, + withdrawal_credentials, + amount, + fork_name, + spec, + ))?; self.balances_mut().push(amount)?; // Altair or later initializations. @@ -1573,7 +1578,20 @@ impl BeaconState { inactivity_scores.push(0)?; } - Ok(()) + // Keep the pubkey cache up to date if it was up to date prior to this call. + // + // Doing this here while we know the pubkey and index is marginally quicker than doing it in + // a call to `update_pubkey_cache` later because we don't need to index into the validators + // tree again. + let pubkey_cache = self.pubkey_cache_mut(); + if pubkey_cache.len() == index { + let success = pubkey_cache.insert(pubkey, index); + if !success { + return Err(Error::PubkeyCacheInconsistent); + } + } + + Ok(index) } /// Safe copy-on-write accessor for the `validators` list. @@ -1780,19 +1798,6 @@ impl BeaconState { } } - /// Get the number of outstanding deposits. - /// - /// Returns `Err` if the state is invalid. - pub fn get_outstanding_deposit_len(&self) -> Result { - self.eth1_data() - .deposit_count - .checked_sub(self.eth1_deposit_index()) - .ok_or(Error::InvalidDepositState { - deposit_count: self.eth1_data().deposit_count, - deposit_index: self.eth1_deposit_index(), - }) - } - /// Build all caches (except the tree hash cache), if they need to be built. 
pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { self.build_all_committee_caches(spec)?; @@ -2149,27 +2154,6 @@ impl BeaconState { .map_err(Into::into) } - /// Get active balance for the given `validator_index`. - pub fn get_active_balance( - &self, - validator_index: usize, - spec: &ChainSpec, - current_fork: ForkName, - ) -> Result { - let max_effective_balance = self - .validators() - .get(validator_index) - .map(|validator| validator.get_max_effective_balance(spec, current_fork)) - .ok_or(Error::UnknownValidator(validator_index))?; - Ok(std::cmp::min( - *self - .balances() - .get(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?, - max_effective_balance, - )) - } - pub fn get_pending_balance_to_withdraw(&self, validator_index: usize) -> Result { let mut pending_balance = 0; for withdrawal in self @@ -2196,42 +2180,18 @@ impl BeaconState { if *balance > spec.min_activation_balance { let excess_balance = balance.safe_sub(spec.min_activation_balance)?; *balance = spec.min_activation_balance; - self.pending_balance_deposits_mut()? 
- .push(PendingBalanceDeposit { - index: validator_index as u64, - amount: excess_balance, - })?; + let validator = self.get_validator(validator_index)?.clone(); + self.pending_deposits_mut()?.push(PendingDeposit { + pubkey: validator.pubkey, + withdrawal_credentials: validator.withdrawal_credentials, + amount: excess_balance, + signature: Signature::infinity()?.into(), + slot: spec.genesis_slot, + })?; } Ok(()) } - pub fn queue_entire_balance_and_reset_validator( - &mut self, - validator_index: usize, - spec: &ChainSpec, - ) -> Result<(), Error> { - let balance = self - .balances_mut() - .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; - let balance_copy = *balance; - *balance = 0_u64; - - let validator = self - .validators_mut() - .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; - validator.effective_balance = 0; - validator.activation_eligibility_epoch = spec.far_future_epoch; - - self.pending_balance_deposits_mut()? - .push(PendingBalanceDeposit { - index: validator_index as u64, - amount: balance_copy, - }) - .map_err(Into::into) - } - /// Change the withdrawal prefix of the given `validator_index` to the compounding withdrawal validator prefix. 
pub fn switch_to_compounding_validator( &mut self, @@ -2242,12 +2202,10 @@ impl BeaconState { .validators_mut() .get_mut(validator_index) .ok_or(Error::UnknownValidator(validator_index))?; - if validator.has_eth1_withdrawal_credential(spec) { - AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = - spec.compounding_withdrawal_prefix_byte; + AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = + spec.compounding_withdrawal_prefix_byte; - self.queue_excess_active_balance(validator_index, spec)?; - } + self.queue_excess_active_balance(validator_index, spec)?; Ok(()) } diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index 3ad3ccf5617..bfa7bb86d24 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -307,43 +307,6 @@ mod committees { } } -mod get_outstanding_deposit_len { - use super::*; - - async fn state() -> BeaconState { - get_harness(16, Slot::new(0)) - .await - .chain - .head_beacon_state_cloned() - } - - #[tokio::test] - async fn returns_ok() { - let mut state = state().await; - assert_eq!(state.get_outstanding_deposit_len(), Ok(0)); - - state.eth1_data_mut().deposit_count = 17; - *state.eth1_deposit_index_mut() = 16; - assert_eq!(state.get_outstanding_deposit_len(), Ok(1)); - } - - #[tokio::test] - async fn returns_err_if_the_state_is_invalid() { - let mut state = state().await; - // The state is invalid, deposit count is lower than deposit index. 
- state.eth1_data_mut().deposit_count = 16; - *state.eth1_deposit_index_mut() = 17; - - assert_eq!( - state.get_outstanding_deposit_len(), - Err(BeaconStateError::InvalidDepositState { - deposit_count: 16, - deposit_index: 17, - }) - ); - } -} - #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit_request.rs index 7af949fef3a..a21760551b5 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit_request.rs @@ -1,5 +1,6 @@ use crate::test_utils::TestRandom; -use crate::{Hash256, PublicKeyBytes, Signature}; +use crate::{Hash256, PublicKeyBytes}; +use bls::SignatureBytes; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -10,7 +11,6 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, - Eq, Hash, Clone, Serialize, @@ -25,7 +25,7 @@ pub struct DepositRequest { pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, - pub signature: Signature, + pub signature: SignatureBytes, #[serde(with = "serde_utils::quoted_u64")] pub index: u64, } @@ -36,7 +36,7 @@ impl DepositRequest { pubkey: PublicKeyBytes::empty(), withdrawal_credentials: Hash256::ZERO, amount: 0, - signature: Signature::empty(), + signature: SignatureBytes::empty(), index: 0, } .as_ssz_bytes() diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 09ef8e3c1a7..23e82762096 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -151,7 +151,7 @@ pub trait EthSpec: /* * New in Electra */ - type PendingBalanceDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type PendingDepositsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingPartialWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type PendingConsolidationsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; type 
MaxConsolidationRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -159,6 +159,7 @@ pub trait EthSpec: type MaxAttesterSlashingsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxAttestationsElectra: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxWithdrawalRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxPendingDepositsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; fn default_spec() -> ChainSpec; @@ -331,9 +332,9 @@ pub trait EthSpec: .expect("Preset values are not configurable and never result in non-positive block body depth") } - /// Returns the `PENDING_BALANCE_DEPOSITS_LIMIT` constant for this specification. - fn pending_balance_deposits_limit() -> usize { - Self::PendingBalanceDepositsLimit::to_usize() + /// Returns the `PENDING_DEPOSITS_LIMIT` constant for this specification. + fn pending_deposits_limit() -> usize { + Self::PendingDepositsLimit::to_usize() } /// Returns the `PENDING_PARTIAL_WITHDRAWALS_LIMIT` constant for this specification. @@ -371,6 +372,11 @@ pub trait EthSpec: Self::MaxWithdrawalRequestsPerPayload::to_usize() } + /// Returns the `MAX_PENDING_DEPOSITS_PER_EPOCH` constant for this specification. 
+ fn max_pending_deposits_per_epoch() -> usize { + Self::MaxPendingDepositsPerEpoch::to_usize() + } + fn kzg_commitments_inclusion_proof_depth() -> usize { Self::KzgCommitmentsInclusionProofDepth::to_usize() } @@ -430,7 +436,7 @@ impl EthSpec for MainnetEthSpec { type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U16; - type PendingBalanceDepositsLimit = U134217728; + type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; type MaxConsolidationRequestsPerPayload = U1; @@ -438,6 +444,7 @@ impl EthSpec for MainnetEthSpec { type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; + type MaxPendingDepositsPerEpoch = U16; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -500,7 +507,8 @@ impl EthSpec for MinimalEthSpec { MaxBlsToExecutionChanges, MaxBlobsPerBlock, BytesPerFieldElement, - PendingBalanceDepositsLimit, + PendingDepositsLimit, + MaxPendingDepositsPerEpoch, MaxConsolidationRequestsPerPayload, MaxAttesterSlashingsElectra, MaxAttestationsElectra @@ -557,7 +565,7 @@ impl EthSpec for GnosisEthSpec { type BytesPerFieldElement = U32; type BytesPerBlob = U131072; type KzgCommitmentInclusionProofDepth = U17; - type PendingBalanceDepositsLimit = U134217728; + type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; type MaxConsolidationRequestsPerPayload = U1; @@ -565,6 +573,7 @@ impl EthSpec for GnosisEthSpec { type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; + type MaxPendingDepositsPerEpoch = U16; type FieldElementsPerCell = U64; type FieldElementsPerExtBlob = U8192; type BytesPerCell = U2048; diff --git a/consensus/types/src/execution_block_header.rs 
b/consensus/types/src/execution_block_header.rs index 694162d6ffd..60f2960afbe 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution_block_header.rs @@ -52,9 +52,11 @@ pub struct ExecutionBlockHeader { pub blob_gas_used: Option, pub excess_blob_gas: Option, pub parent_beacon_block_root: Option, + pub requests_root: Option, } impl ExecutionBlockHeader { + #[allow(clippy::too_many_arguments)] pub fn from_payload( payload: ExecutionPayloadRef, rlp_empty_list_root: Hash256, @@ -63,6 +65,7 @@ impl ExecutionBlockHeader { rlp_blob_gas_used: Option, rlp_excess_blob_gas: Option, rlp_parent_beacon_block_root: Option, + rlp_requests_root: Option, ) -> Self { // Most of these field mappings are defined in EIP-3675 except for `mixHash`, which is // defined in EIP-4399. @@ -87,6 +90,7 @@ impl ExecutionBlockHeader { blob_gas_used: rlp_blob_gas_used, excess_blob_gas: rlp_excess_blob_gas, parent_beacon_block_root: rlp_parent_beacon_block_root, + requests_root: rlp_requests_root, } } } @@ -114,6 +118,7 @@ pub struct EncodableExecutionBlockHeader<'a> { pub blob_gas_used: Option, pub excess_blob_gas: Option, pub parent_beacon_block_root: Option<&'a [u8]>, + pub requests_root: Option<&'a [u8]>, } impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { @@ -139,6 +144,7 @@ impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: None, + requests_root: None, }; if let Some(withdrawals_root) = &header.withdrawals_root { encodable.withdrawals_root = Some(withdrawals_root.as_slice()); @@ -146,6 +152,9 @@ impl<'a> From<&'a ExecutionBlockHeader> for EncodableExecutionBlockHeader<'a> { if let Some(parent_beacon_block_root) = &header.parent_beacon_block_root { encodable.parent_beacon_block_root = Some(parent_beacon_block_root.as_slice()) } + if let Some(requests_root) = &header.requests_root { + 
encodable.requests_root = Some(requests_root.as_slice()) + } encodable } } diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs index 778260dd841..96a39054207 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution_requests.rs @@ -1,7 +1,8 @@ use crate::test_utils::TestRandom; -use crate::{ConsolidationRequest, DepositRequest, EthSpec, WithdrawalRequest}; +use crate::{ConsolidationRequest, DepositRequest, EthSpec, Hash256, WithdrawalRequest}; use alloy_primitives::Bytes; use derivative::Derivative; +use ethereum_hashing::{DynamicContext, Sha256Context}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -47,6 +48,43 @@ impl ExecutionRequests { let consolidation_bytes = Bytes::from(self.consolidations.as_ssz_bytes()); vec![deposit_bytes, withdrawal_bytes, consolidation_bytes] } + + /// Generate the execution layer `requests_hash` based on EIP-7685. + /// + /// `sha256(sha256(requests_0) ++ sha256(requests_1) ++ ...)` + pub fn requests_hash(&self) -> Hash256 { + let mut hasher = DynamicContext::new(); + + for (i, request) in self.get_execution_requests_list().iter().enumerate() { + let mut request_hasher = DynamicContext::new(); + request_hasher.update(&[i as u8]); + request_hasher.update(request); + let request_hash = request_hasher.finalize(); + + hasher.update(&request_hash); + } + + hasher.finalize().into() + } +} + +/// This is used to index into the `execution_requests` array. 
+#[derive(Debug, Copy, Clone)] +pub enum RequestPrefix { + Deposit, + Withdrawal, + Consolidation, +} + +impl RequestPrefix { + pub fn from_prefix(prefix: u8) -> Option { + match prefix { + 0 => Some(Self::Deposit), + 1 => Some(Self::Withdrawal), + 2 => Some(Self::Consolidation), + _ => None, + } + } } #[cfg(test)] diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index eff52378342..dd304c6296c 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -54,8 +54,8 @@ pub mod light_client_finality_update; pub mod light_client_optimistic_update; pub mod light_client_update; pub mod pending_attestation; -pub mod pending_balance_deposit; pub mod pending_consolidation; +pub mod pending_deposit; pub mod pending_partial_withdrawal; pub mod proposer_preparation_data; pub mod proposer_slashing; @@ -170,7 +170,7 @@ pub use crate::execution_payload_header::{ ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; -pub use crate::execution_requests::ExecutionRequests; +pub use crate::execution_requests::{ExecutionRequests, RequestPrefix}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; @@ -210,8 +210,8 @@ pub use crate::payload::{ FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; -pub use crate::pending_balance_deposit::PendingBalanceDeposit; pub use crate::pending_consolidation::PendingConsolidation; +pub use crate::pending_deposit::PendingDeposit; pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; pub use crate::preset::{ AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, diff --git a/consensus/types/src/pending_balance_deposit.rs b/consensus/types/src/pending_deposit.rs similarity index 68% rename from consensus/types/src/pending_balance_deposit.rs rename to consensus/types/src/pending_deposit.rs index 
a2bce577f87..3bee86417de 100644 --- a/consensus/types/src/pending_balance_deposit.rs +++ b/consensus/types/src/pending_deposit.rs @@ -1,4 +1,5 @@ use crate::test_utils::TestRandom; +use crate::*; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -8,7 +9,6 @@ use tree_hash_derive::TreeHash; arbitrary::Arbitrary, Debug, PartialEq, - Eq, Hash, Clone, Serialize, @@ -18,16 +18,18 @@ use tree_hash_derive::TreeHash; TreeHash, TestRandom, )] -pub struct PendingBalanceDeposit { - #[serde(with = "serde_utils::quoted_u64")] - pub index: u64, +pub struct PendingDeposit { + pub pubkey: PublicKeyBytes, + pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, + pub signature: SignatureBytes, + pub slot: Slot, } #[cfg(test)] mod tests { use super::*; - ssz_and_tree_hash_tests!(PendingBalanceDeposit); + ssz_and_tree_hash_tests!(PendingDeposit); } diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 435a74bdc35..b469b7b777a 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -263,7 +263,7 @@ impl ElectraPreset { whistleblower_reward_quotient_electra: spec.whistleblower_reward_quotient_electra, max_pending_partials_per_withdrawals_sweep: spec .max_pending_partials_per_withdrawals_sweep, - pending_balance_deposits_limit: E::pending_balance_deposits_limit() as u64, + pending_balance_deposits_limit: E::pending_deposits_limit() as u64, pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, pending_consolidations_limit: E::pending_consolidations_limit() as u64, max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 275101ddbe1..222b9292a2a 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -1,6 +1,6 @@ use crate::{ - test_utils::TestRandom, Address, 
BeaconState, ChainSpec, Checkpoint, DepositData, Epoch, - EthSpec, FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, + test_utils::TestRandom, Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, + FixedBytesExtended, ForkName, Hash256, PublicKeyBytes, }; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -38,14 +38,15 @@ pub struct Validator { impl Validator { #[allow(clippy::arithmetic_side_effects)] pub fn from_deposit( - deposit_data: &DepositData, + pubkey: PublicKeyBytes, + withdrawal_credentials: Hash256, amount: u64, fork_name: ForkName, spec: &ChainSpec, ) -> Self { let mut validator = Validator { - pubkey: deposit_data.pubkey, - withdrawal_credentials: deposit_data.withdrawal_credentials, + pubkey, + withdrawal_credentials, activation_eligibility_epoch: spec.far_future_epoch, activation_epoch: spec.far_future_epoch, exit_epoch: spec.far_future_epoch, @@ -291,16 +292,6 @@ impl Validator { spec.max_effective_balance } } - - pub fn get_active_balance( - &self, - validator_balance: u64, - spec: &ChainSpec, - current_fork: ForkName, - ) -> u64 { - let max_effective_balance = self.get_max_effective_balance(spec, current_fork); - std::cmp::min(validator_balance, max_effective_balance) - } } impl Default for Validator { diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index b65b51230c3..d02e01b80cc 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -6,18 +6,18 @@ edition = { workspace = true } [dependencies] alloy-primitives = { workspace = true } +arbitrary = { workspace = true } +blst = { version = "0.3.3", optional = true } +ethereum_hashing = { workspace = true } +ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } -tree_hash = { workspace = true } +fixed_bytes = { workspace = true } +hex = { workspace = true } rand = { workspace = true } +safe_arith = { workspace = true } serde = { workspace = true } -ethereum_serde_utils = { workspace = true } -hex = { workspace = true 
} -ethereum_hashing = { workspace = true } -arbitrary = { workspace = true } +tree_hash = { workspace = true } zeroize = { workspace = true } -blst = { version = "0.3.3", optional = true } -safe_arith = { workspace = true } -fixed_bytes = { workspace = true } [features] arbitrary = [] diff --git a/crypto/eth2_key_derivation/Cargo.toml b/crypto/eth2_key_derivation/Cargo.toml index a0237ba7ede..a893a9360dc 100644 --- a/crypto/eth2_key_derivation/Cargo.toml +++ b/crypto/eth2_key_derivation/Cargo.toml @@ -3,15 +3,14 @@ name = "eth2_key_derivation" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sha2 = { workspace = true } -zeroize = { workspace = true } +bls = { workspace = true } num-bigint-dig = { version = "0.8.4", features = ["zeroize"] } ring = { workspace = true } -bls = { workspace = true } +sha2 = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] hex = { workspace = true } diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index bb6222807bd..61d2722efbd 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -3,25 +3,24 @@ name = "eth2_keystore" version = "0.1.0" authors = ["Pawan Dhananjay "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = { workspace = true } +aes = { version = "0.7", features = ["ctr"] } +bls = { workspace = true } +eth2_key_derivation = { workspace = true } +hex = { workspace = true } hmac = "0.11.0" pbkdf2 = { version = "0.8.0", default-features = false } +rand = { workspace = true } scrypt = { version = "0.7.0", default-features = false } -sha2 = { workspace = true } -uuid = { workspace = true } -zeroize = { workspace = true } serde = { workspace = true } -serde_repr = { workspace = true } -hex = { workspace = true } 
-bls = { workspace = true } serde_json = { workspace = true } -eth2_key_derivation = { workspace = true } +serde_repr = { workspace = true } +sha2 = { workspace = true } unicode-normalization = "0.1.16" -aes = { version = "0.7", features = ["ctr"] } +uuid = { workspace = true } +zeroize = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/crypto/eth2_keystore/tests/eip2335_vectors.rs b/crypto/eth2_keystore/tests/eip2335_vectors.rs index 3702a218163..e6852cc6081 100644 --- a/crypto/eth2_keystore/tests/eip2335_vectors.rs +++ b/crypto/eth2_keystore/tests/eip2335_vectors.rs @@ -58,7 +58,7 @@ fn eip2335_test_vector_scrypt() { } "#; - let keystore = decode_and_check_sk(&vector); + let keystore = decode_and_check_sk(vector); assert_eq!( *keystore.uuid(), Uuid::parse_str("1d85ae20-35c5-4611-98e8-aa14a633906f").unwrap(), @@ -102,7 +102,7 @@ fn eip2335_test_vector_pbkdf() { } "#; - let keystore = decode_and_check_sk(&vector); + let keystore = decode_and_check_sk(vector); assert_eq!( *keystore.uuid(), Uuid::parse_str("64625def-3331-4eea-ab6f-782f3ed16a83").unwrap(), diff --git a/crypto/eth2_keystore/tests/tests.rs b/crypto/eth2_keystore/tests/tests.rs index 0df884b8a27..20bf9f1653d 100644 --- a/crypto/eth2_keystore/tests/tests.rs +++ b/crypto/eth2_keystore/tests/tests.rs @@ -54,25 +54,17 @@ fn file() { let dir = tempdir().unwrap(); let path = dir.path().join("keystore.json"); - let get_file = || { - File::options() - .write(true) - .read(true) - .create(true) - .open(path.clone()) - .expect("should create file") - }; - let keystore = KeystoreBuilder::new(&keypair, GOOD_PASSWORD, "".into()) .unwrap() .build() .unwrap(); keystore - .to_json_writer(&mut get_file()) + .to_json_writer(File::create_new(&path).unwrap()) .expect("should write to file"); - let decoded = Keystore::from_json_reader(&mut get_file()).expect("should read from file"); + let decoded = + Keystore::from_json_reader(File::open(&path).unwrap()).expect("should read from file"); 
assert_eq!( decoded.decrypt_keypair(BAD_PASSWORD).err().unwrap(), diff --git a/crypto/eth2_wallet/Cargo.toml b/crypto/eth2_wallet/Cargo.toml index f3af6aab592..5327bdc163b 100644 --- a/crypto/eth2_wallet/Cargo.toml +++ b/crypto/eth2_wallet/Cargo.toml @@ -3,18 +3,17 @@ name = "eth2_wallet" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +eth2_key_derivation = { workspace = true } +eth2_keystore = { workspace = true } +rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_repr = { workspace = true } -uuid = { workspace = true } -rand = { workspace = true } -eth2_keystore = { workspace = true } -eth2_key_derivation = { workspace = true } tiny-bip39 = "1" +uuid = { workspace = true } [dev-dependencies] hex = { workspace = true } diff --git a/crypto/eth2_wallet/tests/tests.rs b/crypto/eth2_wallet/tests/tests.rs index fe4565e0dbc..3dc073f764d 100644 --- a/crypto/eth2_wallet/tests/tests.rs +++ b/crypto/eth2_wallet/tests/tests.rs @@ -132,20 +132,11 @@ fn file_round_trip() { let dir = tempdir().unwrap(); let path = dir.path().join("keystore.json"); - let get_file = || { - File::options() - .write(true) - .read(true) - .create(true) - .open(path.clone()) - .expect("should create file") - }; - wallet - .to_json_writer(&mut get_file()) + .to_json_writer(File::create_new(&path).unwrap()) .expect("should write to file"); - let decoded = Wallet::from_json_reader(&mut get_file()).unwrap(); + let decoded = Wallet::from_json_reader(File::open(&path).unwrap()).unwrap(); assert_eq!( decoded.decrypt_seed(&[1, 2, 3]).err().unwrap(), diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index ce55f83639b..bfe0f19cd0e 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -3,22 +3,21 @@ name = "kzg" version = "0.1.0" authors = ["Pawan Dhananjay "] edition = "2021" - # See more keys and their 
definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] arbitrary = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } +c-kzg = { workspace = true } derivative = { workspace = true } -serde = { workspace = true } +ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } hex = { workspace = true } -ethereum_hashing = { workspace = true } -c-kzg = { workspace = true } rust_eth_kzg = { workspace = true } +serde = { workspace = true } serde_json = { workspace = true } +tree_hash = { workspace = true } [dev-dependencies] criterion = { workspace = true } diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml index 96176f3fba5..a7a54b1416c 100644 --- a/database_manager/Cargo.toml +++ b/database_manager/Cargo.toml @@ -10,8 +10,8 @@ clap = { workspace = true } clap_utils = { workspace = true } environment = { workspace = true } hex = { workspace = true } -store = { workspace = true } -types = { workspace = true } +serde = { workspace = true } slog = { workspace = true } +store = { workspace = true } strum = { workspace = true } -serde = { workspace = true } +types = { workspace = true } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 9612bded475..72be77a70bd 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -11,36 +11,36 @@ fake_crypto = ['bls/fake_crypto'] jemalloc = ["malloc_utils/jemalloc"] [dependencies] +account_utils = { workspace = true } +beacon_chain = { workspace = true } bls = { workspace = true } clap = { workspace = true } -log = { workspace = true } -sloggers = { workspace = true } -serde = { workspace = true } -serde_yaml = { workspace = true } -serde_json = { workspace = true } +clap_utils = { workspace = true } +deposit_contract = { workspace = true } env_logger = { workspace = true } -types = { workspace = true } 
-state_processing = { workspace = true } -ethereum_hashing = { workspace = true } -ethereum_ssz = { workspace = true } environment = { workspace = true } +eth2 = { workspace = true } eth2_network_config = { workspace = true } -deposit_contract = { workspace = true } -tree_hash = { workspace = true } -clap_utils = { workspace = true } +eth2_wallet = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_ssz = { workspace = true } +execution_layer = { workspace = true } +hex = { workspace = true } lighthouse_network = { workspace = true } -validator_dir = { workspace = true } lighthouse_version = { workspace = true } -account_utils = { workspace = true } -eth2_wallet = { workspace = true } -eth2 = { workspace = true } -snap = { workspace = true } -beacon_chain = { workspace = true } -store = { workspace = true } +log = { workspace = true } malloc_utils = { workspace = true } rayon = { workspace = true } -execution_layer = { workspace = true } -hex = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +serde_yaml = { workspace = true } +sloggers = { workspace = true } +snap = { workspace = true } +state_processing = { workspace = true } +store = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } +validator_dir = { workspace = true } [package.metadata.cargo-udeps.ignore] normal = ["malloc_utils"] diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 1c91b18e9cc..eda9a2ebf27 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -34,48 +34,47 @@ malloc_utils = { workspace = true, features = ["jemalloc"] } malloc_utils = { workspace = true } [dependencies] +account_manager = { "path" = "../account_manager" } +account_utils = { workspace = true } beacon_node = { workspace = true } -slog = { workspace = true } -types = { workspace = true } bls = { workspace = true } -ethereum_hashing = { workspace = true } -clap = { workspace = true } -environment = { workspace = true 
} boot_node = { path = "../boot_node" } -futures = { workspace = true } -validator_client = { workspace = true } -account_manager = { "path" = "../account_manager" } +clap = { workspace = true } clap_utils = { workspace = true } +database_manager = { path = "../database_manager" } +directory = { workspace = true } +environment = { workspace = true } eth2_network_config = { workspace = true } +ethereum_hashing = { workspace = true } +futures = { workspace = true } lighthouse_version = { workspace = true } -account_utils = { workspace = true } +logging = { workspace = true } +malloc_utils = { workspace = true } metrics = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } +slasher = { workspace = true } +slog = { workspace = true } task_executor = { workspace = true } -malloc_utils = { workspace = true } -directory = { workspace = true } +types = { workspace = true } unused_port = { workspace = true } -database_manager = { path = "../database_manager" } -slasher = { workspace = true } +validator_client = { workspace = true } validator_manager = { path = "../validator_manager" } -logging = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } -validator_dir = { workspace = true } -slashing_protection = { workspace = true } -lighthouse_network = { workspace = true } -sensitive_url = { workspace = true } +beacon_node_fallback = { workspace = true } +beacon_processor = { workspace = true } eth1 = { workspace = true } eth2 = { workspace = true } -beacon_processor = { workspace = true } -beacon_node_fallback = { workspace = true } initialized_validators = { workspace = true } +lighthouse_network = { workspace = true } +sensitive_url = { workspace = true } +slashing_protection = { workspace = true } +tempfile = { workspace = true } +validator_dir = { workspace = true } zeroize = { workspace = true } - [[test]] name = "lighthouse_tests" path = "tests/main.rs" diff --git 
a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index f95751392c8..02b8e0b6552 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -6,19 +6,19 @@ edition = { workspace = true } [dependencies] async-channel = { workspace = true } -tokio = { workspace = true } -slog = { workspace = true } -sloggers = { workspace = true } -types = { workspace = true } eth2_config = { workspace = true } -task_executor = { workspace = true } eth2_network_config = { workspace = true } +futures = { workspace = true } logging = { workspace = true } -slog-term = { workspace = true } +serde = { workspace = true } +slog = { workspace = true } slog-async = { workspace = true } -futures = { workspace = true } slog-json = "2.3.0" -serde = { workspace = true } +slog-term = { workspace = true } +sloggers = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } [target.'cfg(not(target_family = "unix"))'.dependencies] ctrlc = { version = "3.1.6", features = ["termination"] } diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index c7153f48ef5..d53d042fa4e 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -115,7 +115,7 @@ fn create_wallet>( .arg(base_dir.as_ref().as_os_str()) .arg(CREATE_CMD) .arg(format!("--{}", NAME_FLAG)) - .arg(&name) + .arg(name) .arg(format!("--{}", PASSWORD_FLAG)) .arg(password.as_ref().as_os_str()) .arg(format!("--{}", MNEMONIC_FLAG)) @@ -273,16 +273,16 @@ impl TestValidator { .expect("stdout is not utf8") .to_string(); - if stdout == "" { + if stdout.is_empty() { return Ok(vec![]); } let pubkeys = stdout[..stdout.len() - 1] .split("\n") - .filter_map(|line| { + .map(|line| { let tab = line.find("\t").expect("line must have tab"); let (_, pubkey) = line.split_at(tab + 1); - Some(pubkey.to_string()) + pubkey.to_string() }) .collect::>(); @@ -446,7 +446,9 @@ fn 
validator_import_launchpad() { } } - stdin.write(format!("{}\n", PASSWORD).as_bytes()).unwrap(); + stdin + .write_all(format!("{}\n", PASSWORD).as_bytes()) + .unwrap(); child.wait().unwrap(); @@ -504,7 +506,7 @@ fn validator_import_launchpad() { }; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); @@ -525,7 +527,7 @@ fn validator_import_launchpad() { expected_def.enabled = true; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); } @@ -582,7 +584,7 @@ fn validator_import_launchpad_no_password_then_add_password() { let mut child = validator_import_key_cmd(); wait_for_password_prompt(&mut child); let stdin = child.stdin.as_mut().unwrap(); - stdin.write("\n".as_bytes()).unwrap(); + stdin.write_all("\n".as_bytes()).unwrap(); child.wait().unwrap(); assert!( @@ -628,14 +630,16 @@ fn validator_import_launchpad_no_password_then_add_password() { }; assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); let mut child = validator_import_key_cmd(); wait_for_password_prompt(&mut child); let stdin = child.stdin.as_mut().unwrap(); - stdin.write(format!("{}\n", PASSWORD).as_bytes()).unwrap(); + stdin + .write_all(format!("{}\n", PASSWORD).as_bytes()) + .unwrap(); child.wait().unwrap(); let expected_def = ValidatorDefinition { @@ -657,7 +661,7 @@ fn validator_import_launchpad_no_password_then_add_password() { let defs = ValidatorDefinitions::open(&dst_dir).unwrap(); assert!( - defs.as_slice() == &[expected_def.clone()], + defs.as_slice() == [expected_def.clone()], "validator defs file should be accurate" ); } @@ -759,7 +763,7 @@ fn validator_import_launchpad_password_file() { }; assert!( - defs.as_slice() == &[expected_def], + defs.as_slice() == [expected_def], "validator defs file should be accurate" ); } diff 
--git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 80986653c16..88e05dfa12d 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -9,7 +9,6 @@ use beacon_node::beacon_chain::graffiti_calculator::GraffitiOrigin; use beacon_processor::BeaconProcessorConfig; use eth1::Eth1Endpoint; use lighthouse_network::PeerId; -use lighthouse_version; use std::fs::File; use std::io::{Read, Write}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; @@ -128,7 +127,7 @@ fn allow_insecure_genesis_sync_default() { CommandLineTest::new() .run_with_zero_port_and_no_genesis_sync() .with_config(|config| { - assert_eq!(config.allow_insecure_genesis_sync, false); + assert!(!config.allow_insecure_genesis_sync); }); } @@ -146,7 +145,7 @@ fn allow_insecure_genesis_sync_enabled() { .flag("allow-insecure-genesis-sync", None) .run_with_zero_port_and_no_genesis_sync() .with_config(|config| { - assert_eq!(config.allow_insecure_genesis_sync, true); + assert!(config.allow_insecure_genesis_sync); }); } @@ -359,11 +358,11 @@ fn default_graffiti() { #[test] fn trusted_peers_flag() { - let peers = vec![PeerId::random(), PeerId::random()]; + let peers = [PeerId::random(), PeerId::random()]; CommandLineTest::new() .flag( "trusted-peers", - Some(format!("{},{}", peers[0].to_string(), peers[1].to_string()).as_str()), + Some(format!("{},{}", peers[0], peers[1]).as_str()), ) .run_with_zero_port() .with_config(|config| { @@ -383,7 +382,7 @@ fn genesis_backfill_flag() { CommandLineTest::new() .flag("genesis-backfill", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); + .with_config(|config| assert!(config.chain.genesis_backfill)); } /// The genesis backfill flag should be enabled if historic states flag is set. 
@@ -392,7 +391,7 @@ fn genesis_backfill_with_historic_flag() { CommandLineTest::new() .flag("reconstruct-historic-states", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); + .with_config(|config| assert!(config.chain.genesis_backfill)); } // Tests for Eth1 flags. @@ -448,7 +447,7 @@ fn eth1_cache_follow_distance_manual() { // Tests for Bellatrix flags. fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { use sensitive_url::SensitiveUrl; - let urls = vec!["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; + let urls = ["http://sigp.io/no-way:1337", "http://infura.not_real:4242"]; // we don't support redundancy for execution-endpoints // only the first provided endpoint is parsed. @@ -480,10 +479,10 @@ fn run_bellatrix_execution_endpoints_flag_test(flag: &str) { .run_with_zero_port() .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); - assert_eq!(config.execution_endpoint.is_some(), true); + assert!(config.execution_endpoint.is_some()); assert_eq!( config.execution_endpoint.as_ref().unwrap().clone(), - SensitiveUrl::parse(&urls[0]).unwrap() + SensitiveUrl::parse(urls[0]).unwrap() ); // Only the first secret file should be used. assert_eq!( @@ -595,7 +594,7 @@ fn run_payload_builder_flag_test(flag: &str, builders: &str) { let config = config.execution_layer.as_ref().unwrap(); // Only first provided endpoint is parsed as we don't support // redundancy. 
- assert_eq!(config.builder_url, all_builders.get(0).cloned()); + assert_eq!(config.builder_url, all_builders.first().cloned()); }) } fn run_payload_builder_flag_test_with_config( @@ -661,7 +660,7 @@ fn builder_fallback_flags() { Some("builder-fallback-disable-checks"), None, |config| { - assert_eq!(config.chain.builder_fallback_disable_checks, true); + assert!(config.chain.builder_fallback_disable_checks); }, ); } @@ -1657,19 +1656,19 @@ fn http_enable_beacon_processor() { CommandLineTest::new() .flag("http", None) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); + .with_config(|config| assert!(config.http_api.enable_beacon_processor)); CommandLineTest::new() .flag("http", None) .flag("http-enable-beacon-processor", Some("true")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, true)); + .with_config(|config| assert!(config.http_api.enable_beacon_processor)); CommandLineTest::new() .flag("http", None) .flag("http-enable-beacon-processor", Some("false")) .run_with_zero_port() - .with_config(|config| assert_eq!(config.http_api.enable_beacon_processor, false)); + .with_config(|config| assert!(!config.http_api.enable_beacon_processor)); } #[test] fn http_tls_flags() { @@ -2221,7 +2220,7 @@ fn slasher_broadcast_flag_false() { }); } -#[cfg(all(feature = "slasher-lmdb"))] +#[cfg(feature = "slasher-lmdb")] #[test] fn slasher_backend_override_to_default() { // Hard to test this flag because all but one backend is disabled by default and the backend @@ -2429,7 +2428,7 @@ fn logfile_no_restricted_perms_flag() { .flag("logfile-no-restricted-perms", None) .run_with_zero_port() .with_config(|config| { - assert!(config.logger_config.is_restricted == false); + assert!(!config.logger_config.is_restricted); }); } #[test] @@ -2454,7 +2453,7 @@ fn logfile_format_flag() { fn sync_eth1_chain_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| 
assert_eq!(config.sync_eth1_chain, true)); + .with_config(|config| assert!(config.sync_eth1_chain)); } #[test] @@ -2467,7 +2466,7 @@ fn sync_eth1_chain_execution_endpoints_flag() { dir.path().join("jwt-file").as_os_str().to_str(), ) .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, true)); + .with_config(|config| assert!(config.sync_eth1_chain)); } #[test] @@ -2481,7 +2480,7 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { dir.path().join("jwt-file").as_os_str().to_str(), ) .run_with_zero_port() - .with_config(|config| assert_eq!(config.sync_eth1_chain, false)); + .with_config(|config| assert!(!config.sync_eth1_chain)); } #[test] @@ -2504,9 +2503,9 @@ fn light_client_server_default() { CommandLineTest::new() .run_with_zero_port() .with_config(|config| { - assert_eq!(config.network.enable_light_client_server, false); - assert_eq!(config.chain.enable_light_client_server, false); - assert_eq!(config.http_api.enable_light_client_server, false); + assert!(!config.network.enable_light_client_server); + assert!(!config.chain.enable_light_client_server); + assert!(!config.http_api.enable_light_client_server); }); } @@ -2516,8 +2515,8 @@ fn light_client_server_enabled() { .flag("light-client-server", None) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.network.enable_light_client_server, true); - assert_eq!(config.chain.enable_light_client_server, true); + assert!(config.network.enable_light_client_server); + assert!(config.chain.enable_light_client_server); }); } @@ -2528,7 +2527,7 @@ fn light_client_http_server_enabled() { .flag("light-client-server", None) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.http_api.enable_light_client_server, true); + assert!(config.http_api.enable_light_client_server); }); } diff --git a/lighthouse/tests/boot_node.rs b/lighthouse/tests/boot_node.rs index 659dea468de..b243cd6001e 100644 --- a/lighthouse/tests/boot_node.rs +++ b/lighthouse/tests/boot_node.rs @@ 
-149,7 +149,7 @@ fn disable_packet_filter_flag() { .flag("disable-packet-filter", None) .run_with_ip() .with_config(|config| { - assert_eq!(config.disable_packet_filter, true); + assert!(config.disable_packet_filter); }); } @@ -159,7 +159,7 @@ fn enable_enr_auto_update_flag() { .flag("enable-enr-auto-update", None) .run_with_ip() .with_config(|config| { - assert_eq!(config.enable_enr_auto_update, true); + assert!(config.enable_enr_auto_update); }); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 587001f77bd..c5b303e4d18 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -136,7 +136,7 @@ fn beacon_nodes_tls_certs_flag() { .flag( "beacon-nodes-tls-certs", Some( - vec![ + [ dir.path().join("certificate.crt").to_str().unwrap(), dir.path().join("certificate2.crt").to_str().unwrap(), ] @@ -205,7 +205,7 @@ fn graffiti_file_with_pk_flag() { let mut file = File::create(dir.path().join("graffiti.txt")).expect("Unable to create file"); let new_key = Keypair::random(); let pubkeybytes = PublicKeyBytes::from(new_key.pk); - let contents = format!("{}:nice-graffiti", pubkeybytes.to_string()); + let contents = format!("{}:nice-graffiti", pubkeybytes); file.write_all(contents.as_bytes()) .expect("Unable to write to file"); CommandLineTest::new() @@ -419,13 +419,13 @@ pub fn malloc_tuning_flag() { CommandLineTest::new() .flag("disable-malloc-tuning", None) .run() - .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, false)); + .with_config(|config| assert!(!config.http_metrics.allocator_metrics_enabled)); } #[test] pub fn malloc_tuning_default() { CommandLineTest::new() .run() - .with_config(|config| assert_eq!(config.http_metrics.allocator_metrics_enabled, true)); + .with_config(|config| assert!(config.http_metrics.allocator_metrics_enabled)); } #[test] fn doppelganger_protection_flag() { diff --git a/lighthouse/tests/validator_manager.rs 
b/lighthouse/tests/validator_manager.rs index 999f3c31415..04e3eafe6eb 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -136,7 +136,7 @@ pub fn validator_create_defaults() { count: 1, deposit_gwei: MainnetEthSpec::default_spec().max_effective_balance, mnemonic_path: None, - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), disable_deposits: false, specify_voting_keystore_password: false, eth1_withdrawal_address: None, @@ -201,7 +201,7 @@ pub fn validator_create_disable_deposits() { .flag("--disable-deposits", None) .flag("--builder-proposals", Some("false")) .assert_success(|config| { - assert_eq!(config.disable_deposits, true); + assert!(config.disable_deposits); assert_eq!(config.builder_proposals, Some(false)); }); } @@ -300,7 +300,7 @@ pub fn validator_move_defaults() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -350,7 +350,7 @@ pub fn validator_move_misc_flags_1() { .flag("--src-vc-token", Some("./1.json")) .flag("--dest-vc-url", Some("http://localhost:2")) .flag("--dest-vc-token", Some("./2.json")) - .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) + .flag("--validators", Some(EXAMPLE_PUBKEY_0)) .flag("--builder-proposals", Some("false")) .flag("--prefer-builder-proposals", Some("false")) .assert_success(|config| { @@ -368,7 +368,7 @@ pub fn validator_move_misc_flags_1() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -382,7 +382,7 @@ pub fn validator_move_misc_flags_2() { .flag("--src-vc-token", Some("./1.json")) .flag("--dest-vc-url", Some("http://localhost:2")) .flag("--dest-vc-token", Some("./2.json")) - .flag("--validators", Some(&format!("{}", EXAMPLE_PUBKEY_0))) + 
.flag("--validators", Some(EXAMPLE_PUBKEY_0)) .flag("--builder-proposals", Some("false")) .flag("--builder-boost-factor", Some("100")) .assert_success(|config| { @@ -400,7 +400,7 @@ pub fn validator_move_misc_flags_2() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); @@ -428,7 +428,7 @@ pub fn validator_move_count() { fee_recipient: None, gas_limit: None, password_source: PasswordSource::Interactive { - stdin_inputs: cfg!(windows) || false, + stdin_inputs: cfg!(windows), }, }; assert_eq!(expected, config); diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index ca701eb7e91..159c89badbc 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -1,6 +1,6 @@ # Simple Local Testnet -These scripts allow for running a small local testnet with a default of 4 beacon nodes, 4 validator clients and 4 geth execution clients using Kurtosis. +These scripts allow for running a small local testnet with a default of 4 beacon nodes, 4 validator clients and 4 Geth execution clients using Kurtosis. This setup can be useful for testing and development. ## Installation @@ -9,7 +9,7 @@ This setup can be useful for testing and development. 1. Install [Kurtosis](https://docs.kurtosis.com/install/). Verify that Kurtosis has been successfully installed by running `kurtosis version` which should display the version. -1. Install [yq](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. +1. Install [`yq`](https://github.com/mikefarah/yq). If you are on Ubuntu, you can install `yq` by running `snap install yq`. ## Starting the testnet @@ -22,7 +22,7 @@ cd ./scripts/local_testnet It will build a Lighthouse docker image from the root of the directory and will take an approximately 12 minutes to complete. 
Once built, the testing will be started automatically. You will see a list of services running and "Started!" at the end. You can also select your own Lighthouse docker image to use by specifying it in `network_params.yml` under the `cl_image` key. -Full configuration reference for kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). +Full configuration reference for Kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). To view all running services: @@ -36,7 +36,7 @@ To view the logs: kurtosis service logs local-testnet $SERVICE_NAME ``` -where `$SERVICE_NAME` is obtained by inspecting the running services above. For example, to view the logs of the first beacon node, validator client and geth: +where `$SERVICE_NAME` is obtained by inspecting the running services above. For example, to view the logs of the first beacon node, validator client and Geth: ```bash kurtosis service logs local-testnet -f cl-1-lighthouse-geth diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index 56a023df0bb..fcecc2fc233 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -17,31 +17,31 @@ byteorder = { workspace = true } derivative = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } -metrics = { workspace = true } filesystem = { workspace = true } +flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } +lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } +lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lru = { workspace = true } + +# MDBX is pinned at the last version with Windows and macOS support. 
+mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = "e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a", optional = true } +metrics = { workspace = true } parking_lot = { workspace = true } rand = { workspace = true } + +redb = { version = "2.1.4", optional = true } safe_arith = { workspace = true } serde = { workspace = true } slog = { workspace = true } +ssz_types = { workspace = true } +strum = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } types = { workspace = true } -strum = { workspace = true } -ssz_types = { workspace = true } - -# MDBX is pinned at the last version with Windows and macOS support. -mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", rev = "e6ff4b9377c1619bcf0bfdf52bee5a980a432a1a", optional = true } -lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } -lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } - -redb = { version = "2.1.4", optional = true } [dev-dependencies] +logging = { workspace = true } maplit = { workspace = true } rayon = { workspace = true } tempfile = { workspace = true } -logging = { workspace = true } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 6012283e111..d93f3a55788 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -12,28 +12,28 @@ portable = ["beacon_chain/portable"] [dependencies] alloy-primitives = { workspace = true } +beacon_chain = { workspace = true } bls = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } derivative = { workspace = true } +eth2_network_config = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } +execution_layer = { workspace = true } +fork_choice = { workspace = true } +fs2 = { workspace = true } hex 
= { workspace = true } kzg = { workspace = true } +logging = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } -eth2_network_config = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } -tree_hash = { workspace = true } -tree_hash_derive = { workspace = true } +snap = { workspace = true } state_processing = { workspace = true } swap_or_not_shuffle = { workspace = true } +tree_hash = { workspace = true } +tree_hash_derive = { workspace = true } types = { workspace = true } -snap = { workspace = true } -fs2 = { workspace = true } -beacon_chain = { workspace = true } -fork_choice = { workspace = true } -execution_layer = { workspace = true } -logging = { workspace = true } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 390711079f4..d5f4997bb7e 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.6 +TESTS_TAG := v1.5.0-alpha.8 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index dfd782a22b3..c1adf107704 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -86,7 +86,7 @@ type_name!(RewardsAndPenalties, "rewards_and_penalties"); type_name!(RegistryUpdates, "registry_updates"); type_name!(Slashings, "slashings"); type_name!(Eth1DataReset, "eth1_data_reset"); -type_name!(PendingBalanceDeposits, "pending_balance_deposits"); +type_name!(PendingBalanceDeposits, "pending_deposits"); type_name!(PendingConsolidations, "pending_consolidations"); type_name!(EffectiveBalanceUpdates, "effective_balance_updates"); type_name!(SlashingsReset, "slashings_reset"); @@ -193,7 +193,7 @@ impl EpochTransition for 
PendingBalanceDeposits { state, spec, SinglePassConfig { - pending_balance_deposits: true, + pending_deposits: true, ..SinglePassConfig::disable_all() }, ) @@ -363,7 +363,7 @@ impl> Case for EpochProcessing { } if !fork_name.electra_enabled() - && (T::name() == "pending_consolidations" || T::name() == "pending_balance_deposits") + && (T::name() == "pending_consolidations" || T::name() == "pending_deposits") { return false; } diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index a9322e5dd5e..c50032a63de 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -134,7 +134,7 @@ type_name_generic!(LightClientUpdateElectra, "LightClientUpdate"); type_name_generic!(PendingAttestation); type_name!(PendingConsolidation); type_name!(PendingPartialWithdrawal); -type_name!(PendingBalanceDeposit); +type_name!(PendingDeposit); type_name!(ProposerSlashing); type_name_generic!(SignedAggregateAndProof); type_name_generic!(SignedAggregateAndProofBase, "SignedAggregateAndProof"); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 3f802d89447..292625a371a 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -243,8 +243,7 @@ mod ssz_static { use types::historical_summary::HistoricalSummary; use types::{ AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DepositRequest, - LightClientBootstrapAltair, PendingBalanceDeposit, PendingPartialWithdrawal, - WithdrawalRequest, *, + LightClientBootstrapAltair, PendingDeposit, PendingPartialWithdrawal, WithdrawalRequest, *, }; ssz_static_test!(attestation_data, AttestationData); @@ -661,8 +660,8 @@ mod ssz_static { #[test] fn pending_balance_deposit() { - SszStaticHandler::::electra_and_later().run(); - SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); + SszStaticHandler::::electra_and_later().run(); } #[test] diff --git 
a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index c76ef91183b..9b0ac5ec9b3 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -5,12 +5,12 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -tokio = { workspace = true } +deposit_contract = { workspace = true } +ethers-contract = "1.0.2" ethers-core = { workspace = true } ethers-providers = { workspace = true } -ethers-contract = "1.0.2" -types = { workspace = true } +hex = { workspace = true } serde_json = { workspace = true } -deposit_contract = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } unused_port = { workspace = true } -hex = { workspace = true } diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 015a632ff40..3cba908261a 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -1,6 +1,6 @@ //! Provides utilities for deploying and manipulating the eth2 deposit contract on the eth1 chain. //! -//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil) to simulate +//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil) to simulate //! the deposit contract for testing beacon node eth1 integration. //! //! Not tested to work with actual clients (e.g., geth). 
It should work fine, however there may be diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index 159561d5dd8..28ff944799c 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -5,22 +5,22 @@ edition = { workspace = true } [dependencies] async-channel = { workspace = true } -tempfile = { workspace = true } +deposit_contract = { workspace = true } +ethers-core = { workspace = true } +ethers-providers = { workspace = true } +execution_layer = { workspace = true } +fork_choice = { workspace = true } +futures = { workspace = true } +hex = { workspace = true } +logging = { workspace = true } +reqwest = { workspace = true } +sensitive_url = { workspace = true } serde_json = { workspace = true } task_executor = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true } -futures = { workspace = true } -execution_layer = { workspace = true } -sensitive_url = { workspace = true } types = { workspace = true } unused_port = { workspace = true } -ethers-providers = { workspace = true } -ethers-core = { workspace = true } -deposit_contract = { workspace = true } -reqwest = { workspace = true } -hex = { workspace = true } -fork_choice = { workspace = true } -logging = { workspace = true } [features] portable = ["types/portable"] diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index 97e73b8a2f3..0d9db528da4 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -5,14 +5,14 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -environment = { workspace = true } beacon_node = { workspace = true } -types = { workspace = true } -tempfile = { workspace = true } -eth2 = { workspace = true } -validator_client = { workspace = true } beacon_node_fallback = { workspace = true } -validator_dir = { workspace = true, features = ["insecure_keys"] } 
-sensitive_url = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } execution_layer = { workspace = true } +sensitive_url = { workspace = true } +tempfile = { workspace = true } tokio = { workspace = true } +types = { workspace = true } +validator_client = { workspace = true } +validator_dir = { workspace = true, features = ["insecure_keys"] } diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 7772523284a..77645dba457 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -3,20 +3,19 @@ name = "simulator" version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -node_test_rig = { path = "../node_test_rig" } +clap = { workspace = true } +env_logger = { workspace = true } +eth2_network_config = { workspace = true } execution_layer = { workspace = true } -types = { workspace = true } -parking_lot = { workspace = true } futures = { workspace = true } -tokio = { workspace = true } -env_logger = { workspace = true } -clap = { workspace = true } +kzg = { workspace = true } +node_test_rig = { path = "../node_test_rig" } +parking_lot = { workspace = true } rayon = { workspace = true } sensitive_url = { path = "../../common/sensitive_url" } -eth2_network_config = { workspace = true } serde_json = { workspace = true } -kzg = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 142a657f07e..7c297153463 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -3,15 +3,14 @@ name = "state_transition_vectors" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } - # See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -state_processing = { workspace = true } -types = { workspace = true } -ethereum_ssz = { workspace = true } beacon_chain = { workspace = true } +ethereum_ssz = { workspace = true } +state_processing = { workspace = true } tokio = { workspace = true } +types = { workspace = true } [features] -portable = ["beacon_chain/portable"] \ No newline at end of file +portable = ["beacon_chain/portable"] diff --git a/testing/test-test_logger/Cargo.toml b/testing/test-test_logger/Cargo.toml index 63bb87c06e5..d2d705f714a 100644 --- a/testing/test-test_logger/Cargo.toml +++ b/testing/test-test_logger/Cargo.toml @@ -2,7 +2,6 @@ name = "test-test_logger" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index 0096d74f647..376aa13406e 100644 --- a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -2,31 +2,30 @@ name = "web3signer_tests" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] [dev-dependencies] +account_utils = { workspace = true } async-channel = { workspace = true } +environment = { workspace = true } eth2_keystore = { workspace = true } -types = { workspace = true } -tempfile = { workspace = true } -tokio = { workspace = true } -reqwest = { workspace = true } -url = { workspace = true } -slot_clock = { workspace = true } +eth2_network_config = { workspace = true } futures = { workspace = true } -task_executor = { workspace = true } -environment = { workspace = true } -account_utils = { workspace = true } +initialized_validators = { workspace = true } +logging = { workspace = true } +parking_lot = { workspace = true } +reqwest = { workspace = true } serde = { 
workspace = true } -serde_yaml = { workspace = true } -eth2_network_config = { workspace = true } serde_json = { workspace = true } -zip = { workspace = true } -parking_lot = { workspace = true } -logging = { workspace = true } -initialized_validators = { workspace = true } +serde_yaml = { workspace = true } slashing_protection = { workspace = true } +slot_clock = { workspace = true } +task_executor = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +url = { workspace = true } validator_store = { workspace = true } +zip = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 4099d80f687..5b90cb40b80 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -173,6 +173,8 @@ mod tests { } impl Web3SignerRig { + // We need to hold that lock as we want to get the binary only once + #[allow(clippy::await_holding_lock)] pub async fn new(network: &str, listen_address: &str, listen_port: u16) -> Self { GET_WEB3SIGNER_BIN .get_or_init(|| async { @@ -210,7 +212,7 @@ mod tests { keystore_password_file: keystore_password_filename.to_string(), }; let key_config_file = - File::create(&keystore_dir.path().join("key-config.yaml")).unwrap(); + File::create(keystore_dir.path().join("key-config.yaml")).unwrap(); serde_yaml::to_writer(key_config_file, &key_config).unwrap(); let tls_keystore_file = tls_dir().join("web3signer").join("key.p12"); diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 582a44b6f72..504d96ae1c1 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -18,8 +18,10 @@ clap = { workspace = true } clap_utils = { workspace = true } directory = { workspace = true } dirs = { workspace = true } -eth2 = { workspace = true } +doppelganger_service = { workspace = true } environment = { workspace = true } +eth2 = { workspace = true } +fdlimit = "0.3.0" graffiti_file = { 
workspace = true } hyper = { workspace = true } initialized_validators = { workspace = true } @@ -28,15 +30,14 @@ monitoring_api = { workspace = true } parking_lot = { workspace = true } reqwest = { workspace = true } sensitive_url = { workspace = true } -slashing_protection = { workspace = true } serde = { workspace = true } +slashing_protection = { workspace = true } slog = { workspace = true } slot_clock = { workspace = true } +tokio = { workspace = true } types = { workspace = true } validator_http_api = { workspace = true } validator_http_metrics = { workspace = true } validator_metrics = { workspace = true } validator_services = { workspace = true } validator_store = { workspace = true } -tokio = { workspace = true } -fdlimit = "0.3.0" diff --git a/validator_client/doppelganger_service/Cargo.toml b/validator_client/doppelganger_service/Cargo.toml new file mode 100644 index 00000000000..66b61a411b6 --- /dev/null +++ b/validator_client/doppelganger_service/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "doppelganger_service" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[dependencies] +beacon_node_fallback = { workspace = true } +environment = { workspace = true } +eth2 = { workspace = true } +parking_lot = { workspace = true } +slog = { workspace = true } +slot_clock = { workspace = true } +task_executor = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } + +[dev-dependencies] +futures = { workspace = true } +logging = { workspace = true } diff --git a/validator_client/validator_store/src/doppelganger_service.rs b/validator_client/doppelganger_service/src/lib.rs similarity index 99% rename from validator_client/validator_store/src/doppelganger_service.rs rename to validator_client/doppelganger_service/src/lib.rs index 0fc3272ed8b..69d0e8e8dec 100644 --- a/validator_client/validator_store/src/doppelganger_service.rs +++ b/validator_client/doppelganger_service/src/lib.rs @@ -42,8 +42,6 @@ use 
task_executor::ShutdownReason; use tokio::time::sleep; use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; -use crate::ValidatorStore; - /// A wrapper around `PublicKeyBytes` which encodes information about the status of a validator /// pubkey with regards to doppelganger protection. #[derive(Debug, PartialEq)] @@ -115,6 +113,13 @@ struct LivenessResponses { /// validators on the network. pub const DEFAULT_REMAINING_DETECTION_EPOCHS: u64 = 1; +/// This crate cannot depend on ValidatorStore as validator_store depends on this crate and +/// initialises the doppelganger protection. For this reason, we abstract the validator store +/// functions this service needs through the following trait +pub trait DoppelgangerValidatorStore { + fn get_validator_index(&self, pubkey: &PublicKeyBytes) -> Option; +} + /// Store the per-validator status of doppelganger checking. #[derive(Debug, PartialEq)] pub struct DoppelgangerState { @@ -281,16 +286,17 @@ impl DoppelgangerService { /// Starts a reoccurring future which will try to keep the doppelganger service updated each /// slot. - pub fn start_update_service( + pub fn start_update_service( service: Arc, context: RuntimeContext, - validator_store: Arc>, + validator_store: Arc, beacon_nodes: Arc>, slot_clock: T, ) -> Result<(), String> where E: EthSpec, T: 'static + SlotClock, + V: DoppelgangerValidatorStore + Send + Sync + 'static, { // Define the `get_index` function as one that uses the validator store. 
let get_index = move |pubkey| validator_store.get_validator_index(&pubkey); diff --git a/validator_client/graffiti_file/Cargo.toml b/validator_client/graffiti_file/Cargo.toml index 02e48849d10..8868f5aec81 100644 --- a/validator_client/graffiti_file/Cargo.toml +++ b/validator_client/graffiti_file/Cargo.toml @@ -9,11 +9,11 @@ name = "graffiti_file" path = "src/lib.rs" [dependencies] -serde = { workspace = true } bls = { workspace = true } -types = { workspace = true } +serde = { workspace = true } slog = { workspace = true } +types = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } hex = { workspace = true } +tempfile = { workspace = true } diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index 92cd631e3b0..76a021ab8c3 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -10,24 +10,25 @@ path = "src/lib.rs" [dependencies] account_utils = { workspace = true } -bls = { workspace = true } beacon_node_fallback = { workspace = true } +bls = { workspace = true } deposit_contract = { workspace = true } directory = { workspace = true } dirs = { workspace = true } -graffiti_file = { workspace = true } +doppelganger_service = { workspace = true } eth2 = { workspace = true } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } +filesystem = { workspace = true } +graffiti_file = { workspace = true } initialized_validators = { workspace = true } lighthouse_version = { workspace = true } logging = { workspace = true } parking_lot = { workspace = true } -filesystem = { workspace = true } rand = { workspace = true } +sensitive_url = { workspace = true } serde = { workspace = true } signing_method = { workspace = true } -sensitive_url = { workspace = true } slashing_protection = { workspace = true } slog = { workspace = true } slot_clock = { workspace = true } @@ -38,15 +39,15 @@ tempfile = { workspace = true } tokio = { workspace = true } 
tokio-stream = { workspace = true } types = { workspace = true } +url = { workspace = true } validator_dir = { workspace = true } -validator_store = { workspace = true } validator_services = { workspace = true } -url = { workspace = true } -warp_utils = { workspace = true } +validator_store = { workspace = true } warp = { workspace = true } +warp_utils = { workspace = true } zeroize = { workspace = true } [dev-dependencies] -itertools = { workspace = true } futures = { workspace = true } +itertools = { workspace = true } rand = { workspace = true, features = ["small_rng"] } diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index 10a772e1fe6..5712689fc65 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -5,6 +5,7 @@ use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, }; use deposit_contract::decode_eth1_tx_data; +use doppelganger_service::DoppelgangerService; use eth2::{ lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*}, types::ErrorMessage as ApiErrorMessage, @@ -26,7 +27,6 @@ use task_executor::test_utils::TestRuntime; use tempfile::{tempdir, TempDir}; use tokio::sync::oneshot; use validator_services::block_service::BlockService; -use validator_store::doppelganger_service::DoppelgangerService; use validator_store::{Config as ValidatorStoreConfig, ValidatorStore}; use zeroize::Zeroizing; diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index d369d905d9b..1f2c5b5a261 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -3,8 +3,8 @@ mod keystores; +use doppelganger_service::DoppelgangerService; use initialized_validators::{Config as InitializedValidatorsConfig, InitializedValidators}; -use validator_store::doppelganger_service::DoppelgangerService; use crate::{ApiSecret, Config as HttpConfig, 
Context}; use account_utils::{ @@ -52,8 +52,10 @@ struct ApiTester { impl ApiTester { pub async fn new() -> Self { - let mut config = ValidatorStoreConfig::default(); - config.fee_recipient = Some(TEST_DEFAULT_FEE_RECIPIENT); + let config = ValidatorStoreConfig { + fee_recipient: Some(TEST_DEFAULT_FEE_RECIPIENT), + ..Default::default() + }; Self::new_with_config(config).await } @@ -138,7 +140,7 @@ impl ApiTester { let (listening_socket, server) = super::serve(ctx, test_runtime.task_executor.exit()).unwrap(); - tokio::spawn(async { server.await }); + tokio::spawn(server); let url = SensitiveUrl::parse(&format!( "http://{}:{}", @@ -344,22 +346,21 @@ impl ApiTester { .set_nextaccount(s.key_derivation_path_offset) .unwrap(); - for i in 0..s.count { + for validator in response.iter().take(s.count) { let keypairs = wallet .next_validator(PASSWORD_BYTES, PASSWORD_BYTES, PASSWORD_BYTES) .unwrap(); let voting_keypair = keypairs.voting.decrypt_keypair(PASSWORD_BYTES).unwrap(); assert_eq!( - response[i].voting_pubkey, + validator.voting_pubkey, voting_keypair.pk.clone().into(), "the locally generated voting pk should match the server response" ); let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); - let deposit_bytes = - serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); + let deposit_bytes = serde_utils::hex::decode(&validator.eth1_deposit_tx_data).unwrap(); let (deposit_data, _) = decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index 2dde087a7fd..6559a2bb9e5 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -130,7 +130,7 @@ fn check_keystore_get_response<'a>( for (ks1, ks2) in response.data.iter().zip_eq(expected_keystores) { assert_eq!(ks1.validating_pubkey, keystore_pubkey(ks2)); assert_eq!(ks1.derivation_path, 
ks2.path()); - assert!(ks1.readonly == None || ks1.readonly == Some(false)); + assert!(ks1.readonly.is_none() || ks1.readonly == Some(false)); } } @@ -147,7 +147,7 @@ fn check_keystore_import_response( } } -fn check_keystore_delete_response<'a>( +fn check_keystore_delete_response( response: &DeleteKeystoresResponse, expected_statuses: impl IntoIterator, ) { @@ -634,7 +634,7 @@ async fn check_get_set_fee_recipient() { assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: TEST_DEFAULT_FEE_RECIPIENT, } ); @@ -654,7 +654,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[1], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_public_key_1.clone(), + ethaddress: fee_recipient_public_key_1, }, ) .await @@ -667,14 +667,14 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_public_key_1.clone() + fee_recipient_public_key_1 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -686,7 +686,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[2], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_public_key_2.clone(), + ethaddress: fee_recipient_public_key_2, }, ) .await @@ -699,16 +699,16 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_public_key_1.clone() + fee_recipient_public_key_1 } else if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -720,7 +720,7 @@ async fn check_get_set_fee_recipient() { .post_fee_recipient( &all_pubkeys[1], &UpdateFeeRecipientRequest { - ethaddress: fee_recipient_override.clone(), + 
ethaddress: fee_recipient_override, }, ) .await @@ -732,16 +732,16 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 1 { - fee_recipient_override.clone() + fee_recipient_override } else if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -761,14 +761,14 @@ async fn check_get_set_fee_recipient() { .await .expect("should get fee recipient"); let expected = if i == 2 { - fee_recipient_public_key_2.clone() + fee_recipient_public_key_2 } else { TEST_DEFAULT_FEE_RECIPIENT }; assert_eq!( get_res, GetFeeRecipientResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, ethaddress: expected, } ); @@ -814,7 +814,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: DEFAULT_GAS_LIMIT, } ); @@ -843,14 +843,14 @@ async fn check_get_set_gas_limit() { .await .expect("should get gas limit"); let expected = if i == 1 { - gas_limit_public_key_1.clone() + gas_limit_public_key_1 } else { DEFAULT_GAS_LIMIT }; assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -884,7 +884,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -917,7 +917,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -944,7 +944,7 @@ async fn check_get_set_gas_limit() { assert_eq!( get_res, GetGasLimitResponse { - pubkey: pubkey.clone(), + pubkey: *pubkey, gas_limit: expected, } ); @@ -1305,7 +1305,7 @@ async fn delete_concurrent_with_signing() { let handle = handle.spawn(async move { for j in 0..num_attestations { 
let mut att = make_attestation(j, j + 1); - for (_validator_id, public_key) in thread_pubkeys.iter().enumerate() { + for public_key in thread_pubkeys.iter() { let _ = validator_store .sign_attestation(*public_key, 0, &mut att, Epoch::new(j + 1)) .await; @@ -2084,7 +2084,7 @@ async fn import_remotekey_web3signer_disabled() { web3signer_req.enable = false; // Import web3signers. - let _ = tester + tester .client .post_lighthouse_validators_web3signer(&vec![web3signer_req]) .await @@ -2148,8 +2148,11 @@ async fn import_remotekey_web3signer_enabled() { // 1 validator imported. assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); - let vals = tester.initialized_validators.read(); - let web3_vals = vals.validator_definitions(); + let web3_vals = tester + .initialized_validators + .read() + .validator_definitions() + .to_vec(); // Import remotekeys. let import_res = tester @@ -2166,11 +2169,13 @@ async fn import_remotekey_web3signer_enabled() { assert_eq!(tester.vals_total(), 1); assert_eq!(tester.vals_enabled(), 1); - let vals = tester.initialized_validators.read(); - let remote_vals = vals.validator_definitions(); + { + let vals = tester.initialized_validators.read(); + let remote_vals = vals.validator_definitions(); - // Web3signer should not be overwritten since it is enabled. - assert!(web3_vals == remote_vals); + // Web3signer should not be overwritten since it is enabled. + assert!(web3_vals == remote_vals); + } // Remotekey should not be imported. 
let expected_responses = vec![SingleListRemotekeysResponse { diff --git a/validator_client/http_metrics/Cargo.toml b/validator_client/http_metrics/Cargo.toml index a9de26a55bb..c29a4d18fa0 100644 --- a/validator_client/http_metrics/Cargo.toml +++ b/validator_client/http_metrics/Cargo.toml @@ -5,16 +5,16 @@ edition = { workspace = true } authors = ["Sigma Prime "] [dependencies] +lighthouse_version = { workspace = true } malloc_utils = { workspace = true } -slot_clock = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } serde = { workspace = true } slog = { workspace = true } -warp_utils = { workspace = true } -warp = { workspace = true } -lighthouse_version = { workspace = true } +slot_clock = { workspace = true } +types = { workspace = true } +validator_metrics = { workspace = true } validator_services = { workspace = true } validator_store = { workspace = true } -validator_metrics = { workspace = true } -types = { workspace = true } +warp = { workspace = true } +warp_utils = { workspace = true } diff --git a/validator_client/initialized_validators/Cargo.toml b/validator_client/initialized_validators/Cargo.toml index 9c7a3f19d60..05e85261f9a 100644 --- a/validator_client/initialized_validators/Cargo.toml +++ b/validator_client/initialized_validators/Cargo.toml @@ -5,23 +5,23 @@ edition = { workspace = true } authors = ["Sigma Prime "] [dependencies] -signing_method = { workspace = true } account_utils = { workspace = true } +bincode = { workspace = true } +bls = { workspace = true } eth2_keystore = { workspace = true } -metrics = { workspace = true } +filesystem = { workspace = true } lockfile = { workspace = true } +metrics = { workspace = true } parking_lot = { workspace = true } +rand = { workspace = true } reqwest = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +signing_method = { workspace = true } slog = { workspace = true } +tokio = { workspace = true } types = { workspace = true } 
url = { workspace = true } validator_dir = { workspace = true } -rand = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -bls = { workspace = true } -tokio = { workspace = true } -bincode = { workspace = true } -filesystem = { workspace = true } validator_metrics = { workspace = true } zeroize = { workspace = true } diff --git a/validator_client/signing_method/Cargo.toml b/validator_client/signing_method/Cargo.toml index 0f3852eff67..3e1a48142f9 100644 --- a/validator_client/signing_method/Cargo.toml +++ b/validator_client/signing_method/Cargo.toml @@ -6,12 +6,12 @@ authors = ["Sigma Prime "] [dependencies] eth2_keystore = { workspace = true } +ethereum_serde_utils = { workspace = true } lockfile = { workspace = true } parking_lot = { workspace = true } reqwest = { workspace = true } +serde = { workspace = true } task_executor = { workspace = true } types = { workspace = true } url = { workspace = true } validator_metrics = { workspace = true } -serde = { workspace = true } -ethereum_serde_utils = { workspace = true } diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 6982958bd56..1a098742d89 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -10,16 +10,16 @@ name = "slashing_protection_tests" path = "tests/main.rs" [dependencies] -tempfile = { workspace = true } -types = { workspace = true } -rusqlite = { workspace = true } +arbitrary = { workspace = true, features = ["derive"] } +ethereum_serde_utils = { workspace = true } +filesystem = { workspace = true } r2d2 = { workspace = true } r2d2_sqlite = "0.21.0" +rusqlite = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -ethereum_serde_utils = { workspace = true } -filesystem = { workspace = true } -arbitrary = { workspace = true, features = ["derive"] } +tempfile = { workspace = true } +types = { workspace = true 
} [dev-dependencies] rayon = { workspace = true } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 8a3c44dbe8c..625fd96cbba 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -17,6 +17,7 @@ use beacon_node_fallback::{ use account_utils::validator_definitions::ValidatorDefinitions; use clap::ArgMatches; +use doppelganger_service::DoppelgangerService; use environment::RuntimeContext; use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Timeouts}; use initialized_validators::Error::UnableToOpenVotingKeystore; @@ -46,7 +47,6 @@ use validator_services::{ sync::SyncDutiesMap, sync_committee_service::SyncCommitteeService, }; -use validator_store::doppelganger_service::DoppelgangerService; use validator_store::ValidatorStore; /// The interval between attempts to contact the beacon node during startup. diff --git a/validator_client/validator_services/Cargo.toml b/validator_client/validator_services/Cargo.toml index 7aed9b2b2d0..21f0ae2d776 100644 --- a/validator_client/validator_services/Cargo.toml +++ b/validator_client/validator_services/Cargo.toml @@ -6,17 +6,18 @@ authors = ["Sigma Prime "] [dependencies] beacon_node_fallback = { workspace = true } -validator_metrics = { workspace = true } -validator_store = { workspace = true } -graffiti_file = { workspace = true } +bls = { workspace = true } +doppelganger_service = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } futures = { workspace = true } +graffiti_file = { workspace = true } parking_lot = { workspace = true } safe_arith = { workspace = true } slog = { workspace = true } slot_clock = { workspace = true } tokio = { workspace = true } -types = { workspace = true } tree_hash = { workspace = true } -bls = { workspace = true } +types = { workspace = true } +validator_metrics = { workspace = true } +validator_store = { workspace = true } diff --git a/validator_client/validator_services/src/duties_service.rs 
b/validator_client/validator_services/src/duties_service.rs index 110f7306291..4be1c871fa7 100644 --- a/validator_client/validator_services/src/duties_service.rs +++ b/validator_client/validator_services/src/duties_service.rs @@ -10,6 +10,7 @@ use crate::block_service::BlockServiceNotification; use crate::sync::poll_sync_committee_duties; use crate::sync::SyncDutiesMap; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; +use doppelganger_service::DoppelgangerStatus; use environment::RuntimeContext; use eth2::types::{ AttesterData, BeaconCommitteeSubscription, DutiesResponse, ProposerData, StateId, ValidatorId, @@ -27,7 +28,6 @@ use std::time::Duration; use tokio::{sync::mpsc::Sender, time::sleep}; use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; use validator_metrics::{get_int_gauge, set_int_gauge, ATTESTATION_DUTY}; -use validator_store::doppelganger_service::DoppelgangerStatus; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; /// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch. 
diff --git a/validator_client/validator_services/src/preparation_service.rs b/validator_client/validator_services/src/preparation_service.rs index 827e3355a42..4f06440ee5f 100644 --- a/validator_client/validator_services/src/preparation_service.rs +++ b/validator_client/validator_services/src/preparation_service.rs @@ -1,5 +1,6 @@ use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; use bls::PublicKeyBytes; +use doppelganger_service::DoppelgangerStatus; use environment::RuntimeContext; use parking_lot::RwLock; use slog::{debug, error, info, warn}; @@ -14,7 +15,6 @@ use types::{ Address, ChainSpec, EthSpec, ProposerPreparationData, SignedValidatorRegistrationData, ValidatorRegistrationData, }; -use validator_store::doppelganger_service::DoppelgangerStatus; use validator_store::{Error as ValidatorStoreError, ProposalData, ValidatorStore}; /// Number of epochs before the Bellatrix hard fork to begin posting proposer preparations. diff --git a/validator_client/validator_services/src/sync.rs b/validator_client/validator_services/src/sync.rs index c693b53a03f..4e410fac635 100644 --- a/validator_client/validator_services/src/sync.rs +++ b/validator_client/validator_services/src/sync.rs @@ -1,4 +1,5 @@ use crate::duties_service::{DutiesService, Error}; +use doppelganger_service::DoppelgangerStatus; use futures::future::join_all; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; use slog::{crit, debug, info, warn}; @@ -7,7 +8,6 @@ use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; use std::sync::Arc; use types::{ChainSpec, EthSpec, PublicKeyBytes, Slot, SyncDuty, SyncSelectionProof, SyncSubnetId}; -use validator_store::doppelganger_service::DoppelgangerStatus; use validator_store::Error as ValidatorStoreError; /// Number of epochs in advance to compute selection proofs when not in `distributed` mode. 
diff --git a/validator_client/validator_store/Cargo.toml b/validator_client/validator_store/Cargo.toml index b7679d86c5e..2303b257ace 100644 --- a/validator_client/validator_store/Cargo.toml +++ b/validator_client/validator_store/Cargo.toml @@ -11,6 +11,7 @@ path = "src/lib.rs" [dependencies] account_utils = { workspace = true } beacon_node_fallback = { workspace = true } +doppelganger_service = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } initialized_validators = { workspace = true } @@ -21,8 +22,8 @@ slashing_protection = { workspace = true } slog = { workspace = true } slot_clock = { workspace = true } task_executor = { workspace = true } -types = { workspace = true } tokio = { workspace = true } +types = { workspace = true } validator_metrics = { workspace = true } [dev-dependencies] diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index b317ba15372..d767eb86f1f 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -1,5 +1,5 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; -use doppelganger_service::{DoppelgangerService, DoppelgangerStatus}; +use doppelganger_service::{DoppelgangerService, DoppelgangerStatus, DoppelgangerValidatorStore}; use initialized_validators::InitializedValidators; use parking_lot::{Mutex, RwLock}; use serde::{Deserialize, Serialize}; @@ -22,8 +22,6 @@ use types::{ ValidatorRegistrationData, VoluntaryExit, }; -pub mod doppelganger_service; - #[derive(Debug, PartialEq)] pub enum Error { DoppelgangerProtected(PublicKeyBytes), @@ -96,7 +94,7 @@ pub struct ValidatorStore { slots_per_epoch: u64, } -impl ValidatorStore { +impl DoppelgangerValidatorStore for ValidatorStore { fn get_validator_index(&self, pubkey: &PublicKeyBytes) -> Option { self.validator_index(pubkey) } diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 
36df2568410..7cb05616f47 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -2,28 +2,27 @@ name = "validator_manager" version = "0.1.0" edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +account_utils = { workspace = true } clap = { workspace = true } -types = { workspace = true } +clap_utils = { workspace = true } +derivative = { workspace = true } environment = { workspace = true } +eth2 = { workspace = true } eth2_network_config = { workspace = true } -clap_utils = { workspace = true } eth2_wallet = { workspace = true } -account_utils = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } ethereum_serde_utils = { workspace = true } -tree_hash = { workspace = true } -eth2 = { workspace = true } hex = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } tokio = { workspace = true } -derivative = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } zeroize = { workspace = true } [dev-dependencies] -tempfile = { workspace = true } regex = { workspace = true } +tempfile = { workspace = true } validator_http_api = { workspace = true } diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 2e8821f0db9..3cebc10bb38 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -520,7 +520,7 @@ pub mod tests { let local_validators: Vec = { let contents = - fs::read_to_string(&self.import_config.validators_file_path.unwrap()) + fs::read_to_string(self.import_config.validators_file_path.unwrap()) .unwrap(); serde_json::from_str(&contents).unwrap() }; @@ -557,7 +557,7 @@ pub mod tests { self.vc.ensure_key_cache_consistency().await; let local_keystore: Keystore = - Keystore::from_json_file(&self.import_config.keystore_file_path.unwrap()) + 
Keystore::from_json_file(self.import_config.keystore_file_path.unwrap()) .unwrap(); let list_keystores_response = self.vc.client.get_keystores().await.unwrap().data; diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index c039728e6f8..4d0820f5a8b 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -978,13 +978,13 @@ mod test { }) .unwrap(); // Set all definitions to use the same password path as the primary. - definitions.iter_mut().enumerate().for_each(|(_, def)| { - match &mut def.signing_definition { - SigningDefinition::LocalKeystore { - voting_keystore_password_path: Some(path), - .. - } => *path = primary_path.clone(), - _ => (), + definitions.iter_mut().for_each(|def| { + if let SigningDefinition::LocalKeystore { + voting_keystore_password_path: Some(path), + .. + } = &mut def.signing_definition + { + *path = primary_path.clone() } }) } diff --git a/watch/Cargo.toml b/watch/Cargo.toml index 9e8da3b293b..41cfb58e287 100644 --- a/watch/Cargo.toml +++ b/watch/Cargo.toml @@ -10,37 +10,36 @@ path = "src/lib.rs" [[bin]] name = "watch" path = "src/main.rs" - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +axum = "0.7" +beacon_node = { workspace = true } +bls = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } -log = { workspace = true } +diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } +diesel_migrations = { version = "2.0.0", features = ["postgres"] } env_logger = { workspace = true } -types = { workspace = true } eth2 = { workspace = true } -beacon_node = { workspace = true } -tokio = { workspace = true } -axum = "0.7" hyper = { workspace = true } +log = { workspace = true } +r2d2 = { workspace = true } +rand = { workspace = true } +reqwest = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } -reqwest = { workspace = true 
} -url = { workspace = true } -rand = { workspace = true } -diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } -diesel_migrations = { version = "2.0.0", features = ["postgres"] } -bls = { workspace = true } -r2d2 = { workspace = true } serde_yaml = { workspace = true } +tokio = { workspace = true } +types = { workspace = true } +url = { workspace = true } [dev-dependencies] -tokio-postgres = "0.7.5" -http_api = { workspace = true } beacon_chain = { workspace = true } +http_api = { workspace = true } +logging = { workspace = true } network = { workspace = true } +task_executor = { workspace = true } testcontainers = "0.15" +tokio-postgres = "0.7.5" unused_port = { workspace = true } -task_executor = { workspace = true } -logging = { workspace = true } diff --git a/wordlist.txt b/wordlist.txt new file mode 100644 index 00000000000..6287366cbcb --- /dev/null +++ b/wordlist.txt @@ -0,0 +1,234 @@ +APIs +ARMv +AUR +Backends +Backfilling +Beaconcha +Besu +Broadwell +BIP +BLS +BN +BNs +BTC +BTEC +Casper +CentOS +Chiado +CMake +CoinCashew +Consensys +CORS +CPUs +DBs +DES +DHT +DNS +Dockerhub +DoS +EIP +ENR +Erigon +Esat's +ETH +EthDocker +Ethereum +Ethstaker +Exercism +Extractable +FFG +Geth +Gitcoin +Gnosis +Goerli +Grafana +Holesky +Homebrew +Infura +IPs +IPv +JSON +KeyManager +Kurtosis +LMDB +LLVM +LRU +LTO +Mainnet +MDBX +Merkle +MEV +MSRV +NAT's +Nethermind +NodeJS +NullLogger +PathBuf +PowerShell +PPA +Pre +Proto +PRs +Prysm +QUIC +RasPi +README +RESTful +Reth +RHEL +Ropsten +RPC +Ryzen +Sepolia +Somer +SSD +SSL +SSZ +Styleguide +TCP +Teku +TLS +TODOs +UDP +UI +UPnP +USD +UX +Validator +VC +VCs +VPN +Withdrawable +WSL +YAML +aarch +anonymize +api +attester +backend +backends +backfill +backfilling +beaconcha +bitfield +blockchain +bn +cli +clippy +config +cpu +cryptocurrencies +cryptographic +danksharding +datadir +datadirs +de +decrypt +decrypted +dest +dir +disincentivise +doppelgänger +dropdown +else's +env +eth +ethdo +ethereum +ethstaker +filesystem 
+frontend +gapped +github +graffitis +gwei +hdiffs +homebrew +hostname +html +http +https +hDiff +implementers +interoperable +io +iowait +jemalloc +json +jwt +kb +keymanager +keypair +keypairs +keystore +keystores +linter +linux +localhost +lossy +macOS +mainnet +makefile +mdBook +mev +misconfiguration +mkcert +namespace +natively +nd +ness +nginx +nitty +oom +orging +orgs +os +paul +pem +performant +pid +pre +pubkey +pubkeys +rc +reimport +resync +roadmap +rustfmt +rustup +schemas +sigmaprime +sigp +slashable +slashings +spec'd +src +stakers +subnet +subnets +systemd +testnet +testnets +th +toml +topologies +tradeoffs +transactional +tweakers +ui +unadvanced +unaggregated +unencrypted +unfinalized +untrusted +uptimes +url +validator +validators +validator's +vc +virt +webapp +withdrawable +yaml +yml