diff --git a/chain/chain-primitives/src/error.rs b/chain/chain-primitives/src/error.rs index a3395eb02f2..da7b84564d0 100644 --- a/chain/chain-primitives/src/error.rs +++ b/chain/chain-primitives/src/error.rs @@ -235,6 +235,9 @@ pub enum Error { /// Resharding error. #[error("Resharding Error: {0}")] ReshardingError(String), + /// EpochSyncProof validation error. + #[error("EpochSyncProof Validation Error: {0}")] + InvalidEpochSyncProof(String), /// Anything else #[error("Other Error: {0}")] Other(String), @@ -300,6 +303,7 @@ impl Error { | Error::MaliciousChallenge | Error::IncorrectNumberOfChunkHeaders | Error::InvalidEpochHash + | Error::InvalidEpochSyncProof(_) | Error::InvalidNextBPHash | Error::NotEnoughApprovals | Error::InvalidFinalityInfo @@ -377,6 +381,7 @@ impl Error { Error::MaliciousChallenge => "malicious_challenge", Error::IncorrectNumberOfChunkHeaders => "incorrect_number_of_chunk_headers", Error::InvalidEpochHash => "invalid_epoch_hash", + Error::InvalidEpochSyncProof(_) => "invalid_epoch_sync_proof", Error::InvalidNextBPHash => "invalid_next_bp_hash", Error::NotEnoughApprovals => "not_enough_approvals", Error::InvalidFinalityInfo => "invalid_finality_info", diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index d62835b0c9b..c231faea45d 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -18,7 +18,7 @@ use crate::state_snapshot_actor::SnapshotCallbacks; use crate::stateless_validation::chunk_endorsement::{ validate_chunk_endorsements_in_block, validate_chunk_endorsements_in_header, }; -use crate::store::{ChainStore, ChainStoreAccess, ChainStoreUpdate}; +use crate::store::{ChainStore, ChainStoreAccess, ChainStoreUpdate, MerkleProofAccess}; use crate::types::{ AcceptedBlock, ApplyChunkBlockContext, BlockEconomicsConfig, ChainConfig, RuntimeAdapter, StorageDataSource, @@ -58,9 +58,7 @@ use near_primitives::congestion_info::CongestionInfo; use near_primitives::epoch_block_info::BlockInfo; use 
near_primitives::errors::EpochError; use near_primitives::hash::{hash, CryptoHash}; -use near_primitives::merkle::{ - combine_hash, merklize, verify_path, Direction, MerklePath, MerklePathItem, PartialMerkleTree, -}; +use near_primitives::merkle::{merklize, verify_path, PartialMerkleTree}; use near_primitives::receipt::Receipt; use near_primitives::sandbox::state_patch::SandboxStatePatch; use near_primitives::shard_layout::{account_id_to_shard_id, ShardLayout, ShardUId}; @@ -78,6 +76,7 @@ use near_primitives::stateless_validation::state_witness::{ }; use near_primitives::transaction::{ExecutionOutcomeWithIdAndProof, SignedTransaction}; use near_primitives::types::chunk_extra::ChunkExtra; +use near_primitives::types::validator_stake::ValidatorStake; use near_primitives::types::{ AccountId, Balance, BlockExtra, BlockHeight, BlockHeightDelta, EpochId, Gas, MerkleHash, NumBlocks, ShardId, StateRoot, @@ -586,13 +585,20 @@ impl Chain { last_known_hash: &CryptoHash, ) -> Result { let bps = epoch_manager.get_epoch_block_producers_ordered(&epoch_id, last_known_hash)?; + let validator_stakes = bps.into_iter().map(|(bp, _)| bp).collect_vec(); let protocol_version = epoch_manager.get_epoch_protocol_version(&prev_epoch_id)?; + Self::compute_bp_hash_from_validator_stakes(&validator_stakes, protocol_version) + } + + pub fn compute_bp_hash_from_validator_stakes( + validator_stakes: &Vec, + protocol_version: ProtocolVersion, + ) -> Result { if checked_feature!("stable", BlockHeaderV3, protocol_version) { - let validator_stakes = bps.into_iter().map(|(bp, _)| bp); Ok(CryptoHash::hash_borsh_iter(validator_stakes)) } else { - let validator_stakes = bps.into_iter().map(|(bp, _)| bp.into_v1()); - Ok(CryptoHash::hash_borsh_iter(validator_stakes)) + let stakes = validator_stakes.into_iter().map(|stake| stake.clone().into_v1()); + Ok(CryptoHash::hash_borsh_iter(stakes)) } } @@ -1521,6 +1527,16 @@ impl Chain { (accepted_blocks, errors) } + fn chain_update(&mut self) -> ChainUpdate { + 
ChainUpdate::new( + &mut self.chain_store, + self.epoch_manager.clone(), + self.runtime_adapter.clone(), + self.doomslug_threshold_mode, + self.transaction_validity_period, + ) + } + /// Process challenge to invalidate chain. This is done between blocks to unroll the chain as /// soon as possible and allow next block producer to skip invalid blocks. pub fn process_challenge(&mut self, challenge: &Challenge) { @@ -4011,186 +4027,16 @@ fn get_should_apply_chunk( } } -/// Implement block merkle proof retrieval. -impl Chain { - fn combine_maybe_hashes( - hash1: Option, - hash2: Option, - ) -> Option { - match (hash1, hash2) { - (Some(h1), Some(h2)) => Some(combine_hash(&h1, &h2)), - (Some(h1), None) => Some(h1), - (None, Some(_)) => { - debug_assert!(false, "Inconsistent state in merkle proof computation: left node is None but right node exists"); - None - } - _ => None, - } - } - - fn chain_update(&mut self) -> ChainUpdate { - ChainUpdate::new( - &mut self.chain_store, - self.epoch_manager.clone(), - self.runtime_adapter.clone(), - self.doomslug_threshold_mode, - self.transaction_validity_period, - ) - } - - /// Get node at given position (index, level). If the node does not exist, return `None`. - fn get_merkle_tree_node( - &self, - index: u64, - level: u64, - counter: u64, - tree_size: u64, - tree_nodes: &mut HashMap<(u64, u64), Option>, - ) -> Result, Error> { - if let Some(hash) = tree_nodes.get(&(index, level)) { - Ok(*hash) - } else { - if level == 0 { - let maybe_hash = if index >= tree_size { - None - } else { - Some(self.chain_store().get_block_hash_from_ordinal(index)?) 
- }; - tree_nodes.insert((index, level), maybe_hash); - Ok(maybe_hash) - } else { - let cur_tree_size = (index + 1) * counter; - let maybe_hash = if cur_tree_size > tree_size { - if index * counter <= tree_size { - let left_hash = self.get_merkle_tree_node( - index * 2, - level - 1, - counter / 2, - tree_size, - tree_nodes, - )?; - let right_hash = self.reconstruct_merkle_tree_node( - index * 2 + 1, - level - 1, - counter / 2, - tree_size, - tree_nodes, - )?; - Self::combine_maybe_hashes(left_hash, right_hash) - } else { - None - } - } else { - Some( - *self - .chain_store() - .get_block_merkle_tree_from_ordinal(cur_tree_size)? - .get_path() - .last() - .ok_or_else(|| Error::Other("Merkle tree node missing".to_string()))?, - ) - }; - tree_nodes.insert((index, level), maybe_hash); - Ok(maybe_hash) - } - } - } - - /// Reconstruct node at given position (index, level). If the node does not exist, return `None`. - fn reconstruct_merkle_tree_node( +impl MerkleProofAccess for Chain { + fn get_block_merkle_tree( &self, - index: u64, - level: u64, - counter: u64, - tree_size: u64, - tree_nodes: &mut HashMap<(u64, u64), Option>, - ) -> Result, Error> { - if let Some(hash) = tree_nodes.get(&(index, level)) { - Ok(*hash) - } else { - if level == 0 { - let maybe_hash = if index >= tree_size { - None - } else { - Some(self.chain_store().get_block_hash_from_ordinal(index)?) 
- }; - tree_nodes.insert((index, level), maybe_hash); - Ok(maybe_hash) - } else { - let left_hash = self.get_merkle_tree_node( - index * 2, - level - 1, - counter / 2, - tree_size, - tree_nodes, - )?; - let right_hash = self.reconstruct_merkle_tree_node( - index * 2 + 1, - level - 1, - counter / 2, - tree_size, - tree_nodes, - )?; - let maybe_hash = Self::combine_maybe_hashes(left_hash, right_hash); - tree_nodes.insert((index, level), maybe_hash); - - Ok(maybe_hash) - } - } + block_hash: &CryptoHash, + ) -> Result, Error> { + ChainStoreAccess::get_block_merkle_tree(self.chain_store(), block_hash) } - /// Get merkle proof for block with hash `block_hash` in the merkle tree of `head_block_hash`. - pub fn get_block_proof( - &self, - block_hash: &CryptoHash, - head_block_hash: &CryptoHash, - ) -> Result { - let leaf_index = self.chain_store().get_block_merkle_tree(block_hash)?.size(); - let tree_size = self.chain_store().get_block_merkle_tree(head_block_hash)?.size(); - if leaf_index >= tree_size { - if block_hash == head_block_hash { - // special case if the block to prove is the same as head - return Ok(vec![]); - } - return Err(Error::Other(format!( - "block {} is ahead of head block {}", - block_hash, head_block_hash - ))); - } - let mut level = 0; - let mut counter = 1; - let mut cur_index = leaf_index; - let mut path = vec![]; - let mut tree_nodes = HashMap::new(); - let mut iter = tree_size; - while iter > 1 { - if cur_index % 2 == 0 { - cur_index += 1 - } else { - cur_index -= 1; - } - let direction = if cur_index % 2 == 0 { Direction::Left } else { Direction::Right }; - let maybe_hash = if cur_index % 2 == 1 { - // node not immediately available. Needs to be reconstructed - self.reconstruct_merkle_tree_node( - cur_index, - level, - counter, - tree_size, - &mut tree_nodes, - )? - } else { - self.get_merkle_tree_node(cur_index, level, counter, tree_size, &mut tree_nodes)? 
- }; - if let Some(hash) = maybe_hash { - path.push(MerklePathItem { hash, direction }); - } - cur_index /= 2; - iter = (iter + 1) / 2; - level += 1; - counter *= 2; - } - Ok(path) + fn get_block_hash_from_ordinal(&self, block_ordinal: NumBlocks) -> Result { + ChainStoreAccess::get_block_hash_from_ordinal(self.chain_store(), block_ordinal) + } } diff --git a/chain/chain/src/lib.rs b/chain/chain/src/lib.rs index fb228431928..14f95262fbc 100644 --- a/chain/chain/src/lib.rs +++ b/chain/chain/src/lib.rs @@ -7,7 +7,9 @@ pub use doomslug::{Doomslug, DoomslugBlockProductionReadiness, DoomslugThreshold pub use lightclient::{create_light_client_block_view, get_epoch_block_producers_view}; pub use near_chain_primitives::{self, Error}; pub use near_primitives::receipt::ReceiptResult; -pub use store::{ChainStore, ChainStoreAccess, ChainStoreUpdate, LatestWitnessesInfo}; +pub use store::{ + ChainStore, ChainStoreAccess, ChainStoreUpdate, LatestWitnessesInfo, MerkleProofAccess, +}; pub use store_validator::{ErrorMessage, StoreValidator}; pub use types::{Block, BlockHeader, BlockStatus, ChainGenesis, LatestKnown, Provenance}; diff --git a/chain/chain/src/store/merkle_proof.rs b/chain/chain/src/store/merkle_proof.rs new file mode 100644 index 00000000000..d664ca975b4 --- /dev/null +++ b/chain/chain/src/store/merkle_proof.rs @@ -0,0 +1,228 @@ +use std::{collections::HashMap, sync::Arc}; + +use near_chain_primitives::Error; +use near_primitives::{ + hash::CryptoHash, + merkle::{combine_hash, Direction, MerklePath, MerklePathItem, PartialMerkleTree}, + types::{MerkleHash, NumBlocks}, + utils::index_to_bytes, +}; +use near_store::{DBCol, Store}; + +/// Implement block merkle proof retrieval. +/// +/// The logic was originally a part of `Chain` implementation. +/// This trait is introduced because we want to support `Store` when we don't have the `ChainStore`, +/// but we want to support `ChainStore` if we have it so we can make use of the caches.
+pub trait MerkleProofAccess { + fn get_block_merkle_tree( + &self, + block_hash: &CryptoHash, + ) -> Result, Error>; + + fn get_block_hash_from_ordinal(&self, block_ordinal: NumBlocks) -> Result; + + /// Get merkle proof for block with hash `block_hash` in the merkle tree of `head_block_hash`. + fn compute_past_block_proof_in_merkle_tree_of_later_block( + &self, + block_hash: &CryptoHash, + head_block_hash: &CryptoHash, + ) -> Result { + let leaf_index = self.get_block_merkle_tree(block_hash)?.size(); + let tree_size = self.get_block_merkle_tree(head_block_hash)?.size(); + if leaf_index >= tree_size { + if block_hash == head_block_hash { + // special case if the block to prove is the same as head + return Ok(vec![]); + } + return Err(Error::Other(format!( + "block {} is ahead of head block {}", + block_hash, head_block_hash + ))); + } + let mut level = 0; + let mut counter = 1; + let mut cur_index = leaf_index; + let mut path = vec![]; + let mut tree_nodes = HashMap::new(); + let mut iter = tree_size; + while iter > 1 { + if cur_index % 2 == 0 { + cur_index += 1 + } else { + cur_index -= 1; + } + let direction = if cur_index % 2 == 0 { Direction::Left } else { Direction::Right }; + let maybe_hash = if cur_index % 2 == 1 { + // node not immediately available. Needs to be reconstructed + reconstruct_merkle_tree_node( + self, + cur_index, + level, + counter, + tree_size, + &mut tree_nodes, + )? + } else { + get_merkle_tree_node(self, cur_index, level, counter, tree_size, &mut tree_nodes)? 
+ }; + if let Some(hash) = maybe_hash { + path.push(MerklePathItem { hash, direction }); + } + cur_index /= 2; + iter = (iter + 1) / 2; + level += 1; + counter *= 2; + } + Ok(path) + } +} + +fn get_block_merkle_tree_from_ordinal( + this: &(impl MerkleProofAccess + ?Sized), + block_ordinal: NumBlocks, +) -> Result, Error> { + let block_hash = this.get_block_hash_from_ordinal(block_ordinal)?; + this.get_block_merkle_tree(&block_hash) +} + +/// Get node at given position (index, level). If the node does not exist, return `None`. +fn get_merkle_tree_node( + this: &(impl MerkleProofAccess + ?Sized), + index: u64, + level: u64, + counter: u64, + tree_size: u64, + tree_nodes: &mut HashMap<(u64, u64), Option>, +) -> Result, Error> { + if let Some(hash) = tree_nodes.get(&(index, level)) { + Ok(*hash) + } else { + if level == 0 { + let maybe_hash = if index >= tree_size { + None + } else { + Some(this.get_block_hash_from_ordinal(index)?) + }; + tree_nodes.insert((index, level), maybe_hash); + Ok(maybe_hash) + } else { + let cur_tree_size = (index + 1) * counter; + let maybe_hash = if cur_tree_size > tree_size { + if index * counter <= tree_size { + let left_hash = get_merkle_tree_node( + this, + index * 2, + level - 1, + counter / 2, + tree_size, + tree_nodes, + )?; + let right_hash = reconstruct_merkle_tree_node( + this, + index * 2 + 1, + level - 1, + counter / 2, + tree_size, + tree_nodes, + )?; + combine_maybe_hashes(left_hash, right_hash) + } else { + None + } + } else { + Some( + *get_block_merkle_tree_from_ordinal(this, cur_tree_size)? + .get_path() + .last() + .ok_or_else(|| Error::Other("Merkle tree node missing".to_string()))?, + ) + }; + tree_nodes.insert((index, level), maybe_hash); + Ok(maybe_hash) + } + } +} + +/// Reconstruct node at given position (index, level). If the node does not exist, return `None`. 
+fn reconstruct_merkle_tree_node( + this: &(impl MerkleProofAccess + ?Sized), + index: u64, + level: u64, + counter: u64, + tree_size: u64, + tree_nodes: &mut HashMap<(u64, u64), Option>, +) -> Result, Error> { + if let Some(hash) = tree_nodes.get(&(index, level)) { + Ok(*hash) + } else { + if level == 0 { + let maybe_hash = if index >= tree_size { + None + } else { + Some(this.get_block_hash_from_ordinal(index)?) + }; + tree_nodes.insert((index, level), maybe_hash); + Ok(maybe_hash) + } else { + let left_hash = get_merkle_tree_node( + this, + index * 2, + level - 1, + counter / 2, + tree_size, + tree_nodes, + )?; + let right_hash = reconstruct_merkle_tree_node( + this, + index * 2 + 1, + level - 1, + counter / 2, + tree_size, + tree_nodes, + )?; + let maybe_hash = combine_maybe_hashes(left_hash, right_hash); + tree_nodes.insert((index, level), maybe_hash); + + Ok(maybe_hash) + } + } +} + +fn combine_maybe_hashes( + hash1: Option, + hash2: Option, +) -> Option { + match (hash1, hash2) { + (Some(h1), Some(h2)) => Some(combine_hash(&h1, &h2)), + (Some(h1), None) => Some(h1), + (None, Some(_)) => { + debug_assert!(false, "Inconsistent state in merkle proof computation: left node is None but right node exists"); + None + } + _ => None, + } +} + +impl MerkleProofAccess for Store { + fn get_block_merkle_tree( + &self, + block_hash: &CryptoHash, + ) -> Result, Error> { + match self.get_ser::( + DBCol::BlockMerkleTree, + &borsh::to_vec(&block_hash).unwrap(), + )? 
{ + Some(block_merkle_tree) => Ok(Arc::new(block_merkle_tree)), + None => { + Err(Error::Other(format!("Could not find merkle proof for block {}", block_hash))) + } + } + } + + fn get_block_hash_from_ordinal(&self, block_ordinal: NumBlocks) -> Result { + self.get_ser::(DBCol::BlockOrdinal, &index_to_bytes(block_ordinal))?.ok_or( + Error::Other(format!("Could not find block hash from ordinal {}", block_ordinal)), + ) + } +} diff --git a/chain/chain/src/store/mod.rs b/chain/chain/src/store/mod.rs index 68286d07cb0..40c38a55a00 100644 --- a/chain/chain/src/store/mod.rs +++ b/chain/chain/src/store/mod.rs @@ -56,7 +56,9 @@ use near_store::db::{StoreStatistics, STATE_SYNC_DUMP_KEY}; use std::sync::Arc; mod latest_witnesses; +mod merkle_proof; pub use latest_witnesses::LatestWitnessesInfo; +pub use merkle_proof::MerkleProofAccess; /// lru cache size #[cfg(not(feature = "no_cache"))] @@ -331,14 +333,6 @@ pub trait ChainStoreAccess { fn get_block_hash_from_ordinal(&self, block_ordinal: NumBlocks) -> Result; - fn get_block_merkle_tree_from_ordinal( - &self, - block_ordinal: NumBlocks, - ) -> Result, Error> { - let block_hash = self.get_block_hash_from_ordinal(block_ordinal)?; - self.get_block_merkle_tree(&block_hash) - } - fn is_height_processed(&self, height: BlockHeight) -> Result; fn get_block_height(&self, hash: &CryptoHash) -> Result { diff --git a/chain/chain/src/test_utils/kv_runtime.rs b/chain/chain/src/test_utils/kv_runtime.rs index 409a0c6b773..3679aefeb4c 100644 --- a/chain/chain/src/test_utils/kv_runtime.rs +++ b/chain/chain/src/test_utils/kv_runtime.rs @@ -947,6 +947,17 @@ impl EpochManagerAdapter for MockEpochManager { Ok(true) } + fn verify_approval_with_approvers_info( + &self, + _prev_block_hash: &CryptoHash, + _prev_block_height: BlockHeight, + _block_height: BlockHeight, + _approvals: &[Option>], + _info: Vec<(ApprovalStake, bool)>, + ) -> Result { + Ok(true) + } + fn verify_approvals_and_threshold_orphan( &self, epoch_id: &EpochId, diff --git 
a/chain/client/src/sync/epoch.rs b/chain/client/src/sync/epoch.rs index e7be21c4f5a..d6d2a39365c 100644 --- a/chain/client/src/sync/epoch.rs +++ b/chain/client/src/sync/epoch.rs @@ -1,6 +1,7 @@ use crate::client_actor::ClientActorInner; use crate::metrics; use borsh::BorshDeserialize; +use itertools::Itertools; use near_async::futures::{AsyncComputationSpawner, AsyncComputationSpawnerExt}; use near_async::messaging::{CanSend, Handler}; use near_async::time::Clock; @@ -21,11 +22,14 @@ use near_primitives::epoch_sync::{ CompressedEpochSyncProof, EpochSyncProof, EpochSyncProofCurrentEpochData, EpochSyncProofLastEpochData, EpochSyncProofPastEpochData, }; +use near_primitives::hash::CryptoHash; use near_primitives::merkle::PartialMerkleTree; use near_primitives::network::PeerId; +use near_primitives::types::validator_stake::ValidatorStake; use near_primitives::types::{BlockHeight, EpochId}; -use near_primitives::utils::compression::CompressedData; +use near_primitives::utils::{compression::CompressedData, index_to_bytes}; use near_store::{DBCol, Store, FINAL_HEAD_KEY}; +use near_vm_runner::logic::ProtocolVersion; use rand::seq::SliceRandom; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use std::collections::HashMap; @@ -241,22 +245,24 @@ impl EpochSync { )? .ok_or_else(|| Error::Other("Could not find first block of next epoch".to_string()))?; - let first_block_info_of_current_epoch = store - .get_ser::( - DBCol::BlockInfo, - block_info_for_final_block_of_current_epoch.epoch_first_block().as_bytes(), - )? - .ok_or_else(|| { - Error::Other("Could not find first block info of next epoch".to_string()) - })?; + // TODO(#12255) That currently does not work because we might need some old block hashes + // in order to build the merkle proof. 
+ // let merkle_proof_for_first_block_of_current_epoch = store + // .compute_past_block_proof_in_merkle_tree_of_later_block( + // first_block_of_current_epoch.hash(), + // final_block_header_in_current_epoch.hash(), + // )?; + let merkle_proof_for_first_block_of_current_epoch = Default::default(); - let merkle_proof_for_first_block_of_current_epoch = store + let partial_merkle_tree_for_first_block_of_current_epoch = store .get_ser::( DBCol::BlockMerkleTree, first_block_of_current_epoch.hash().as_bytes(), )? .ok_or_else(|| { - Error::Other("Could not find merkle proof for first block".to_string()) + Error::Other( + "Could not find partial merkle tree for first block of next epoch".to_string(), + ) })?; let all_past_epochs_including_old_proof = existing_epoch_sync_proof @@ -280,10 +286,11 @@ impl EpochSync { }, current_epoch: EpochSyncProofCurrentEpochData { first_block_header_in_epoch: first_block_of_current_epoch, - first_block_info_in_epoch: first_block_info_of_current_epoch, last_block_header_in_prev_epoch: last_block_of_prev_epoch, second_last_block_header_in_prev_epoch: second_last_block_of_prev_epoch, merkle_proof_for_first_block: merkle_proof_for_first_block_of_current_epoch, + partial_merkle_tree_for_first_block: + partial_merkle_tree_for_first_block_of_current_epoch, }, }; @@ -395,6 +402,7 @@ impl EpochSync { .collect(), last_final_block_header: third_last_block_header, approvals_for_last_final_block: second_last_block_header.approvals().to_vec(), + protocol_version: epoch_info.protocol_version(), }) }) .collect::, _>>()?; @@ -479,7 +487,7 @@ impl EpochSync { return Ok(()); } - // TODO(#11932): Verify the proof. + self.verify_proof(&proof, epoch_manager)?; let mut store_update = chain.chain_store.store().store_update(); @@ -519,15 +527,27 @@ impl EpochSync { proof.last_epoch.next_next_epoch_info, )?; + // At this point `update` contains headers of 3 last blocks of last past epoch + // and header of the first block of current epoch. 
+ // At least the third last block of last past epoch is final. + // It means that `update` contains header of last final block of the first block of current epoch. + let last_header_last_finalized_height = + update.get_block_header(last_header.last_final_block())?.height(); + let mut first_block_info_in_epoch = + BlockInfo::from_header(&last_header, last_header_last_finalized_height); + // We need to populate fields below manually, as they are set to defaults by `BlockInfo::from_header`. + *first_block_info_in_epoch.epoch_first_block_mut() = *last_header.hash(); + *first_block_info_in_epoch.epoch_id_mut() = *last_header.epoch_id(); + store_update.insert_ser( DBCol::BlockInfo, - &borsh::to_vec(proof.current_epoch.first_block_info_in_epoch.hash()).unwrap(), - &proof.current_epoch.first_block_info_in_epoch, + &borsh::to_vec(first_block_info_in_epoch.hash()).unwrap(), + &first_block_info_in_epoch, )?; store_update.set_ser( DBCol::BlockOrdinal, - &borsh::to_vec(&proof.current_epoch.merkle_proof_for_first_block.size()).unwrap(), + &index_to_bytes(proof.current_epoch.partial_merkle_tree_for_first_block.size()), last_header.hash(), )?; @@ -540,7 +560,7 @@ impl EpochSync { store_update.set_ser( DBCol::BlockMerkleTree, last_header.hash().as_bytes(), - &proof.current_epoch.merkle_proof_for_first_block, + &proof.current_epoch.partial_merkle_tree_for_first_block, )?; update.merge(store_update); @@ -551,6 +571,168 @@ impl EpochSync { Ok(()) } + + // Verify EpochSyncProof + fn verify_proof( + &self, + proof: &EpochSyncProof, + epoch_manager: &dyn EpochManagerAdapter, + ) -> Result<(), Error> { + let EpochSyncProof { past_epochs, last_epoch, current_epoch } = proof; + if past_epochs.is_empty() { + return Err(Error::InvalidEpochSyncProof("empty past_epochs".to_string())); + } + + // Verify block producer handoff to the first epoch after genesis + let genesis_protocol_version = + epoch_manager.get_epoch_protocol_version(self.genesis.epoch_id())?; + 
Self::verify_block_producer_handoff( + past_epochs.first().unwrap(), + genesis_protocol_version, + self.genesis.next_bp_hash(), + epoch_manager, + )?; + + // Verify block producer handoff between all past epochs + for epoch_index in 1..past_epochs.len() { + let epoch = &past_epochs[epoch_index]; + let prev_epoch = &past_epochs[epoch_index - 1]; + Self::verify_block_producer_handoff( + epoch, + prev_epoch.protocol_version, + prev_epoch.last_final_block_header.next_bp_hash(), + epoch_manager, + )?; + } + + Self::verify_epoch_sync_data_hash( + &last_epoch, + &past_epochs.last().unwrap().block_producers, + epoch_manager, + )?; + + Self::verify_current_epoch_data( + current_epoch, + &last_epoch.final_block_header_in_next_epoch, + )?; + Ok(()) + } + + fn verify_current_epoch_data( + current_epoch: &EpochSyncProofCurrentEpochData, + _final_block_header: &BlockHeader, + ) -> Result<(), Error> { + // Verify first_block_header_in_epoch + let first_block_header = ¤t_epoch.first_block_header_in_epoch; + // TODO(#12255) Uncomment the check below when `merkle_proof_for_first_block` is generated. + // if !merkle::verify_hash( + // *final_block_header.block_merkle_root(), + // ¤t_epoch.merkle_proof_for_first_block, + // *first_block_header.hash(), + // ) { + // return Err(Error::InvalidEpochSyncProof( + // "invalid merkle_proof_for_first_block".to_string(), + // )); + // } + + // Verify partial_merkle_tree_for_first_block + if current_epoch.partial_merkle_tree_for_first_block.root() + != *first_block_header.block_merkle_root() + { + return Err(Error::InvalidEpochSyncProof( + "invalid path in partial_merkle_tree_for_first_block".to_string(), + )); + } + // TODO(#12256) Investigate why "+1" was needed here, looks like it should not be there. 
+ if current_epoch.partial_merkle_tree_for_first_block.size() + 1 + != first_block_header.block_ordinal() + { + return Err(Error::InvalidEpochSyncProof( + "invalid size in partial_merkle_tree_for_first_block".to_string(), + )); + } + + Ok(()) + } + + fn verify_epoch_sync_data_hash( + last_epoch: &EpochSyncProofLastEpochData, + last_epoch_block_producers: &Vec, + epoch_manager: &dyn EpochManagerAdapter, + ) -> Result<(), Error> { + // Verify epoch_sync_data_hash matches final_block_header_in_next_epoch.epoch_sync_data_hash + let _epoch_sync_data_hash = CryptoHash::hash_borsh(&( + &last_epoch.first_block_in_epoch, + &last_epoch.second_last_block_in_epoch, + &last_epoch.last_block_in_epoch, + &last_epoch.epoch_info, + &last_epoch.next_epoch_info, + &last_epoch.next_next_epoch_info, + )); + // TODO(#12258) This currently fails because `epoch_sync_data_hash` is missing in `final_block_header_in_next_epoch`. + // let expected_epoch_sync_data_hash = last_epoch + // .final_block_header_in_next_epoch + // .epoch_sync_data_hash() + // .ok_or(Error::InvalidEpochSyncProof("missing epoch_sync_data_hash".to_string()))?; + // if epoch_sync_data_hash != expected_epoch_sync_data_hash { + // return Err(Error::InvalidEpochSyncProof("invalid epoch_sync_data_hash".to_string())); + // } + + // Verify final_block_header_in_next_epoch + let approvers_info = last_epoch_block_producers + .iter() + .map(|validator| (validator.get_approval_stake(false), false)) + .collect_vec(); + let block_header = &last_epoch.final_block_header_in_next_epoch; + if !epoch_manager.verify_approval_with_approvers_info( + block_header.hash(), + block_header.height(), + block_header.height() + 1, + &last_epoch.approvals_for_final_block_in_next_epoch, + approvers_info, + )? 
{ + return Err(Error::InvalidEpochSyncProof( + "invalid final_block_header_in_next_epoch".to_string(), + )); + } + Ok(()) + } + + fn verify_block_producer_handoff( + epoch: &EpochSyncProofPastEpochData, + prev_epoch_protocol_version: ProtocolVersion, + prev_epoch_next_bp_hash: &CryptoHash, + epoch_manager: &dyn EpochManagerAdapter, + ) -> Result<(), Error> { + // Verify epoch.block_producers + let bp_hash = Chain::compute_bp_hash_from_validator_stakes( + &epoch.block_producers, + prev_epoch_protocol_version, + )?; + if bp_hash != *prev_epoch_next_bp_hash { + return Err(Error::InvalidEpochSyncProof("invalid block_producers".to_string())); + } + + // Verify epoch.last_final_block_header + let last_final_block_header = &epoch.last_final_block_header; + let approvers_info = epoch + .block_producers + .iter() + .map(|validator| (validator.get_approval_stake(false), false)) + .collect_vec(); + if !epoch_manager.verify_approval_with_approvers_info( + last_final_block_header.hash(), + last_final_block_header.height(), + last_final_block_header.height() + 1, + &epoch.approvals_for_last_final_block, + approvers_info, + )? 
{ + return Err(Error::InvalidEpochSyncProof( + "invalid last_final_block_header".to_string(), + )); + } + Ok(()) + } } impl Handler for ClientActorInner { diff --git a/chain/client/src/view_client_actor.rs b/chain/client/src/view_client_actor.rs index 33f66d08d72..d91d956a243 100644 --- a/chain/client/src/view_client_actor.rs +++ b/chain/client/src/view_client_actor.rs @@ -12,7 +12,9 @@ use near_async::time::{Clock, Duration, Instant}; use near_chain::types::{RuntimeAdapter, Tip}; use near_chain::{ get_epoch_block_producers_view, Chain, ChainGenesis, ChainStoreAccess, DoomslugThresholdMode, + MerkleProofAccess, }; + use near_chain_configs::{ClientConfig, MutableValidatorSigner, ProtocolConfigView}; use near_chain_primitives::error::EpochErrorResultToChainError; use near_client_primitives::types::{ @@ -1194,7 +1196,10 @@ impl Handler for ViewClientActorInner { let head_block_header = self.chain.get_block_header(&msg.head_block_hash)?; self.chain.check_blocks_final_and_canonical(&[block_header.clone(), head_block_header])?; let block_header_lite = block_header.into(); - let proof = self.chain.get_block_proof(&msg.block_hash, &msg.head_block_hash)?; + let proof = self.chain.compute_past_block_proof_in_merkle_tree_of_later_block( + &msg.block_hash, + &msg.head_block_hash, + )?; Ok(GetBlockProofResponse { block_header_lite, proof }) } } diff --git a/chain/epoch-manager/src/adapter.rs b/chain/epoch-manager/src/adapter.rs index b88c6da90a5..647be9badc5 100644 --- a/chain/epoch-manager/src/adapter.rs +++ b/chain/epoch-manager/src/adapter.rs @@ -423,6 +423,16 @@ pub trait EpochManagerAdapter: Send + Sync { approvals: &[Option>], ) -> Result; + /// Verify aggregated bls signature given block approvers info + fn verify_approval_with_approvers_info( + &self, + prev_block_hash: &CryptoHash, + prev_block_height: BlockHeight, + block_height: BlockHeight, + approvals: &[Option>], + info: Vec<(ApprovalStake, bool)>, + ) -> Result; + /// Verify approvals and check threshold, but 
ignore next epoch approvals and slashing fn verify_approvals_and_threshold_orphan( &self, @@ -1022,6 +1032,23 @@ impl EpochManagerAdapter for EpochManagerHandle { let epoch_manager = self.read(); epoch_manager.get_all_block_approvers_ordered(prev_block_hash)? }; + self.verify_approval_with_approvers_info( + prev_block_hash, + prev_block_height, + block_height, + approvals, + info, + ) + } + + fn verify_approval_with_approvers_info( + &self, + prev_block_hash: &CryptoHash, + prev_block_height: BlockHeight, + block_height: BlockHeight, + approvals: &[Option>], + info: Vec<(ApprovalStake, bool)>, + ) -> Result { if approvals.len() > info.len() { return Ok(false); } diff --git a/core/primitives/src/epoch_sync.rs b/core/primitives/src/epoch_sync.rs index d7299cebbbb..2e8aa5cd77c 100644 --- a/core/primitives/src/epoch_sync.rs +++ b/core/primitives/src/epoch_sync.rs @@ -1,12 +1,13 @@ -use crate::block_header::BlockHeader; use crate::epoch_block_info::BlockInfo; use crate::epoch_info::EpochInfo; use crate::merkle::PartialMerkleTree; use crate::types::validator_stake::ValidatorStake; use crate::utils::compression::CompressedData; +use crate::{block_header::BlockHeader, merkle::MerklePathItem}; use borsh::{BorshDeserialize, BorshSerialize}; use bytesize::ByteSize; use near_crypto::Signature; +use near_primitives_core::types::ProtocolVersion; use near_schema_checker_lib::ProtocolSchema; use std::fmt::Debug; @@ -74,6 +75,9 @@ pub struct EpochSyncProofPastEpochData { /// Since it has a consecutive height from the final block, the approvals are guaranteed to /// be endorsements which directly endorse the final block. pub approvals_for_last_final_block: Vec>>, + /// Protocol version for this epoch. This is verified together with `block_producers` + /// against the `next_bp_hash` of the `last_final_block_header` of the epoch before this. + pub protocol_version: ProtocolVersion, } /// Data needed to initialize the epoch sync boundary. 
@@ -105,14 +109,17 @@ pub struct EpochSyncProofCurrentEpochData { /// to prove this like the other cases, because the first block header may not have a /// consecutive height afterwards. pub first_block_header_in_epoch: BlockHeader, - // TODO(#11932): can this be proven or derived? - pub first_block_info_in_epoch: BlockInfo, // The last two block headers are also needed for various purposes after epoch sync. // TODO(#11931): do we really need these? + // TODO(#12259) These 2 fields are currently unverified. pub last_block_header_in_prev_epoch: BlockHeader, pub second_last_block_header_in_prev_epoch: BlockHeader, - // TODO(#11932): I'm not sure if this can be used to prove the block against the merkle root - // included in the final block in this next epoch (included in LastEpochData). We may need to - // include another merkle proof. - pub merkle_proof_for_first_block: PartialMerkleTree, + // Used to prove the block against the merkle root + // included in the final block in this next epoch (included in LastEpochData). + // TODO(#12255) This field is currently ungenerated and unverified. + pub merkle_proof_for_first_block: Vec, + // Partial merkle tree for the first block in this next epoch. + // It is necessary and sufficient to calculate the merkle roots of subsequent blocks. + // It is proven using `first_block_header_in_epoch`.
+ pub partial_merkle_tree_for_first_block: PartialMerkleTree, } diff --git a/integration-tests/src/test_loop/tests/epoch_sync.rs b/integration-tests/src/test_loop/tests/epoch_sync.rs index 68d5178c6c4..ba7642b53ab 100644 --- a/integration-tests/src/test_loop/tests/epoch_sync.rs +++ b/integration-tests/src/test_loop/tests/epoch_sync.rs @@ -315,7 +315,7 @@ fn sanity_check_epoch_sync_proof( (final_head_height - genesis_config.genesis_height - 1) / genesis_config.epoch_length + 1; let expected_current_epoch_height = epoch_height_of_final_block - 1; assert_eq!( - proof.current_epoch.first_block_info_in_epoch.height(), + proof.current_epoch.first_block_header_in_epoch.height(), genesis_config.genesis_height + (expected_current_epoch_height - 1) * genesis_config.epoch_length + 1 diff --git a/integration-tests/src/tests/client/process_blocks.rs b/integration-tests/src/tests/client/process_blocks.rs index 8838492f313..a8b52371b87 100644 --- a/integration-tests/src/tests/client/process_blocks.rs +++ b/integration-tests/src/tests/client/process_blocks.rs @@ -12,8 +12,8 @@ use near_async::time::{Clock, Duration}; use near_chain::test_utils::ValidatorSchedule; use near_chain::types::{LatestKnown, RuntimeAdapter}; use near_chain::validate::validate_chunk_with_chunk_extra; -use near_chain::ChainStore; use near_chain::{Block, BlockProcessingArtifact, ChainStoreAccess, Error, Provenance}; +use near_chain::{ChainStore, MerkleProofAccess}; use near_chain_configs::test_utils::{TESTING_INIT_BALANCE, TESTING_INIT_STAKE}; use near_chain_configs::{Genesis, GenesisConfig, DEFAULT_GC_NUM_EPOCHS_TO_KEEP, NEAR_BASE}; use near_client::test_utils::{ @@ -2064,7 +2064,10 @@ fn test_block_merkle_proof_with_len(n: NumBlocks, rng: &mut StdRng) { } } for block in blocks { - let proof = env.clients[0].chain.get_block_proof(block.hash(), head.hash()).unwrap(); + let proof = env.clients[0] + .chain + .compute_past_block_proof_in_merkle_tree_of_later_block(block.hash(), head.hash()) + .unwrap(); 
assert!(verify_hash(*root, &proof, *block.hash())); } } @@ -2081,8 +2084,13 @@ fn test_block_merkle_proof() { fn test_block_merkle_proof_same_hash() { let env = TestEnv::default_builder().mock_epoch_managers().build(); let genesis_block = env.clients[0].chain.get_block_by_height(0).unwrap(); - let proof = - env.clients[0].chain.get_block_proof(genesis_block.hash(), genesis_block.hash()).unwrap(); + let proof = env.clients[0] + .chain + .compute_past_block_proof_in_merkle_tree_of_later_block( + genesis_block.hash(), + genesis_block.hash(), + ) + .unwrap(); assert!(proof.is_empty()); }