Skip to content

Commit

Permalink
fix: sync tests
Browse files Browse the repository at this point in the history
  • Loading branch information
merklefruit committed Feb 25, 2024
1 parent b4c8a6f commit 800b2f4
Show file tree
Hide file tree
Showing 12 changed files with 74 additions and 49 deletions.
8 changes: 7 additions & 1 deletion bin/magi.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ use serde::Serialize;
#[tokio::main]
async fn main() -> Result<()> {
let cli = Cli::parse();
let sync_mode = cli.sync_mode.clone();
let sync_mode = cli.sync_mode;
let verbose = cli.verbose;
let logs_dir = cli.logs_dir.clone();
let logs_rotation = cli.logs_rotation.clone();
Expand All @@ -25,6 +25,12 @@ async fn main() -> Result<()> {
let _guards = telemetry::init(verbose, logs_dir, logs_rotation);
metrics::init()?;

tracing::info!(
target: "magi",
"Starting Magi. sync mode={}, network={}",
sync_mode, config.chain.network
);

let runner = Runner::from_config(config)
.with_sync_mode(sync_mode)
.with_checkpoint_hash(checkpoint_hash);
Expand Down
4 changes: 2 additions & 2 deletions docker/start-magi.sh
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ then
--jwt-secret $JWT_SECRET \
--l1-rpc-url $L1_RPC_URL \
--l1-beacon-url $L1_BEACON_RPC_URL \
--l2-rpc-url http://${EXECUTION_CLIENT}:8545 \
--l2-engine-url http://${EXECUTION_CLIENT}:8551 \
--l2-rpc-url http://${EXECUTION_CLIENT}:8565 \
--l2-engine-url http://${EXECUTION_CLIENT}:8561 \
--rpc-port $RPC_PORT \
$DEVNET \
--sync-mode $SYNC_MODE
Expand Down
29 changes: 18 additions & 11 deletions src/config/mod.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use std::{iter, path::PathBuf, process::exit, str::FromStr};
use std::{fmt, iter, path::PathBuf, process::exit, str::FromStr};

use ethers::types::{Address, H256, U256};
use figment::{
Expand All @@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize};
use crate::common::{BlockInfo, Epoch};

/// Sync Mode Specifies how `magi` should sync the L2 chain
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum SyncMode {
/// Fast sync mode
Fast,
Expand All @@ -36,6 +36,17 @@ impl FromStr for SyncMode {
}
}

impl fmt::Display for SyncMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Fast => write!(f, "fast"),
Self::Checkpoint => write!(f, "checkpoint"),
Self::Challenge => write!(f, "challenge"),
Self::Full => write!(f, "full"),
}
}
}

/// The global `Magi` configuration.
#[derive(Debug, Clone, Deserialize, Default)]
pub struct Config {
Expand Down Expand Up @@ -274,20 +285,16 @@ impl ChainConfig {
}

/// Returns true if the block is the first block subject to the Ecotone hardfork
pub fn is_ecotone_activation_block(&self, block_time: u64) -> bool {
if block_time < self.blocktime {
return false;
}

block_time - self.blocktime < self.ecotone_time
pub fn is_ecotone_activation_block(&self, l2_block_timestamp: u64) -> bool {
l2_block_timestamp == self.ecotone_time
}

/// Returns true if Ecotone hardfork is active but the block is not the
/// first block subject to the hardfork. Ecotone activation at genesis does not count.
pub fn is_ecotone_but_not_first_block(&self, block_time: u64) -> bool {
let is_ecotone = block_time >= self.ecotone_time;
pub fn is_ecotone_but_not_first_block(&self, l2_block_timestamp: u64) -> bool {
let is_ecotone = l2_block_timestamp >= self.ecotone_time;

is_ecotone && !self.is_ecotone_activation_block(block_time)
is_ecotone && !self.is_ecotone_activation_block(l2_block_timestamp)
}

/// [ChainConfig] for Optimism
Expand Down
6 changes: 3 additions & 3 deletions src/derive/stages/attributes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -73,8 +73,8 @@ impl Attributes {
///
/// Calls `derive_transactions` to generate the raw transactions
fn derive_attributes(&mut self, input: BlockInput<Epoch>) -> PayloadAttributes {
tracing::debug!("attributes derived from block {}", input.epoch.number);
tracing::debug!("batch epoch hash {:?}", input.epoch.hash);
tracing::debug!("deriving attributes from block: {}", input.epoch.number);
tracing::debug!("batch epoch hash: {:?}", input.epoch.hash);

self.update_sequence_number(input.epoch.hash);

Expand Down Expand Up @@ -137,9 +137,9 @@ impl Attributes {
.chain
.is_ecotone_activation_block(input.timestamp)
{
tracing::debug!("found Ecotone activation block; Upgrade transactions added");
let mut ecotone_upgrade_txs = get_ecotone_upgrade_transactions();
transactions.append(&mut ecotone_upgrade_txs);
tracing::debug!("found Ecotone activation block; Upgrade transactions added");
}

// Remaining transactions
Expand Down
17 changes: 10 additions & 7 deletions src/derive/state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -175,13 +175,16 @@ async fn l2_refs(
for i in start..=head_num {
let l2_block = provider.get_block_with_txs(i).await;
if let Ok(Some(l2_block)) = l2_block {
if let Ok(head_info) = HeadInfo::try_from_l2_block(config, l2_block) {
refs.insert(
head_info.l2_block_info.number,
(head_info.l2_block_info, head_info.l1_epoch),
);
} else {
tracing::warn!("could not get head info for L2 block {}", i);
match HeadInfo::try_from_l2_block(config, l2_block) {
Ok(head_info) => {
refs.insert(
head_info.l2_block_info.number,
(head_info.l2_block_info, head_info.l1_epoch),
);
}
Err(e) => {
tracing::warn!(err = ?e, "could not get head info for L2 block {}", i);
}
}
}
}
Expand Down
4 changes: 1 addition & 3 deletions src/driver/engine_driver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -224,9 +224,6 @@ fn should_skip(block: &Block<Transaction>, attributes: &PayloadAttributes) -> Re
attributes.timestamp
);

tracing::debug!("block: {:?}", block);
tracing::debug!("attributes: {:?}", attributes);

let attributes_hashes = attributes
.transactions
.as_ref()
Expand All @@ -242,6 +239,7 @@ fn should_skip(block: &Block<Transaction>, attributes: &PayloadAttributes) -> Re
.collect::<Vec<_>>();

tracing::debug!("attribute hashes: {:?}", attributes_hashes);
tracing::debug!("block hashes: {:?}", block_hashes);

let is_same = attributes_hashes == block_hashes
&& attributes.timestamp.as_u64() == block.timestamp.as_u64()
Expand Down
36 changes: 25 additions & 11 deletions src/driver/info.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,19 +44,33 @@ pub struct HeadInfoQuery {}
impl HeadInfoQuery {
/// Fetches the latest finalized L2 block
pub async fn get_head_info<P: InnerProvider>(p: &P, config: &Config) -> HeadInfo {
p.get_block_with_txs(BlockId::Number(BlockNumber::Finalized))
let parsed_head_info = match p
.get_block_with_txs(BlockId::Number(BlockNumber::Finalized))
.await
.ok()
.flatten()
.and_then(|l2_block| HeadInfo::try_from_l2_block(config, l2_block).ok())
.unwrap_or_else(|| {
tracing::warn!("could not get head info. Falling back to the genesis head.");
HeadInfo {
l2_block_info: config.chain.l2_genesis,
l1_epoch: config.chain.l1_start_epoch,
sequence_number: 0,
{
Ok(Some(block)) => match HeadInfo::try_from_l2_block(config, block) {
Ok(head_info) => Some(head_info),
Err(e) => {
tracing::debug!(err = ?e, "could not parse L2 block into head info");
None
}
})
},
e => {
tracing::debug!("could not get finalied L2 block: {:?}", e);
None
}
};

if let Some(head_info) = parsed_head_info {
head_info
} else {
tracing::warn!("could not get head info. Falling back to the genesis head.");
HeadInfo {
l2_block_info: config.chain.l2_genesis,
l1_epoch: config.chain.l1_start_epoch,
sequence_number: 0,
}
}
}
}

Expand Down
3 changes: 2 additions & 1 deletion src/driver/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,8 @@ impl<E: Engine> Driver<E> {

self.engine_driver
.handle_attributes(next_attributes)
.await?;
.await
.map_err(|e| eyre::eyre!("failed to handle attributes: {}", e))?;

tracing::info!(
"safe head updated: {} {:?}",
Expand Down
6 changes: 3 additions & 3 deletions src/engine/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -121,8 +121,8 @@ impl EngineApi {
body.insert("method".to_string(), Value::String(method.to_string()));
body.insert("params".to_string(), Value::Array(params));

tracing::debug!("Sending request to url: {:?}", self.base_url);
tracing::debug!("Sending request: {:?}", serde_json::to_string(&body));
tracing::trace!("Sending request to url: {:?}", self.base_url);
tracing::trace!("Sending request: {:?}", serde_json::to_string(&body));

// Send the client request
let client = self
Expand Down Expand Up @@ -166,7 +166,7 @@ impl EngineApi {
}

if let Some(err) = res.error {
eyre::bail!(err.message);
eyre::bail!("Engine API POST error: {}", err.message);
}

// This scenario shouldn't occur as the response should always have either data or an error
Expand Down
2 changes: 1 addition & 1 deletion src/engine/payload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,7 @@ mod tests {

#[tokio::test]
async fn test_from_block_hash_to_execution_payload() -> Result<()> {
if std::env::var("L1_TEST_RPC_URL").is_ok() && std::env::var("L2_TEST_RPC_URL").is_ok() {
if std::env::var("L2_TEST_RPC_URL").is_ok() {
let checkpoint_hash: H256 =
"0xc2794a16acacd9f7670379ffd12b6968ff98e2a602f57d7d1f880220aa5a4973".parse()?;

Expand Down
4 changes: 2 additions & 2 deletions src/l1/blob_fetcher.rs
Original file line number Diff line number Diff line change
Expand Up @@ -99,8 +99,9 @@ impl BlobFetcher {
let slot = self.get_slot_from_time(block.timestamp.as_u64()).await?;
// perf: fetch only the required indexes instead of all
let blobs = self.fetch_blob_sidecars(slot).await?;
tracing::debug!("fetched {} blobs for slot {}", blobs.len(), slot);

for (blob_index, blob_hash) in indexed_blobs {
for (blob_index, _) in indexed_blobs {
let Some(blob_sidecar) = blobs.iter().find(|b| b.index == blob_index as u64) else {
// This can happen in the case the blob retention window has expired
// and the data is no longer available. This case is not handled yet.
Expand All @@ -109,7 +110,6 @@ impl BlobFetcher {

// decode the full blob
let decoded_blob_data = decode_blob_data(&blob_sidecar.blob)?;
tracing::debug!("successfully decoded blob data for hash {:?}", blob_hash);

batcher_transactions_data.push(decoded_blob_data);
}
Expand Down
4 changes: 0 additions & 4 deletions src/runner/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -91,8 +91,6 @@ impl Runner {
/// Syncs via L1 block derivation from the latest finalized block the execution client has synced to.
/// Otherwise syncs from genesis
pub async fn full_sync(&self) -> Result<()> {
tracing::info!("starting full sync");

self.start_driver().await?;
Ok(())
}
Expand All @@ -102,8 +100,6 @@ impl Runner {
///
/// Note: the `admin` RPC method must be available on the execution client as checkpoint_sync relies on `admin_addPeer`
pub async fn checkpoint_sync(&self) -> Result<()> {
tracing::info!("starting checkpoint sync");

let l2_provider = Provider::try_from(&self.config.l2_rpc_url)?;
let checkpoint_sync_url =
Provider::try_from(self.config.checkpoint_sync_url.as_ref().ok_or(eyre::eyre!(
Expand Down

0 comments on commit 800b2f4

Please sign in to comment.