diff --git a/.github/workflows/compile.yaml b/.github/workflows/compile.yaml
index 92846375..f80cd556 100644
--- a/.github/workflows/compile.yaml
+++ b/.github/workflows/compile.yaml
@@ -17,9 +17,6 @@ jobs:
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
-      - name: Compile the fault proof program
-        run: cargo build --profile release-client-lto
-        working-directory: programs/fault-proof
       - name: Compile the range program
         run: cargo build --profile release-client-lto
         working-directory: programs/range
@@ -37,9 +34,6 @@ jobs:
          ~/.sp1/bin/sp1up
          ~/.sp1/bin/cargo-prove prove --version
          source ~/.bashrc
-      - name: Compile the fault proof program
-        run: ~/.sp1/bin/cargo-prove prove build --binary fault-proof
-        working-directory: programs/fault-proof
       - name: Compile the range program
         run: ~/.sp1/bin/cargo-prove prove build --binary range
         working-directory: programs/range
diff --git a/Cargo.lock b/Cargo.lock
index 921b7808..b4358ff3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2360,6 +2360,14 @@ version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2"
 
+[[package]]
+name = "dummy-range"
+version = "0.1.0"
+dependencies = [
+ "op-succinct-client-utils",
+ "sp1-zkvm",
+]
+
 [[package]]
 name = "dunce"
 version = "1.0.5"
@@ -2792,25 +2800,6 @@ dependencies = [
  "bytes",
 ]
 
-[[package]]
-name = "fault-proof"
-version = "0.1.0"
-dependencies = [
- "alloy-consensus 0.6.4",
- "anyhow",
- "cfg-if",
- "kona-driver",
- "kona-executor",
- "kona-proof",
- "op-alloy-genesis",
- "op-alloy-rpc-types-engine",
- "op-succinct-client-utils",
- "serde_json",
- "sp1-zkvm",
- "tracing",
- "tracing-subscriber",
-]
-
 [[package]]
 name = "ff"
 version = "0.12.1"
diff --git a/elf/dummy-range-elf b/elf/dummy-range-elf
new file mode 100755
index 00000000..732689db
Binary files /dev/null and b/elf/dummy-range-elf differ
diff --git a/justfile b/justfile
index 1ec26eda..2c451fbb 100644
--- a/justfile
+++ b/justfile
@@ -33,57 +33,6 @@ cost-estimator start end:
     #!/usr/bin/env bash
     cargo run --bin cost-estimator --release -- --start {{start}} --end {{end}}
 
-# Runs the client program in native execution mode. Modified version of Kona Native Client execution:
-# https://github.com/ethereum-optimism/kona/blob/ae71b9df103c941c06b0dc5400223c4f13fe5717/bin/client/justfile#L65-L108
-run-client-native l2_block_num l1_rpc='${L1_RPC}' l1_beacon_rpc='${L1_BEACON_RPC}' l2_rpc='${L2_RPC}' verbosity="-vvvv":
-    #!/usr/bin/env bash
-    L1_NODE_ADDRESS="{{l1_rpc}}"
-    L1_BEACON_ADDRESS="{{l1_beacon_rpc}}"
-    L2_NODE_ADDRESS="{{l2_rpc}}"
-    echo "L1 Node Address: $L1_NODE_ADDRESS"
-    echo "L1 Beacon Address: $L1_BEACON_ADDRESS"
-    echo "L2 Node Address: $L2_NODE_ADDRESS"
-    HOST_BIN_PATH="./kona-host"
-    CLIENT_BIN_PATH="$(pwd)/target/release-client-lto/fault-proof"
-    L2_BLOCK_NUMBER="{{l2_block_num}}"
-    L2_BLOCK_SAFE_HEAD=$((L2_BLOCK_NUMBER - 1))
-    L2_OUTPUT_STATE_ROOT=$(cast block --rpc-url $L2_NODE_ADDRESS --field stateRoot $L2_BLOCK_SAFE_HEAD)
-    L2_HEAD=$(cast block --rpc-url $L2_NODE_ADDRESS --field hash $L2_BLOCK_SAFE_HEAD)
-    L2_OUTPUT_STORAGE_HASH=$(cast proof --rpc-url $L2_NODE_ADDRESS --block $L2_BLOCK_SAFE_HEAD 0x4200000000000000000000000000000000000016 | jq -r '.storageHash')
-    L2_OUTPUT_ENCODED=$(cast abi-encode "x(uint256,bytes32,bytes32,bytes32)" 0 $L2_OUTPUT_STATE_ROOT $L2_OUTPUT_STORAGE_HASH $L2_HEAD)
-    L2_OUTPUT_ROOT=$(cast keccak $L2_OUTPUT_ENCODED)
-    echo "L2 Safe Head: $L2_BLOCK_SAFE_HEAD"
-    echo "Safe Head Output Root: $L2_OUTPUT_ROOT"
-    L2_CLAIM_STATE_ROOT=$(cast block --rpc-url $L2_NODE_ADDRESS --field stateRoot $L2_BLOCK_NUMBER)
-    L2_CLAIM_HASH=$(cast block --rpc-url $L2_NODE_ADDRESS --field hash $L2_BLOCK_NUMBER)
-    L2_CLAIM_STORAGE_HASH=$(cast proof --rpc-url $L2_NODE_ADDRESS --block $L2_BLOCK_NUMBER 0x4200000000000000000000000000000000000016 | jq -r '.storageHash')
-    L2_CLAIM_ENCODED=$(cast abi-encode "x(uint256,bytes32,bytes32,bytes32)" 0 $L2_CLAIM_STATE_ROOT $L2_CLAIM_STORAGE_HASH $L2_CLAIM_HASH)
-    L2_CLAIM=$(cast keccak $L2_CLAIM_ENCODED)
-    echo "L2 Block Number: $L2_BLOCK_NUMBER"
-    echo "L2 Claim Root: $L2_CLAIM"
-    L2_BLOCK_TIMESTAMP=$(cast block --rpc-url $L2_NODE_ADDRESS $L2_BLOCK_NUMBER -j | jq -r .timestamp)
-    L1_HEAD=$(cast block --rpc-url $L1_NODE_ADDRESS $(cast find-block --rpc-url $L1_NODE_ADDRESS $(($(cast 2d $L2_BLOCK_TIMESTAMP) + 300))) -j | jq -r .hash)
-    echo "L1 Head: $L1_HEAD"
-    L2_CHAIN_ID=10
-    DATA_DIRECTORY="./data/$L2_BLOCK_NUMBER"
-    echo "Saving Data to $DATA_DIRECTORY"
-    echo "Building client program..."
-    cargo build --bin fault-proof --profile release-client-lto
-    echo "Running host program with native client program..."
-    cargo run --bin op-succinct-witnessgen --release -- \
-        --l1-head $L1_HEAD \
-        --l2-head $L2_HEAD \
-        --l2-claim $L2_CLAIM \
-        --l2-output-root $L2_OUTPUT_ROOT \
-        --l2-block-number $L2_BLOCK_NUMBER \
-        --l2-chain-id $L2_CHAIN_ID \
-        --l1-node-address $L1_NODE_ADDRESS \
-        --l1-beacon-address $L1_BEACON_ADDRESS \
-        --l2-node-address $L2_NODE_ADDRESS \
-        --exec $CLIENT_BIN_PATH \
-        --data-dir $DATA_DIRECTORY \
-        {{verbosity}}
-    # Output the data required for the ZKVM execution.
-    echo "$L1_HEAD $L2_OUTPUT_ROOT $L2_CLAIM $L2_BLOCK_NUMBER $L2_CHAIN_ID"
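For reference, the output-root computation the deleted recipe performed with `cast abi-encode` and `cast keccak` can be expressed directly in Rust. A minimal sketch, assuming `alloy-primitives` is available in the workspace; `output_root_v0` is an illustrative name, not a function from this repo:

use alloy_primitives::{keccak256, B256};

// v0 output root: keccak256(abi.encode(0, state_root, message_passer_storage_root, block_hash)).
// The ABI encoding of (uint256, bytes32, bytes32, bytes32) is simply four 32-byte words.
fn output_root_v0(state_root: B256, message_passer_storage_root: B256, block_hash: B256) -> B256 {
    let mut preimage = [0u8; 128];
    // Bytes 0..32 stay zero: the output-root version word is 0.
    preimage[32..64].copy_from_slice(state_root.as_slice());
    // Storage root of the L2ToL1MessagePasser predeploy (0x4200...0016 in the recipe).
    preimage[64..96].copy_from_slice(message_passer_storage_root.as_slice());
    preimage[96..128].copy_from_slice(block_hash.as_slice());
    keccak256(preimage)
}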
diff --git a/programs/dummy-range/Cargo.toml b/programs/dummy-range/Cargo.toml
new file mode 100644
index 00000000..95e3aa06
--- /dev/null
+++ b/programs/dummy-range/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "dummy-range"
+version = "0.1.0"
+license.workspace = true
+edition.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+
+[dependencies]
+# sp1
+sp1-zkvm.workspace = true
+
+# op-succinct
+op-succinct-client-utils.workspace = true
diff --git a/programs/dummy-range/src/main.rs b/programs/dummy-range/src/main.rs
new file mode 100644
index 00000000..2a19b31b
--- /dev/null
+++ b/programs/dummy-range/src/main.rs
@@ -0,0 +1,19 @@
+//! A dummy replica of the `range` program.
+//!
+//! SAFETY: Does not perform any verification of the rollup state transition.
+
+#![no_main]
+sp1_zkvm::entrypoint!(main);
+
+use op_succinct_client_utils::boot::BootInfoStruct;
+use op_succinct_client_utils::BootInfoWithBytesConfig;
+
+pub fn main() {
+    let boot_info_with_bytes_config = sp1_zkvm::io::read::<BootInfoWithBytesConfig>();
+
+    // BootInfoStruct is identical to BootInfoWithBytesConfig, except it replaces
+    // the rollup_config_bytes with a hash of those bytes (rollupConfigHash). Securely
+    // hashes the rollup config bytes.
+    let boot_info_struct = BootInfoStruct::from(boot_info_with_bytes_config.clone());
+    sp1_zkvm::io::commit::<BootInfoStruct>(&boot_info_struct);
+}
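Host-side, this program is driven the same way as the real range program: write a single BootInfoWithBytesConfig to stdin, execute, and read the committed BootInfoStruct back out of the public values. A minimal sketch using the `sp1_sdk` execution API that appears elsewhere in this diff; `execute_dummy_range` is a hypothetical helper, not part of this change:

use anyhow::Result;
use op_succinct_client_utils::{boot::BootInfoStruct, BootInfoWithBytesConfig};
use sp1_sdk::{ProverClient, SP1Stdin};

// Execute the dummy range ELF natively and return the BootInfoStruct it committed.
fn execute_dummy_range(elf: &[u8], boot_info: &BootInfoWithBytesConfig) -> Result<BootInfoStruct> {
    let mut stdin = SP1Stdin::new();
    stdin.write(boot_info); // mirrors sp1_zkvm::io::read in the program above
    let prover = ProverClient::new();
    let (mut public_values, _report) = prover.execute(elf, stdin).run()?;
    Ok(public_values.read::<BootInfoStruct>())
}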
diff --git a/programs/fault-proof/Cargo.toml b/programs/fault-proof/Cargo.toml
deleted file mode 100644
index c0f44f59..00000000
--- a/programs/fault-proof/Cargo.toml
+++ /dev/null
@@ -1,38 +0,0 @@
-[package]
-name = "fault-proof"
-version = "0.1.0"
-edition.workspace = true
-authors.workspace = true
-license.workspace = true
-repository.workspace = true
-homepage.workspace = true
-
-[dependencies]
-cfg-if.workspace = true
-serde_json.workspace = true
-tracing.workspace = true
-anyhow.workspace = true
-
-# workspace (ethereum)
-alloy-consensus.workspace = true
-
-# sp1
-sp1-zkvm = { workspace = true }
-
-# kona
-kona-executor.workspace = true
-kona-driver.workspace = true
-kona-proof.workspace = true
-
-# op-succinct
-op-succinct-client-utils.workspace = true
-
-# op-alloy
-op-alloy-genesis.workspace = true
-op-alloy-rpc-types-engine.workspace = true
-
-# `tracing-subscriber` feature dependencies
-tracing-subscriber = { workspace = true, optional = true }
-
-[features]
-tracing-subscriber = ["dep:tracing-subscriber"]
diff --git a/programs/fault-proof/src/main.rs b/programs/fault-proof/src/main.rs
deleted file mode 100644
index fcb9bf41..00000000
--- a/programs/fault-proof/src/main.rs
+++ /dev/null
@@ -1,149 +0,0 @@
-//! A program to verify a OP Stack chain's block STF in the zkVM.
-//!
-//! This binary contains the client program for executing the Optimism rollup state transition
-//! across a single block, which can be used in an on chain dispute game. Depending on the
-//! compilation pipeline, it will compile to be run either in native mode or in zkVM mode. In native
-//! mode, the data for verifying the execute of the Optimism rollup's state transition is fetched
-//! from RPC, while in zkVM mode, the data is supplied by the host binary to the verifiable program.
-
-#![cfg_attr(target_os = "zkvm", no_main)]
-
-extern crate alloc;
-
-use alloc::sync::Arc;
-
-use cfg_if::cfg_if;
-use kona_driver::Driver;
-use kona_proof::{
-    executor::KonaExecutorConstructor,
-    l1::{OracleBlobProvider, OracleL1ChainProvider, OraclePipeline},
-    l2::OracleL2ChainProvider,
-    sync::new_pipeline_cursor,
-    BootInfo,
-};
-use op_succinct_client_utils::precompiles::zkvm_handle_register;
-
-cfg_if! {
-    if #[cfg(target_os = "zkvm")] {
-        sp1_zkvm::entrypoint!(main);
-        use op_succinct_client_utils::{InMemoryOracle, boot::BootInfoStruct, BootInfoWithBytesConfig};
-        use op_alloy_genesis::RollupConfig;
-        use alloc::vec::Vec;
-        use serde_json;
-    } else {
-        use kona_proof::CachingOracle;
-        use op_succinct_client_utils::pipes::{ORACLE_READER, HINT_WRITER};
-    }
-}
-
-fn main() {
-    #[cfg(feature = "tracing-subscriber")]
-    {
-        use anyhow::anyhow;
-        use tracing::Level;
-
-        let subscriber = tracing_subscriber::fmt()
-            .with_max_level(Level::INFO)
-            .finish();
-        tracing::subscriber::set_global_default(subscriber)
-            .map_err(|e| anyhow!(e))
-            .unwrap();
-    }
-
-    op_succinct_client_utils::block_on(async move {
-        ////////////////////////////////////////////////////////////////
-        //                          PROLOGUE                          //
-        ////////////////////////////////////////////////////////////////
-
-        cfg_if! {
-            // If we are compiling for the zkVM, read inputs from SP1 to generate boot info
-            // and in memory oracle.
-            if #[cfg(target_os = "zkvm")] {
-                println!("cycle-tracker-start: boot-load");
-                let boot_info_with_bytes_config = sp1_zkvm::io::read::<BootInfoWithBytesConfig>();
-
-                // BootInfoStruct is identical to BootInfoWithBytesConfig, except it replaces
-                // the rollup_config_bytes with a hash of those bytes (rollupConfigHash). Securely
-                // hashes the rollup config bytes.
-                let boot_info_struct = BootInfoStruct::from(boot_info_with_bytes_config.clone());
-                sp1_zkvm::io::commit::<BootInfoStruct>(&boot_info_struct);
-
-                let rollup_config: RollupConfig = serde_json::from_slice(&boot_info_with_bytes_config.rollup_config_bytes).expect("failed to parse rollup config");
-                let boot: Arc<BootInfo> = Arc::new(BootInfo {
-                    l1_head: boot_info_with_bytes_config.l1_head,
-                    agreed_l2_output_root: boot_info_with_bytes_config.l2_output_root,
-                    claimed_l2_output_root: boot_info_with_bytes_config.l2_claim,
-                    claimed_l2_block_number: boot_info_with_bytes_config.l2_claim_block,
-                    chain_id: boot_info_with_bytes_config.chain_id,
-                    rollup_config,
-                });
-                println!("cycle-tracker-end: boot-load");
-
-                println!("cycle-tracker-start: oracle-load");
-                let in_memory_oracle_bytes: Vec<u8> = sp1_zkvm::io::read_vec();
-                let oracle = Arc::new(InMemoryOracle::from_raw_bytes(in_memory_oracle_bytes));
-                println!("cycle-tracker-end: oracle-load");
-
-                println!("cycle-tracker-start: oracle-verify");
-                oracle.verify().expect("key value verification failed");
-                println!("cycle-tracker-end: oracle-verify");
-            }
-            // If we are compiling for online mode, create a caching oracle that speaks to the
-            // fetcher via hints, and gather boot info from this oracle.
-            else {
-                let oracle = Arc::new(CachingOracle::new(1024, ORACLE_READER, HINT_WRITER));
-                let boot = Arc::new(BootInfo::load(oracle.as_ref()).await.unwrap());
-            }
-        }
-
-        let l1_provider = OracleL1ChainProvider::new(boot.clone(), oracle.clone());
-        let l2_provider = OracleL2ChainProvider::new(boot.clone(), oracle.clone());
-        let beacon = OracleBlobProvider::new(oracle.clone());
-
-        ////////////////////////////////////////////////////////////////
-        //                   DERIVATION & EXECUTION                   //
-        ////////////////////////////////////////////////////////////////
-
-        let cursor = new_pipeline_cursor(
-            oracle.clone(),
-            &boot,
-            &mut l1_provider.clone(),
-            &mut l2_provider.clone(),
-        )
-        .await
-        .unwrap();
-
-        let cfg = Arc::new(boot.rollup_config.clone());
-        let pipeline = OraclePipeline::new(
-            cfg.clone(),
-            cursor.clone(),
-            oracle.clone(),
-            beacon,
-            l1_provider.clone(),
-            l2_provider.clone(),
-        );
-        let executor = KonaExecutorConstructor::new(
-            &cfg,
-            l2_provider.clone(),
-            l2_provider,
-            zkvm_handle_register,
-        );
-        let mut driver = Driver::new(cursor, executor, pipeline);
-
-        println!("cycle-tracker-start: advance-to-target");
-        let (number, output_root) = driver
-            .advance_to_target(&boot.rollup_config, boot.claimed_l2_block_number)
-            .await
-            .unwrap();
-        println!("cycle-tracker-end: advance-to-target");
-
-        ////////////////////////////////////////////////////////////////
-        //                          EPILOGUE                          //
-        ////////////////////////////////////////////////////////////////
-
-        assert_eq!(number, boot.claimed_l2_block_number);
-        assert_eq!(output_root, boot.claimed_l2_output_root);
-
-        println!("Validated derivation and STF. Output Root: {}", output_root);
-    });
-}
diff --git a/proposer/succinct/Dockerfile b/proposer/succinct/Dockerfile
index e045e1de..ad935c4a 100644
--- a/proposer/succinct/Dockerfile
+++ b/proposer/succinct/Dockerfile
@@ -41,7 +41,6 @@ RUN --mount=type=ssh \
     --mount=type=cache,target=/build/target \
     cargo build --bin server --release && \
     cp target/release/server /build/server && \
-    cp target/release-client-lto/fault-proof /build/fault_proof && \
     cp target/release-client-lto/range /build/range && \
     cp target/native_host_runner/release/native_host_runner /build/native_host_runner
 
@@ -73,7 +72,6 @@ RUN curl -L https://sp1.succinct.xyz | bash && \
 
 # Copy only the built binaries from builder
 COPY --from=builder /build/server /usr/local/bin/server
-COPY --from=builder /build/fault_proof /usr/local/bin/fault_proof
 COPY --from=builder /build/range /usr/local/bin/range
 COPY --from=builder /build/native_host_runner /usr/local/bin/native_host_runner
 
diff --git a/proposer/succinct/bin/server.rs b/proposer/succinct/bin/server.rs
index 10f41bac..f2e335d5 100644
--- a/proposer/succinct/bin/server.rs
+++ b/proposer/succinct/bin/server.rs
@@ -32,7 +32,7 @@ use sp1_sdk::{
 use std::{env, str::FromStr, time::Duration};
 use tower_http::limit::RequestBodyLimitLayer;
 
-pub const MULTI_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
+pub const RANGE_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
 pub const AGG_ELF: &[u8] = include_bytes!("../../../elf/aggregation-elf");
 
 #[tokio::main]
@@ -42,7 +42,7 @@ async fn main() -> Result<()> {
     dotenv::dotenv().ok();
 
     let prover = ProverClient::new();
-    let (range_pk, range_vk) = prover.setup(MULTI_BLOCK_ELF);
+    let (range_pk, range_vk) = prover.setup(RANGE_ELF);
     let (agg_pk, agg_vk) = prover.setup(AGG_ELF);
     let multi_block_vkey_u8 = u32_to_u8(range_vk.vk.hash_u32());
     let range_vkey_commitment = B256::from(multi_block_vkey_u8);
@@ -205,10 +205,7 @@ async fn request_span_proof(
     // Set simulation to false on range proofs as they're large.
     env::set_var("SKIP_SIMULATION", "true");
 
-    let vk_hash = match prover
-        .register_program(&state.range_vk, MULTI_BLOCK_ELF)
-        .await
-    {
+    let vk_hash = match prover.register_program(&state.range_vk, RANGE_ELF).await {
         Ok(vk_hash) => vk_hash,
         Err(e) => {
             error!("Failed to register program: {}", e);
diff --git a/scripts/prove/Cargo.toml b/scripts/prove/Cargo.toml
index ce1dcda4..5e91b543 100644
--- a/scripts/prove/Cargo.toml
+++ b/scripts/prove/Cargo.toml
@@ -7,10 +7,6 @@ authors.workspace = true
 repository.workspace = true
 homepage.workspace = true
 
-[[bin]]
-name = "single"
-path = "bin/single.rs"
-
 [[bin]]
 name = "multi"
 path = "bin/multi.rs"
diff --git a/scripts/prove/bin/agg.rs b/scripts/prove/bin/agg.rs
index c7e72f91..adef883b 100644
--- a/scripts/prove/bin/agg.rs
+++ b/scripts/prove/bin/agg.rs
@@ -7,14 +7,12 @@ use op_succinct_host_utils::{
     fetcher::{OPSuccinctDataFetcher, RunContext},
     get_agg_proof_stdin,
 };
+use op_succinct_prove::{AGG_ELF, RANGE_ELF};
 use sp1_sdk::{
     utils, HashableKey, ProverClient, SP1Proof, SP1ProofWithPublicValues, SP1VerifyingKey,
 };
 use std::fs;
 
-pub const AGG_ELF: &[u8] = include_bytes!("../../../elf/aggregation-elf");
-pub const MULTI_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
-
 #[derive(Parser, Debug)]
 #[command(author, version, about, long_about = None)]
 struct Args {
@@ -76,7 +74,7 @@ async fn main() -> Result<()> {
     let prover = ProverClient::new();
     let fetcher = OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Dev).await?;
 
-    let (_, vkey) = prover.setup(MULTI_BLOCK_ELF);
+    let (_, vkey) = prover.setup(RANGE_ELF);
 
     let (proofs, boot_infos) = load_aggregation_proof_data(args.proofs, &vkey, &prover);
 
diff --git a/scripts/prove/bin/multi.rs b/scripts/prove/bin/multi.rs
index 3485f61f..4dc318a0 100644
--- a/scripts/prove/bin/multi.rs
+++ b/scripts/prove/bin/multi.rs
@@ -7,7 +7,7 @@ use op_succinct_host_utils::{
     stats::ExecutionStats,
     ProgramType,
 };
-use op_succinct_prove::{execute_multi, generate_witness, DEFAULT_RANGE, MULTI_BLOCK_ELF};
+use op_succinct_prove::{execute_multi, generate_witness, DEFAULT_RANGE, RANGE_ELF};
 use sp1_sdk::{utils, ProverClient};
 use std::{fs, path::PathBuf, time::Duration};
 
@@ -77,7 +77,7 @@ async fn main() -> Result<()> {
 
     if args.prove {
         // If the prove flag is set, generate a proof.
-        let (pk, _) = prover.setup(MULTI_BLOCK_ELF);
+        let (pk, _) = prover.setup(RANGE_ELF);
 
         // Generate proofs in compressed mode for aggregation verification.
         let proof = prover.prove(&pk, sp1_stdin).compressed().run().unwrap();
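Because range proofs are now produced in compressed mode for later aggregation (see multi.rs above), a saved proof can be sanity-checked against the range vkey before it is fed to agg.rs. A hedged sketch, assuming the `SP1ProofWithPublicValues::load` and `ProverClient::verify` APIs from `sp1_sdk`; `check_range_proof` is a hypothetical helper:

use anyhow::Result;
use op_succinct_prove::RANGE_ELF;
use sp1_sdk::{HashableKey, ProverClient, SP1ProofWithPublicValues};

// Load a compressed range proof saved by multi.rs and verify it against the range vkey.
fn check_range_proof(path: &str) -> Result<()> {
    let prover = ProverClient::new();
    let (_, range_vk) = prover.setup(RANGE_ELF);
    let proof = SP1ProofWithPublicValues::load(path)?;
    prover.verify(&proof, &range_vk)?;
    // The hex commitment the contracts compare against, via HashableKey.
    println!("range vkey commitment: {}", range_vk.bytes32());
    Ok(())
}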
diff --git a/scripts/prove/bin/single.rs b/scripts/prove/bin/single.rs
deleted file mode 100644
index 2daa32fe..00000000
--- a/scripts/prove/bin/single.rs
+++ /dev/null
@@ -1,122 +0,0 @@
-use anyhow::Result;
-use clap::Parser;
-use op_succinct_host_utils::{
-    fetcher::{CacheMode, OPSuccinctDataFetcher, RunContext},
-    get_proof_stdin,
-    stats::ExecutionStats,
-    witnessgen::WitnessGenExecutor,
-    ProgramType,
-};
-use sp1_sdk::{utils, ProverClient};
-use std::time::Instant;
-
-pub const SINGLE_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/fault-proof-elf");
-
-#[derive(Parser, Debug)]
-#[command(author, version, about, long_about = None)]
-struct Args {
-    /// Start block number.
-    #[arg(short, long)]
-    l2_block: u64,
-
-    /// Skip running native execution.
-    #[arg(short, long)]
-    use_cache: bool,
-
-    /// Generate proof.
-    #[arg(short, long)]
-    prove: bool,
-
-    /// Environment file.
-    #[arg(short, long, default_value = ".env")]
-    env_file: String,
-}
-
-/// Execute the OP Succinct program for a single block.
-#[tokio::main]
-async fn main() -> Result<()> {
-    let args = Args::parse();
-    dotenv::from_path(&args.env_file)?;
-    utils::setup_logger();
-
-    let data_fetcher = OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Dev).await?;
-
-    let l2_chain_id = data_fetcher.get_l2_chain_id().await?;
-
-    let l2_safe_head = args.l2_block - 1;
-
-    let cache_mode = if args.use_cache {
-        CacheMode::KeepCache
-    } else {
-        CacheMode::DeleteCache
-    };
-
-    let host_cli = data_fetcher
-        .get_host_cli_args(l2_safe_head, args.l2_block, ProgramType::Single, cache_mode)
-        .await?;
-
-    // By default, re-run the native execution unless the user passes `--use-cache`.
-    let start_time = Instant::now();
-    if !args.use_cache {
-        // Start the server and native client.
-        let mut witnessgen_executor = WitnessGenExecutor::default();
-        witnessgen_executor.spawn_witnessgen(&host_cli).await?;
-        witnessgen_executor.flush().await?;
-    }
-    let witness_generation_time_sec = start_time.elapsed();
-
-    // Get the stdin for the block.
-    let sp1_stdin = get_proof_stdin(&host_cli)?;
-
-    let prover = ProverClient::new();
-
-    if args.prove {
-        // If the prove flag is set, generate a proof.
-        let (pk, _) = prover.setup(SINGLE_BLOCK_ELF);
-
-        // Generate proofs in PLONK mode for on-chain verification.
-        let proof = prover.prove(&pk, sp1_stdin).plonk().run().unwrap();
-
-        // Create a proof directory for the chain ID if it doesn't exist.
-        let proof_dir = format!("data/{}/proofs", l2_chain_id);
-        if !std::path::Path::new(&proof_dir).exists() {
-            std::fs::create_dir_all(&proof_dir)?;
-        }
-        proof
-            .save(format!("{}/{}.bin", proof_dir, args.l2_block))
-            .expect("Failed to save proof");
-    } else {
-        let start_time = Instant::now();
-        let (_, report) = prover.execute(SINGLE_BLOCK_ELF, sp1_stdin).run().unwrap();
-        let execution_duration = start_time.elapsed();
-
-        let report_path = format!(
-            "execution-reports/single/{}/{}.csv",
-            l2_chain_id, args.l2_block
-        );
-
-        // Create the report directory if it doesn't exist.
-        let report_dir = format!("execution-reports/single/{}", l2_chain_id);
-        if !std::path::Path::new(&report_dir).exists() {
-            std::fs::create_dir_all(&report_dir)?;
-        }
-
-        let block_data = data_fetcher
-            .get_l2_block_data_range(args.l2_block, args.l2_block)
-            .await?;
-
-        let stats = ExecutionStats::new(
-            &block_data,
-            &report,
-            witness_generation_time_sec.as_secs(),
-            execution_duration.as_secs(),
-        );
-        println!("Execution Stats: \n{:?}", stats);
-
-        // Write to CSV.
-        let mut csv_writer = csv::Writer::from_path(report_path)?;
-        csv_writer.serialize(&stats)?;
-        csv_writer.flush()?;
-    }
-
-    Ok(())
-}
diff --git a/scripts/prove/src/lib.rs b/scripts/prove/src/lib.rs
index ae531349..8ebf0995 100644
--- a/scripts/prove/src/lib.rs
+++ b/scripts/prove/src/lib.rs
@@ -12,7 +12,9 @@ pub const DEFAULT_RANGE: u64 = 5;
 pub const TWO_WEEKS: Duration = Duration::from_secs(14 * 24 * 60 * 60);
 pub const ONE_HOUR: Duration = Duration::from_secs(60 * 60);
 
-pub const MULTI_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
+pub const AGG_ELF: &[u8] = include_bytes!("../../../elf/aggregation-elf");
+pub const RANGE_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
+pub const DUMMY_RANGE_ELF: &[u8] = include_bytes!("../../../elf/dummy-range-elf");
 
 pub async fn generate_witness(host_cli: &HostCli) -> Result<Duration> {
     let start_time = Instant::now();
@@ -39,10 +41,7 @@ pub async fn execute_multi(
     l2_end_block: u64,
 ) -> Result<(Vec<BlockInfo>, ExecutionReport, Duration)> {
     let start_time = Instant::now();
-    let (_, report) = prover
-        .execute(MULTI_BLOCK_ELF, sp1_stdin.clone())
-        .run()
-        .unwrap();
+    let (_, report) = prover.execute(RANGE_ELF, sp1_stdin.clone()).run().unwrap();
     let execution_duration = start_time.elapsed();
 
     let block_data = data_fetcher
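The new DUMMY_RANGE_ELF constant suggests a cheap stand-in for real range proofs, e.g. for exercising aggregation plumbing in tests without running derivation. A speculative sketch, assuming SP1's mock prover (`ProverClient::mock()`); `mock_range_proof` and the way its `boot_info` argument is obtained are hypothetical, not part of this change:

use anyhow::Result;
use op_succinct_client_utils::BootInfoWithBytesConfig;
use op_succinct_prove::DUMMY_RANGE_ELF;
use sp1_sdk::{ProverClient, SP1ProofWithPublicValues, SP1Stdin};

// Produce a compressed "range proof" from the dummy program using the mock prover.
// The dummy program commits the same BootInfoStruct shape as the real range program,
// so downstream aggregation code sees identically shaped public values.
fn mock_range_proof(boot_info: &BootInfoWithBytesConfig) -> Result<SP1ProofWithPublicValues> {
    let mut stdin = SP1Stdin::new();
    stdin.write(boot_info);
    let prover = ProverClient::mock();
    let (pk, _) = prover.setup(DUMMY_RANGE_ELF);
    Ok(prover.prove(&pk, stdin).compressed().run()?)
}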
diff --git a/scripts/utils/bin/cost_estimator.rs b/scripts/utils/bin/cost_estimator.rs
index d870e8aa..71fcb57a 100644
--- a/scripts/utils/bin/cost_estimator.rs
+++ b/scripts/utils/bin/cost_estimator.rs
@@ -25,7 +25,7 @@ use std::{
     time::{Duration, Instant},
 };
 
-pub const MULTI_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
+pub const RANGE_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
 
 const TWELVE_HOURS: Duration = Duration::from_secs(60 * 60 * 12);
 
@@ -81,7 +81,7 @@ async fn execute_blocks_and_write_stats_csv(
     let sp1_stdin = get_proof_stdin(host_cli).unwrap();
 
     // FIXME: Implement retries with a smaller block range if this fails.
-    let result = prover.execute(MULTI_BLOCK_ELF, sp1_stdin).run();
+    let result = prover.execute(RANGE_ELF, sp1_stdin).run();
 
     // If the execution fails, skip this block range and log the error.
     if let Some(err) = result.as_ref().err() {
diff --git a/scripts/utils/bin/gen_sp1_test_artifacts.rs b/scripts/utils/bin/gen_sp1_test_artifacts.rs
index 3611d855..4cf0a0af 100644
--- a/scripts/utils/bin/gen_sp1_test_artifacts.rs
+++ b/scripts/utils/bin/gen_sp1_test_artifacts.rs
@@ -18,7 +18,7 @@ use std::{
     path::PathBuf,
 };
 
-pub const MULTI_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
+pub const RANGE_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
 
 /// Run the zkVM execution process for each split range in parallel. Get the SP1Stdin and the range
 /// for each successful execution.
@@ -34,7 +34,7 @@ async fn execute_blocks_parallel(
         .map(|(host_cli, range)| {
             let sp1_stdin = get_proof_stdin(host_cli).unwrap();
 
-            let result = prover.execute(MULTI_BLOCK_ELF, sp1_stdin.clone()).run();
+            let result = prover.execute(RANGE_ELF, sp1_stdin.clone()).run();
 
             // If the execution fails, skip this block range and log the error.
             if let Some(err) = result.as_ref().err() {
@@ -105,7 +105,7 @@ async fn main() -> Result<()> {
     let successful_ranges = execute_blocks_parallel(&host_clis, split_ranges, &prover).await;
 
     // Now, write the successful ranges to /sp1-testing-suite-artifacts/op-succinct-chain-{l2_chain_id}-{start}-{end}
-    // The folders should each have the MULTI_BLOCK_ELF as program.bin, and the serialized stdin should be
+    // The folders should each have the RANGE_ELF as program.bin, and the serialized stdin should be
     // written to stdin.bin.
     let cargo_metadata = cargo_metadata::MetadataCommand::new().exec().unwrap();
     let root_dir = PathBuf::from(cargo_metadata.workspace_root).join("sp1-testing-suite-artifacts");
@@ -121,7 +121,7 @@ async fn main() -> Result<()> {
         ));
         fs::create_dir_all(&program_dir)?;
 
-        fs::write(program_dir.join("program.bin"), MULTI_BLOCK_ELF)?;
+        fs::write(program_dir.join("program.bin"), RANGE_ELF)?;
         fs::write(
             program_dir.join("stdin.bin"),
             bincode::serialize(&sp1_stdin).unwrap(),
diff --git a/scripts/utils/bin/vkey.rs b/scripts/utils/bin/vkey.rs
index 71bf18c4..2996ebeb 100644
--- a/scripts/utils/bin/vkey.rs
+++ b/scripts/utils/bin/vkey.rs
@@ -4,7 +4,7 @@ use op_succinct_client_utils::types::u32_to_u8;
 use sp1_sdk::{utils, HashableKey, ProverClient};
 
 pub const AGG_ELF: &[u8] = include_bytes!("../../../elf/aggregation-elf");
-pub const MULTI_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
+pub const RANGE_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
 
 // Get the verification keys for the ELFs and check them against the contract.
 #[tokio::main]
@@ -14,7 +14,7 @@ async fn main() -> Result<()> {
 
     let prover = ProverClient::new();
 
-    let (_, range_vk) = prover.setup(MULTI_BLOCK_ELF);
+    let (_, range_vk) = prover.setup(RANGE_ELF);
 
     // Get the 32 byte commitment to the vkey from vkey.vk.hash_u32()
     let multi_block_vkey_u8 = u32_to_u8(range_vk.vk.hash_u32());
diff --git a/utils/build/src/lib.rs b/utils/build/src/lib.rs
index 80362a8f..2b91214e 100644
--- a/utils/build/src/lib.rs
+++ b/utils/build/src/lib.rs
@@ -85,16 +85,15 @@ fn build_zkvm_program(program: &str) {
 /// Build all the native programs and the native host runner. Optional flag to build the zkVM
 /// programs.
 pub fn build_all() {
-    let programs = vec!["fault-proof", "range"];
-
-    for program in programs {
-        // Note: Don't comment this out, because the Docker program depends on the native program
-        // for range being built.
-        build_native_program(program);
-        // build_zkvm_program(program);
-    }
+    // Build range program.
+    build_native_program("range");
+    // build_zkvm_program("range");
+
+    // Build aggregation program.
     // build_zkvm_program("aggregation");
+    // Build dummy range program.
+    // build_zkvm_program("dummy-range");
 
     // Note: Don't comment this out, because the Docker program depends on the native host runner
     // being built.
     build_native_host_runner();
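The test-suite artifacts written by gen_sp1_test_artifacts.rs above can be replayed by loading program.bin and stdin.bin back from an artifact directory. A minimal sketch, assuming SP1Stdin's serde implementations round-trip through bincode (the generator serializes it the same way); `replay_artifact` is a hypothetical helper:

use anyhow::Result;
use sp1_sdk::{ProverClient, SP1Stdin};
use std::{fs, path::Path};

// Re-run one artifact directory produced by gen_sp1_test_artifacts.rs.
fn replay_artifact(dir: &Path) -> Result<()> {
    let elf = fs::read(dir.join("program.bin"))?;
    let stdin: SP1Stdin = bincode::deserialize(&fs::read(dir.join("stdin.bin"))?)?;
    let prover = ProverClient::new();
    let (_, report) = prover.execute(&elf, stdin).run()?;
    println!("total instruction count: {}", report.total_instruction_count());
    Ok(())
}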