453 changes: 318 additions & 135 deletions Cargo.lock

Large diffs are not rendered by default.

7 changes: 4 additions & 3 deletions Cargo.toml
@@ -17,9 +17,10 @@ repository = "https://github.com/scroll-tech/scroll"
version = "4.7.1"

[workspace.dependencies]
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
# with openvm 1.4.2
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2e8e29f" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2e8e29f" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", rev = "2e8e29f" }

sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
19 changes: 2 additions & 17 deletions crates/libzkp/src/lib.rs
@@ -13,22 +13,8 @@ use serde_json::value::RawValue;
use std::{collections::HashMap, path::Path, sync::OnceLock};
use tasks::chunk_interpreter::{ChunkInterpreter, TryFromWithInterpreter};

pub(crate) fn witness_use_legacy_mode(fork_name: &str) -> eyre::Result<bool> {
ADDITIONAL_FEATURES
.get()
.and_then(|features| features.get(fork_name))
.map(|cfg| cfg.legacy_witness_encoding)
.ok_or_else(|| {
eyre::eyre!(
"can not find features setting for unrecognized fork {}",
fork_name
)
})
}

#[derive(Debug, Default, Clone)]
struct FeatureOptions {
legacy_witness_encoding: bool,
for_openvm_13_prover: bool,
}

@@ -41,11 +27,10 @@ impl FeatureOptions {
for feat_s in feats.split(':') {
match feat_s.trim().to_lowercase().as_str() {
"legacy_witness" => {
tracing::info!("set witness encoding for legacy mode");
ret.legacy_witness_encoding = true;
tracing::warn!("legacy witness is no longer supported");
}
"openvm_13" => {
tracing::info!("set prover should use openvm 13");
tracing::warn!("set prover should use openvm 13");
ret.for_openvm_13_prover = true;
}
s => tracing::warn!("unrecognized dynamic feature: {s}"),
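The feature string is a colon-separated list parsed case-insensitively. A minimal standalone sketch of the same parsing logic, with the surrounding plumbing (function name, eprintln! in place of tracing) assumed for illustration:

#[derive(Debug, Default, Clone)]
struct FeatureOptions {
    for_openvm_13_prover: bool,
}

impl FeatureOptions {
    /// Parse a colon-separated feature string such as "legacy_witness:openvm_13".
    fn from_feature_string(feats: &str) -> Self {
        let mut ret = Self::default();
        for feat_s in feats.split(':') {
            match feat_s.trim().to_lowercase().as_str() {
                // legacy witness encoding was removed in this PR; the flag
                // is still accepted but only produces a warning
                "legacy_witness" => eprintln!("legacy witness is no longer supported"),
                "openvm_13" => ret.for_openvm_13_prover = true,
                s => eprintln!("unrecognized dynamic feature: {s}"),
            }
        }
        ret
    }
}

fn main() {
    assert!(FeatureOptions::from_feature_string("openvm_13").for_openvm_13_prover);
}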
11 changes: 2 additions & 9 deletions crates/libzkp/src/tasks/batch.rs
@@ -4,13 +4,11 @@ use sbv_primitives::{B256, U256};
use scroll_zkvm_types::{
batch::{
build_point_eval_witness, BatchHeader, BatchHeaderV6, BatchHeaderV7, BatchHeaderValidium,
BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, LegacyBatchWitness,
ReferenceHeader, N_BLOB_BYTES,
BatchInfo, BatchWitness, Envelope, EnvelopeV6, EnvelopeV7, ReferenceHeader, N_BLOB_BYTES,
},
chunk::ChunkInfo,
public_inputs::{ForkName, MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
version::{Codec, Domain, STFVersion},
};

@@ -118,12 +116,7 @@ pub struct BatchProvingTask {
impl BatchProvingTask {
pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, BatchInfo, B256)> {
let (witness, metadata, batch_pi_hash) = self.precheck()?;
let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
let legacy_witness = LegacyBatchWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};
let serialized_witness = super::encode_task_to_witness(&witness)?;

let proving_task = ProvingTask {
identifier: self.batch_header.batch_hash().to_string(),
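The same simplification repeats in bundle.rs and chunk.rs below: every task type now funnels through a single encoding helper. The helper's body is not part of this diff; a plausible sketch, assuming it kept the rkyv path that the removed legacy branch used:

use scroll_zkvm_types::{
    batch::BatchWitness,
    utils::{to_rkyv_bytes, RancorError},
};

// Hypothetical reconstruction for illustration only: rkyv-serialize the
// current (non-legacy) witness into the bytes handed to ProvingTask.
fn encode_batch_witness(witness: &BatchWitness) -> eyre::Result<Vec<u8>> {
    Ok(to_rkyv_bytes::<RancorError>(witness)?.into_vec())
}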
10 changes: 2 additions & 8 deletions crates/libzkp/src/tasks/bundle.rs
@@ -1,10 +1,9 @@
use eyre::Result;
use sbv_primitives::B256;
use scroll_zkvm_types::{
bundle::{BundleInfo, BundleWitness, LegacyBundleWitness},
bundle::{BundleInfo, BundleWitness},
public_inputs::{MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};

use crate::proofs::BatchProof;
@@ -27,12 +26,7 @@ pub struct BundleProvingTask {
impl BundleProvingTask {
pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, BundleInfo, B256)> {
let (witness, bundle_info, bundle_pi_hash) = self.precheck()?;
let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
let legacy = LegacyBundleWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};
let serialized_witness = super::encode_task_to_witness(&witness)?;

let proving_task = ProvingTask {
identifier: self.identifier(),
10 changes: 2 additions & 8 deletions crates/libzkp/src/tasks/chunk.rs
@@ -2,10 +2,9 @@ use eyre::Result;
use sbv_core::BlockWitness;
use sbv_primitives::{types::consensus::BlockHeader, B256};
use scroll_zkvm_types::{
chunk::{execute, ChunkInfo, ChunkWitness, LegacyChunkWitness, ValidiumInputs},
chunk::{execute, ChunkInfo, ChunkWitness, ValidiumInputs},
public_inputs::{MultiVersionPublicInputs, Version},
task::ProvingTask,
utils::{to_rkyv_bytes, RancorError},
};

use super::chunk_interpreter::*;
@@ -117,12 +116,7 @@ impl ChunkProvingTask {

pub fn into_proving_task_with_precheck(self) -> Result<(ProvingTask, ChunkInfo, B256)> {
let (witness, chunk_info, chunk_pi_hash) = self.precheck()?;
let serialized_witness = if crate::witness_use_legacy_mode(&self.fork_name)? {
let legacy_witness = LegacyChunkWitness::from(witness);
to_rkyv_bytes::<RancorError>(&legacy_witness)?.into_vec()
} else {
super::encode_task_to_witness(&witness)?
};
let serialized_witness = super::encode_task_to_witness(&witness)?;

let proving_task = ProvingTask {
identifier: self.identifier(),
3 changes: 2 additions & 1 deletion crates/prover-bin/Cargo.toml
@@ -9,7 +9,7 @@ edition.workspace = true
scroll-zkvm-types.workspace = true
scroll-zkvm-prover.workspace = true
libzkp = { path = "../libzkp"}
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "05648db" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", rev = "22ad34e" }
serde.workspace = true
serde_json.workspace = true
once_cell.workspace =true
@@ -34,6 +34,7 @@ clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"
url = { version = "2.5.4", features = ["serde",] }
serde_bytes = "0.11.15"
bincode = { version = "2.0", features = ["serde",] }

[features]
default = []
81 changes: 81 additions & 0 deletions crates/prover-bin/src/dumper.rs
@@ -0,0 +1,81 @@
use async_trait::async_trait;
use libzkp::ProvingTaskExt;
use scroll_proving_sdk::prover::{
proving_service::{
GetVkRequest, GetVkResponse, ProveRequest, ProveResponse, QueryTaskRequest,
QueryTaskResponse, TaskStatus,
},
ProvingService,
};
use scroll_zkvm_types::ProvingTask;

#[derive(Default)]
pub struct Dumper {
#[allow(dead_code)]
target_path: String,
}
Comment on lines 12 to 17

⚠️ Potential issue | 🟠 Major

Unused target_path field while output paths are hardcoded.

The target_path field is marked as dead code and never used. However, the dump() method writes to hardcoded file paths "input_task.bin" and "agg_proofs.bin" in the current working directory (lines 24 and 33). This appears to be an incomplete implementation.

Consider using target_path to control the output directory:

🔎 Suggested fix to use `target_path` for output files:
 impl Dumper {
     fn dump(&self, input_string: &str) -> eyre::Result<()> {
         let task: ProvingTaskExt = serde_json::from_str(input_string)?;
         let task = ProvingTask::from(task);
 
         // stream-encode serialized_witness to input_task.bin using bincode 2.0
-        let input_file = std::fs::File::create("input_task.bin")?;
+        let input_path = std::path::Path::new(&self.target_path).join("input_task.bin");
+        let input_file = std::fs::File::create(input_path)?;
         let mut input_writer = std::io::BufWriter::new(input_file);
         bincode::serde::encode_into_std_write(
             &task.serialized_witness,
             &mut input_writer,
             bincode::config::standard(),
         )?;
 
         // stream-encode aggregated_proofs to agg_proofs.bin using bincode 2.0
-        let agg_file = std::fs::File::create("agg_proofs.bin")?;
+        let agg_path = std::path::Path::new(&self.target_path).join("agg_proofs.bin");
+        let agg_file = std::fs::File::create(agg_path)?;
         let mut agg_writer = std::io::BufWriter::new(agg_file);
         bincode::serde::encode_into_std_write(
             &task.aggregated_proofs,
             &mut agg_writer,
             bincode::config::standard(),
         )?;
 
         Ok(())
     }
 }

And remove the #[allow(dead_code)] attribute:

 #[derive(Default)]
 pub struct Dumper {
-    #[allow(dead_code)]
     target_path: String,
 }
📝 Committable suggestion


Suggested change
#[derive(Default)]
pub struct Dumper {
#[allow(dead_code)]
target_path: String,
}
#[derive(Default)]
pub struct Dumper {
target_path: String,
}


impl Dumper {
fn dump(&self, input_string: &str) -> eyre::Result<()> {
let task: ProvingTaskExt = serde_json::from_str(input_string)?;
let task = ProvingTask::from(task);

// stream-encode serialized_witness to input_task.bin using bincode 2.0
let input_file = std::fs::File::create("input_task.bin")?;
let mut input_writer = std::io::BufWriter::new(input_file);
bincode::encode_into_std_write(

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Scripts executed against scroll-tech/scroll (outputs omitted for brevity):

find . -name "dumper.rs" -type f 2>/dev/null | head -20
cat -n ./crates/prover-bin/src/dumper.rs | head -60
# Search for ProvingTask struct definition to understand field types
rg -A 20 "struct ProvingTask" --type rust
# Find where ProvingTask is defined (it's imported from scroll_zkvm_types)
rg "pub struct ProvingTask" --type rust -A 15
# Find ProvingTask definition in scroll_zkvm_types crate
find . -name "*.rs" -type f -exec grep -l "pub struct ProvingTask" {} \; | head -5
# Check the import statement and look for where ProvingTask comes from
rg "use.*ProvingTask|from scroll_zkvm_types" crates/prover-bin/src/dumper.rs
# Find scroll_zkvm_types crate and its ProvingTask definition
find . -path "*/scroll_zkvm_types/*" -name "*.rs" -type f | head -10
# Check Cargo.toml for prover-bin to understand bincode configuration
cat crates/prover-bin/Cargo.toml
# Check if scroll_zkvm_types is a workspace member
rg "scroll.zkvm.types|scroll-zkvm-types" workspace.members -A 2 -B 2 2>/dev/null || cat Cargo.toml | grep -A 50 "\[workspace\]"
# Search for deserialization or loading code for these binary files
rg "aggregated_proofs|serialized_witness" crates/ -B 2 -A 2 --type rust | head -60
# Check what types serialized_witness and proof.proofs are
rg "struct.*Proof|serialized_witness.*:|proofs.*:" crates/libzkp/src/ --type rust -B 1 -A 3 | head -80
# Look for deserialization code that reads these files back
rg "decode_from_std_read|decode_from" crates/ --type rust -B 2 -A 2 | head -60
# Search for any code that reads input_task.bin or agg_proofs.bin files
rg "input_task\.bin|agg_proofs\.bin" crates/ --type rust -B 3 -A 3

Clarify the bincode API usage and fix the loop encoding pattern.

Line 26 uses bincode::encode_into_std_write for serialized_witness (a native Rust Vec type), while line 36 uses bincode::serde::encode_into_std_write for proof.proofs (a serde type). This API split is intentional and correct based on trait implementations.

However, lines 35-41 encode each proof individually in a loop, concatenating multiple bincode values to agg_proofs.bin. Without a length prefix or container wrapper, deserialization cannot reliably determine where one proof ends and the next begins. Either encode the entire aggregated_proofs collection at once, or add explicit length prefixes for each proof.


&task.serialized_witness,
&mut input_writer,
bincode::config::standard(),
)?;

// stream-encode aggregated_proofs to agg_proofs.bin using bincode 2.0
let agg_file = std::fs::File::create("agg_proofs.bin")?;
let mut agg_writer = std::io::BufWriter::new(agg_file);
for proof in &task.aggregated_proofs {
bincode::serde::encode_into_std_write(
&proof.proofs,
&mut agg_writer,
bincode::config::standard(),
)?;
}

⚠️ Potential issue | 🔴 Critical

Loop encodes individual proofs, not the aggregated collection.

The code encodes each proof.proofs separately in a loop, concatenating multiple encoded values into agg_proofs.bin. This pattern makes deserialization ambiguous—a reader won't know how many proofs to decode without additional metadata.

The comment "stream-encode aggregated_proofs" suggests encoding the entire collection once. Encoding individual items without length prefixes or delimiters will likely cause deserialization failures.

🔎 Consider encoding the entire collection at once:
 // stream-encode aggregated_proofs to agg_proofs.bin using bincode 2.0
 let agg_file = std::fs::File::create("agg_proofs.bin")?;
 let mut agg_writer = std::io::BufWriter::new(agg_file);
-for proof in &task.aggregated_proofs {
-    bincode::serde::encode_into_std_write(
-        &proof.proofs,
-        &mut agg_writer,
-        bincode::config::standard(),
-    )?;
-}
+bincode::serde::encode_into_std_write(
+    &task.aggregated_proofs,
+    &mut agg_writer,
+    bincode::config::standard(),
+)?;

Alternatively, if individual encoding is required, prefix each entry with its length or use a proper container format.
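A minimal sketch of that length-prefix alternative, assuming each element implements serde Serialize/Deserialize (the helper names are illustrative):

use std::io::{Read, Write};

fn write_prefixed<T: serde::Serialize>(item: &T, w: &mut impl Write) -> eyre::Result<()> {
    let bytes = bincode::serde::encode_to_vec(item, bincode::config::standard())?;
    w.write_all(&(bytes.len() as u64).to_le_bytes())?; // 8-byte little-endian length prefix
    w.write_all(&bytes)?;
    Ok(())
}

fn read_prefixed<T: serde::de::DeserializeOwned>(r: &mut impl Read) -> eyre::Result<T> {
    let mut len_buf = [0u8; 8];
    r.read_exact(&mut len_buf)?; // read the length prefix first
    let mut buf = vec![0u8; u64::from_le_bytes(len_buf) as usize];
    r.read_exact(&mut buf)?;
    let (item, _) = bincode::serde::decode_from_slice(&buf, bincode::config::standard())?;
    Ok(item)
}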

📝 Committable suggestion


Suggested change
for proof in &task.aggregated_proofs {
bincode::serde::encode_into_std_write(
&proof.proofs,
&mut agg_writer,
bincode::config::standard(),
)?;
}
bincode::serde::encode_into_std_write(
&task.aggregated_proofs,
&mut agg_writer,
bincode::config::standard(),
)?;


Ok(())
}
}

#[async_trait]
impl ProvingService for Dumper {
fn is_local(&self) -> bool {
true
}
async fn get_vks(&self, _: GetVkRequest) -> GetVkResponse {
// get_vk has been deprecated in the new prover's dynamic asset loading scheme
GetVkResponse {
vks: vec![],
error: None,
}
}
async fn prove(&mut self, req: ProveRequest) -> ProveResponse {
let error = if let Err(e) = self.dump(&req.input) {
Some(format!("failed to dump: {}", e))
} else {
None
};

ProveResponse {
status: TaskStatus::Failed,
error,
..Default::default()
}
}

async fn query_task(&mut self, req: QueryTaskRequest) -> QueryTaskResponse {
QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some("dump file finished but need a fail return to exit".to_string()),
..Default::default()
}
}
}
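Reading the dump back is the mirror image of dump(). A sketch of a loader, assuming the whole-collection encoding recommended above and treating each proof as an opaque byte blob (the real element type lives in scroll_zkvm_types):

use std::io::BufReader;

fn load_dump() -> eyre::Result<(Vec<u8>, Vec<Vec<u8>>)> {
    // serialized_witness was written with the native bincode encoder
    let mut w = BufReader::new(std::fs::File::open("input_task.bin")?);
    let witness: Vec<u8> = bincode::decode_from_std_read(&mut w, bincode::config::standard())?;

    // aggregated proofs were written with the serde encoder, as one collection
    let mut a = BufReader::new(std::fs::File::open("agg_proofs.bin")?);
    let proofs: Vec<Vec<u8>> =
        bincode::serde::decode_from_std_read(&mut a, bincode::config::standard())?;

    Ok((witness, proofs))
}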
34 changes: 33 additions & 1 deletion crates/prover-bin/src/main.rs
@@ -1,8 +1,9 @@
mod dumper;
mod prover;
mod types;
mod zk_circuits_handler;

use clap::{ArgAction, Parser, Subcommand};
use clap::{ArgAction, Parser, Subcommand, ValueEnum};
use prover::{LocalProver, LocalProverConfig};
use scroll_proving_sdk::{
prover::{types::ProofType, ProverBuilder},
@@ -32,12 +33,33 @@ struct Args {
command: Option<Commands>,
}

#[derive(Clone, Debug, PartialEq, Eq, ValueEnum)]
enum TaskType {
Chunk,
Batch,
Bundle,
}

impl From<TaskType> for ProofType {
fn from(value: TaskType) -> Self {
match value {
TaskType::Chunk => ProofType::Chunk,
TaskType::Batch => ProofType::Batch,
TaskType::Bundle => ProofType::Bundle,
}
}
}

#[derive(Subcommand, Debug)]
enum Commands {
Handle {
/// path to save the verifier's asset
task_path: String,
},
Dump {
task_type: TaskType,
task_id: String,
},
}

#[derive(Debug, serde::Deserialize)]
@@ -63,6 +85,16 @@ async fn main() -> eyre::Result<()> {
let local_prover = LocalProver::new(cfg.clone());

match args.command {
Some(Commands::Dump { task_type, task_id }) => {
let prover = ProverBuilder::new(sdk_config, dumper::Dumper::default())
.build()
.await
.map_err(|e| eyre::eyre!("build prover fail: {e}"))?;

std::sync::Arc::new(prover)
.one_shot(&[task_id], task_type.into())
.await;
}
Some(Commands::Handle { task_path }) => {
let file = File::open(Path::new(&task_path))?;
let reader = BufReader::new(file);
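A quick check of the new subcommand wiring, assuming the binary name `prover` (illustrative) and clap's derive conventions, under which the variant names lowercase to `dump` / `chunk`:

#[test]
fn parses_dump_subcommand() {
    use clap::Parser;
    // Args, Commands and TaskType are the types defined in main.rs above
    let args = Args::try_parse_from(["prover", "dump", "chunk", "task-123"]).unwrap();
    match args.command {
        Some(Commands::Dump { task_type, task_id }) => {
            assert_eq!(task_type, TaskType::Chunk);
            assert_eq!(task_id, "task-123");
        }
        other => panic!("expected Dump, got {other:?}"),
    }
}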
39 changes: 28 additions & 11 deletions crates/prover-bin/src/prover.rs
@@ -30,6 +30,9 @@ pub struct AssetsLocationData {
#[serde(default)]
/// an alternate url for the specified vk
pub asset_detours: HashMap<String, url::Url>,
/// when the asset file already exists, do not verify it against the network; useful for debugging
#[serde(default)]
pub debug_mode: bool,
}

impl AssetsLocationData {
@@ -79,6 +82,13 @@ impl AssetsLocationData {
// Get file metadata to check size
if let Ok(metadata) = std::fs::metadata(&local_file_path) {
// Make a HEAD request to get remote file size
if self.debug_mode {
println!(
"File {} already exists, skipping download under debugmode",
filename
);
continue;
}

if let Ok(head_resp) = client.head(download_url.clone()).send().await {
if let Some(content_length) = head_resp.headers().get("content-length") {
@@ -201,12 +211,20 @@ impl ProvingService for LocalProver {
error: Some(format!("proving task failed: {}", e)),
..Default::default()
},
Err(e) => QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task panicked: {}", e)),
..Default::default()
},
Err(e) => {
if e.is_panic() {
// simply re-throw the panic from the proving process, causing the
// worker loop and the whole prover to exit
std::panic::resume_unwind(e.into_panic());
}

QueryTaskResponse {
task_id: req.task_id,
status: TaskStatus::Failed,
error: Some(format!("proving task failed: {}", e)),
..Default::default()
}
}
};
} else {
return QueryTaskResponse {
@@ -273,7 +291,9 @@ impl LocalProver {
let created_at = duration.as_secs() as f64 + duration.subsec_nanos() as f64 * 1e-9;

let prover_task = UniversalHandler::get_task_from_input(&req.input)?;
let is_openvm_13 = prover_task.use_openvm_13;
if prover_task.use_openvm_13 {
eyre::bail!("prover do not support snark params base on openvm 13");
}
let prover_task: ProvingTask = prover_task.into();
let vk = hex::encode(&prover_task.vk);
let handler = if let Some(handler) = self.handlers.get(&vk) {
@@ -300,10 +320,7 @@ .location_data
.location_data
.get_asset(&vk, &url_base, &base_config.workspace_path)
.await?;
let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(
&asset_path,
is_openvm_13,
)?));
let circuits_handler = Arc::new(Mutex::new(UniversalHandler::new(&asset_path)?));
self.handlers.insert(vk, circuits_handler.clone());
circuits_handler
};
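The new Err arm distinguishes task panics from ordinary failures via tokio's JoinError. A standalone sketch of the same pattern, independent of the prover types:

#[tokio::main]
async fn main() {
    let handle = tokio::task::spawn_blocking(|| -> Result<(), String> {
        panic!("boom") // simulate a panic inside the proving task
    });

    match handle.await {
        Ok(Ok(())) => println!("proof done"),
        Ok(Err(e)) => println!("proving task failed: {e}"), // recoverable failure
        Err(e) if e.is_panic() => {
            // re-throw the panic so the whole process exits rather than
            // reporting the task as merely failed
            std::panic::resume_unwind(e.into_panic());
        }
        Err(e) => println!("task was cancelled: {e}"),
    }
}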
5 changes: 2 additions & 3 deletions crates/prover-bin/src/zk_circuits_handler/universal.rs
@@ -16,15 +16,14 @@ pub struct UniversalHandler {
unsafe impl Send for UniversalHandler {}

impl UniversalHandler {
pub fn new(workspace_path: impl AsRef<Path>, is_openvm_v13: bool) -> Result<Self> {
pub fn new(workspace_path: impl AsRef<Path>) -> Result<Self> {
let path_app_exe = workspace_path.as_ref().join("app.vmexe");
let path_app_config = workspace_path.as_ref().join("openvm.toml");
let segment_len = Some((1 << 21) - 100);
let segment_len = Some((1 << 22) - 100);
let config = ProverConfig {
path_app_config,
path_app_exe,
segment_len,
is_openvm_v13,
};

let prover = Prover::setup(config, None)?;
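Besides dropping the is_openvm_v13 flag, this hunk doubles the segment cap. Assuming segment_len counts VM cycles per continuation segment, the change works out to:

// old: (1 << 21) - 100 = 2_097_052 cycles
// new: (1 << 22) - 100 = 4_194_204 cycles
let segment_len = Some((1 << 22) - 100);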
3 changes: 3 additions & 0 deletions tests/prover-e2e/mainnet-galileo/.make.env
@@ -0,0 +1,3 @@
BEGIN_BLOCK?=26653680
END_BLOCK?=26653686
SCROLL_FORK_NAME=galileo