Skip to content

Commit ad870d8

Browse files
authored Dec 14, 2024
chore: error handling (#280)
* feat: better proposer error handling * feat: fix proposer witnessgen timeout * add
1 parent f486cae commit ad870d8

File tree

7 files changed

+155
-112
lines changed

7 files changed

+155
-112
lines changed
 

‎proposer/op/go.sum

+6
Original file line numberDiff line numberDiff line change
@@ -328,6 +328,10 @@ github.com/slack-go/slack v0.14.0 h1:6c0UTfbRnvRssZUsZ2qe0Iu07VAMPjRqOa6oX8ewF4k
328328
github.com/slack-go/slack v0.14.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
329329
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
330330
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
331+
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
332+
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
333+
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
334+
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
331335
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
332336
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
333337
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -443,6 +447,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
443447
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
444448
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
445449
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
450+
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
451+
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
446452
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
447453
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
448454
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

‎proposer/op/proposer/db/db.go

+3-44
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ func InitDB(dbPath string, useCachedDb bool) (*ProofDB, error) {
3838
}
3939

4040
// Use the TL;DR SQLite settings from https://kerkour.com/sqlite-for-servers.
41-
connectionUrl := fmt.Sprintf("file:%s?_fk=1&journal_mode=WAL&synchronous=normal&cache_size=100000000&busy_timeout=15000&_txlock=immediate", dbPath)
41+
connectionUrl := fmt.Sprintf("file:%s?_fk=1&journal_mode=WAL&synchronous=normal&cache_size=100000000&busy_timeout=30000&_txlock=immediate", dbPath)
4242

4343
writeDrv, err := sql.Open("sqlite3", connectionUrl)
4444
if err != nil {
@@ -48,15 +48,15 @@ func InitDB(dbPath string, useCachedDb bool) (*ProofDB, error) {
4848

4949
// The write lock only allows one connection to the DB at a time.
5050
writeDb.SetMaxOpenConns(1)
51-
writeDb.SetConnMaxLifetime(time.Hour)
51+
writeDb.SetConnMaxLifetime(10 * time.Minute)
5252

5353
readDrv, err := sql.Open("sqlite3", connectionUrl)
5454
if err != nil {
5555
return nil, fmt.Errorf("failed opening connection to sqlite: %v", err)
5656
}
5757
readDb := readDrv.DB()
5858
readDb.SetMaxOpenConns(max(4, runtime.NumCPU()/4))
59-
readDb.SetConnMaxLifetime(time.Hour)
59+
readDb.SetConnMaxLifetime(10 * time.Minute)
6060

6161
readClient := ent.NewClient(ent.Driver(readDrv))
6262
writeClient := ent.NewClient(ent.Driver(writeDrv))
@@ -256,47 +256,6 @@ func (db *ProofDB) GetLatestEndBlock() (uint64, error) {
256256
return uint64(maxEnd.EndBlock), nil
257257
}
258258

259-
// When restarting the L2OutputSubmitter, some proofs may have been left in a "requested" state without a prover request ID on the server. Until we
260-
// implement a mechanism for querying the status of the witness generation, we need to time out these proofs after a period of time so they can be requested.
261-
func (db *ProofDB) GetWitnessGenerationTimeoutProofsOnServer() ([]*ent.ProofRequest, error) {
262-
currentTime := time.Now().Unix()
263-
twentyMinutesAgo := currentTime - 20*60
264-
265-
proofs, err := db.readClient.ProofRequest.Query().
266-
Where(
267-
proofrequest.StatusEQ(proofrequest.StatusWITNESSGEN),
268-
proofrequest.ProverRequestIDIsNil(),
269-
proofrequest.LastUpdatedTimeLT(uint64(twentyMinutesAgo)),
270-
).
271-
All(context.Background())
272-
273-
if err != nil {
274-
return nil, fmt.Errorf("failed to query witness generation timeout proofs: %w", err)
275-
}
276-
277-
return proofs, nil
278-
}
279-
280-
// If a proof failed to be sent to the prover network, its status will be set to FAILED, but the prover request ID will be empty.
281-
// This function returns all such proofs.
282-
func (db *ProofDB) GetProofsFailedOnServer() ([]*ent.ProofRequest, error) {
283-
proofs, err := db.readClient.ProofRequest.Query().
284-
Where(
285-
proofrequest.StatusEQ(proofrequest.StatusFAILED),
286-
proofrequest.ProverRequestIDEQ(""),
287-
).
288-
All(context.Background())
289-
290-
if err != nil {
291-
if ent.IsNotFound(err) {
292-
return nil, nil
293-
}
294-
return nil, fmt.Errorf("failed to query failed proof: %w", err)
295-
}
296-
297-
return proofs, nil
298-
}
299-
300259
// GetAllProofsWithStatus returns all proofs with the given status.
301260
func (db *ProofDB) GetAllProofsWithStatus(status proofrequest.Status) ([]*ent.ProofRequest, error) {
302261
proofs, err := db.readClient.ProofRequest.Query().

‎proposer/op/proposer/db/ent/migrate/schema.go

+5
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

‎proposer/op/proposer/driver.go

+13-4
Original file line numberDiff line numberDiff line change
@@ -639,13 +639,22 @@ func (l *L2OutputSubmitter) loopL2OO(ctx context.Context) {
639639
continue
640640
}
641641

642-
// 2) Check the statuses of all requested proofs.
642+
// 2) Check the statuses of PROVING requests.
643643
// If it's successfully returned, we validate that we have it on disk and set status = "COMPLETE".
644644
// If it fails or times out, we set status = "FAILED" (and, if it's a span proof, split the request in half to try again).
645-
l.Log.Info("Stage 2: Processing Pending Proofs...")
646-
err = l.ProcessPendingProofs()
645+
l.Log.Info("Stage 2: Processing PROVING requests...")
646+
err = l.ProcessProvingRequests()
647647
if err != nil {
648-
l.Log.Error("failed to update requested proofs", "err", err)
648+
l.Log.Error("failed to update PROVING requests", "err", err)
649+
continue
650+
}
651+
652+
// 3) Check the statuses of WITNESSGEN requests.
653+
// If the witness generation request has been in the WITNESSGEN state for longer than the timeout, set status to FAILED and retry.
654+
l.Log.Info("Stage 3: Processing WITNESSGEN requests...")
655+
err = l.ProcessWitnessgenRequests()
656+
if err != nil {
657+
l.Log.Error("failed to update WITNESSGEN requests", "err", err)
649658
continue
650659
}
651660

‎proposer/op/proposer/prove.go

+24-5
Original file line numberDiff line numberDiff line change
@@ -17,14 +17,14 @@ import (
1717
)
1818

1919
const PROOF_STATUS_TIMEOUT = 30 * time.Second
20-
const WITNESS_GEN_TIMEOUT = 20 * time.Minute
20+
const WITNESSGEN_TIMEOUT = 20 * time.Minute
2121

2222
// This limit is set to prevent overloading the witness generation server. Until Kona improves their native I/O API (https://github.com/anton-rs/kona/issues/553)
2323
// the maximum number of concurrent witness generation requests is roughly num_cpu / 2. Set it to 5 for now to be safe.
2424
const MAX_CONCURRENT_WITNESS_GEN = 5
2525

26-
// Process all of the pending proofs.
27-
func (l *L2OutputSubmitter) ProcessPendingProofs() error {
26+
// Process all requests in the PROVING state.
27+
func (l *L2OutputSubmitter) ProcessProvingRequests() error {
2828
// Get all proof requests that are currently in the PROVING state.
2929
reqs, err := l.db.GetAllProofsWithStatus(proofrequest.StatusPROVING)
3030
if err != nil {
@@ -65,6 +65,25 @@ func (l *L2OutputSubmitter) ProcessPendingProofs() error {
6565
return nil
6666
}
6767

68+
// Process all requests in the WITNESSGEN state.
69+
func (l *L2OutputSubmitter) ProcessWitnessgenRequests() error {
70+
// Get all proof requests that are currently in the WITNESSGEN state.
71+
reqs, err := l.db.GetAllProofsWithStatus(proofrequest.StatusWITNESSGEN)
72+
if err != nil {
73+
return err
74+
}
75+
for _, req := range reqs {
76+
// If the request has been in the WITNESSGEN state for longer than the timeout, set status to FAILED.
77+
// This is a catch-all in case the witness generation state update failed.
78+
if req.LastUpdatedTime+uint64(WITNESSGEN_TIMEOUT.Seconds()) < uint64(time.Now().Unix()) {
79+
// Retry the request if it timed out.
80+
l.RetryRequest(req, ProofStatusResponse{})
81+
}
82+
}
83+
84+
return nil
85+
}
86+
6887
// Retry a proof request. Sets the status of a proof to FAILED and retries the proof based on the optional proof status response.
6988
// If an error response is received:
7089
// - Range Proof: Split in two if the block range is > 1. Retry the same request if range is 1 block.
@@ -297,13 +316,13 @@ func (l *L2OutputSubmitter) makeProofRequest(proofType proofrequest.Type, jsonBo
297316
}
298317
req.Header.Set("Content-Type", "application/json")
299318

300-
client := &http.Client{Timeout: WITNESS_GEN_TIMEOUT}
319+
client := &http.Client{Timeout: WITNESSGEN_TIMEOUT}
301320
resp, err := client.Do(req)
302321
if err != nil {
303322
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
304323
l.Log.Error("Witness generation request timed out", "err", err)
305324
l.Metr.RecordWitnessGenFailure("Timeout")
306-
return nil, fmt.Errorf("request timed out after %s: %w", WITNESS_GEN_TIMEOUT, err)
325+
return nil, fmt.Errorf("request timed out after %s: %w", WITNESSGEN_TIMEOUT, err)
307326
}
308327
return nil, fmt.Errorf("failed to send request: %w", err)
309328
}

‎proposer/op/proposer/range.go

-15
Original file line numberDiff line numberDiff line change
@@ -187,21 +187,6 @@ func (l *L2OutputSubmitter) GetRangeProofBoundaries(ctx context.Context) error {
187187

188188
spans := l.SplitRangeBasic(newL2StartBlock, newL2EndBlock)
189189

190-
// // Check if the safeDB is activated on the L2 node. If it is, we use the safeHead based range
191-
// // splitting algorithm. Otherwise, we use the simple range splitting algorithm.
192-
// safeDBActivated, err := l.isSafeDBActivated(ctx, rollupClient)
193-
// if err != nil {
194-
// l.Log.Warn("safeDB is not activated. Using simple range splitting algorithm.", "err", err)
195-
// }
196-
// if safeDBActivated {
197-
// safeHeadSpans, err := l.SplitRangeBasedOnSafeHeads(ctx, newL2StartBlock, newL2EndBlock)
198-
// if err == nil {
199-
// spans = safeHeadSpans
200-
// } else {
201-
// l.Log.Warn("failed to split range based on safe heads, using basic range splitting", "err", err)
202-
// }
203-
// }
204-
205190
// Add each span to the DB. If there are no spans, we will not create any proofs.
206191
for _, span := range spans {
207192
err := l.db.NewEntry(proofrequest.TypeSPAN, span.Start, span.End)

‎proposer/succinct/bin/server.rs

+104-44
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ use axum::{
77
routing::{get, post},
88
Json, Router,
99
};
10-
use log::info;
10+
use log::{error, info};
1111
use op_succinct_client_utils::{
1212
boot::{hash_rollup_config, BootInfoStruct},
1313
types::u32_to_u8,
@@ -121,7 +121,13 @@ async fn request_span_proof(
121121
Json(payload): Json<SpanProofRequest>,
122122
) -> Result<(StatusCode, Json<ProofResponse>), AppError> {
123123
info!("Received span proof request: {:?}", payload);
124-
let fetcher = OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Docker).await?;
124+
let fetcher = match OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Docker).await {
125+
Ok(f) => f,
126+
Err(e) => {
127+
error!("Failed to create data fetcher: {}", e);
128+
return Err(AppError(e));
129+
}
130+
};
125131

126132
let host_cli = match fetcher
127133
.get_host_cli_args(
@@ -134,7 +140,7 @@ async fn request_span_proof(
134140
{
135141
Ok(cli) => cli,
136142
Err(e) => {
137-
log::error!("Failed to get host CLI args: {}", e);
143+
error!("Failed to get host CLI args: {}", e);
138144
return Err(AppError(anyhow::anyhow!(
139145
"Failed to get host CLI args: {}",
140146
e
@@ -147,15 +153,15 @@ async fn request_span_proof(
147153
// host, and return an ID that the client can poll on to check if the proof was submitted.
148154
let mut witnessgen_executor = WitnessGenExecutor::new(WITNESSGEN_TIMEOUT, RunContext::Docker);
149155
if let Err(e) = witnessgen_executor.spawn_witnessgen(&host_cli).await {
150-
log::error!("Failed to spawn witness generation: {}", e);
156+
error!("Failed to spawn witness generation: {}", e);
151157
return Err(AppError(anyhow::anyhow!(
152158
"Failed to spawn witness generation: {}",
153159
e
154160
)));
155161
}
156162
// Log any errors from running the witness generation process.
157163
if let Err(e) = witnessgen_executor.flush().await {
158-
log::error!("Failed to generate witness: {}", e);
164+
error!("Failed to generate witness: {}", e);
159165
return Err(AppError(anyhow::anyhow!(
160166
"Failed to generate witness: {}",
161167
e
@@ -165,7 +171,7 @@ async fn request_span_proof(
165171
let sp1_stdin = match get_proof_stdin(&host_cli) {
166172
Ok(stdin) => stdin,
167173
Err(e) => {
168-
log::error!("Failed to get proof stdin: {}", e);
174+
error!("Failed to get proof stdin: {}", e);
169175
return Err(AppError(anyhow::anyhow!(
170176
"Failed to get proof stdin: {}",
171177
e
@@ -176,7 +182,7 @@ async fn request_span_proof(
176182
let private_key = match env::var("SP1_PRIVATE_KEY") {
177183
Ok(private_key) => private_key,
178184
Err(e) => {
179-
log::error!("Failed to get SP1 private key: {}", e);
185+
error!("Failed to get SP1 private key: {}", e);
180186
return Err(AppError(anyhow::anyhow!(
181187
"Failed to get SP1 private key: {}",
182188
e
@@ -186,7 +192,7 @@ async fn request_span_proof(
186192
let rpc_url = match env::var("PROVER_NETWORK_RPC") {
187193
Ok(rpc_url) => rpc_url,
188194
Err(e) => {
189-
log::error!("Failed to get PROVER_NETWORK_RPC: {}", e);
195+
error!("Failed to get PROVER_NETWORK_RPC: {}", e);
190196
return Err(AppError(anyhow::anyhow!(
191197
"Failed to get PROVER_NETWORK_RPC: {}",
192198
e
@@ -205,7 +211,7 @@ async fn request_span_proof(
205211
{
206212
Ok(vk_hash) => vk_hash,
207213
Err(e) => {
208-
log::error!("Failed to register program: {}", e);
214+
error!("Failed to register program: {}", e);
209215
return Err(AppError(anyhow::anyhow!(
210216
"Failed to register program: {}",
211217
e
@@ -224,7 +230,7 @@ async fn request_span_proof(
224230
{
225231
Ok(proof_id) => proof_id,
226232
Err(e) => {
227-
log::error!("Failed to request proof: {}", e);
233+
error!("Failed to request proof: {}", e);
228234
return Err(AppError(anyhow::anyhow!("Failed to request proof: {}", e)));
229235
}
230236
};
@@ -263,14 +269,18 @@ async fn request_agg_proof(
263269
)?;
264270
let l1_head: [u8; 32] = l1_head_bytes.try_into().unwrap();
265271

266-
let fetcher = OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Docker).await?;
267-
let res = fetcher
272+
let fetcher = match OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Docker).await {
273+
Ok(f) => f,
274+
Err(e) => return Err(AppError(anyhow::anyhow!("Failed to create fetcher: {}", e))),
275+
};
276+
277+
let headers = match fetcher
268278
.get_header_preimages(&boot_infos, l1_head.into())
269-
.await;
270-
let headers = match res {
271-
Ok(headers) => headers,
279+
.await
280+
{
281+
Ok(h) => h,
272282
Err(e) => {
273-
log::error!("Failed to get header preimages: {}", e);
283+
error!("Failed to get header preimages: {}", e);
274284
return Err(AppError(anyhow::anyhow!(
275285
"Failed to get header preimages: {}",
276286
e
@@ -286,42 +296,39 @@ async fn request_agg_proof(
286296

287297
let stdin =
288298
match get_agg_proof_stdin(proofs, boot_infos, headers, &state.range_vk, l1_head.into()) {
289-
Ok(stdin) => stdin,
299+
Ok(s) => s,
290300
Err(e) => {
291-
log::error!("Failed to get agg proof stdin: {}", e);
301+
error!("Failed to get agg proof stdin: {}", e);
292302
return Err(AppError(anyhow::anyhow!(
293303
"Failed to get agg proof stdin: {}",
294304
e
295305
)));
296306
}
297307
};
298308

299-
let res = prover.register_program(&state.agg_vk, AGG_ELF).await;
300-
let vk_hash = match res {
309+
let vk_hash = match prover.register_program(&state.agg_vk, AGG_ELF).await {
301310
Ok(vk_hash) => vk_hash,
302311
Err(e) => {
303-
log::error!("Failed to register program: {}", e);
312+
error!("Failed to register program: {}", e);
304313
return Err(AppError(anyhow::anyhow!(
305314
"Failed to register program: {}",
306315
e
307316
)));
308317
}
309318
};
310-
let res = prover
319+
let proof_id = match prover
311320
.request_proof(
312321
&vk_hash,
313322
&stdin,
314323
ProofMode::Groth16,
315324
1_000_000_000_000,
316325
None,
317326
)
318-
.await;
319-
320-
// Check if error, otherwise get proof ID.
321-
let proof_id = match res {
322-
Ok(proof_id) => proof_id,
327+
.await
328+
{
329+
Ok(id) => id,
323330
Err(e) => {
324-
log::error!("Failed to request proof: {}", e);
331+
error!("Failed to request proof: {}", e);
325332
return Err(AppError(anyhow::anyhow!("Failed to request proof: {}", e)));
326333
}
327334
};
@@ -335,33 +342,54 @@ async fn request_mock_span_proof(
335342
Json(payload): Json<SpanProofRequest>,
336343
) -> Result<(StatusCode, Json<ProofStatus>), AppError> {
337344
info!("Received mock span proof request: {:?}", payload);
338-
let fetcher = OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Docker).await?;
345+
let fetcher = match OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Docker).await {
346+
Ok(f) => f,
347+
Err(e) => {
348+
error!("Failed to create data fetcher: {}", e);
349+
return Err(AppError(e));
350+
}
351+
};
339352

340-
let host_cli = fetcher
353+
let host_cli = match fetcher
341354
.get_host_cli_args(
342355
payload.start,
343356
payload.end,
344357
ProgramType::Multi,
345358
CacheMode::DeleteCache,
346359
)
347-
.await?;
360+
.await
361+
{
362+
Ok(cli) => cli,
363+
Err(e) => {
364+
error!("Failed to get host CLI args: {}", e);
365+
return Err(AppError(e));
366+
}
367+
};
348368

349369
// Start the server and native client with a timeout.
350370
// Note: Ideally, the server should call out to a separate process that executes the native
351371
// host, and return an ID that the client can poll on to check if the proof was submitted.
352372
let mut witnessgen_executor = WitnessGenExecutor::new(WITNESSGEN_TIMEOUT, RunContext::Docker);
353-
witnessgen_executor.spawn_witnessgen(&host_cli).await?;
373+
if let Err(e) = witnessgen_executor.spawn_witnessgen(&host_cli).await {
374+
error!("Failed to spawn witness generator: {}", e);
375+
return Err(AppError(e));
376+
}
354377
// Log any errors from running the witness generation process.
355-
let res = witnessgen_executor.flush().await;
356-
if let Err(e) = res {
357-
log::error!("Failed to generate witness: {}", e);
378+
if let Err(e) = witnessgen_executor.flush().await {
379+
error!("Failed to generate witness: {}", e);
358380
return Err(AppError(anyhow::anyhow!(
359381
"Failed to generate witness: {}",
360382
e
361383
)));
362384
}
363385

364-
let sp1_stdin = get_proof_stdin(&host_cli)?;
386+
let sp1_stdin = match get_proof_stdin(&host_cli) {
387+
Ok(stdin) => stdin,
388+
Err(e) => {
389+
error!("Failed to get proof stdin: {}", e);
390+
return Err(AppError(e));
391+
}
392+
};
365393

366394
let prover = ProverClient::mock();
367395
let proof = prover
@@ -404,30 +432,62 @@ async fn request_mock_agg_proof(
404432
.map(|proof| proof.proof.clone())
405433
.collect();
406434

407-
let l1_head_bytes = hex::decode(
435+
let l1_head_bytes = match hex::decode(
408436
payload
409437
.head
410438
.strip_prefix("0x")
411439
.expect("Invalid L1 head, no 0x prefix."),
412-
)?;
440+
) {
441+
Ok(bytes) => bytes,
442+
Err(e) => {
443+
error!("Failed to decode L1 head: {}", e);
444+
return Err(AppError(anyhow::anyhow!("Failed to decode L1 head: {}", e)));
445+
}
446+
};
413447
let l1_head: [u8; 32] = l1_head_bytes.try_into().unwrap();
414448

415-
let fetcher = OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Docker).await?;
416-
let headers = fetcher
449+
let fetcher = match OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Docker).await {
450+
Ok(f) => f,
451+
Err(e) => {
452+
error!("Failed to create data fetcher: {}", e);
453+
return Err(AppError(e));
454+
}
455+
};
456+
let headers = match fetcher
417457
.get_header_preimages(&boot_infos, l1_head.into())
418-
.await?;
458+
.await
459+
{
460+
Ok(h) => h,
461+
Err(e) => {
462+
error!("Failed to get header preimages: {}", e);
463+
return Err(AppError(e));
464+
}
465+
};
419466

420467
let prover = ProverClient::mock();
421468

422469
let stdin =
423-
get_agg_proof_stdin(proofs, boot_infos, headers, &state.range_vk, l1_head.into()).unwrap();
470+
match get_agg_proof_stdin(proofs, boot_infos, headers, &state.range_vk, l1_head.into()) {
471+
Ok(s) => s,
472+
Err(e) => {
473+
error!("Failed to get aggregation proof stdin: {}", e);
474+
return Err(AppError(e));
475+
}
476+
};
424477

425478
// Simulate the mock proof. proof.bytes() returns an empty byte array for mock proofs.
426-
let proof = prover
479+
let proof = match prover
427480
.prove(&state.agg_pk, stdin)
428481
.set_skip_deferred_proof_verification(true)
429482
.groth16()
430-
.run()?;
483+
.run()
484+
{
485+
Ok(p) => p,
486+
Err(e) => {
487+
error!("Failed to generate proof: {}", e);
488+
return Err(AppError(e));
489+
}
490+
};
431491

432492
Ok((
433493
StatusCode::OK,

0 commit comments

Comments
 (0)
Please sign in to comment.