diff --git a/.env.example b/.env.example
index c5da4f78..704faa2b 100644
--- a/.env.example
+++ b/.env.example
@@ -11,7 +11,7 @@ PRIVATE_KEY=
 ETHERSCAN_API_KEY=
 
 # OP Proposer
-L2OO_ADDRESS=
+L2OO_ADDRESS=0x0E8d5C8041bc99E1869bdE4d7384AD00C41865b
 
 # Proof Server
 PROVER_NETWORK_RPC=
diff --git a/.gitignore b/.gitignore
index 09bb1bcc..97bd7280 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,7 +28,7 @@ execution-reports/
 **/rollup-config.json
 
 # DB
-db/proofs.db
+db/*
 
 **/bin/op-proposer
diff --git a/rollup-configs/11155420.json b/configs/11155420/rollup.json
similarity index 100%
rename from rollup-configs/11155420.json
rename to configs/11155420/rollup.json
diff --git a/rollup-configs/13269.json b/configs/13269/rollup.json
similarity index 67%
rename from rollup-configs/13269.json
rename to configs/13269/rollup.json
index bb03f30c..cd7d9e08 100644
--- a/rollup-configs/13269.json
+++ b/configs/13269/rollup.json
@@ -1,21 +1,23 @@
 {
   "genesis": {
     "l1": {
-      "hash": "0x327e6431af3e1e05df17598ae31977cd545ea996f2755920fe75fb9fb65941e3",
-      "number": "0x62fd94"
+      "number": 6487444,
+      "hash": "0x327e6431af3e1e05df17598ae31977cd545ea996f2755920fe75fb9fb65941e3"
     },
     "l2": {
-      "hash": "0xa3344560b989c772875de98d68ccc3f8b5503c94e2e4ad2f403bca6bb186c6bf",
-      "number": "0x0"
+      "number": 0,
+      "hash": "0xa3344560b989c772875de98d68ccc3f8b5503c94e2e4ad2f403bca6bb186c6bf"
     },
     "l2_time": 1723487352,
     "system_config": {
-      "batcherAddr": "0x6322c47fee60e15fc1b7ba65f1cfd66201e4c61d",
+      "batcherAddr": "0x6322c47FEE60e15FC1B7ba65f1cfd66201E4c61d",
       "overhead": "0xbc",
       "scalar": "0xa6fe0",
       "gasLimit": 30000000,
       "baseFeeScalar": null,
-      "blobBaseFeeScalar": null
+      "blobBaseFeeScalar": null,
+      "eip1559Denominator": null,
+      "eip1559Elasticity": null
     }
   },
   "block_time": 2,
@@ -39,8 +41,8 @@
   "ecotone_time": 0,
   "fjord_time": 0,
   "granite_time": 1725984001,
-  "batch_inbox_address": "0x136b12db1fbac1d6aa6d0a1d2b724892c6fba921",
-  "deposit_contract_address": "0x16839f9f6a11195a72b88744336edff036e7b3d5",
-  "l1_system_config_address": "0x19145e3aee49c40d9f499f705f25ac1ea7409834",
+  "batch_inbox_address": "0x136B12DB1FbaC1d6Aa6D0a1D2b724892c6FbA921",
+  "deposit_contract_address": "0x16839f9F6a11195A72B88744336EDFf036e7B3d5",
+  "l1_system_config_address": "0x19145e3aEe49C40D9f499F705F25ac1eA7409834",
   "protocol_versions_address": "0x0000000000000000000000000000000000000000"
 }
\ No newline at end of file
diff --git a/contracts/opsuccinctl2ooconfig.json b/contracts/opsuccinctl2ooconfig.json
index 34cf2e5b..0539980b 100644
--- a/contracts/opsuccinctl2ooconfig.json
+++ b/contracts/opsuccinctl2ooconfig.json
@@ -1,16 +1,16 @@
 {
-  "chainId": 10,
+  "chainId": 13269,
   "challenger": "0x0000000000000000000000000000000000000000",
   "finalizationPeriod": 0,
   "l2BlockTime": 2,
   "owner": "0xDEd0000E32f8F40414d3ab3a830f735a3553E18e",
   "proposer": "0xDEd0000E32f8F40414d3ab3a830f735a3553E18e",
-  "rollupConfigHash": "0xaaa6ae5735fc2cd9d94d361a8208946371cc689e4c03e45be9dd7a3ea866ab2f",
-  "startingBlockNumber": 126147850,
-  "startingOutputRoot": "0xc6722f65202d9971ff736f449973d72ade2268e29945a5753c5a3be8d6b15a97",
-  "startingTimestamp": 1727894477,
+  "rollupConfigHash": "0x50efb1261373319cd7ba429612d9a18e585627618ec04560e7d27ba7e36d4c05",
+  "startingBlockNumber": 2298536,
+  "startingOutputRoot": "0x39d791dc827d53c4add5a213148b9633bb1e74a4b3fb61e1fa699a6465ad4be3",
+  "startingTimestamp": 1728084424,
   "submissionInterval": 1000,
   "verifierGateway": "0x3B6041173B80E77f038f3F2C0f9744f04837185e",
-  "aggregationVkey": "0x004c04e3eab8ea57cd1f9076cf1b3b87e1e7155776a64ae101e0a39d9098c676",
"0x004c04e3eab8ea57cd1f9076cf1b3b87e1e7155776a64ae101e0a39d9098c676", - "rangeVkeyCommitment": "0x50b251451a821c18594f0ae7267b5ac2072e308a03c540552c1d754f2103b460" + "aggregationVkey": "0x00e4dc504bcd3355a4bf7382ba15d74e058aa144948edb753879055d243ebad0", + "rangeVkeyCommitment": "0x58660c1a1ab2cc77620b051a4d9006883dcb81fa5a856f654bbc5f8739d83868" } \ No newline at end of file diff --git a/contracts/test/helpers/Utils.sol b/contracts/test/helpers/Utils.sol index 4fcc68a0..07d8faff 100644 --- a/contracts/test/helpers/Utils.sol +++ b/contracts/test/helpers/Utils.sol @@ -73,6 +73,9 @@ contract Utils is Test, JSONDecoder { // This script updates the rollup config hash and the block number in the config. function updateRollupConfig() public { + // If ENV_FILE is set, pass it to the fetch-rollup-config binary. + string memory envFile = vm.envOr("ENV_FILE", string(".env.conduit")); + // Build the fetch-rollup-config binary. Use the quiet flag to suppress build output. string[] memory inputs = new string[](6); inputs[0] = "cargo"; @@ -85,13 +88,16 @@ contract Utils is Test, JSONDecoder { // Run the fetch-rollup-config binary which updates the rollup config hash and the block number in the config. // Use the quiet flag to suppress build output. - string[] memory inputs2 = new string[](6); + string[] memory inputs2 = new string[](9); inputs2[0] = "cargo"; inputs2[1] = "run"; inputs2[2] = "--bin"; inputs2[3] = "fetch-rollup-config"; inputs2[4] = "--release"; inputs2[5] = "--quiet"; + inputs2[6] = "--"; + inputs2[7] = "--env-file"; + inputs2[8] = envFile; vm.ffi(inputs2); } diff --git a/docker-compose.yml b/docker-compose.yml index 102ff5b8..66f243c2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,7 +5,7 @@ services: context: . dockerfile: ./proposer/succinct/Dockerfile env_file: - - .env + - ${ENV_FILE:-.env} restart: unless-stopped ports: - "3000:3000" @@ -16,7 +16,7 @@ services: context: . 
diff --git a/docker-compose.yml b/docker-compose.yml
index 102ff5b8..66f243c2 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -5,7 +5,7 @@ services:
       context: .
       dockerfile: ./proposer/succinct/Dockerfile
     env_file:
-      - .env
+      - ${ENV_FILE:-.env}
     restart: unless-stopped
     ports:
       - "3000:3000"
@@ -16,7 +16,7 @@
       context: .
       dockerfile: ./proposer/op/Dockerfile.op_proposer
     env_file:
-      - .env
+      - ${ENV_FILE:-.env}
     restart: unless-stopped
    depends_on:
      - op-succinct-server
diff --git a/elf/aggregation-elf b/elf/aggregation-elf
index 36797083..31792a72 100755
Binary files a/elf/aggregation-elf and b/elf/aggregation-elf differ
diff --git a/proposer/db-utils/query_proofs.py b/proposer/db-utils/query_proofs.py
index cffa716a..e1367e09 100644
--- a/proposer/db-utils/query_proofs.py
+++ b/proposer/db-utils/query_proofs.py
@@ -96,14 +96,17 @@ def query_agg_proofs(db_path) -> [ProofRequest]:
         raise ValueError("L2OO_ADDRESS not found in .env file")
     print(f"L2OO_ADDRESS: {L2OO_ADDRESS}")
 
-    db_path = "../../db/proofs.db"
+    db_path = "../../db/11155420/proofs.db"
 
     # Get all span proofs
     print("\nSpan Proofs:")
     span_proofs = query_span_proofs(db_path)
     for proof in span_proofs:
-        print(f"Request ID: {proof.id}, Type: {proof.type}, Start Block: {proof.start_block}, End Block: {proof.end_block}, Status: {proof.status}, Prover Request ID: {proof.prover_request_id}, Time: {proof.request_added_time}")
+        proof_time_difference = None
+        if proof.proof_request_time is not None:
+            proof_time_difference = proof.proof_request_time - proof.request_added_time
+        print(f"Request ID: {proof.id}, Type: {proof.type}, Start Block: {proof.start_block}, End Block: {proof.end_block}, Status: {proof.status}, Prover Request ID: {proof.prover_request_id}, Request Added Time: {proof.request_added_time}, Proof Request Time: {proof.proof_request_time}, Proof Time Difference: {proof_time_difference}")
 
     # Query for aggregation proofs
     print("\nAggregation Proofs:")
diff --git a/proposer/op/Dockerfile.op_proposer b/proposer/op/Dockerfile.op_proposer
index dfc07dd6..3176d33e 100644
--- a/proposer/op/Dockerfile.op_proposer
+++ b/proposer/op/Dockerfile.op_proposer
@@ -26,7 +26,7 @@ COPY --from=optimism-builder /optimism/op-proposer/proposer/bin/op-proposer /usr
 COPY ./proposer/op/op_proposer.sh /usr/local/bin/op_proposer.sh
 
 # Copy the rollup configs
-COPY ../rollup-configs /rollup-configs
+COPY ../configs /configs
 
 # Make the binary and entrypoint executable.
 RUN ls -l /usr/local/bin/
diff --git a/proposer/op/Dockerfile.span_batch_server b/proposer/op/Dockerfile.span_batch_server
index 06ebffc2..9f44dbbd 100644
--- a/proposer/op/Dockerfile.span_batch_server
+++ b/proposer/op/Dockerfile.span_batch_server
@@ -26,7 +26,7 @@ WORKDIR /app
 COPY ./proposer/op /app/op-proposer-go
 
 # Copy the rollup configs
-COPY ../rollup-configs /rollup-configs
+COPY ../configs /configs
 
 # Change to the server directory and build the application
 WORKDIR /app/op-proposer-go/server
diff --git a/proposer/op/go.sum b/proposer/op/go.sum
index 82fa69fd..58434749 100644
--- a/proposer/op/go.sum
+++ b/proposer/op/go.sum
@@ -321,6 +321,10 @@ github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKl
 github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
 github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
 github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
diff --git a/proposer/op/proposer/db/db.go b/proposer/op/proposer/db/db.go
index ae847798..c2cfae41 100644
--- a/proposer/op/proposer/db/db.go
+++ b/proposer/op/proposer/db/db.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"os"
+	"path/filepath"
 	"time"
 
 	"github.com/succinctlabs/op-succinct-go/proposer/db/ent"
@@ -23,6 +24,13 @@ func InitDB(dbPath string, useCachedDb bool) (*ProofDB, error) {
 	} else {
 		fmt.Printf("Using cached DB at %s\n", dbPath)
 	}
+
+	// Create the intermediate directories if they don't exist
+	dir := filepath.Dir(dbPath)
+	if err := os.MkdirAll(dir, 0755); err != nil {
+		return nil, fmt.Errorf("failed to create directories for DB: %w", err)
+	}
+
 	connectionString := fmt.Sprintf("file:%s?_fk=1", dbPath)
 	client, err := ent.Open("sqlite3", connectionString)
 	if err != nil {
@@ -69,6 +77,7 @@ func (db *ProofDB) newEntryWithReqAddedTimestamp(proofType string, start, end, n
 		SetEndBlock(end).
 		SetStatus(proofrequest.StatusUNREQ).
 		SetRequestAddedTime(now).
+		SetLastUpdatedTime(now).
 		Save(context.Background())
 
 	if err != nil {
@@ -97,6 +106,7 @@ func (db *ProofDB) UpdateProofStatus(id int, newStatus string) error {
 	_, err := db.client.ProofRequest.Update().
 		Where(proofrequest.ID(id)).
 		SetStatus(pStatus).
+		SetLastUpdatedTime(uint64(time.Now().Unix())).
 		Save(context.Background())
 
 	return err
@@ -107,6 +117,7 @@ func (db *ProofDB) SetProverRequestID(id int, proverRequestID string) error {
 		Where(proofrequest.ID(id)).
 		SetProverRequestID(proverRequestID).
 		SetProofRequestTime(uint64(time.Now().Unix())).
+		SetLastUpdatedTime(uint64(time.Now().Unix())).
 		Save(context.Background())
 
 	if err != nil {
@@ -149,6 +160,7 @@ func (db *ProofDB) AddProof(id int, proof []byte) error {
 		UpdateOne(existingProof).
 		SetProof(proof).
 		SetStatus(proofrequest.StatusCOMPLETE).
+		SetLastUpdatedTime(uint64(time.Now().Unix())).
 		Save(context.Background())
 
 	if err != nil {
@@ -174,6 +186,7 @@ func (db *ProofDB) AddL1BlockInfoToAggRequest(startBlock, endBlock, l1BlockNumbe
 		).
 		SetL1BlockNumber(l1BlockNumber).
 		SetL1BlockHash(l1BlockHash).
+		SetLastUpdatedTime(uint64(time.Now().Unix())).
 		Save(context.Background())
 
 	if err != nil {
@@ -227,7 +240,7 @@ func (db *ProofDB) GetWitnessGenerationTimeoutProofsOnServer() ([]*ent.ProofRequ
 		Where(
 			proofrequest.StatusEQ(proofrequest.StatusREQ),
 			proofrequest.ProverRequestIDIsNil(),
-			proofrequest.RequestAddedTimeLT(uint64(twentyMinutesAgo)),
+			proofrequest.LastUpdatedTimeLT(uint64(twentyMinutesAgo)),
 		).
 		All(context.Background())
diff --git a/proposer/op/proposer/db/ent/migrate/schema.go b/proposer/op/proposer/db/ent/migrate/schema.go
index 3416571a..dc8f48ef 100644
--- a/proposer/op/proposer/db/ent/migrate/schema.go
+++ b/proposer/op/proposer/db/ent/migrate/schema.go
@@ -18,6 +18,7 @@ var (
 		{Name: "request_added_time", Type: field.TypeUint64},
 		{Name: "prover_request_id", Type: field.TypeString, Nullable: true},
 		{Name: "proof_request_time", Type: field.TypeUint64, Nullable: true},
+		{Name: "last_updated_time", Type: field.TypeUint64},
 		{Name: "l1_block_number", Type: field.TypeUint64, Nullable: true},
 		{Name: "l1_block_hash", Type: field.TypeString, Nullable: true},
 		{Name: "proof", Type: field.TypeBytes, Nullable: true},
diff --git a/proposer/op/proposer/db/ent/mutation.go b/proposer/op/proposer/db/ent/mutation.go
index 3d83c8e8..2ffbe120 100644
--- a/proposer/op/proposer/db/ent/mutation.go
+++ b/proposer/op/proposer/db/ent/mutation.go
@@ -43,6 +43,8 @@ type ProofRequestMutation struct {
 	prover_request_id     *string
 	proof_request_time    *uint64
 	addproof_request_time *int64
+	last_updated_time     *uint64
+	addlast_updated_time  *int64
 	l1_block_number       *uint64
 	addl1_block_number    *int64
 	l1_block_hash         *string
@@ -510,6 +512,62 @@ func (m *ProofRequestMutation) ResetProofRequestTime() {
 	delete(m.clearedFields, proofrequest.FieldProofRequestTime)
 }
 
+// SetLastUpdatedTime sets the "last_updated_time" field.
+func (m *ProofRequestMutation) SetLastUpdatedTime(u uint64) {
+	m.last_updated_time = &u
+	m.addlast_updated_time = nil
+}
+
+// LastUpdatedTime returns the value of the "last_updated_time" field in the mutation.
+func (m *ProofRequestMutation) LastUpdatedTime() (r uint64, exists bool) {
+	v := m.last_updated_time
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldLastUpdatedTime returns the old "last_updated_time" field's value of the ProofRequest entity.
+// If the ProofRequest object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *ProofRequestMutation) OldLastUpdatedTime(ctx context.Context) (v uint64, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldLastUpdatedTime is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldLastUpdatedTime requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldLastUpdatedTime: %w", err)
+	}
+	return oldValue.LastUpdatedTime, nil
+}
+
+// AddLastUpdatedTime adds u to the "last_updated_time" field.
+func (m *ProofRequestMutation) AddLastUpdatedTime(u int64) {
+	if m.addlast_updated_time != nil {
+		*m.addlast_updated_time += u
+	} else {
+		m.addlast_updated_time = &u
+	}
+}
+
+// AddedLastUpdatedTime returns the value that was added to the "last_updated_time" field in this mutation.
+func (m *ProofRequestMutation) AddedLastUpdatedTime() (r int64, exists bool) {
+	v := m.addlast_updated_time
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// ResetLastUpdatedTime resets all changes to the "last_updated_time" field.
+func (m *ProofRequestMutation) ResetLastUpdatedTime() {
+	m.last_updated_time = nil
+	m.addlast_updated_time = nil
+}
+
 // SetL1BlockNumber sets the "l1_block_number" field.
 func (m *ProofRequestMutation) SetL1BlockNumber(u uint64) {
 	m.l1_block_number = &u
@@ -712,7 +770,7 @@ func (m *ProofRequestMutation) Type() string {
 // order to get all numeric fields that were incremented/decremented, call
 // AddedFields().
 func (m *ProofRequestMutation) Fields() []string {
-	fields := make([]string, 0, 10)
+	fields := make([]string, 0, 11)
 	if m._type != nil {
 		fields = append(fields, proofrequest.FieldType)
 	}
@@ -734,6 +792,9 @@ func (m *ProofRequestMutation) Fields() []string {
 	if m.proof_request_time != nil {
 		fields = append(fields, proofrequest.FieldProofRequestTime)
 	}
+	if m.last_updated_time != nil {
+		fields = append(fields, proofrequest.FieldLastUpdatedTime)
+	}
 	if m.l1_block_number != nil {
 		fields = append(fields, proofrequest.FieldL1BlockNumber)
 	}
@@ -765,6 +826,8 @@ func (m *ProofRequestMutation) Field(name string) (ent.Value, bool) {
 		return m.ProverRequestID()
 	case proofrequest.FieldProofRequestTime:
 		return m.ProofRequestTime()
+	case proofrequest.FieldLastUpdatedTime:
+		return m.LastUpdatedTime()
 	case proofrequest.FieldL1BlockNumber:
 		return m.L1BlockNumber()
 	case proofrequest.FieldL1BlockHash:
@@ -794,6 +857,8 @@ func (m *ProofRequestMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
 		return m.OldProverRequestID(ctx)
 	case proofrequest.FieldProofRequestTime:
 		return m.OldProofRequestTime(ctx)
+	case proofrequest.FieldLastUpdatedTime:
+		return m.OldLastUpdatedTime(ctx)
 	case proofrequest.FieldL1BlockNumber:
 		return m.OldL1BlockNumber(ctx)
 	case proofrequest.FieldL1BlockHash:
@@ -858,6 +923,13 @@ func (m *ProofRequestMutation) SetField(name string, value ent.Value) error {
 		}
 		m.SetProofRequestTime(v)
 		return nil
+	case proofrequest.FieldLastUpdatedTime:
+		v, ok := value.(uint64)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetLastUpdatedTime(v)
+		return nil
 	case proofrequest.FieldL1BlockNumber:
 		v, ok := value.(uint64)
 		if !ok {
@@ -899,6 +971,9 @@ func (m *ProofRequestMutation) AddedFields() []string {
 	if m.addproof_request_time != nil {
 		fields = append(fields, proofrequest.FieldProofRequestTime)
 	}
+	if m.addlast_updated_time != nil {
+		fields = append(fields, proofrequest.FieldLastUpdatedTime)
+	}
 	if m.addl1_block_number != nil {
 		fields = append(fields, proofrequest.FieldL1BlockNumber)
 	}
@@ -918,6 +993,8 @@ func (m *ProofRequestMutation) AddedField(name string) (ent.Value, bool) {
 		return m.AddedRequestAddedTime()
 	case proofrequest.FieldProofRequestTime:
 		return m.AddedProofRequestTime()
+	case proofrequest.FieldLastUpdatedTime:
+		return m.AddedLastUpdatedTime()
 	case proofrequest.FieldL1BlockNumber:
 		return m.AddedL1BlockNumber()
 	}
@@ -957,6 +1034,13 @@ func (m *ProofRequestMutation) AddField(name string, value ent.Value) error {
 		}
 		m.AddProofRequestTime(v)
 		return nil
+	case proofrequest.FieldLastUpdatedTime:
+		v, ok := value.(int64)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.AddLastUpdatedTime(v)
+		return nil
 	case proofrequest.FieldL1BlockNumber:
 		v, ok := value.(int64)
 		if !ok {
@@ -1045,6 +1129,9 @@ func (m *ProofRequestMutation) ResetField(name string) error {
 	case proofrequest.FieldProofRequestTime:
 		m.ResetProofRequestTime()
 		return nil
+	case proofrequest.FieldLastUpdatedTime:
+		m.ResetLastUpdatedTime()
+		return nil
 	case proofrequest.FieldL1BlockNumber:
 		m.ResetL1BlockNumber()
 		return nil
diff --git a/proposer/op/proposer/db/ent/proofrequest.go b/proposer/op/proposer/db/ent/proofrequest.go
index cba885db..793620d2 100644
--- a/proposer/op/proposer/db/ent/proofrequest.go
+++ b/proposer/op/proposer/db/ent/proofrequest.go
@@ -30,6 +30,8 @@ type ProofRequest struct {
 	ProverRequestID string `json:"prover_request_id,omitempty"`
 	// ProofRequestTime holds the value of the "proof_request_time" field.
 	ProofRequestTime uint64 `json:"proof_request_time,omitempty"`
+	// LastUpdatedTime holds the value of the "last_updated_time" field.
+	LastUpdatedTime uint64 `json:"last_updated_time,omitempty"`
 	// L1BlockNumber holds the value of the "l1_block_number" field.
 	L1BlockNumber uint64 `json:"l1_block_number,omitempty"`
 	// L1BlockHash holds the value of the "l1_block_hash" field.
@@ -46,7 +48,7 @@ func (*ProofRequest) scanValues(columns []string) ([]any, error) {
 		switch columns[i] {
 		case proofrequest.FieldProof:
 			values[i] = new([]byte)
-		case proofrequest.FieldID, proofrequest.FieldStartBlock, proofrequest.FieldEndBlock, proofrequest.FieldRequestAddedTime, proofrequest.FieldProofRequestTime, proofrequest.FieldL1BlockNumber:
+		case proofrequest.FieldID, proofrequest.FieldStartBlock, proofrequest.FieldEndBlock, proofrequest.FieldRequestAddedTime, proofrequest.FieldProofRequestTime, proofrequest.FieldLastUpdatedTime, proofrequest.FieldL1BlockNumber:
 			values[i] = new(sql.NullInt64)
 		case proofrequest.FieldType, proofrequest.FieldStatus, proofrequest.FieldProverRequestID, proofrequest.FieldL1BlockHash:
 			values[i] = new(sql.NullString)
@@ -113,6 +115,12 @@ func (pr *ProofRequest) assignValues(columns []string, values []any) error {
 			} else if value.Valid {
 				pr.ProofRequestTime = uint64(value.Int64)
 			}
+		case proofrequest.FieldLastUpdatedTime:
+			if value, ok := values[i].(*sql.NullInt64); !ok {
+				return fmt.Errorf("unexpected type %T for field last_updated_time", values[i])
+			} else if value.Valid {
+				pr.LastUpdatedTime = uint64(value.Int64)
+			}
 		case proofrequest.FieldL1BlockNumber:
 			if value, ok := values[i].(*sql.NullInt64); !ok {
 				return fmt.Errorf("unexpected type %T for field l1_block_number", values[i])
@@ -188,6 +196,9 @@ func (pr *ProofRequest) String() string {
 	builder.WriteString("proof_request_time=")
 	builder.WriteString(fmt.Sprintf("%v", pr.ProofRequestTime))
 	builder.WriteString(", ")
+	builder.WriteString("last_updated_time=")
+	builder.WriteString(fmt.Sprintf("%v", pr.LastUpdatedTime))
+	builder.WriteString(", ")
 	builder.WriteString("l1_block_number=")
 	builder.WriteString(fmt.Sprintf("%v", pr.L1BlockNumber))
 	builder.WriteString(", ")
diff --git a/proposer/op/proposer/db/ent/proofrequest/proofrequest.go b/proposer/op/proposer/db/ent/proofrequest/proofrequest.go
index e52dd857..0ff10cba 100644
--- a/proposer/op/proposer/db/ent/proofrequest/proofrequest.go
+++ b/proposer/op/proposer/db/ent/proofrequest/proofrequest.go
@@ -27,6 +27,8 @@ const (
 	FieldProverRequestID = "prover_request_id"
 	// FieldProofRequestTime holds the string denoting the proof_request_time field in the database.
 	FieldProofRequestTime = "proof_request_time"
+	// FieldLastUpdatedTime holds the string denoting the last_updated_time field in the database.
+	FieldLastUpdatedTime = "last_updated_time"
 	// FieldL1BlockNumber holds the string denoting the l1_block_number field in the database.
 	FieldL1BlockNumber = "l1_block_number"
 	// FieldL1BlockHash holds the string denoting the l1_block_hash field in the database.
@@ -47,6 +49,7 @@ var Columns = []string{
 	FieldRequestAddedTime,
 	FieldProverRequestID,
 	FieldProofRequestTime,
+	FieldLastUpdatedTime,
 	FieldL1BlockNumber,
 	FieldL1BlockHash,
 	FieldProof,
@@ -153,6 +156,11 @@ func ByProofRequestTime(opts ...sql.OrderTermOption) OrderOption {
 	return sql.OrderByField(FieldProofRequestTime, opts...).ToFunc()
 }
 
+// ByLastUpdatedTime orders the results by the last_updated_time field.
+func ByLastUpdatedTime(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldLastUpdatedTime, opts...).ToFunc()
+}
+
 // ByL1BlockNumber orders the results by the l1_block_number field.
 func ByL1BlockNumber(opts ...sql.OrderTermOption) OrderOption {
 	return sql.OrderByField(FieldL1BlockNumber, opts...).ToFunc()
diff --git a/proposer/op/proposer/db/ent/proofrequest/where.go b/proposer/op/proposer/db/ent/proofrequest/where.go
index 37e380bf..585791d5 100644
--- a/proposer/op/proposer/db/ent/proofrequest/where.go
+++ b/proposer/op/proposer/db/ent/proofrequest/where.go
@@ -77,6 +77,11 @@ func ProofRequestTime(v uint64) predicate.ProofRequest {
 	return predicate.ProofRequest(sql.FieldEQ(FieldProofRequestTime, v))
 }
 
+// LastUpdatedTime applies equality check predicate on the "last_updated_time" field. It's identical to LastUpdatedTimeEQ.
+func LastUpdatedTime(v uint64) predicate.ProofRequest {
+	return predicate.ProofRequest(sql.FieldEQ(FieldLastUpdatedTime, v))
+}
+
 // L1BlockNumber applies equality check predicate on the "l1_block_number" field. It's identical to L1BlockNumberEQ.
 func L1BlockNumber(v uint64) predicate.ProofRequest {
 	return predicate.ProofRequest(sql.FieldEQ(FieldL1BlockNumber, v))
@@ -377,6 +382,46 @@ func ProofRequestTimeNotNil() predicate.ProofRequest {
 	return predicate.ProofRequest(sql.FieldNotNull(FieldProofRequestTime))
 }
 
+// LastUpdatedTimeEQ applies the EQ predicate on the "last_updated_time" field.
+func LastUpdatedTimeEQ(v uint64) predicate.ProofRequest {
+	return predicate.ProofRequest(sql.FieldEQ(FieldLastUpdatedTime, v))
+}
+
+// LastUpdatedTimeNEQ applies the NEQ predicate on the "last_updated_time" field.
+func LastUpdatedTimeNEQ(v uint64) predicate.ProofRequest {
+	return predicate.ProofRequest(sql.FieldNEQ(FieldLastUpdatedTime, v))
+}
+
+// LastUpdatedTimeIn applies the In predicate on the "last_updated_time" field.
+func LastUpdatedTimeIn(vs ...uint64) predicate.ProofRequest {
+	return predicate.ProofRequest(sql.FieldIn(FieldLastUpdatedTime, vs...))
+}
+
+// LastUpdatedTimeNotIn applies the NotIn predicate on the "last_updated_time" field.
+func LastUpdatedTimeNotIn(vs ...uint64) predicate.ProofRequest {
+	return predicate.ProofRequest(sql.FieldNotIn(FieldLastUpdatedTime, vs...))
+}
+
+// LastUpdatedTimeGT applies the GT predicate on the "last_updated_time" field.
+func LastUpdatedTimeGT(v uint64) predicate.ProofRequest {
+	return predicate.ProofRequest(sql.FieldGT(FieldLastUpdatedTime, v))
+}
+
+// LastUpdatedTimeGTE applies the GTE predicate on the "last_updated_time" field.
+func LastUpdatedTimeGTE(v uint64) predicate.ProofRequest {
+	return predicate.ProofRequest(sql.FieldGTE(FieldLastUpdatedTime, v))
+}
+
+// LastUpdatedTimeLT applies the LT predicate on the "last_updated_time" field.
+func LastUpdatedTimeLT(v uint64) predicate.ProofRequest {
+	return predicate.ProofRequest(sql.FieldLT(FieldLastUpdatedTime, v))
+}
+
+// LastUpdatedTimeLTE applies the LTE predicate on the "last_updated_time" field.
+func LastUpdatedTimeLTE(v uint64) predicate.ProofRequest {
+	return predicate.ProofRequest(sql.FieldLTE(FieldLastUpdatedTime, v))
+}
+
 // L1BlockNumberEQ applies the EQ predicate on the "l1_block_number" field.
 func L1BlockNumberEQ(v uint64) predicate.ProofRequest {
 	return predicate.ProofRequest(sql.FieldEQ(FieldL1BlockNumber, v))
diff --git a/proposer/op/proposer/db/ent/proofrequest_create.go b/proposer/op/proposer/db/ent/proofrequest_create.go
index 02012aca..d57dbc49 100644
--- a/proposer/op/proposer/db/ent/proofrequest_create.go
+++ b/proposer/op/proposer/db/ent/proofrequest_create.go
@@ -77,6 +77,12 @@ func (prc *ProofRequestCreate) SetNillableProofRequestTime(u *uint64) *ProofRequ
 	return prc
 }
 
+// SetLastUpdatedTime sets the "last_updated_time" field.
+func (prc *ProofRequestCreate) SetLastUpdatedTime(u uint64) *ProofRequestCreate {
+	prc.mutation.SetLastUpdatedTime(u)
+	return prc
+}
+
 // SetL1BlockNumber sets the "l1_block_number" field.
 func (prc *ProofRequestCreate) SetL1BlockNumber(u uint64) *ProofRequestCreate {
 	prc.mutation.SetL1BlockNumber(u)
@@ -170,6 +176,9 @@ func (prc *ProofRequestCreate) check() error {
 	if _, ok := prc.mutation.RequestAddedTime(); !ok {
 		return &ValidationError{Name: "request_added_time", err: errors.New(`ent: missing required field "ProofRequest.request_added_time"`)}
 	}
+	if _, ok := prc.mutation.LastUpdatedTime(); !ok {
+		return &ValidationError{Name: "last_updated_time", err: errors.New(`ent: missing required field "ProofRequest.last_updated_time"`)}
+	}
 	return nil
 }
 
@@ -224,6 +233,10 @@ func (prc *ProofRequestCreate) createSpec() (*ProofRequest, *sqlgraph.CreateSpec
 		_spec.SetField(proofrequest.FieldProofRequestTime, field.TypeUint64, value)
 		_node.ProofRequestTime = value
 	}
+	if value, ok := prc.mutation.LastUpdatedTime(); ok {
+		_spec.SetField(proofrequest.FieldLastUpdatedTime, field.TypeUint64, value)
+		_node.LastUpdatedTime = value
+	}
 	if value, ok := prc.mutation.L1BlockNumber(); ok {
 		_spec.SetField(proofrequest.FieldL1BlockNumber, field.TypeUint64, value)
 		_node.L1BlockNumber = value
diff --git a/proposer/op/proposer/db/ent/proofrequest_update.go b/proposer/op/proposer/db/ent/proofrequest_update.go
index b4c04bb8..2b2f59f1 100644
--- a/proposer/op/proposer/db/ent/proofrequest_update.go
+++ b/proposer/op/proposer/db/ent/proofrequest_update.go
@@ -165,6 +165,27 @@ func (pru *ProofRequestUpdate) ClearProofRequestTime() *ProofRequestUpdate {
 	return pru
 }
 
+// SetLastUpdatedTime sets the "last_updated_time" field.
+func (pru *ProofRequestUpdate) SetLastUpdatedTime(u uint64) *ProofRequestUpdate {
+	pru.mutation.ResetLastUpdatedTime()
+	pru.mutation.SetLastUpdatedTime(u)
+	return pru
+}
+
+// SetNillableLastUpdatedTime sets the "last_updated_time" field if the given value is not nil.
+func (pru *ProofRequestUpdate) SetNillableLastUpdatedTime(u *uint64) *ProofRequestUpdate {
+	if u != nil {
+		pru.SetLastUpdatedTime(*u)
+	}
+	return pru
+}
+
+// AddLastUpdatedTime adds u to the "last_updated_time" field.
+func (pru *ProofRequestUpdate) AddLastUpdatedTime(u int64) *ProofRequestUpdate {
+	pru.mutation.AddLastUpdatedTime(u)
+	return pru
+}
+
 // SetL1BlockNumber sets the "l1_block_number" field.
 func (pru *ProofRequestUpdate) SetL1BlockNumber(u uint64) *ProofRequestUpdate {
 	pru.mutation.ResetL1BlockNumber()
@@ -322,6 +343,12 @@ func (pru *ProofRequestUpdate) sqlSave(ctx context.Context) (n int, err error) {
 	if pru.mutation.ProofRequestTimeCleared() {
 		_spec.ClearField(proofrequest.FieldProofRequestTime, field.TypeUint64)
 	}
+	if value, ok := pru.mutation.LastUpdatedTime(); ok {
+		_spec.SetField(proofrequest.FieldLastUpdatedTime, field.TypeUint64, value)
+	}
+	if value, ok := pru.mutation.AddedLastUpdatedTime(); ok {
+		_spec.AddField(proofrequest.FieldLastUpdatedTime, field.TypeUint64, value)
+	}
 	if value, ok := pru.mutation.L1BlockNumber(); ok {
 		_spec.SetField(proofrequest.FieldL1BlockNumber, field.TypeUint64, value)
 	}
@@ -501,6 +528,27 @@ func (pruo *ProofRequestUpdateOne) ClearProofRequestTime() *ProofRequestUpdateOn
 	return pruo
 }
 
+// SetLastUpdatedTime sets the "last_updated_time" field.
+func (pruo *ProofRequestUpdateOne) SetLastUpdatedTime(u uint64) *ProofRequestUpdateOne {
+	pruo.mutation.ResetLastUpdatedTime()
+	pruo.mutation.SetLastUpdatedTime(u)
+	return pruo
+}
+
+// SetNillableLastUpdatedTime sets the "last_updated_time" field if the given value is not nil.
+func (pruo *ProofRequestUpdateOne) SetNillableLastUpdatedTime(u *uint64) *ProofRequestUpdateOne {
+	if u != nil {
+		pruo.SetLastUpdatedTime(*u)
+	}
+	return pruo
+}
+
+// AddLastUpdatedTime adds u to the "last_updated_time" field.
+func (pruo *ProofRequestUpdateOne) AddLastUpdatedTime(u int64) *ProofRequestUpdateOne {
+	pruo.mutation.AddLastUpdatedTime(u)
+	return pruo
+}
+
 // SetL1BlockNumber sets the "l1_block_number" field.
 func (pruo *ProofRequestUpdateOne) SetL1BlockNumber(u uint64) *ProofRequestUpdateOne {
 	pruo.mutation.ResetL1BlockNumber()
@@ -688,6 +736,12 @@ func (pruo *ProofRequestUpdateOne) sqlSave(ctx context.Context) (_node *ProofReq
 	if pruo.mutation.ProofRequestTimeCleared() {
 		_spec.ClearField(proofrequest.FieldProofRequestTime, field.TypeUint64)
 	}
+	if value, ok := pruo.mutation.LastUpdatedTime(); ok {
+		_spec.SetField(proofrequest.FieldLastUpdatedTime, field.TypeUint64, value)
+	}
+	if value, ok := pruo.mutation.AddedLastUpdatedTime(); ok {
+		_spec.AddField(proofrequest.FieldLastUpdatedTime, field.TypeUint64, value)
+	}
 	if value, ok := pruo.mutation.L1BlockNumber(); ok {
 		_spec.SetField(proofrequest.FieldL1BlockNumber, field.TypeUint64, value)
 	}
diff --git a/proposer/op/proposer/db/ent/schema/proofrequest.go b/proposer/op/proposer/db/ent/schema/proofrequest.go
index 600d65e3..88732181 100644
--- a/proposer/op/proposer/db/ent/schema/proofrequest.go
+++ b/proposer/op/proposer/db/ent/schema/proofrequest.go
@@ -20,6 +20,7 @@ func (ProofRequest) Fields() []ent.Field {
 		field.Uint64("request_added_time"),
 		field.String("prover_request_id").Optional(),
 		field.Uint64("proof_request_time").Optional(),
+		field.Uint64("last_updated_time"),
 		field.Uint64("l1_block_number").Optional(),
 		field.String("l1_block_hash").Optional(),
 		field.Bytes("proof").Optional(),
diff --git a/proposer/op/proposer/driver.go b/proposer/op/proposer/driver.go
index ea6b8632..0470f0ff 100644
--- a/proposer/op/proposer/driver.go
+++ b/proposer/op/proposer/driver.go
@@ -466,6 +466,7 @@ func (l *L2OutputSubmitter) sendTransaction(ctx context.Context, output *eth.Out
 	if err != nil {
 		return err
 	}
+	// TODO: This currently blocks the loop while it waits for the transaction to be confirmed. Up to 3 minutes.
 	receipt, err = l.Txmgr.Send(ctx, txmgr.TxCandidate{
 		TxData: data,
 		To:     l.Cfg.L2OutputOracleAddr,
@@ -495,6 +496,7 @@ func (l *L2OutputSubmitter) sendCheckpointTransaction(ctx context.Context, block
 	if err != nil {
 		return 0, common.Hash{}, err
 	}
+	// TODO: This currently blocks the loop while it waits for the transaction to be confirmed. Up to 3 minutes.
 	receipt, err = l.Txmgr.Send(ctx, txmgr.TxCandidate{
 		TxData: data,
 		To:     l.Cfg.L2OutputOracleAddr,
diff --git a/proposer/op/proposer/prove.go b/proposer/op/proposer/prove.go
index cbc48396..0051bd5d 100644
--- a/proposer/op/proposer/prove.go
+++ b/proposer/op/proposer/prove.go
@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"time"
@@ -33,7 +32,9 @@ func (l *L2OutputSubmitter) ProcessPendingProofs() error {
 	// Combine the two lists of proofs.
 	reqsToRetry := append(failedReqs, timedOutReqs...)
-	l.Log.Info("Retrying all proofs that failed to reach the prover network with a timeout.", "failed", len(failedReqs), "timedOut", len(timedOutReqs))
+	if len(reqsToRetry) > 0 {
+		l.Log.Info("Retrying failed and timed out proofs.", "failed", len(failedReqs), "timedOut", len(timedOutReqs))
+	}
 
 	for _, req := range reqsToRetry {
 		err = l.RetryRequest(req)
@@ -49,7 +50,7 @@ func (l *L2OutputSubmitter) ProcessPendingProofs() error {
 	if err != nil {
 		return err
 	}
-	l.Log.Info("Got all pending proofs from DB.", "count", len(reqs))
+	l.Log.Info("Number of Pending Proofs.", "count", len(reqs))
 	for _, req := range reqs {
 		status, proof, err := l.GetProofStatus(req.ProverRequestID)
 		if err != nil {
@@ -58,7 +59,7 @@ func (l *L2OutputSubmitter) ProcessPendingProofs() error {
 		}
 		if status == "PROOF_FULFILLED" {
 			// Update the proof in the DB and update status to COMPLETE.
- l.Log.Info("proof fulfilled", "id", req.ProverRequestID) + l.Log.Info("Fulfilled Proof", "id", req.ProverRequestID) err = l.db.AddProof(req.ID, proof) if err != nil { l.Log.Error("failed to update completed proof status", "err", err) @@ -155,7 +156,7 @@ func (l *L2OutputSubmitter) RequestQueuedProofs(ctx context.Context) error { } } go func(p ent.ProofRequest) { - l.Log.Info("requesting proof from server", "proof", p) + l.Log.Info("requesting proof from server", "type", p.Type, "start", p.StartBlock, "end", p.EndBlock, "id", p.ID) err = l.db.UpdateProofStatus(nextProofToRequest.ID, "REQ") if err != nil { l.Log.Error("failed to update proof status", "err", err) @@ -170,7 +171,7 @@ func (l *L2OutputSubmitter) RequestQueuedProofs(ctx context.Context) error { l.Log.Error("failed to revert proof status", "err", err, "proverRequestID", nextProofToRequest.ID) } - // If the proof fails to request from the server, we should retry it with a smaller span proof. + // If the proof fails to be requested, we should add it to the queue to be retried. err = l.RetryRequest(nextProofToRequest) if err != nil { l.Log.Error("failed to retry request", "err", err) @@ -198,7 +199,7 @@ func (l *L2OutputSubmitter) DeriveAggProofs(ctx context.Context) error { return fmt.Errorf("failed to get next L2OO output: %w", err) } - l.Log.Info("Determining if next AGG proof can be created from span proofs", "latestBlock", from, "minOutputBlock", minTo.Uint64()) + l.Log.Info("Checking for AGG proof", "blocksToProve", minTo.Uint64()-from, "latestProvenBlock", from, "minBlockToProveToAgg", minTo.Uint64()) created, end, err := l.db.TryCreateAggProofFromSpanProofs(from, minTo.Uint64()) if err != nil { return fmt.Errorf("failed to create agg proof from span proofs: %w", err) @@ -317,7 +318,7 @@ func (l *L2OutputSubmitter) RequestProofFromServer(urlPath string, jsonBody []by defer resp.Body.Close() // Read the response body. - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return "", fmt.Errorf("error reading the response body: %v", err) } diff --git a/proposer/op/proposer/span_batches.go b/proposer/op/proposer/span_batches.go index b01b0229..e80483e7 100644 --- a/proposer/op/proposer/span_batches.go +++ b/proposer/op/proposer/span_batches.go @@ -41,7 +41,6 @@ func (l *L2OutputSubmitter) DeriveNewSpanBatches(ctx context.Context) error { } } newL2StartBlock := latestL2EndBlock + 1 - l.Log.Info("deriving span batch for L2 block", "nextBlock", newL2StartBlock) rollupClient, err := l.RollupProvider.RollupClient(ctx) if err != nil { @@ -62,7 +61,7 @@ func (l *L2OutputSubmitter) DeriveNewSpanBatches(ctx context.Context) error { // Add each span to the DB. If there are no spans, we will not create any proofs. for _, span := range spans { err := l.db.NewEntry("SPAN", span.Start, span.End) - l.Log.Info("New SPAN proof request", "start", span.Start, "end", span.End) + l.Log.Info("New range proof request.", "start", span.Start, "end", span.End) if err != nil { l.Log.Error("failed to add span to db", "err", err) return err diff --git a/proposer/succinct/Dockerfile b/proposer/succinct/Dockerfile index 245f95c1..fac33dbc 100644 --- a/proposer/succinct/Dockerfile +++ b/proposer/succinct/Dockerfile @@ -11,8 +11,6 @@ RUN apt-get update && apt-get install -y \ git \ pkg-config \ libssl-dev \ - && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \ - && . 
diff --git a/rollup-configs/10.json b/rollup-configs/10.json
deleted file mode 100644
index b2f57929..00000000
--- a/rollup-configs/10.json
+++ /dev/null
@@ -1,46 +0,0 @@
-{
-  "genesis": {
-    "l1": {
-      "number": 17422590,
-      "hash": "0x438335a20d98863a4c0c97999eb2481921ccd28553eac6f913af7c12aec04108"
-    },
-    "l2": {
-      "number": 105235063,
-      "hash": "0xdbf6a80fef073de06add9b0d14026d6e5a86c85f6d102c36d3d8e9cf89c2afd3"
-    },
-    "l2_time": 1686068903,
-    "system_config": {
-      "batcherAddr": "0x6887246668a3b87F54DeB3b94Ba47a6f63F32985",
-      "overhead": "0xbc",
-      "scalar": "0xa6fe0",
-      "gasLimit": 30000000,
-      "baseFeeScalar": null,
-      "blobBaseFeeScalar": null
-    }
-  },
-  "block_time": 2,
-  "max_sequencer_drift": 600,
-  "seq_window_size": 3600,
-  "channel_timeout": 300,
-  "granite_channel_timeout": 50,
-  "l1_chain_id": 1,
-  "l2_chain_id": 10,
-  "base_fee_params": {
-    "max_change_denominator": "0x32",
-    "elasticity_multiplier": "0x6"
-  },
-  "canyon_base_fee_params": {
-    "max_change_denominator": "0xfa",
-    "elasticity_multiplier": "0x6"
-  },
-  "regolith_time": 0,
-  "canyon_time": 1704992401,
-  "delta_time": 1708560000,
-  "ecotone_time": 1710374401,
-  "fjord_time": 1720627201,
-  "granite_time": 1726070401,
-  "batch_inbox_address": "0xFF00000000000000000000000000000000000010",
-  "deposit_contract_address": "0xbEb5Fc579115071764c7423A4f12eDde41f106Ed",
-  "l1_system_config_address": "0x229047fed2591dbec1eF1118d64F7aF3dB9EB290",
-  "protocol_versions_address": "0x0000000000000000000000000000000000000000"
-}
\ No newline at end of file
diff --git a/rollup-configs/288882.json b/rollup-configs/288882.json
deleted file mode 100644
index 51228649..00000000
--- a/rollup-configs/288882.json
+++ /dev/null
@@ -1,47 +0,0 @@
-{
-  "genesis": {
-    "l1": {
-      "number": 6576100,
-      "hash": "0xb6404ecff691edd6895c474f4dfca5b3e27b92a19deabd80cbe05c75c1b4c924"
-    },
-    "l2": {
-      "number": 0,
-      "hash": "0xe919706177d2c568ed21a4b443d421c8098b4e453a29bd432258fab3f7fe1d07"
-    },
-    "l2_time": 1724692140,
-    "system_config": {
-      "batcherAddr": "0xe40d3fb61a6a9e16ffd17ae4ed225de00a4b16fd",
"0xe40d3fb61a6a9e16ffd17ae4ed225de00a4b16fd", - "overhead": "0x834", - "scalar": "0xf4240", - "gasLimit": 30000000, - "baseFeeScalar": null, - "blobBaseFeeScalar": null - } - }, - "block_time": 2, - "max_sequencer_drift": 600, - "seq_window_size": 3600, - "channel_timeout": 300, - "granite_channel_timeout": 50, - "l1_chain_id": 11155111, - "l2_chain_id": 288882, - "base_fee_params": { - "max_change_denominator": "0x32", - "elasticity_multiplier": "0x6" - }, - "canyon_base_fee_params": { - "max_change_denominator": "0xfa", - "elasticity_multiplier": "0x6" - }, - "regolith_time": 0, - "canyon_time": 1724692140, - "delta_time": 1724692140, - "ecotone_time": 1724692141, - "fjord_time": 1724692150, - "granite_time": 1724914800, - "batch_inbox_address": "0xfff0000000000000000000000000000000288882", - "deposit_contract_address": "0xd00d5cc5620697a31014e5594aabba590793836d", - "l1_system_config_address": "0xcc3c025036612b849340d6866ec0bd4d2d794a36", - "protocol_versions_address": "0x0000000000000000000000000000000000000000", - "da_challenge_address": "0x0000000000000000000000000000000000000000" -} \ No newline at end of file diff --git a/rollup-configs/8453.json b/rollup-configs/8453.json deleted file mode 100644 index 803a9958..00000000 --- a/rollup-configs/8453.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "genesis": { - "l1": { - "hash": "0x5c13d307623a926cd31415036c8b7fa14572f9dac64528e857a470511fc30771", - "number": "0x10ac028" - }, - "l2": { - "hash": "0xf712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd", - "number": "0x0" - }, - "l2_time": 1686789347, - "system_config": { - "batcherAddr": "0x5050f69a9786f081509234f1a7f4684b5e5b76c9", - "overhead": "0xbc", - "scalar": "0xa6fe0", - "gasLimit": 30000000, - "baseFeeScalar": null, - "blobBaseFeeScalar": null - } - }, - "block_time": 2, - "max_sequencer_drift": 600, - "seq_window_size": 3600, - "channel_timeout": 300, - "granite_channel_timeout": 50, - "l1_chain_id": 1, - "l2_chain_id": 8453, - "base_fee_params": { - "max_change_denominator": "0x32", - "elasticity_multiplier": "0x6" - }, - "canyon_base_fee_params": { - "max_change_denominator": "0xfa", - "elasticity_multiplier": "0x6" - }, - "regolith_time": 0, - "canyon_time": 1704992401, - "delta_time": 1708560000, - "ecotone_time": 1710374401, - "fjord_time": 1720627201, - "granite_time": 1726070401, - "batch_inbox_address": "0xff00000000000000000000000000000000008453", - "deposit_contract_address": "0x49048044d57e1c92a77f79988d21fa8faf74e97e", - "l1_system_config_address": "0x73a79fab69143498ed3712e519a88a918e1f4072", - "protocol_versions_address": "0x0000000000000000000000000000000000000000" -} \ No newline at end of file diff --git a/scripts/prove/bin/agg.rs b/scripts/prove/bin/agg.rs index 8f3719ae..08cbdbad 100644 --- a/scripts/prove/bin/agg.rs +++ b/scripts/prove/bin/agg.rs @@ -1,5 +1,3 @@ -use std::fs; - use anyhow::Result; use cargo_metadata::MetadataCommand; use clap::Parser; @@ -9,6 +7,7 @@ use op_succinct_host_utils::{ get_agg_proof_stdin, }; use sp1_sdk::{utils, HashableKey, ProverClient, SP1Proof, SP1ProofWithPublicValues}; +use std::fs; pub const AGG_ELF: &[u8] = include_bytes!("../../../elf/aggregation-elf"); pub const MULTI_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/range-elf"); @@ -66,7 +65,7 @@ async fn main() -> Result<()> { let args = Args::parse(); let prover = ProverClient::new(); - let fetcher = OPSuccinctDataFetcher::new().await; + let fetcher = OPSuccinctDataFetcher::default(); let l2_chain_id = fetcher.get_chain_id(RPCMode::L2).await?; let (proofs, 
diff --git a/scripts/prove/bin/multi.rs b/scripts/prove/bin/multi.rs
index 28b64147..d0664f72 100644
--- a/scripts/prove/bin/multi.rs
+++ b/scripts/prove/bin/multi.rs
@@ -1,5 +1,3 @@
-use std::{fs, time::Instant};
-
 use anyhow::Result;
 use clap::Parser;
 use op_succinct_host_utils::{
@@ -10,6 +8,7 @@ use op_succinct_host_utils::{
     ProgramType,
 };
 use sp1_sdk::{utils, ProverClient};
+use std::{fs, time::Instant};
 
 pub const MULTI_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/range-elf");
 
@@ -44,7 +43,7 @@ async fn main() -> Result<()> {
     utils::setup_logger();
     let args = Args::parse();
 
-    let data_fetcher = OPSuccinctDataFetcher::new().await;
+    let data_fetcher = OPSuccinctDataFetcher::default();
 
     let cache_mode = if args.use_cache {
         CacheMode::KeepCache
diff --git a/scripts/prove/bin/single.rs b/scripts/prove/bin/single.rs
index a77f9ca2..bcc08871 100644
--- a/scripts/prove/bin/single.rs
+++ b/scripts/prove/bin/single.rs
@@ -1,5 +1,3 @@
-use std::{env, time::Instant};
-
 use anyhow::Result;
 use clap::Parser;
 use op_succinct_host_utils::{
@@ -10,6 +8,7 @@ use op_succinct_host_utils::{
     ProgramType,
 };
 use sp1_sdk::{utils, ProverClient};
+use std::time::Instant;
 
 pub const SINGLE_BLOCK_ELF: &[u8] = include_bytes!("../../../elf/fault-proof-elf");
 
@@ -36,10 +35,7 @@ async fn main() -> Result<()> {
     let args = Args::parse();
     utils::setup_logger();
 
-    let data_fetcher = OPSuccinctDataFetcher {
-        l2_rpc: env::var("L2_RPC").expect("L2_RPC is not set."),
-        ..Default::default()
-    };
+    let data_fetcher = OPSuccinctDataFetcher::default();
 
     let l2_safe_head = args.l2_block - 1;
 
diff --git a/scripts/utils/bin/cost_estimator.rs b/scripts/utils/bin/cost_estimator.rs
index 2c13548e..5645d43d 100644
--- a/scripts/utils/bin/cost_estimator.rs
+++ b/scripts/utils/bin/cost_estimator.rs
@@ -161,7 +161,7 @@ async fn execute_blocks_parallel(
         let execution_stats_map = Arc::clone(&execution_stats_map);
         let handle = tokio::spawn(async move {
             // Create a new data fetcher. This avoids the runtime dropping the provider dispatch task.
-            let data_fetcher = OPSuccinctDataFetcher::new().await;
+            let data_fetcher = OPSuccinctDataFetcher::default();
             let mut exec_stats = ExecutionStats::default();
             exec_stats.add_block_data(&data_fetcher, start, end).await;
             let mut execution_stats_map = execution_stats_map.lock().unwrap();
@@ -292,7 +292,7 @@ async fn main() -> Result<()> {
     utils::setup_logger();
     let args = HostArgs::parse();
 
-    let data_fetcher = OPSuccinctDataFetcher::new().await;
+    let data_fetcher = OPSuccinctDataFetcher::default();
 
     let l2_chain_id = data_fetcher.get_chain_id(RPCMode::L2).await?;
 
diff --git a/scripts/utils/bin/fetch_and_save_proof.rs b/scripts/utils/bin/fetch_and_save_proof.rs
index a1f6d3c7..2163caa1 100644
--- a/scripts/utils/bin/fetch_and_save_proof.rs
+++ b/scripts/utils/bin/fetch_and_save_proof.rs
@@ -1,7 +1,6 @@
 use alloy::{hex, sol_types::SolValue};
 use anyhow::Result;
 use clap::Parser;
-use dotenv::dotenv;
 use op_succinct_client_utils::{boot::BootInfoStruct, AGGREGATION_OUTPUTS_SIZE};
 use sp1_sdk::{NetworkProver, SP1ProofWithPublicValues};
 use std::{fs, path::Path};
@@ -28,7 +27,7 @@ struct Args {
 #[tokio::main]
 async fn main() -> Result<()> {
-    dotenv().ok();
+    dotenv::dotenv().ok();
     let args = Args::parse();
 
     let prover = NetworkProver::new();
diff --git a/scripts/utils/bin/fetch_rollup_config.rs b/scripts/utils/bin/fetch_rollup_config.rs
index 21da5a37..e90cf68c 100644
--- a/scripts/utils/bin/fetch_rollup_config.rs
+++ b/scripts/utils/bin/fetch_rollup_config.rs
@@ -38,7 +38,7 @@ struct L2OOConfig {
 /// Update the L2OO config with the rollup config hash and other relevant data before the contract is deployed.
 ///
 /// Specifically, updates the following fields in `opsuccinctl2ooconfig.json`:
-/// - rollup_config_hash: Get the hash of the rollup config in rollup-configs/{l2_chain_id}.json.
+/// - rollup_config_hash: Get the hash of the rollup config from the rollup config file.
 /// - l2_block_time: Get the block time from the rollup config.
 /// - starting_block_number: If `USE_CACHED_STARTING_BLOCK` is `false`, set starting_block_number to 10 blocks before the latest block on L2.
 /// - starting_output_root: Set to the output root of the starting block number.
@@ -95,9 +95,13 @@ async fn update_l2oo_config() -> Result<()> {
         .unwrap();
 
     // Set the submission interval.
-    l2oo_config.submission_interval = env::var("SUBMISSION_INTERVAL")
+    // The order of precedence is:
+    // 1. SUBMISSION_INTERVAL environment variable
+    // 2. 1000 (default)
+    let submission_interval: u64 = env::var("SUBMISSION_INTERVAL")
         .unwrap_or("1000".to_string())
         .parse()?;
+    l2oo_config.submission_interval = submission_interval;
 
     // Set the chain id.
     l2oo_config.chain_id = data_fetcher.get_chain_id(RPCMode::L2).await?;
@@ -164,14 +168,32 @@ fn find_project_root() -> Option<PathBuf> {
     Some(path)
 }
 
+use clap::Parser;
+
+#[derive(Parser, Debug)]
+#[command(author, version, about, long_about = None)]
+struct Args {
+    /// Path to the environment file to load.
+    #[arg(long, default_value = ".env")]
+    env_file: String,
+}
+
 #[tokio::main]
 async fn main() -> Result<()> {
+    let args = Args::parse();
+
     // This fetches the .env file from the project root. If the command is invoked in the contracts/ directory,
     // the .env file in the root of the repo is used.
     if let Some(root) = find_project_root() {
-        dotenv::from_path(root.join(".env")).ok();
+        dotenv::from_path(root.join(args.env_file)).ok();
     } else {
-        eprintln!("Warning: Could not find project root. .env file not loaded.");
+        eprintln!(
+            "Warning: Could not find project root. {} file not loaded.",
+            args.env_file
+        );
     }
-    update_l2oo_config().await
+
+    update_l2oo_config().await?;
+
+    Ok(())
 }
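The precedence comment added to `update_l2oo_config` above boils down to env-var-first-then-default. A standalone sketch of that lookup using only std (the free function name here is illustrative, not from the diff):

use std::env;

fn submission_interval() -> u64 {
    env::var("SUBMISSION_INTERVAL")
        .unwrap_or_else(|_| "1000".to_string())
        .parse()
        .expect("SUBMISSION_INTERVAL must parse as u64")
}

fn main() {
    // With SUBMISSION_INTERVAL unset this prints 1000; with it set, its value wins.
    println!("submission_interval = {}", submission_interval());
}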
{} file not loaded.", + args.env_file + ); } - update_l2oo_config().await + + update_l2oo_config().await?; + + Ok(()) } diff --git a/utils/host/src/fetcher.rs b/utils/host/src/fetcher.rs index 2fb5bcc5..eefc7a4b 100644 --- a/utils/host/src/fetcher.rs +++ b/utils/host/src/fetcher.rs @@ -19,7 +19,7 @@ use tokio::time::sleep; use alloy_primitives::keccak256; use crate::{ - rollup_config::{merge_rollup_config, save_rollup_config}, + rollup_config::{get_rollup_config_path, merge_rollup_config, save_rollup_config}, L2Output, ProgramType, }; @@ -28,11 +28,8 @@ use crate::{ /// given block number. It is used to generate the boot info for the native host program. /// TODO: Add retries for all requests (3 retries). pub struct OPSuccinctDataFetcher { - pub l1_rpc: String, + pub rpc_config: RPCConfig, pub l1_provider: Arc>>, - pub l1_beacon_rpc: String, - pub l2_rpc: String, - pub l2_node_rpc: String, pub l2_provider: Arc>>, pub rollup_config: RollupConfig, } @@ -43,6 +40,14 @@ impl Default for OPSuccinctDataFetcher { } } +#[derive(Debug, Clone)] +pub struct RPCConfig { + l1_rpc: String, + l1_beacon_rpc: String, + l2_rpc: String, + l2_node_rpc: String, +} + /// The mode corresponding to the chain we are fetching data for. #[derive(Clone, Copy)] pub enum RPCMode { @@ -59,6 +64,17 @@ pub enum CacheMode { DeleteCache, } +fn get_rpcs() -> RPCConfig { + RPCConfig { + l1_rpc: env::var("L1_RPC").unwrap_or_else(|_| "http://localhost:8545".to_string()), + l1_beacon_rpc: env::var("L1_BEACON_RPC") + .unwrap_or_else(|_| "http://localhost:5052".to_string()), + l2_rpc: env::var("L2_RPC").unwrap_or_else(|_| "http://localhost:9545".to_string()), + l2_node_rpc: env::var("L2_NODE_RPC") + .unwrap_or_else(|_| "http://localhost:5058".to_string()), + } +} + /// The info to fetch for a block. pub struct BlockInfo { pub block_number: u64, @@ -67,27 +83,21 @@ pub struct BlockInfo { } impl OPSuccinctDataFetcher { - /// Gets the RPC URL's and saves the rollup config for the chain to `rollup-configs/{l2_chain_id}.json`. + /// Gets the RPC URL's and saves the rollup config for the chain to the rollup config file. pub async fn new() -> Self { - dotenv::dotenv().ok(); - let l1_rpc = env::var("L1_RPC").unwrap_or_else(|_| "http://localhost:8545".to_string()); - let l1_provider = - Arc::new(ProviderBuilder::default().on_http(Url::from_str(&l1_rpc).unwrap())); - let l1_beacon_rpc = - env::var("L1_BEACON_RPC").unwrap_or_else(|_| "http://localhost:5052".to_string()); - let l2_rpc = env::var("L2_RPC").unwrap_or_else(|_| "http://localhost:9545".to_string()); - let l2_node_rpc = - env::var("L2_NODE_RPC").unwrap_or_else(|_| "http://localhost:5058".to_string()); - let l2_provider = - Arc::new(ProviderBuilder::default().on_http(Url::from_str(&l2_rpc).unwrap())); + let rpc_config = get_rpcs(); + + let l1_provider = Arc::new( + ProviderBuilder::default().on_http(Url::from_str(&rpc_config.l1_rpc).unwrap()), + ); + let l2_provider = Arc::new( + ProviderBuilder::default().on_http(Url::from_str(&rpc_config.l2_rpc).unwrap()), + ); let mut fetcher = OPSuccinctDataFetcher { - l1_rpc, + rpc_config, l1_provider, - l1_beacon_rpc, - l2_rpc, l2_provider, - l2_node_rpc, rollup_config: RollupConfig::default(), }; @@ -105,10 +115,10 @@ impl OPSuccinctDataFetcher { /// Get the RPC URL for the given RPC mode. 
diff --git a/utils/host/src/rollup_config.rs b/utils/host/src/rollup_config.rs
index 580ce0a0..16a75607 100644
--- a/utils/host/src/rollup_config.rs
+++ b/utils/host/src/rollup_config.rs
@@ -1,4 +1,5 @@
 use std::fs;
+use std::path::PathBuf;
 
 use alloy::eips::eip1559::BaseFeeParams;
 use alloy_primitives::Address;
@@ -113,30 +114,35 @@ pub(crate) fn merge_rollup_config(
     Ok(rollup_config)
 }
 
-/// Save rollup config to rollup-configs/{l2_chain_id}.json in the workspace root.
+/// Save rollup config to the rollup config file.
 pub fn save_rollup_config(rollup_config: &RollupConfig) -> Result<()> {
-    let workspace_root = cargo_metadata::MetadataCommand::new()
-        .exec()?
-        .workspace_root;
-    // Create rollup-configs directory if it doesn't exist.
-    let rollup_configs_dir = workspace_root.join("rollup-configs");
+    let rollup_config_path = get_rollup_config_path(rollup_config.l2_chain_id)?;
+
+    // Create the directory for the rollup config if it doesn't exist.
+    let rollup_configs_dir = rollup_config_path.parent().unwrap();
     if !rollup_configs_dir.exists() {
-        fs::create_dir_all(&rollup_configs_dir)?;
+        fs::create_dir_all(rollup_configs_dir)?;
     }
 
-    let rollup_config_path =
-        workspace_root.join(format!("rollup-configs/{}.json", rollup_config.l2_chain_id));
+    // Write the rollup config to the file.
     let rollup_config_str = serde_json::to_string_pretty(rollup_config)?;
     fs::write(rollup_config_path, rollup_config_str)?;
 
     Ok(())
 }
 
-/// Read rollup config from rollup-configs/{l2_chain_id}.json in the workspace root.
-pub fn read_rollup_config(l2_chain_id: u64) -> Result<RollupConfig> {
+/// Get the path to the rollup config file for the given chain id.
+pub fn get_rollup_config_path(l2_chain_id: u64) -> Result<PathBuf> {
     let workspace_root = cargo_metadata::MetadataCommand::new()
-        .exec()?
+        .exec()
+        .expect("Failed to get workspace root")
         .workspace_root;
-    let rollup_config_path = workspace_root.join(format!("rollup-configs/{}.json", l2_chain_id));
+    let rollup_config_path = workspace_root.join(format!("configs/{}/rollup.json", l2_chain_id));
+    Ok(rollup_config_path.into())
+}
+
+/// Read rollup config from the rollup config file.
+pub fn read_rollup_config(l2_chain_id: u64) -> Result<RollupConfig> {
+    let rollup_config_path = get_rollup_config_path(l2_chain_id)?;
     let rollup_config_str = fs::read_to_string(rollup_config_path)?;
     let rollup_config: RollupConfig = serde_json::from_str(&rollup_config_str)?;
     Ok(rollup_config)
 }
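With this change every per-chain config lives at configs/{l2_chain_id}/rollup.json instead of rollup-configs/{l2_chain_id}.json, and `get_rollup_config_path` is the single place that encodes the layout. A sketch of the new path computation with the workspace root factored out as a parameter (the helper is illustrative; the real function resolves the root via cargo_metadata as shown above):

use std::path::PathBuf;

fn rollup_config_path(workspace_root: &str, l2_chain_id: u64) -> PathBuf {
    PathBuf::from(workspace_root).join(format!("configs/{}/rollup.json", l2_chain_id))
}

fn main() {
    assert_eq!(
        rollup_config_path("/repo", 13269),
        PathBuf::from("/repo/configs/13269/rollup.json")
    );
}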