diff --git a/hive_integration/nodocker/engine/node.nim b/hive_integration/nodocker/engine/node.nim
index 43bc494329..07238370e1 100644
--- a/hive_integration/nodocker/engine/node.nim
+++ b/hive_integration/nodocker/engine/node.nim
@@ -42,8 +42,6 @@ proc processBlock(
   ## implementations (but can be savely removed, as well.)
   ## variant of `processBlock()` where the `header` argument is explicitely set.
   template header: Header = blk.header
-  var dbTx = vmState.com.db.ctx.txFrameBegin()
-  defer: dbTx.dispose()
 
   let com = vmState.com
   if com.daoForkSupport and
@@ -64,7 +62,7 @@ proc processBlock(
     discard com.db.persistUncles(blk.uncles)
 
   # EIP-3675: no reward for miner in POA/POS
-  if com.proofOfStake(header):
+  if com.proofOfStake(header, vmState.stateDB.txFrame):
     vmState.calculateReward(header, blk.uncles)
 
   vmState.mutateStateDB:
diff --git a/nimbus/common/common.nim b/nimbus/common/common.nim
index 65215a8bf9..0902cd9559 100644
--- a/nimbus/common/common.nim
+++ b/nimbus/common/common.nim
@@ -105,12 +105,6 @@ type
     taskpool*: Taskpool
       ## Shared task pool for offloading computation to other threads
 
-# ------------------------------------------------------------------------------
-# Forward declarations
-# ------------------------------------------------------------------------------
-
-proc proofOfStake*(com: CommonRef, header: Header): bool {.gcsafe.}
-
 # ------------------------------------------------------------------------------
 # Private helper functions
 # ------------------------------------------------------------------------------
@@ -217,14 +211,13 @@ proc init(com : CommonRef,
 
   com.initializeDb()
 
-proc isBlockAfterTtd(com: CommonRef, header: Header): bool =
+proc isBlockAfterTtd(com: CommonRef, header: Header, txFrame: CoreDbTxRef): bool =
   if com.config.terminalTotalDifficulty.isNone:
     return false
 
   let
     ttd = com.config.terminalTotalDifficulty.get()
-    # TODO use head frame?
-    ptd = com.db.baseTxFrame().getScore(header.parentHash).valueOr:
+    ptd = txFrame.getScore(header.parentHash).valueOr:
       return false
     td = ptd + header.difficulty
   ptd >= ttd and td >= ttd
@@ -334,7 +327,7 @@ func isCancunOrLater*(com: CommonRef, t: EthTime): bool =
 func isPragueOrLater*(com: CommonRef, t: EthTime): bool =
   com.config.pragueTime.isSome and t >= com.config.pragueTime.get
 
-proc proofOfStake*(com: CommonRef, header: Header): bool =
+proc proofOfStake*(com: CommonRef, header: Header, txFrame: CoreDbTxRef): bool =
   if com.config.posBlock.isSome:
     # see comments of posBlock in common/hardforks.nim
     header.number >= com.config.posBlock.get
@@ -342,7 +335,7 @@ proc proofOfStake*(com: CommonRef, header: Header): bool =
     header.number >= com.config.mergeNetsplitBlock.get
   else:
     # This costly check is only executed from test suite
-    com.isBlockAfterTtd(header)
+    com.isBlockAfterTtd(header, txFrame)
 
 func depositContractAddress*(com: CommonRef): Address =
   com.config.depositContractAddress.get(default(Address))
diff --git a/nimbus/core/chain/forked_chain.nim b/nimbus/core/chain/forked_chain.nim
index 68b156c54d..88e311fc0d 100644
--- a/nimbus/core/chain/forked_chain.nim
+++ b/nimbus/core/chain/forked_chain.nim
@@ -474,6 +474,9 @@ proc importBlock*(c: ForkedChainRef, blk: Block): Result[void, string] =
   template header(): Header =
     blk.header
 
+  if header.parentHash == c.baseHash:
+    return c.validateBlock(c.baseHeader, c.baseTxFrame, blk)
+
   c.blocks.withValue(header.parentHash, bd) do:
     # TODO: If engine API keep importing blocks
     # but not finalized it, e.g. current chain length > StagedBlocksThreshold
@@ -490,6 +493,7 @@ proc importBlock*(c: ForkedChainRef, blk: Block): Result[void, string] =
       parentHash = header.parentHash.short
     return err("Block is not part of valid chain")
 
+  ok()
 
 proc forkChoice*(c: ForkedChainRef,
     headHash: Hash32,
@@ -572,6 +576,9 @@ proc haveBlockLocally*(c: ForkedChainRef, blockHash: Hash32): bool =
   c.baseTxFrame.headerExists(blockHash)
 
 func txFrame*(c: ForkedChainRef, blockHash: Hash32): CoreDbTxRef =
+  if blockHash == c.baseHash:
+    return c.baseTxFrame
+
   c.blocks.withValue(blockHash, bd) do:
     return bd[].txFrame
diff --git a/nimbus/core/executor/process_block.nim b/nimbus/core/executor/process_block.nim
index d9ad7ae6ac..141bd8f227 100644
--- a/nimbus/core/executor/process_block.nim
+++ b/nimbus/core/executor/process_block.nim
@@ -277,7 +277,7 @@ proc processBlock*(
   ?vmState.procBlkPreamble(blk, skipValidation, skipReceipts, skipUncles, taskpool)
 
   # EIP-3675: no reward for miner in POA/POS
-  if not vmState.com.proofOfStake(blk.header):
+  if not vmState.com.proofOfStake(blk.header, vmState.stateDB.txFrame):
     vmState.calculateReward(blk.header, blk.uncles)
 
   ?vmState.procBlkEpilogue(blk, skipValidation, skipReceipts)
diff --git a/nimbus/core/validate.nim b/nimbus/core/validate.nim
index 397556347c..d07da1487d 100644
--- a/nimbus/core/validate.nim
+++ b/nimbus/core/validate.nim
@@ -41,6 +41,7 @@ proc validateHeader(
     com: CommonRef;
     blk: Block;
     parentHeader: Header;
+    txFrame: CoreDbTxRef;
       ): Result[void,string] =
   template header: Header = blk.header
   # TODO this code is used for validating uncles also, though these get passed
@@ -76,7 +77,7 @@ proc validateHeader(
     if header.extraData != daoForkBlockExtraData:
       return err("header extra data should be marked DAO")
 
-  if com.proofOfStake(header):
+  if com.proofOfStake(header, txFrame):
     # EIP-4399 and EIP-3675
     # no need to check mixHash because EIP-4399 override this field
     # checking rule
@@ -159,7 +160,7 @@ proc validateUncles(com: CommonRef; header: Header; txFrame: CoreDbTxRef,
     let uncleParent = ?txFrame.getBlockHeader(uncle.parentHash)
     ? com.validateHeader(
-      Block.init(uncle, BlockBody()), uncleParent)
+      Block.init(uncle, BlockBody()), uncleParent, txFrame)
 
   ok()
@@ -372,12 +373,12 @@ proc validateHeaderAndKinship*(
       return err("Header.extraData larger than 32 bytes")
     return ok()
 
-  ? com.validateHeader(blk, parent)
+  ? com.validateHeader(blk, parent, txFrame)
 
   if blk.uncles.len > MAX_UNCLES:
     return err("Number of uncles exceed limit.")
 
-  if not com.proofOfStake(header):
+  if not com.proofOfStake(header, txFrame):
     ? com.validateUncles(header, txFrame, blk.uncles)
 
   ok()
diff --git a/nimbus/db/aristo/aristo_api.nim b/nimbus/db/aristo/aristo_api.nim
index 2a0dd080ba..81b446588f 100644
--- a/nimbus/db/aristo/aristo_api.nim
+++ b/nimbus/db/aristo/aristo_api.nim
@@ -481,7 +481,7 @@ func dup*(api: AristoApiRef): AristoApiRef =
   result = AristoApiRef()
   result[] = api[]
   when AutoValidateApiHooks:
-    result.validate
+    result[].validate
 
 # ------------------------------------------------------------------------------
 # Public profile API constuctor
diff --git a/nimbus/db/aristo/aristo_check/check_be.nim b/nimbus/db/aristo/aristo_check/check_be.nim
index 7e15d3a098..792d71845c 100644
--- a/nimbus/db/aristo/aristo_check/check_be.nim
+++ b/nimbus/db/aristo/aristo_check/check_be.nim
@@ -51,7 +51,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
   for (rvid,key) in T.walkKeyBe db:
     if topVidBe.vid < rvid.vid:
       topVidBe = rvid
-    let _ = db.getVtxBE(rvid).valueOr:
+    let _ = db.getVtxBe(rvid).valueOr:
       return err((rvid.vid,CheckBeVtxMissing))
 
   # Compare calculated `vTop` against database state
diff --git a/nimbus/db/aristo/aristo_compute.nim b/nimbus/db/aristo/aristo_compute.nim
index ba97edc3a1..67834916e0 100644
--- a/nimbus/db/aristo/aristo_compute.nim
+++ b/nimbus/db/aristo/aristo_compute.nim
@@ -124,7 +124,7 @@ proc getKey(
     db: AristoTxRef, rvid: RootedVertexID, skipLayers: static bool
 ): Result[((HashKey, VertexRef), int), AristoError] =
   ok when skipLayers:
-    (?db.db.getKeyUbe(rvid, {GetVtxFlag.PeekCache}), -2)
+    (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2)
   else:
     ?db.getKeyRc(rvid, {})
@@ -281,7 +281,7 @@ proc computeKeyImpl(
 ): Result[HashKey, AristoError] =
   let (keyvtx, level) =
     when skipLayers:
-      (?db.db.getKeyUbe(rvid, {GetVtxFlag.PeekCache}), -2)
+      (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2)
     else:
       ?db.getKeyRc(rvid, {})
diff --git a/nimbus/db/aristo/aristo_desc.nim b/nimbus/db/aristo/aristo_desc.nim
index f99f223418..f69117c6b5 100644
--- a/nimbus/db/aristo/aristo_desc.nim
+++ b/nimbus/db/aristo/aristo_desc.nim
@@ -152,12 +152,14 @@ func hash*(db: AristoDbRef): Hash =
 # Public helpers
 # ------------------------------------------------------------------------------
 
-iterator rstack*(tx: AristoTxRef): LayerRef =
+iterator rstack*(tx: AristoTxRef): (LayerRef, int) =
   # Stack in reverse order
   var tx = tx
+  var i = 0
 
   while tx != nil:
-    yield tx.layer
+    let level = if tx.parent == nil: -1 else: i
+    yield (tx.layer, level)
     tx = tx.parent
 
 proc deltaAtLevel*(db: AristoTxRef, level: int): LayerRef =
diff --git a/nimbus/db/aristo/aristo_fetch.nim b/nimbus/db/aristo/aristo_fetch.nim
index fd56492a11..ddb58f5707 100644
--- a/nimbus/db/aristo/aristo_fetch.nim
+++ b/nimbus/db/aristo/aristo_fetch.nim
@@ -193,11 +193,11 @@ proc hasStoragePayload(
 proc fetchLastSavedState*(
     db: AristoTxRef;
       ): Result[SavedState,AristoError] =
-  ## Wrapper around `getLstUbe()`. The function returns the state of the last
+  ## Wrapper around `getLstBe()`. The function returns the state of the last
   ## saved state. This is a Merkle hash tag for vertex with ID 1 and a bespoke
   ## `uint64` identifier (may be interpreted as block number.)
   # TODO store in frame!!
-  db.db.getLstUbe()
+  db.db.getLstBe()
 
 proc fetchAccountRecord*(
     db: AristoTxRef;
diff --git a/nimbus/db/aristo/aristo_get.nim b/nimbus/db/aristo/aristo_get.nim
index 173fc28167..a0ebc6c725 100644
--- a/nimbus/db/aristo/aristo_get.nim
+++ b/nimbus/db/aristo/aristo_get.nim
@@ -14,7 +14,6 @@
 {.push raises: [].}
 
 import
-  std/tables,
   results,
   "."/[aristo_desc, aristo_layers]
 
@@ -22,16 +21,16 @@ import
 # Public functions
 # ------------------------------------------------------------------------------
 
-proc getTuvUbe*(
+proc getTuvBe*(
     db: AristoDbRef;
      ): Result[VertexID,AristoError] =
-  ## Get the ID generator state from the unfiltered backened if available.
+  ## Get the ID generator state from the backened if available.
  let be = db.backend
  if not be.isNil:
    return be.getTuvFn()
  err(GetTuvNotFound)
 
-proc getLstUbe*(
+proc getLstBe*(
    db: AristoDbRef;
      ): Result[SavedState,AristoError] =
  ## Get the last saved state
@@ -40,23 +39,23 @@ proc getLstUbe*(
    return be.getLstFn()
  err(GetLstNotFound)
 
-proc getVtxUbe*(
+proc getVtxBe*(
    db: AristoDbRef;
    rvid: RootedVertexID;
    flags: set[GetVtxFlag] = {};
      ): Result[VertexRef,AristoError] =
-  ## Get the vertex from the unfiltered backened if available.
+  ## Get the vertex from the backened if available.
  let be = db.backend
  if not be.isNil:
    return be.getVtxFn(rvid, flags)
  err GetVtxNotFound
 
-proc getKeyUbe*(
+proc getKeyBe*(
    db: AristoDbRef;
    rvid: RootedVertexID;
    flags: set[GetVtxFlag];
      ): Result[(HashKey, VertexRef),AristoError] =
-  ## Get the Merkle hash/key from the unfiltered backend if available.
+  ## Get the Merkle hash/key from the backend if available.
  let be = db.backend
  if not be.isNil:
    return be.getKeyFn(rvid, flags)
@@ -64,45 +63,6 @@
 
 # ------------------
 
-proc getTuvBE*(
-    db: AristoDbRef;
-      ): Result[VertexID,AristoError] =
-  ## Get the ID generator state the `backened` layer if available.
-  if not db.txRef.isNil:
-    return ok(db.txRef.layer.vTop)
-  db.getTuvUbe()
-
-proc getVtxBE*(
-    db: AristoDbRef;
-    rvid: RootedVertexID;
-    flags: set[GetVtxFlag] = {};
-      ): Result[(VertexRef, int),AristoError] =
-  ## Get the vertex from the (filtered) backened if available.
-  if not db.txRef.isNil:
-    db.txRef.layer.sTab.withValue(rvid, w):
-      if w[].isValid:
-        return ok (w[], -1)
-      return err(GetVtxNotFound)
-  ok (? db.getVtxUbe(rvid, flags), -2)
-
-proc getKeyBE*(
-    db: AristoDbRef;
-    rvid: RootedVertexID;
-    flags: set[GetVtxFlag];
-      ): Result[((HashKey, VertexRef), int),AristoError] =
-  ## Get the merkle hash/key from the (filtered) backend if available.
-  if not db.txRef.isNil:
-    db.txRef.layer.kMap.withValue(rvid, w):
-      if w[].isValid:
-        return ok(((w[], nil), -1))
-    db.txRef.layer.sTab.withValue(rvid, s):
-      if s[].isValid:
-        return ok(((VOID_HASH_KEY, s[]), -1))
-      return err(GetKeyNotFound)
-  ok ((?db.getKeyUbe(rvid, flags)), -2)
-
-# ------------------
-
 proc getVtxRc*(
     db: AristoTxRef;
     rvid: RootedVertexID;
@@ -121,7 +81,7 @@ proc getVtxRc*(
   else:
     return err(GetVtxNotFound)
 
-  db.db.getVtxBE(rvid, flags)
+  ok (?db.db.getVtxBe(rvid, flags), -2)
 
 proc getVtx*(db: AristoTxRef; rvid: RootedVertexID, flags: set[GetVtxFlag] = {}): VertexRef =
   ## Cascaded attempt to fetch a vertex from the cache layers or the backend.
@@ -155,7 +115,7 @@ proc getKeyRc*(
       # The vertex is to be deleted. So is the value key.
       return err(GetKeyNotFound)
 
-  db.db.getKeyBE(rvid, flags)
+  ok (?db.db.getKeyBe(rvid, flags), -2)
 
 proc getKey*(db: AristoTxRef; rvid: RootedVertexID): HashKey =
   ## Cascaded attempt to fetch a vertex from the cache layers or the backend.
diff --git a/nimbus/db/aristo/aristo_layers.nim b/nimbus/db/aristo/aristo_layers.nim
index c3a5154de8..d9b685f5b4 100644
--- a/nimbus/db/aristo/aristo_layers.nim
+++ b/nimbus/db/aristo/aristo_layers.nim
@@ -11,7 +11,7 @@
 {.push raises: [].}
 
 import
-  std/[enumerate, sets, tables],
+  std/[sets, tables],
   eth/common/hashes,
   results,
   ./aristo_desc,
@@ -34,9 +34,9 @@ func layersGetVtx*(db: AristoTxRef; rvid: RootedVertexID): Opt[(VertexRef, int)]
   ## Find a vertex on the cache layers. An `ok()` result might contain a
   ## `nil` vertex if it is stored on the cache that way.
   ##
-  for i, w in enumerate(db.rstack):
+  for w, level in db.rstack:
     w.sTab.withValue(rvid, item):
-      return Opt.some((item[], i))
+      return Opt.some((item[], level))
 
   Opt.none((VertexRef, int))
 
@@ -45,11 +45,11 @@ func layersGetKey*(db: AristoTxRef; rvid: RootedVertexID): Opt[(HashKey, int)] =
   ## hash key if it is stored on the cache that way.
   ##
-  for i, w in enumerate(db.rstack):
+  for w, level in db.rstack:
     w.kMap.withValue(rvid, item):
-      return ok((item[], i))
+      return ok((item[], level))
     if rvid in w.sTab:
-      return Opt.some((VOID_HASH_KEY, i))
+      return Opt.some((VOID_HASH_KEY, level))
 
   Opt.none((HashKey, int))
 
@@ -58,14 +58,14 @@ func layersGetKeyOrVoid*(db: AristoTxRef; rvid: RootedVertexID): HashKey =
   (db.layersGetKey(rvid).valueOr (VOID_HASH_KEY, 0))[0]
 
 func layersGetAccLeaf*(db: AristoTxRef; accPath: Hash32): Opt[VertexRef] =
-  for w in db.rstack:
+  for w, _ in db.rstack:
     w.accLeaves.withValue(accPath, item):
       return Opt.some(item[])
 
   Opt.none(VertexRef)
 
 func layersGetStoLeaf*(db: AristoTxRef; mixPath: Hash32): Opt[VertexRef] =
-  for w in db.rstack:
+  for w, _ in db.rstack:
     w.stoLeaves.withValue(mixPath, item):
       return Opt.some(item[])
 
@@ -187,7 +187,7 @@ iterator layersWalkVtx*(
   ## the one with a zero vertex which are othewise skipped by the iterator.
   ## The `seen` argument must not be modified while the iterator is active.
   ##
-  for w in db.rstack:
+  for w, _ in db.rstack:
     for (rvid,vtx) in w.sTab.pairs:
       if rvid.vid notin seen:
         yield (rvid,vtx)
@@ -208,7 +208,7 @@ iterator layersWalkKey*(
   ## Walk over all `(VertexID,HashKey)` pairs on the cache layers. Note that
   ## entries are unsorted.
   var seen: HashSet[VertexID]
-  for w in db.rstack:
+  for w, _ in db.rstack:
     for (rvid,key) in w.kMap.pairs:
       if rvid.vid notin seen:
         yield (rvid,key)
diff --git a/nimbus/db/aristo/aristo_walk/persistent.nim b/nimbus/db/aristo/aristo_walk/persistent.nim
index 5c6d79fcca..bf2dc11983 100644
--- a/nimbus/db/aristo/aristo_walk/persistent.nim
+++ b/nimbus/db/aristo/aristo_walk/persistent.nim
@@ -36,7 +36,7 @@ iterator walkVtxBe*[T: RdbBackendRef](
     db: AristoDbRef;
     kinds = {Branch, Leaf};
      ): tuple[rvid: RootedVertexID, vtx: VertexRef] =
-  ## Iterate over filtered RocksDB backend vertices. This function depends on
+  ## Iterate over RocksDB backend vertices. This function depends on
   ## the particular backend type name which must match the backend descriptor.
   for (rvid,vtx) in walkVtxBeImpl[T](db, kinds):
     yield (rvid,vtx)
diff --git a/nimbus/db/core_db/base.nim b/nimbus/db/core_db/base.nim
index b70882400d..b1ff603386 100644
--- a/nimbus/db/core_db/base.nim
+++ b/nimbus/db/core_db/base.nim
@@ -652,13 +652,6 @@ proc recast*(
 # Public transaction related methods
 # ------------------------------------------------------------------------------
 
-# proc txFrameLevel*(db: CoreDbRef): int =
-#   ## Retrieve transaction level (zero if there is no pending transaction).
-#   ##
-#   db.setTrackNewApi BaseLevelFn
-#   result = CoreDbAccRef(db.ctx).call(txFrameLevel, db.ctx.mpt)
-#   db.ifTrackNewApi: debug logTxt, api, elapsed, result
-
 proc txFrameBegin*(ctx: CoreDbCtxRef, parent: CoreDbTxRef): CoreDbTxRef =
   ## Constructor
   ##
diff --git a/nimbus/db/core_db/base/api_tracking.nim b/nimbus/db/core_db/base/api_tracking.nim
index c0ddaa46d8..323bfcfd3c 100644
--- a/nimbus/db/core_db/base/api_tracking.nim
+++ b/nimbus/db/core_db/base/api_tracking.nim
@@ -77,7 +77,6 @@ type
     TxCommitFn = "commit"
     TxDisposeFn = "dispose"
-    TxFrameLevelFn = "level"
     TxRollbackFn = "rollback"
     TxSaveDisposeFn = "safeDispose"
diff --git a/nimbus/db/kvt/kvt_utils.nim b/nimbus/db/kvt/kvt_utils.nim
index d91fdc5cd4..ae25538a4e 100644
--- a/nimbus/db/kvt/kvt_utils.nim
+++ b/nimbus/db/kvt/kvt_utils.nim
@@ -14,7 +14,6 @@
 {.push raises: [].}
 
 import
-  std/tables,
   results,
   ./kvt_desc/desc_backend,
   "."/[kvt_desc, kvt_layers]
@@ -25,7 +24,7 @@ export results
 # Public functions, converters
 # ------------------------------------------------------------------------------
 
-proc getUbe*(
+proc getBe*(
     db: KvtDbRef;                     # Database
     key: openArray[byte];             # Key of database record
      ): Result[seq[byte],KvtError] =
@@ -37,7 +36,7 @@ proc getUbe*(
    return be.getKvpFn key
  err(GetNotFound)
 
-proc getUbeLen*(
+proc getBeLen*(
    db: KvtDbRef;                     # Database
    key: openArray[byte];             # Key of database record
      ): Result[int,KvtError] =
@@ -49,29 +48,6 @@ proc getUbeLen*(
    return be.lenKvpFn key
  err(GetNotFound)
 
-proc getBe*(
-    db: KvtDbRef;                     # Database
-    key: openArray[byte];             # Key of database record
-      ): Result[seq[byte],KvtError] =
-  ## Get the vertex from the (filtered) backened if available.
-  if not db.txRef.isNil:
-    db.txRef.layer.sTab.withValue(@key, w):
-      if w[].len == 0:
-        return err(GetNotFound)
-      return ok(w[])
-  db.getUbe key
-
-proc getBeLen*(
-    db: KvtDbRef;                     # Database
-    key: openArray[byte];             # Key of database record
-      ): Result[int,KvtError] =
-  ## Get the vertex from the (filtered) backened if available.
-  if not db.txRef.isNil:
-    db.txRef.layer.sTab.withValue(@key, w):
-      if w[].len == 0:
-        return err(GetNotFound)
-      return ok(w[].len)
-  db.getUbeLen key
 
 # ------------
diff --git a/nimbus/db/ledger.nim b/nimbus/db/ledger.nim
index 0bfc7b86a5..cbecf4073c 100644
--- a/nimbus/db/ledger.nim
+++ b/nimbus/db/ledger.nim
@@ -271,17 +271,16 @@ proc persistMode(acc: AccountRef): PersistMode =
       result = Remove
 
 proc persistCode(acc: AccountRef, ac: LedgerRef) =
-  discard
-  # if acc.code.len != 0 and not acc.code.persisted:
-  #   let rc = ac.kvt.put(
-  #     contractHashKey(acc.statement.codeHash).toOpenArray, acc.code.bytes())
-  #   if rc.isErr:
-  #     warn logTxt "persistCode()",
-  #       codeHash=acc.statement.codeHash, error=($$rc.error)
-  #   else:
-  #     # If the ledger changes rolled back entirely from the database, the ledger
-  #     # code cache must also be cleared!
-  #     acc.code.persisted = true
+  if acc.code.len != 0 and not acc.code.persisted:
+    let rc = ac.txFrame.put(
+      contractHashKey(acc.statement.codeHash).toOpenArray, acc.code.bytes())
+    if rc.isErr:
+      warn logTxt "persistCode()",
+        codeHash=acc.statement.codeHash, error=($$rc.error)
+    else:
+      # If the ledger changes rolled back entirely from the database, the ledger
+      # code cache must also be cleared!
+      acc.code.persisted = true
 
 proc persistStorage(acc: AccountRef, ac: LedgerRef) =
   const info = "persistStorage(): "
@@ -332,9 +331,9 @@ proc persistStorage(acc: AccountRef, ac: LedgerRef) =
       # over..
       let
         key = slotKey.data.slotHashToSlotKey
-        # rc = ac.kvt.put(key.toOpenArray, blobify(slot).data)
-      # if rc.isErr:
-      #   warn logTxt "persistStorage()", slot, error=($$rc.error)
+        rc = ac.txFrame.put(key.toOpenArray, blobify(slot).data)
+      if rc.isErr:
+        warn logTxt "persistStorage()", slot, error=($$rc.error)
 
   acc.overlayStorage.clear()
diff --git a/nimbus/evm/state.nim b/nimbus/evm/state.nim
index 2e8af25195..60e9413de3 100644
--- a/nimbus/evm/state.nim
+++ b/nimbus/evm/state.nim
@@ -241,7 +241,7 @@ proc proofOfStake*(vmState: BaseVMState): bool =
       number: vmState.blockNumber,
       parentHash: vmState.blockCtx.parentHash,
       difficulty: vmState.blockCtx.difficulty,
-    ))
+    ), vmState.stateDB.txFrame)
 
 proc difficultyOrPrevRandao*(vmState: BaseVMState): UInt256 =
   if vmState.proofOfStake():
diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim
index 2ddbdb6715..9d74901fba 100644
--- a/tests/test_blockchain_json.nim
+++ b/tests/test_blockchain_json.nim
@@ -58,8 +58,8 @@ proc parseEnv(node: JsonNode): TestEnv =
   result.network = node["network"].getStr
   result.pre = node["pre"]
 
-proc rootExists(db: CoreDbRef; root: Hash32): bool =
-  let state = db.baseTxFrame().getStateRoot().valueOr:
+proc rootExists(db: CoreDbTxRef; root: Hash32): bool =
+  let state = db.getStateRoot().valueOr:
     return false
   state == root
@@ -107,7 +107,7 @@ proc executeCase(node: JsonNode): bool =
         " expect: ", env.lastBlockHash
       return false
 
-  if not memDB.rootExists(lastStateRoot):
+  if not c.txFrame(headHash).rootExists(lastStateRoot):
     debugEcho "Last stateRoot not exists"
     return false
diff --git a/tests/test_genesis.nim b/tests/test_genesis.nim
index a0bc7f0d2f..b0cfb9da1e 100644
--- a/tests/test_genesis.nim
+++ b/tests/test_genesis.nim
@@ -35,7 +35,7 @@ proc proofOfStake(params: NetworkParams): bool =
     networkId = params.config.chainId.NetworkId,
     params = params)
   let header = com.genesisHeader
-  com.proofOfStake(header)
+  com.proofOfStake(header, com.db.baseTxFrame())
 
 proc genesisTest() =
   suite "Genesis":
@@ -71,7 +71,7 @@ proc customGenesisTest() =
       let genesisHash = hash32"a28d8d73e087a01d09d8cb806f60863652f30b6b6dfa4e0157501ff07d422399"
       check com.genesisHeader.stateRoot == stateRoot
       check com.genesisHeader.blockHash == genesisHash
-      check com.proofOfStake(com.genesisHeader) == false
+      check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false
 
     test "Devnet5.json (aka Kiln in all but chainId and TTD)":
       var cg: NetworkParams
@@ -81,7 +81,7 @@ proc customGenesisTest() =
       let genesisHash = hash32"51c7fe41be669f69c45c33a56982cbde405313342d9e2b00d7c91a7b284dd4f8"
       check com.genesisHeader.stateRoot == stateRoot
       check com.genesisHeader.blockHash == genesisHash
-      check com.proofOfStake(com.genesisHeader) == false
+      check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false
 
     test "Mainnet shadow fork 1":
       var cg: NetworkParams
@@ -93,7 +93,7 @@ proc customGenesisTest() =
       check com.genesisHeader.stateRoot == stateRoot
       check com.genesisHeader.blockHash == genesisHash
       check com.ttd.get == ttd
-      check com.proofOfStake(com.genesisHeader) == false
+      check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false
 
     test "Geth shadow fork 1":
       # parse using geth format should produce the same result with nimbus format
@@ -106,7 +106,7 @@ proc customGenesisTest() =
       check com.genesisHeader.stateRoot == stateRoot
       check com.genesisHeader.blockHash == genesisHash
       check com.ttd.get == ttd
-      check com.proofOfStake(com.genesisHeader) == false
+      check com.proofOfStake(com.genesisHeader, com.db.baseTxFrame()) == false
       check cg.config.mergeNetsplitBlock.isSome
       check cg.config.mergeNetsplitBlock.get == 14660963.BlockNumber