Skip to content

Commit

Permalink
fixes
Browse files Browse the repository at this point in the history
* get rid of "filtered backend" functions
* more txframes
* oopses
  • Loading branch information
arnetheduck committed Dec 19, 2024
1 parent 674b8b7 commit 256cd61
Show file tree
Hide file tree
Showing 20 changed files with 72 additions and 144 deletions.
4 changes: 1 addition & 3 deletions hive_integration/nodocker/engine/node.nim
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,6 @@ proc processBlock(
## implementations (but can be savely removed, as well.)
## variant of `processBlock()` where the `header` argument is explicitely set.
template header: Header = blk.header
var dbTx = vmState.com.db.ctx.txFrameBegin()
defer: dbTx.dispose()

let com = vmState.com
if com.daoForkSupport and
Expand All @@ -64,7 +62,7 @@ proc processBlock(
discard com.db.persistUncles(blk.uncles)

# EIP-3675: no reward for miner in POA/POS
if com.proofOfStake(header):
if com.proofOfStake(header, vmState.stateDB.txFrame):
vmState.calculateReward(header, blk.uncles)

vmState.mutateStateDB:
Expand Down
15 changes: 4 additions & 11 deletions nimbus/common/common.nim
Original file line number Diff line number Diff line change
Expand Up @@ -105,12 +105,6 @@ type
taskpool*: Taskpool
## Shared task pool for offloading computation to other threads

# ------------------------------------------------------------------------------
# Forward declarations
# ------------------------------------------------------------------------------

proc proofOfStake*(com: CommonRef, header: Header): bool {.gcsafe.}

# ------------------------------------------------------------------------------
# Private helper functions
# ------------------------------------------------------------------------------
Expand Down Expand Up @@ -217,14 +211,13 @@ proc init(com : CommonRef,

com.initializeDb()

proc isBlockAfterTtd(com: CommonRef, header: Header): bool =
proc isBlockAfterTtd(com: CommonRef, header: Header, txFrame: CoreDbTxRef): bool =
if com.config.terminalTotalDifficulty.isNone:
return false

let
ttd = com.config.terminalTotalDifficulty.get()
# TODO use head frame?
ptd = com.db.baseTxFrame().getScore(header.parentHash).valueOr:
ptd = txFrame.getScore(header.parentHash).valueOr:
return false
td = ptd + header.difficulty
ptd >= ttd and td >= ttd
Expand Down Expand Up @@ -334,15 +327,15 @@ func isCancunOrLater*(com: CommonRef, t: EthTime): bool =
func isPragueOrLater*(com: CommonRef, t: EthTime): bool =
com.config.pragueTime.isSome and t >= com.config.pragueTime.get

proc proofOfStake*(com: CommonRef, header: Header): bool =
proc proofOfStake*(com: CommonRef, header: Header, txFrame: CoreDbTxRef): bool =
if com.config.posBlock.isSome:
# see comments of posBlock in common/hardforks.nim
header.number >= com.config.posBlock.get
elif com.config.mergeNetsplitBlock.isSome:
header.number >= com.config.mergeNetsplitBlock.get
else:
# This costly check is only executed from test suite
com.isBlockAfterTtd(header)
com.isBlockAfterTtd(header, txFrame)

func depositContractAddress*(com: CommonRef): Address =
com.config.depositContractAddress.get(default(Address))
Expand Down
7 changes: 7 additions & 0 deletions nimbus/core/chain/forked_chain.nim
Original file line number Diff line number Diff line change
Expand Up @@ -474,6 +474,9 @@ proc importBlock*(c: ForkedChainRef, blk: Block): Result[void, string] =
template header(): Header =
blk.header

if header.parentHash == c.baseHash:
return c.validateBlock(c.baseHeader, c.baseTxFrame, blk)

c.blocks.withValue(header.parentHash, bd) do:
# TODO: If engine API keep importing blocks
# but not finalized it, e.g. current chain length > StagedBlocksThreshold
Expand All @@ -490,6 +493,7 @@ proc importBlock*(c: ForkedChainRef, blk: Block): Result[void, string] =
parentHash = header.parentHash.short
return err("Block is not part of valid chain")

ok()

proc forkChoice*(c: ForkedChainRef,
headHash: Hash32,
Expand Down Expand Up @@ -572,6 +576,9 @@ proc haveBlockLocally*(c: ForkedChainRef, blockHash: Hash32): bool =
c.baseTxFrame.headerExists(blockHash)

func txFrame*(c: ForkedChainRef, blockHash: Hash32): CoreDbTxRef =
if blockHash == c.baseHash:
return c.baseTxFrame

c.blocks.withValue(blockHash, bd) do:
return bd[].txFrame

Expand Down
2 changes: 1 addition & 1 deletion nimbus/core/executor/process_block.nim
Original file line number Diff line number Diff line change
Expand Up @@ -277,7 +277,7 @@ proc processBlock*(
?vmState.procBlkPreamble(blk, skipValidation, skipReceipts, skipUncles, taskpool)

# EIP-3675: no reward for miner in POA/POS
if not vmState.com.proofOfStake(blk.header):
if not vmState.com.proofOfStake(blk.header, vmState.stateDB.txFrame):
vmState.calculateReward(blk.header, blk.uncles)

?vmState.procBlkEpilogue(blk, skipValidation, skipReceipts)
Expand Down
9 changes: 5 additions & 4 deletions nimbus/core/validate.nim
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ proc validateHeader(
com: CommonRef;
blk: Block;
parentHeader: Header;
txFrame: CoreDbTxRef;
): Result[void,string] =
template header: Header = blk.header
# TODO this code is used for validating uncles also, though these get passed
Expand Down Expand Up @@ -76,7 +77,7 @@ proc validateHeader(
if header.extraData != daoForkBlockExtraData:
return err("header extra data should be marked DAO")

if com.proofOfStake(header):
if com.proofOfStake(header, txFrame):
# EIP-4399 and EIP-3675
# no need to check mixHash because EIP-4399 override this field
# checking rule
Expand Down Expand Up @@ -159,7 +160,7 @@ proc validateUncles(com: CommonRef; header: Header; txFrame: CoreDbTxRef,

let uncleParent = ?txFrame.getBlockHeader(uncle.parentHash)
? com.validateHeader(
Block.init(uncle, BlockBody()), uncleParent)
Block.init(uncle, BlockBody()), uncleParent, txFrame)

ok()

Expand Down Expand Up @@ -372,12 +373,12 @@ proc validateHeaderAndKinship*(
return err("Header.extraData larger than 32 bytes")
return ok()

? com.validateHeader(blk, parent)
? com.validateHeader(blk, parent, txFrame)

if blk.uncles.len > MAX_UNCLES:
return err("Number of uncles exceed limit.")

if not com.proofOfStake(header):
if not com.proofOfStake(header, txFrame):
? com.validateUncles(header, txFrame, blk.uncles)

ok()
Expand Down
2 changes: 1 addition & 1 deletion nimbus/db/aristo/aristo_api.nim
Original file line number Diff line number Diff line change
Expand Up @@ -481,7 +481,7 @@ func dup*(api: AristoApiRef): AristoApiRef =
result = AristoApiRef()
result[] = api[]
when AutoValidateApiHooks:
result.validate
result[].validate

# ------------------------------------------------------------------------------
# Public profile API constuctor
Expand Down
2 changes: 1 addition & 1 deletion nimbus/db/aristo/aristo_check/check_be.nim
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
for (rvid,key) in T.walkKeyBe db:
if topVidBe.vid < rvid.vid:
topVidBe = rvid
let _ = db.getVtxBE(rvid).valueOr:
let _ = db.getVtxBe(rvid).valueOr:
return err((rvid.vid,CheckBeVtxMissing))

# Compare calculated `vTop` against database state
Expand Down
4 changes: 2 additions & 2 deletions nimbus/db/aristo/aristo_compute.nim
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ proc getKey(
db: AristoTxRef, rvid: RootedVertexID, skipLayers: static bool
): Result[((HashKey, VertexRef), int), AristoError] =
ok when skipLayers:
(?db.db.getKeyUbe(rvid, {GetVtxFlag.PeekCache}), -2)
(?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2)
else:
?db.getKeyRc(rvid, {})

Expand Down Expand Up @@ -281,7 +281,7 @@ proc computeKeyImpl(
): Result[HashKey, AristoError] =
let (keyvtx, level) =
when skipLayers:
(?db.db.getKeyUbe(rvid, {GetVtxFlag.PeekCache}), -2)
(?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2)
else:
?db.getKeyRc(rvid, {})

Expand Down
6 changes: 4 additions & 2 deletions nimbus/db/aristo/aristo_desc.nim
Original file line number Diff line number Diff line change
Expand Up @@ -152,12 +152,14 @@ func hash*(db: AristoDbRef): Hash =
# Public helpers
# ------------------------------------------------------------------------------

iterator rstack*(tx: AristoTxRef): LayerRef =
iterator rstack*(tx: AristoTxRef): (LayerRef, int) =
# Stack in reverse order
var tx = tx

var i = 0
while tx != nil:
yield tx.layer
let level = if tx.parent == nil: -1 else: i
yield (tx.layer, level)
tx = tx.parent

proc deltaAtLevel*(db: AristoTxRef, level: int): LayerRef =
Expand Down
4 changes: 2 additions & 2 deletions nimbus/db/aristo/aristo_fetch.nim
Original file line number Diff line number Diff line change
Expand Up @@ -193,11 +193,11 @@ proc hasStoragePayload(
proc fetchLastSavedState*(
db: AristoTxRef;
): Result[SavedState,AristoError] =
## Wrapper around `getLstUbe()`. The function returns the state of the last
## Wrapper around `getLstBe()`. The function returns the state of the last
## saved state. This is a Merkle hash tag for vertex with ID 1 and a bespoke
## `uint64` identifier (may be interpreted as block number.)
# TODO store in frame!!
db.db.getLstUbe()
db.db.getLstBe()

proc fetchAccountRecord*(
db: AristoTxRef;
Expand Down
58 changes: 9 additions & 49 deletions nimbus/db/aristo/aristo_get.nim
Original file line number Diff line number Diff line change
Expand Up @@ -14,24 +14,23 @@
{.push raises: [].}

import
std/tables,
results,
"."/[aristo_desc, aristo_layers]

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc getTuvUbe*(
proc getTuvBe*(
db: AristoDbRef;
): Result[VertexID,AristoError] =
## Get the ID generator state from the unfiltered backened if available.
## Get the ID generator state from the backened if available.
let be = db.backend
if not be.isNil:
return be.getTuvFn()
err(GetTuvNotFound)

proc getLstUbe*(
proc getLstBe*(
db: AristoDbRef;
): Result[SavedState,AristoError] =
## Get the last saved state
Expand All @@ -40,69 +39,30 @@ proc getLstUbe*(
return be.getLstFn()
err(GetLstNotFound)

proc getVtxUbe*(
proc getVtxBe*(
db: AristoDbRef;
rvid: RootedVertexID;
flags: set[GetVtxFlag] = {};
): Result[VertexRef,AristoError] =
## Get the vertex from the unfiltered backened if available.
## Get the vertex from the backened if available.
let be = db.backend
if not be.isNil:
return be.getVtxFn(rvid, flags)
err GetVtxNotFound

proc getKeyUbe*(
proc getKeyBe*(
db: AristoDbRef;
rvid: RootedVertexID;
flags: set[GetVtxFlag];
): Result[(HashKey, VertexRef),AristoError] =
## Get the Merkle hash/key from the unfiltered backend if available.
## Get the Merkle hash/key from the backend if available.
let be = db.backend
if not be.isNil:
return be.getKeyFn(rvid, flags)
err GetKeyNotFound

# ------------------

proc getTuvBE*(
db: AristoDbRef;
): Result[VertexID,AristoError] =
## Get the ID generator state the `backened` layer if available.
if not db.txRef.isNil:
return ok(db.txRef.layer.vTop)
db.getTuvUbe()

proc getVtxBE*(
db: AristoDbRef;
rvid: RootedVertexID;
flags: set[GetVtxFlag] = {};
): Result[(VertexRef, int),AristoError] =
## Get the vertex from the (filtered) backened if available.
if not db.txRef.isNil:
db.txRef.layer.sTab.withValue(rvid, w):
if w[].isValid:
return ok (w[], -1)
return err(GetVtxNotFound)
ok (? db.getVtxUbe(rvid, flags), -2)

proc getKeyBE*(
db: AristoDbRef;
rvid: RootedVertexID;
flags: set[GetVtxFlag];
): Result[((HashKey, VertexRef), int),AristoError] =
## Get the merkle hash/key from the (filtered) backend if available.
if not db.txRef.isNil:
db.txRef.layer.kMap.withValue(rvid, w):
if w[].isValid:
return ok(((w[], nil), -1))
db.txRef.layer.sTab.withValue(rvid, s):
if s[].isValid:
return ok(((VOID_HASH_KEY, s[]), -1))
return err(GetKeyNotFound)
ok ((?db.getKeyUbe(rvid, flags)), -2)

# ------------------

proc getVtxRc*(
db: AristoTxRef;
rvid: RootedVertexID;
Expand All @@ -121,7 +81,7 @@ proc getVtxRc*(
else:
return err(GetVtxNotFound)

db.db.getVtxBE(rvid, flags)
ok (?db.db.getVtxBe(rvid, flags), -2)

proc getVtx*(db: AristoTxRef; rvid: RootedVertexID, flags: set[GetVtxFlag] = {}): VertexRef =
## Cascaded attempt to fetch a vertex from the cache layers or the backend.
Expand Down Expand Up @@ -155,7 +115,7 @@ proc getKeyRc*(
# The vertex is to be deleted. So is the value key.
return err(GetKeyNotFound)

db.db.getKeyBE(rvid, flags)
ok (?db.db.getKeyBe(rvid, flags), -2)

proc getKey*(db: AristoTxRef; rvid: RootedVertexID): HashKey =
## Cascaded attempt to fetch a vertex from the cache layers or the backend.
Expand Down
Loading

0 comments on commit 256cd61

Please sign in to comment.