diff --git a/graft/coreth/core/blockchain.go b/graft/coreth/core/blockchain.go index f4526ff0fa88..b29d1fdaf1e8 100644 --- a/graft/coreth/core/blockchain.go +++ b/graft/coreth/core/blockchain.go @@ -1851,6 +1851,9 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error // If the state is already available and the acceptor tip is up to date, skip re-processing. if bc.HasState(current.Root()) && acceptorTipUpToDate { + if t, ok := bc.triedb.Backend().(*firewood.TrieDB); ok { + t.SetHashAndHeight(current.Hash(), current.NumberU64()) + } log.Info("Skipping state reprocessing", "root", current.Root()) return nil } @@ -1902,6 +1905,9 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error ) // Note: we add 1 since in each iteration, we attempt to re-execute the next block. log.Info("Re-executing blocks to generate state for last accepted block", "from", current.NumberU64()+1, "to", origin) + if t, ok := bc.triedb.Backend().(*firewood.TrieDB); ok { + t.SetHashAndHeight(current.Hash(), current.NumberU64()) + } var roots []common.Hash for current.NumberU64() < origin { // TODO: handle canceled context diff --git a/graft/evm/firewood/account_trie.go b/graft/evm/firewood/account_trie.go index c51e9d1d2dcf..3085faf97803 100644 --- a/graft/evm/firewood/account_trie.go +++ b/graft/evm/firewood/account_trie.go @@ -20,13 +20,14 @@ import ( var _ state.Trie = (*accountTrie)(nil) -// accountTrie implements state.Trie for managing account states. -// There are a couple caveats to the current implementation: -// 1. `Commit` is not used as expected in the state package. The `StorageTrie` doesn't return -// values, and we thus rely on the `accountTrie`. -// 2. The `Hash` method actually creates the proposal, since Firewood cannot calculate -// the hash of the trie without committing it. It is immediately dropped, and this -// can likely be optimized. +// accountTrie implements [state.Trie] for managing account states. +// Although it fulfills the [state.Trie] interface, it has some important differences: +// 1. [accountTrie.Commit] is not used as expected in the state package. The `StorageTrie` doesn't return +// values, and we thus rely on the `accountTrie`. Additionally, no [trienode.NodeSet] is +// actually constructed, since Firewood manages nodes internally and the list of changes +// is not needed externally. +// 2. The [accountTrie.Hash] method actually creates the [ffi.Proposal], since Firewood cannot calculate +// the hash of the trie without committing it. // // Note this is not concurrent safe. type accountTrie struct { @@ -172,7 +173,7 @@ func (a *accountTrie) DeleteAccount(addr common.Address) error { // Queue the key for deletion a.dirtyKeys[string(key)] = nil a.updateKeys = append(a.updateKeys, key) - a.updateValues = append(a.updateValues, nil) // Nil value indicates deletion + a.updateValues = append(a.updateValues, nil) // Must use nil to indicate deletion a.hasChanges = true // Mark that there are changes to commit return nil } @@ -188,14 +189,16 @@ func (a *accountTrie) DeleteStorage(addr common.Address, key []byte) error { // Queue the key for deletion a.dirtyKeys[string(combinedKey[:])] = nil a.updateKeys = append(a.updateKeys, combinedKey[:]) - a.updateValues = append(a.updateValues, nil) // Nil value indicates deletion + a.updateValues = append(a.updateValues, nil) // Must use nil to indicate deletion a.hasChanges = true // Mark that there are changes to commit return nil } // Hash returns the current hash of the state trie. 
-// This will create a proposal and drop it, so it is not efficient to call for each transaction. +// This will create the necessary proposals to guarantee that the changes can +// later be committed. All new proposals will be tracked by the [TrieDB]. // If there are no changes since the last call, the cached root is returned. +// On error, the zero hash is returned. func (a *accountTrie) Hash() common.Hash { hash, err := a.hash() if err != nil { @@ -208,7 +211,7 @@ func (a *accountTrie) Hash() common.Hash { func (a *accountTrie) hash() (common.Hash, error) { // If we haven't already hashed, we need to do so. if a.hasChanges { - root, err := a.fw.getProposalHash(a.parentRoot, a.updateKeys, a.updateValues) + root, err := a.fw.createProposals(a.parentRoot, a.updateKeys, a.updateValues) if err != nil { return common.Hash{}, err } @@ -218,51 +221,51 @@ func (a *accountTrie) hash() (common.Hash, error) { return a.root, nil } -// Commit returns the new root hash of the trie and a NodeSet containing all modified accounts and storage slots. -// The format of the NodeSet is different than in go-ethereum's trie implementation due to Firewood's design. -// This boolean is ignored, as it is a relic of the StateTrie implementation. +// Commit returns the new root hash of the trie and an empty [trienode.NodeSet]. +// The boolean input is ignored, as it is a relic of the StateTrie implementation. +// If the changes are not yet already tracked by the [TrieDB], they are created. func (a *accountTrie) Commit(bool) (common.Hash, *trienode.NodeSet, error) { // Get the hash of the trie. + // Ensures all changes are tracked by the Database. hash, err := a.hash() if err != nil { return common.Hash{}, nil, err } - // Create the NodeSet. This will be sent to `triedb.Update` later. - nodeset := trienode.NewNodeSet(common.Hash{}) - for i, key := range a.updateKeys { - nodeset.AddNode(key, &trienode.Node{ - Blob: a.updateValues[i], - }) - } - - return hash, nodeset, nil + set := trienode.NewNodeSet(common.Hash{}) + return hash, set, nil } // UpdateContractCode implements state.Trie. -// Contract code is controlled by rawdb, so we don't need to do anything here. +// Contract code is controlled by `rawdb`, so we don't need to do anything here. +// This always returns nil. func (*accountTrie) UpdateContractCode(common.Address, common.Hash, []byte) error { return nil } // GetKey implements state.Trie. -// This should not be used, since any user should not be accessing by raw key. +// Preimages are not yet supported in Firewood. +// It always returns nil. func (*accountTrie) GetKey([]byte) []byte { return nil } // NodeIterator implements state.Trie. // Firewood does not support iterating over internal nodes. +// This always returns an error. func (*accountTrie) NodeIterator([]byte) (trie.NodeIterator, error) { return nil, errors.New("NodeIterator not implemented for Firewood") } // Prove implements state.Trie. -// Firewood does not yet support providing key proofs. +// Firewood does not support providing key proofs. +// This always returns an error. func (*accountTrie) Prove([]byte, ethdb.KeyValueWriter) error { return errors.New("Prove not implemented for Firewood") } +// Copy creates a deep copy of the [accountTrie]. +// The [database.Reader] is shared, since it is read-only. 
func (a *accountTrie) Copy() *accountTrie { // Create a new AccountTrie with the same root and reader newTrie := &accountTrie{ diff --git a/graft/evm/firewood/storage_trie.go b/graft/evm/firewood/storage_trie.go index 056b37c9c917..cddd4615cb2b 100644 --- a/graft/evm/firewood/storage_trie.go +++ b/graft/evm/firewood/storage_trie.go @@ -23,22 +23,19 @@ func newStorageTrie(accountTrie *accountTrie) *storageTrie { } } -// Actual commit is handled by the account trie. -// Return the old storage root as if there was no change since Firewood -// will manage the hash calculations without it. -// All changes are managed by the account trie. +// Commit is a no-op for storage tries, as all changes are managed by the account trie. +// It always returns a nil NodeSet and zero hash. func (*storageTrie) Commit(bool) (common.Hash, *trienode.NodeSet, error) { return common.Hash{}, nil, nil } -// Firewood doesn't require tracking storage roots inside of an account. -// They will be updated in place when hashing of the proposal takes place. +// Hash returns an empty hash, as the storage roots are managed internally to Firewood. func (*storageTrie) Hash() common.Hash { return common.Hash{} } -// Copy should never be called on a storage trie, as it is just a wrapper around the account trie. -// Each storage trie should be re-opened with the account trie separately. +// Copy returns nil, as storage tries do not need to be copied separately. +// All usage of a copied storage trie should first ensure it is non-nil. func (*storageTrie) Copy() *storageTrie { return nil } diff --git a/graft/evm/firewood/triedb.go b/graft/evm/firewood/triedb.go index b31daaec7173..ea9569d5b377 100644 --- a/graft/evm/firewood/triedb.go +++ b/graft/evm/firewood/triedb.go @@ -29,38 +29,69 @@ import ( const firewoodDir = "firewood" var ( - _ proposable = (*ffi.Database)(nil) - _ proposable = (*ffi.Proposal)(nil) - - // FFI triedb operation metrics - ffiProposeCount = metrics.GetOrRegisterCounter("firewood/triedb/propose/count", nil) - ffiProposeTimer = metrics.GetOrRegisterCounter("firewood/triedb/propose/time", nil) - ffiCommitCount = metrics.GetOrRegisterCounter("firewood/triedb/commit/count", nil) - ffiCommitTimer = metrics.GetOrRegisterCounter("firewood/triedb/commit/time", nil) - ffiCleanupTimer = metrics.GetOrRegisterCounter("firewood/triedb/cleanup/time", nil) - ffiOutstandingProposals = metrics.GetOrRegisterGauge("firewood/triedb/propose/outstanding", nil) - - // FFI Trie operation metrics - ffiHashCount = metrics.GetOrRegisterCounter("firewood/triedb/hash/count", nil) - ffiHashTimer = metrics.GetOrRegisterCounter("firewood/triedb/hash/time", nil) - ffiReadCount = metrics.GetOrRegisterCounter("firewood/triedb/read/count", nil) - ffiReadTimer = metrics.GetOrRegisterCounter("firewood/triedb/read/time", nil) + _ triedb.DBConstructor = TrieDBConfig{}.BackendConstructor + _ triedb.DBOverride = (*TrieDB)(nil) + + hashCount = metrics.GetOrRegisterCounter("firewood/triedb/hash/count", nil) + hashTimer = metrics.GetOrRegisterCounter("firewood/triedb/hash/time", nil) + commitCount = metrics.GetOrRegisterCounter("firewood/triedb/commit/count", nil) + commitTimer = metrics.GetOrRegisterCounter("firewood/triedb/commit/time", nil) + proposeOnDiskCount = metrics.GetOrRegisterCounter("firewood/triedb/propose/disk/count", nil) + proposeOnProposeCount = metrics.GetOrRegisterCounter("firewood/triedb/propose/proposal/count", nil) + explicitlyDroppedCount = metrics.GetOrRegisterCounter("firewood/triedb/drop/count", nil) + + errNoProposalFound = 
errors.New("no proposal found") + errUnexpectedProposalFound = errors.New("unexpected proposal found") ) -type proposable interface { - // Propose creates a new proposal from the current state with the given keys and values. - Propose(keys, values [][]byte) (*ffi.Proposal, error) +// TrieDB is a triedb.DBOverride implementation backed by Firewood. +// It acts as a HashDB for backwards compatibility with most of the blockchain code. +type TrieDB struct { + proposals + + // The underlying Firewood database, used for storing proposals and revisions. + // This is exported as read-only, with knowledge that the consumer will not close it + // and the latest state can be modified at any time. + Firewood *ffi.Database +} + +type proposals struct { + sync.Mutex + + byStateRoot map[common.Hash][]*proposal + // The proposal tree tracks the structure of the current proposals, and which proposals are children of which. + // This is used to ensure that we can dereference proposals correctly and commit the correct ones + // in the case of duplicate state roots. + // The root of the tree is stored here, and represents the top-most layer on disk. + tree *proposal + + // possible temporarily holds proposals created during a trie update. + // This is cleared after the update is complete and the proposals have been sent to the database. + // It's unexpected for multiple updates to this to occur simultaneously, but a lock is used to ensure safety. + possible map[possibleKey]*proposal } -// ProposalContext represents a proposal in the Firewood database. -// This tracks all outstanding proposals to allow dereferencing upon commit. -type ProposalContext struct { - Proposal *ffi.Proposal - Hashes map[common.Hash]struct{} // All corresponding block hashes - Root common.Hash - Block uint64 - Parent *ProposalContext - Children []*ProposalContext +type possibleKey struct { + parentBlockHash, root common.Hash //nolint:unused // It is used as a map key +} + +// A proposal carries a Firewood FFI proposal (i.e. Rust-owned memory). +// The Firewood library adds a finalizer to the proposal handle to ensure that +// the memory is freed when the Go object is garbage collected. However, because +// we form a tree of proposals, the `proposal.Proposal` field may be the only +// reference to a given proposal. To ensure that all proposals in the tree +// can be freed in a finalizer, this cannot be included in the tree structure. +type proposal struct { + *proposalMeta + handle *ffi.Proposal +} + +type proposalMeta struct { + parent *proposalMeta + children []*proposalMeta + blockHashes map[common.Hash]struct{} // All corresponding block hashes + root common.Hash + height uint64 } type TrieDBConfig struct { @@ -88,39 +119,24 @@ func DefaultConfig(dir string) TrieDBConfig { } } +// BackendConstructor implements the [triedb.DBConstructor] interface. +// It creates a new Firewood database with the given configuration. +// Any error during creation will cause the program to exit. func (c TrieDBConfig) BackendConstructor(ethdb.Database) triedb.DBOverride { db, err := New(c) if err != nil { - log.Crit("firewood: error creating database", "error", err) + log.Crit("creating firewood database", "error", err) } return db } -type TrieDB struct { - // The underlying Firewood database, used for storing proposals and revisions. - // This is exported with the knowledge that consumer will not close it and the latest state can be modified - // at any time via block execution. 
The consumer should only use for read operations, - // or ensure that writes occur outside of block execution. - Firewood *ffi.Database - - proposalLock sync.RWMutex - // proposalMap provides O(1) access by state root to all proposals stored in the proposalTree - proposalMap map[common.Hash][]*ProposalContext - // The proposal tree tracks the structure of the current proposals, and which proposals are children of which. - // This is used to ensure that we can dereference proposals correctly and commit the correct ones - // in the case of duplicate state roots. - // The root of the tree is stored here, and represents the top-most layer on disk. - proposalTree *ProposalContext -} - -// New creates a new Firewood database with the given disk database and configuration. -// Any error during creation will cause the program to exit. +// New creates a new Firewood database with the given configuration. +// The database will not be opened on error. func New(config TrieDBConfig) (*TrieDB, error) { - path := filepath.Join(config.DatabaseDir, firewoodDir) - if err := validatePath(path); err != nil { + if err := validateDir(config.DatabaseDir); err != nil { return nil, err } - + path := filepath.Join(config.DatabaseDir, firewoodDir) options := []ffi.Option{ ffi.WithNodeCacheEntries(config.CacheSizeBytes / 256), // TODO(#4750): is 256 bytes per node a good estimate? ffi.WithFreeListCacheEntries(config.FreeListCacheEntries), @@ -133,53 +149,69 @@ func New(config TrieDBConfig) (*TrieDB, error) { fw, err := ffi.New(path, options...) if err != nil { - return nil, err + return nil, fmt.Errorf("opening database: %w", err) } - currentRoot, err := fw.Root() + initialRoot, err := fw.Root() if err != nil { + if closeErr := fw.Close(context.Background()); closeErr != nil { + return nil, fmt.Errorf("%w: error while closing: %w", err, closeErr) + } return nil, err } + blockHashes := make(map[common.Hash]struct{}) + blockHashes[common.Hash{}] = struct{}{} return &TrieDB{ - Firewood: fw, - proposalMap: make(map[common.Hash][]*ProposalContext), - proposalTree: &ProposalContext{ - Root: common.Hash(currentRoot), + Firewood: fw, + proposals: proposals{ + byStateRoot: make(map[common.Hash][]*proposal), + tree: &proposal{ + proposalMeta: &proposalMeta{ + root: common.Hash(initialRoot), + blockHashes: blockHashes, + height: 0, + }, + }, + possible: make(map[possibleKey]*proposal), }, }, nil } -func validatePath(path string) error { - if path == "" { - return errors.New("firewood database file path must be set") +// validateDir ensures that the given directory exists and is a directory. +func validateDir(dir string) error { + if dir == "" { + return errors.New("chain data directory must be set") } - // Check that the directory exists - dir := filepath.Dir(path) switch info, err := os.Stat(dir); { case os.IsNotExist(err): log.Info("Database directory not found, creating", "path", dir) if err := os.MkdirAll(dir, 0o755); err != nil { - return fmt.Errorf("error creating database directory: %w", err) + return fmt.Errorf("creating database directory: %w", err) } - return nil case err != nil: - return fmt.Errorf("error checking database directory: %w", err) + return fmt.Errorf("os.Stat() on database directory: %w", err) case !info.IsDir(): - return fmt.Errorf("database directory path is not a directory: %s", dir) + return fmt.Errorf("database directory path is not a directory: %q", dir) } return nil } +// SetHashAndHeight sets the committed block hashes and height in memory. 
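+// It replaces any block hashes currently tracked for the on-disk tip (the root of the proposal tree).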
+// This must be called at startup to initialize the in-memory state if the +// database is non-empty (e.g. restart, state sync). +func (t *TrieDB) SetHashAndHeight(blockHash common.Hash, height uint64) { + t.Lock() + defer t.Unlock() + clear(t.tree.blockHashes) + t.tree.blockHashes[blockHash] = struct{}{} + t.tree.height = height +} + // Scheme returns the scheme of the database. -// This is only used in some API calls -// and in StateDB to avoid iterating through deleted storage tries. -// WARNING: If cherry-picking anything from upstream that uses this, -// it must be overwritten to use something like: -// `_, ok := db.(*Database); if !ok { return "" }` -// to recognize the Firewood database. +// It returns [rawdb.HashScheme] to avoid a slow deletion path in `libevm`'s `StateDB`. func (*TrieDB) Scheme() string { return rawdb.HashScheme } @@ -188,115 +220,142 @@ func (*TrieDB) Scheme() string { func (t *TrieDB) Initialized(common.Hash) bool { root, err := t.Firewood.Root() if err != nil { - log.Error("firewood: error getting current root", "error", err) + log.Error("get current root", "error", err) return false } - // If the current root isn't empty, then unless the database is empty, we have a genesis block recorded. return common.Hash(root) != types.EmptyRootHash } -// Update takes a root and a set of keys-values and creates a new proposal. -// It will not be committed until the Commit method is called. -// This function should be called even if there are no changes to the state to ensure proper tracking of block hashes. -func (t *TrieDB) Update(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, _ *triestate.Set, opts ...stateconf.TrieDBUpdateOption) error { - // We require block hashes to be provided for all blocks in production. - // However, many tests cannot reasonably provide a block hash for genesis, so we allow it to be omitted. - parentHash, hash, ok := stateconf.ExtractTrieDBUpdatePayload(opts...) - if !ok { - log.Error("firewood: no block hash provided for block %d", block) +// Size is a no-op because Firewood does not track storage size. +// All memory management is handled internally by Firewood. +func (*TrieDB) Size() (common.StorageSize, common.StorageSize) { + return 0, 0 +} + +// Reference is a no-op because proposals are only referenced when created. +// Additionally, internal nodes do not need to be tracked by consumers. +func (*TrieDB) Reference(common.Hash, common.Hash) {} + +// Dereference is a no-op because proposals will be removed automatically. +// Additionally, internal nodes do not need to be tracked by consumers. +func (*TrieDB) Dereference(common.Hash) {} + +// Cap is a no-op because it isn't supported by Firewood. +func (*TrieDB) Cap(common.StorageSize) error { + return nil +} + +// Close closes the database, freeing all associated resources. +// This may hang for a short period while waiting for finalizers to complete. +// If it does not close as expected, this indicates that there are still references +// to proposals or revisions in memory, and an error will be returned. +// The database should not be used after calling Close, but it is safe to call multiple times. +func (t *TrieDB) Close() error { + p := &t.proposals + p.Lock() + defer p.Unlock() + + if p.tree == nil { + return nil // already closed } - // The rest of the operations except key-value arranging must occur with a lock - t.proposalLock.Lock() - defer t.proposalLock.Unlock() - - // Check if this proposal already exists.
- // During reorgs, we may have already created this proposal. - // Additionally, we may have already created this proposal with a different block hash. - if existingProposals, ok := t.proposalMap[root]; ok { - for _, existing := range existingProposals { - // If the block hash is already tracked, we can skip proposing this again. - if _, exists := existing.Hashes[hash]; exists { - log.Debug("firewood: proposal already exists", "root", root.Hex(), "parent", parentRoot.Hex(), "block", block, "hash", hash.Hex()) - return nil - } - // We already have this proposal, but should create a new context with the correct hash. - // This solves the case of a unique block hash, but the same underlying proposal. - if _, exists := existing.Parent.Hashes[parentHash]; exists { - log.Debug("firewood: proposal already exists, updating hash", "root", root.Hex(), "parent", parentRoot.Hex(), "block", block, "hash", hash.Hex()) - existing.Hashes[hash] = struct{}{} - return nil - } - } + // All remaining proposals are explicitly dropped to free their Rust-owned memory. + for _, child := range p.tree.children { + p.removeProposalAndChildren(child) } + p.tree = nil + p.byStateRoot = nil + t.possible = nil - keys, values := arrangeKeyValuePairs(nodes) // may return nil, nil if no changes - return t.propose(root, parentRoot, hash, parentHash, block, keys, values) + // We must provide a context to close since it may hang while waiting for the finalizers to complete. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + return t.Firewood.Close(ctx) } -// propose creates a new proposal for every possible parent with the given keys and values. -// If the parent cannot be found, an error will be returned. +// Update updates the database to the given root at the given height. +// The parent block hash and block hash must be provided in the options. +// A proposal must have already been created from [accountTrie.Commit] with the same root, +// parent root, and height. +// If no such proposal exists, an error will be returned. // -// To avoid having to create a new proposal for each valid state root, the block hashes are -// provided to ensure uniqueness. When this method is called, we can guarantee that the proposalContext -// must be created and tracked. -// -// Should only be accessed with the proposal lock held. -func (t *TrieDB) propose(root common.Hash, parentRoot common.Hash, hash common.Hash, parentHash common.Hash, block uint64, keys [][]byte, values [][]byte) error { - // Find the parent proposal with the correct hash. - // We assume the number of proposals at a given root is small, so we can iterate through them. - for _, parentProposal := range t.proposalMap[parentRoot] { - // If we know this proposal cannot be the parent, we can skip it. - // Since the only possible block that won't have a parent hash is block 1, - // and that will always be proposed from the database root, - // we can guarantee that the parent hash will be present in one of the proposals. - if _, exists := parentProposal.Hashes[parentHash]; !exists { - continue - } - log.Debug("firewood: proposing from parent proposal", "parent", parentProposal.Root.Hex(), "root", root.Hex(), "height", block) - p, err := createProposal(parentProposal.Proposal, root, keys, values) - if err != nil { - return err - } - pCtx := &ProposalContext{ - Proposal: p, - Hashes: map[common.Hash]struct{}{hash: {}}, - Root: root, - Block: block, - Parent: parentProposal, - } +// Unlike for HashDB and PathDB, `Update` must be called even if the root is unchanged.
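+// Any other candidate proposals created while hashing this block are discarded once the matching proposal is tracked.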
+func (t *TrieDB) Update(root, parent common.Hash, height uint64, _ *trienode.MergedNodeSet, _ *triestate.Set, opts ...stateconf.TrieDBUpdateOption) error { + // We require block hashes to be provided for all blocks in production. + // However, many tests cannot reasonably provide a block hash for genesis, so we allow it to be omitted. + parentBlockHash, blockHash, ok := stateconf.ExtractTrieDBUpdatePayload(opts...) + if !ok { + return fmt.Errorf("no block hash provided for block %d", height) + } - t.proposalMap[root] = append(t.proposalMap[root], pCtx) - parentProposal.Children = append(parentProposal.Children, pCtx) - return nil + // The remaining operations must occur with the lock held. + t.proposals.Lock() + defer t.proposals.Unlock() + + p, ok := t.possible[possibleKey{parentBlockHash: parentBlockHash, root: root}] + // It's possible that we are committing a proposal on top of an empty genesis block in testing. + // In this case, we can still find the proposal by looking for the empty block hash. + if !ok && height == 1 && parent == types.EmptyRootHash { + p, ok = t.possible[possibleKey{parentBlockHash: common.Hash{}, root: root}] + if ok { // guard against a nil proposal when the lookup misses + p.height = 1 + p.parent.blockHashes[parentBlockHash] = struct{}{} + } + } + if !ok { + return fmt.Errorf("%w for block %d, root %s, hash %s", errNoProposalFound, height, root.Hex(), blockHash.Hex()) } - // Since we were unable to find a parent proposal with the given parent hash, - // we must create a new proposal from the database root. - // We must avoid the case in which we are reexecuting blocks upon startup, and haven't yet stored the parent block. - if _, exists := t.proposalTree.Hashes[parentHash]; t.proposalTree.Block != 0 && !exists { - return fmt.Errorf("firewood: parent hash %s not found for block %s at height %d", parentHash.Hex(), hash.Hex(), block) - } else if t.proposalTree.Root != parentRoot { - return fmt.Errorf("firewood: parent root %s does not match proposal tree root %s for root %s at height %d", parentRoot.Hex(), t.proposalTree.Root.Hex(), root.Hex(), block) + // If we have already created an identical proposal, we can skip adding it again. + if t.proposals.exists(root, blockHash, parentBlockHash) { + // All unused proposals can be cleared, since we are already tracking an identical one. + clear(t.possible) + return nil } + switch { + case p.root != root: + return fmt.Errorf("%w: expected root %#x, got %#x", errUnexpectedProposalFound, root, p.root) + case p.parent.root != parent: + return fmt.Errorf("%w: expected parent root %#x, got %#x", errUnexpectedProposalFound, parent, p.parent.root) + case p.height != height: + return fmt.Errorf("%w: expected height %d, got %d", errUnexpectedProposalFound, height, p.height) + } + + // Track the proposal context in the tree and map. + p.parent.children = append(p.parent.children, p.proposalMeta) + t.proposals.byStateRoot[root] = append(t.proposals.byStateRoot[root], p) + p.blockHashes[blockHash] = struct{}{} + // Now, all unused proposals have no other references, since we didn't store them + // in the proposal map or tree, so they will be garbage collected. + // Any proposals with a different root were mistakenly created, so they can be freed as well. + clear(t.possible) + return nil +} - log.Debug("firewood: proposing from database root", "root", root.Hex(), "height", block) - p, err := createProposal(t.Firewood, root, keys, values) - if err != nil { - return err +// Check if this proposal already exists. +// During reorgs, we may have already tracked this block hash.
+// Additionally, we may have coincidentally created an identical proposal with a different block hash. +func (ps *proposals) exists(root, block, parentBlock common.Hash) bool { + proposals, ok := ps.byStateRoot[root] + if !ok { + return false } - pCtx := &ProposalContext{ - Proposal: p, - Hashes: map[common.Hash]struct{}{hash: {}}, // This may be common.Hash{} for genesis blocks. - Root: root, - Block: block, - Parent: t.proposalTree, + + for _, p := range proposals { + // If the block hash is already tracked, we can skip proposing this again. + if _, ok := p.blockHashes[block]; ok { + log.Debug("proposal already exists", "root", root.Hex(), "parentBlock", parentBlock.Hex(), "block", block.Hex()) + return true + } + + // We have an identical proposal, but should ensure the hash is tracked with this proposal. + if _, ok := p.parent.blockHashes[parentBlock]; ok { + log.Debug("proposal already exists, updating hash", "root", root.Hex(), "parentBlock", parentBlock.Hex(), "block", block.Hex()) + p.blockHashes[block] = struct{}{} + return true + } } - t.proposalMap[root] = append(t.proposalMap[root], pCtx) - t.proposalTree.Children = append(t.proposalTree.Children, pCtx) - return nil + return false } // Commit persists a proposal as a revision to the database. @@ -304,304 +363,243 @@ func (t *TrieDB) propose(root common.Hash, parentRoot common.Hash, hash common.H // Any time this is called, we expect either: // 1. The root is the same as the current root of the database (empty block during bootstrapping) // 2. We have created a valid propsal with that root, and it is of height +1 above the proposal tree root. -// Additionally, this should be unique. +// Additionally, this will be unique. // // Afterward, we know that no other proposal at this height can be committed, so we can dereference all // children in the the other branches of the proposal tree. +// +// Unlike for HashDB and PathDB, `Commit` must be called even if the root is unchanged. func (t *TrieDB) Commit(root common.Hash, report bool) error { - // We need to lock the proposal tree to prevent concurrent writes. - t.proposalLock.Lock() - defer t.proposalLock.Unlock() - - // Find the proposal with the given root. - var pCtx *ProposalContext - for _, possible := range t.proposalMap[root] { - if possible.Parent.Root == t.proposalTree.Root && possible.Parent.Block == t.proposalTree.Block { - // We found the proposal with the correct parent. - if pCtx != nil { - // This should never happen, as we ensure that we don't create duplicate proposals in `propose`. - return fmt.Errorf("firewood: multiple proposals found for %s", root.Hex()) - } - pCtx = possible - } + start := time.Now() + defer func() { + commitTimer.Inc(time.Since(start).Milliseconds()) + commitCount.Inc(1) + }() + + t.proposals.Lock() + defer t.proposals.Unlock() + + p, err := t.proposals.findProposalToCommitWhenLocked(root) + if err != nil { + return err } - if pCtx == nil { - return fmt.Errorf("firewood: committable proposal not found for %s", root.Hex()) + + if err := p.handle.Commit(); err != nil { + return fmt.Errorf("committing proposal %s: %w", root.Hex(), err) } + p.handle = nil // The proposal has been committed. - start := time.Now() - // Commit the proposal to the database.
- if err := pCtx.Proposal.Commit(); err != nil { - t.dereference(pCtx) // no longer committable - return fmt.Errorf("firewood: error committing proposal %s: %w", root.Hex(), err) - } - ffiCommitCount.Inc(1) - ffiCommitTimer.Inc(time.Since(start).Milliseconds()) - ffiOutstandingProposals.Dec(1) - // Now that the proposal is committed, we should clean up the proposal tree on return. - defer t.cleanupCommittedProposal(pCtx) - - // Assert that the root of the database matches the committed proposal root. - currentRoot, err := t.Firewood.Root() + newRoot, err := t.Firewood.Root() if err != nil { - return fmt.Errorf("firewood: error getting current root after commit: %w", err) + return fmt.Errorf("getting current root after commit: %w", err) } - - currentRootHash := common.Hash(currentRoot) - if currentRootHash != root { - return fmt.Errorf("firewood: current root %s does not match expected root %s", currentRootHash.Hex(), root.Hex()) + if common.Hash(newRoot) != root { + return fmt.Errorf("root after commit (%x) does not match expected root %x", newRoot, root) } + logFn := log.Debug if report { - log.Info("Persisted proposal to firewood database", "root", root) - } else { - log.Debug("Persisted proposal to firewood database", "root", root) + logFn = log.Info } - return nil -} - -// Size returns the storage size of diff layer nodes above the persistent disk -// layer and the dirty nodes buffered within the disk layer -// Only used for metrics and Commit intervals in APIs. -// This will be implemented in the firewood database eventually. -// Currently, Firewood stores all revisions in disk and proposals in memory. -func (*TrieDB) Size() (common.StorageSize, common.StorageSize) { - return 0, 0 -} - -// Reference is a no-op. -func (*TrieDB) Reference(common.Hash, common.Hash) {} - -// Dereference is a no-op since Firewood handles unused state roots internally. -func (*TrieDB) Dereference(common.Hash) {} + logFn("Persisted proposal to firewood database", "root", root) -// Firewood does not support this. -func (*TrieDB) Cap(common.StorageSize) error { + // On success, we should remove all children of the committed proposal. + // They will never be committed. + t.cleanupCommittedProposal(p) return nil } -func (t *TrieDB) Close() error { - t.proposalLock.Lock() - defer t.proposalLock.Unlock() +func (ps *proposals) findProposalToCommitWhenLocked(root common.Hash) (*proposal, error) { + var candidate *proposal - // before closing, we must deference any outstanding proposals to free the - // memory owned by firewood (outside of go's memory management) - for _, pCtx := range t.proposalTree.Children { - t.dereference(pCtx) + for _, p := range ps.byStateRoot[root] { + if p.parent.root != ps.tree.root || p.parent.height != ps.tree.height { + continue + } + if candidate != nil { + // This should never happen, as we ensure in `Update` that we don't track duplicate proposals. + return nil, fmt.Errorf("multiple proposals found for root %#x", root) + } + candidate = p } - t.proposalMap = nil - t.proposalTree.Children = nil - - // Close the database - // This may block momentarily while finalizers for Firewood objects run. - return t.Firewood.Close(context.Background()) + if candidate == nil { + return nil, fmt.Errorf("committable proposal not found for %d:%#x", ps.tree.height+1, root) + } + return candidate, nil } // createProposal creates a new proposal from the given layer -// If there are no changes, it will return nil. -func createProposal(layer proposable, root common.Hash, keys, values [][]byte) (p *ffi.Proposal, err error) { - // If there's an error after creating the proposal, we must drop it. - defer func() { - if err != nil && p != nil { - if dropErr := p.Drop(); dropErr != nil { - // We should still return the original error. - log.Error("firewood: error dropping proposal after error", "root", root.Hex(), "error", dropErr) - } - p = nil - } - }() - - if len(keys) != len(values) { - return nil, fmt.Errorf("firewood: keys and values must have the same length, got %d keys and %d values", len(keys), len(values)) +func (t *TrieDB) createProposal(parent *proposal, keys, values [][]byte) (*proposal, error) { + propose := t.Firewood.Propose + if h := parent.handle; h != nil { + propose = h.Propose + proposeOnProposeCount.Inc(1) + } else { + proposeOnDiskCount.Inc(1) } - - start := time.Now() - p, err = layer.Propose(keys, values) + handle, err := propose(keys, values) if err != nil { - return nil, fmt.Errorf("firewood: unable to create proposal for root %s: %w", root.Hex(), err) + return nil, fmt.Errorf("create proposal from parent root %s: %w", parent.root.Hex(), err) } - ffiProposeCount.Inc(1) - ffiProposeTimer.Inc(time.Since(start).Milliseconds()) - ffiOutstandingProposals.Inc(1) - currentRoot, err := p.Root() - if err != nil { - return nil, fmt.Errorf("firewood: error getting root of proposal %s: %w", root, err) + // Edge case: genesis block + block := parent.height + 1 + if _, ok := parent.blockHashes[common.Hash{}]; ok && parent.root == types.EmptyRootHash { + block = 0 } - currentRootHash := common.Hash(currentRoot) - if root != currentRootHash { - return nil, fmt.Errorf("firewood: proposed root %s does not match expected root %s", currentRootHash.Hex(), root.Hex()) + p := &proposal{ + handle: handle, + proposalMeta: &proposalMeta{ + blockHashes: make(map[common.Hash]struct{}), + parent: parent.proposalMeta, + height: block, + }, + } + + root, err := handle.Root() + if err != nil { + return nil, fmt.Errorf("getting root of proposal: %w", err) } + p.root = common.Hash(root) return p, nil } // cleanupCommittedProposal dereferences the proposal and removes it from the proposal map. // It also recursively dereferences all children of the proposal. -func (t *TrieDB) cleanupCommittedProposal(pCtx *ProposalContext) { - start := time.Now() - oldChildren := t.proposalTree.Children - t.proposalTree = pCtx - t.proposalTree.Parent = nil - - t.removeProposalFromMap(pCtx) - - for _, childCtx := range oldChildren { - // Don't dereference the recently commit proposal. - if childCtx != pCtx { - t.dereference(childCtx) +func (ps *proposals) cleanupCommittedProposal(p *proposal) { + oldChildren := ps.tree.children + ps.tree = p + ps.tree.parent = nil + ps.tree.handle = nil + + // Since this proposal has been committed, it doesn't need to be dropped. + ps.removeProposalFromMap(p.proposalMeta, false) + + for _, child := range oldChildren { + if child != p.proposalMeta { + ps.removeProposalAndChildren(child) } } - ffiCleanupTimer.Inc(time.Since(start).Milliseconds()) } // Internally removes all references of the proposal from the database. +// Frees the associated Rust memory for the proposal and all its children. // Should only be accessed with the proposal lock held. -// Consumer must not be iterating the proposal map at this root.
- for _, child := range pCtx.Children { - t.dereference(child) - } - pCtx.Children = nil - - // Remove the proposal from the map. - t.removeProposalFromMap(pCtx) - - // Drop the proposal in the backend. - if err := pCtx.Proposal.Drop(); err != nil { - log.Error("firewood: error dropping proposal", "root", pCtx.Root.Hex(), "error", err) +func (ps *proposals) removeProposalAndChildren(p *proposalMeta) { + for _, child := range p.children { + ps.removeProposalAndChildren(child) } - ffiOutstandingProposals.Dec(1) + ps.removeProposalFromMap(p, true) } -// removeProposalFromMap removes the proposal from the proposal map. +// removeProposalFromMap removes the proposal from the state root map. // The proposal lock must be held when calling this function. -func (t *TrieDB) removeProposalFromMap(pCtx *ProposalContext) { - rootList := t.proposalMap[pCtx.Root] +// The Rust memory is explicitly freed if drop is true. +func (ps *proposals) removeProposalFromMap(meta *proposalMeta, drop bool) { + rootList := ps.byStateRoot[meta.root] for i, p := range rootList { - if p == pCtx { // pointer comparison - guaranteed to be unique + if p.proposalMeta == meta { // pointer comparison - guaranteed to be unique rootList[i] = rootList[len(rootList)-1] rootList[len(rootList)-1] = nil rootList = rootList[:len(rootList)-1] + + if drop { + explicitlyDroppedCount.Inc(1) + if err := p.handle.Drop(); err != nil { + log.Error("while dropping proposal", "root", meta.root, "height", meta.height, "err", err) + } + } break } } if len(rootList) == 0 { - delete(t.proposalMap, pCtx.Root) + delete(ps.byStateRoot, meta.root) } else { - t.proposalMap[pCtx.Root] = rootList - } -} - -// Reader retrieves a node reader belonging to the given state root. -// An error will be returned if the requested state is not available. -func (t *TrieDB) Reader(root common.Hash) (database.Reader, error) { - revision, err := t.Firewood.Revision(ffi.Hash(root)) - if err != nil { - return nil, fmt.Errorf("firewood: unable to retrieve from root %s: %w", root.Hex(), err) + ps.byStateRoot[meta.root] = rootList } - return &reader{revision: revision}, nil -} - -// reader is a state reader of Database which implements the Reader interface. -type reader struct { - revision *ffi.Revision } -// Node retrieves the trie node with the given node hash. No error will be -// returned if the node is not found. -func (reader *reader) Node(_ common.Hash, path []byte, _ common.Hash) ([]byte, error) { - // This function relies on Firewood's internal locking to ensure concurrent reads are safe. - // This is safe even if a proposal is being committed concurrently. +// createProposals calculates the hash if the set of keys and values are +// proposed from the given parent root. +// All proposals created will be tracked for future use. +func (t *TrieDB) createProposals(parentRoot common.Hash, keys, values [][]byte) (common.Hash, error) { start := time.Now() - result, err := reader.revision.Get(path) - if metrics.EnabledExpensive { - ffiReadCount.Inc(1) - ffiReadTimer.Inc(time.Since(start).Milliseconds()) + defer func() { + hashTimer.Inc(time.Since(start).Milliseconds()) + hashCount.Inc(1) + }() + + if len(keys) != len(values) { + return common.Hash{}, fmt.Errorf("keys and values must have the same length, got %d keys and %d values", len(keys), len(values)) } - return result, err -} -// getProposalHash calculates the hash if the set of keys and values are -// proposed from the given parent root. 
-func (t *TrieDB) getProposalHash(parentRoot common.Hash, keys, values [][]byte) (common.Hash, error) { - // This function only reads from existing tracked proposals, so we can use a read lock. - t.proposalLock.RLock() - defer t.proposalLock.RUnlock() + // Must prevent a simultaneous `Commit`, as it alters the proposal tree/disk state. + t.proposals.Lock() + defer t.proposals.Unlock() var ( - p *ffi.Proposal - err error + count int // number of proposals created. + root common.Hash // The resulting root hash, should match between proposals ) - start := time.Now() - if t.proposalTree.Root == parentRoot { + if t.proposals.tree.root == parentRoot { // Propose from the database root. - p, err = t.Firewood.Propose(keys, values) + p, err := t.createProposal(t.proposals.tree, keys, values) if err != nil { - return common.Hash{}, fmt.Errorf("firewood: error proposing from root %s: %w", parentRoot.Hex(), err) + return common.Hash{}, fmt.Errorf("proposing from root %s: %w", parentRoot.Hex(), err) } - } else { - // Find any proposal with the given parent root. - // Since we are only using the proposal to find the root hash, - // we can use the first proposal found. - proposals, ok := t.proposalMap[parentRoot] - if !ok || len(proposals) == 0 { - return common.Hash{}, fmt.Errorf("firewood: no proposal found for parent root %s", parentRoot.Hex()) + root = p.root + for parentHash := range t.proposals.tree.blockHashes { + t.possible[possibleKey{parentBlockHash: parentHash, root: root}] = p } - rootProposal := proposals[0].Proposal + count++ + } - p, err = rootProposal.Propose(keys, values) + // Propose on top of every tracked proposal with the given parent root, + // since any of them could be the parent selected later in Update. + for _, parent := range t.proposals.byStateRoot[parentRoot] { + p, err := t.createProposal(parent, keys, values) if err != nil { - return common.Hash{}, fmt.Errorf("firewood: error proposing from parent proposal %s: %w", parentRoot.Hex(), err) + return common.Hash{}, fmt.Errorf("proposing from root %s: %w", parentRoot.Hex(), err) } - } - ffiHashCount.Inc(1) - ffiHashTimer.Inc(time.Since(start).Milliseconds()) - - // We succesffuly created a proposal, so we must drop it after use. - defer func() { - if err := p.Drop(); err != nil { - log.Error("firewood: error dropping proposal after hash computation", "parentRoot", parentRoot.Hex(), "error", err) + if root != (common.Hash{}) && p.root != root { + return common.Hash{}, fmt.Errorf("inconsistent proposal roots found for parent root %s: %#x and %#x", parentRoot.Hex(), root, p.root) } - }() + root = p.root + for parentHash := range parent.blockHashes { + t.possible[possibleKey{parentBlockHash: parentHash, root: root}] = p + } + count++ + } - root, err := p.Root() - if err != nil { - return common.Hash{}, err + // This should never occur, since processing a block requires a parent state to read from. + if count == 0 { + return common.Hash{}, fmt.Errorf("no proposals found with parent root %s", parentRoot.Hex()) } - return common.Hash(root), nil + + return root, nil } -func arrangeKeyValuePairs(nodes *trienode.MergedNodeSet) ([][]byte, [][]byte) { - if nodes == nil { - return nil, nil // No changes to propose +// Reader retrieves a node reader belonging to the given state root. +// An error will be returned if the requested state is not available.
+func (t *TrieDB) Reader(root common.Hash) (database.Reader, error) { + revision, err := t.Firewood.Revision(ffi.Hash(root)) + if err != nil { + return nil, fmt.Errorf("retrieve revision at root %s: %w", root.Hex(), err) } - // Create key-value pairs for the nodes in bytes. - var ( - acctKeys [][]byte - acctValues [][]byte - storageKeys [][]byte - storageValues [][]byte - ) + return &reader{revision: revision}, nil +} - flattenedNodes := nodes.Flatten() - - for _, nodeset := range flattenedNodes { - for str, node := range nodeset { - if len(str) == common.HashLength { - // This is an account node. - acctKeys = append(acctKeys, []byte(str)) - acctValues = append(acctValues, node.Blob) - } else { - storageKeys = append(storageKeys, []byte(str)) - storageValues = append(storageValues, node.Blob) - } - } - } +// reader is a state reader of Database which implements the Reader interface. +type reader struct { + revision *ffi.Revision +} - // We need to do all storage operations first, so prefix-deletion works for accounts. - return append(storageKeys, acctKeys...), append(storageValues, acctValues...) +// Node retrieves the trie node with the given node hash. No error will be +// returned if the node is not found. +func (r *reader) Node(_ common.Hash, path []byte, _ common.Hash) ([]byte, error) { + return r.revision.Get(path) } diff --git a/graft/evm/firewood/triedb_test.go b/graft/evm/firewood/triedb_test.go new file mode 100644 index 000000000000..412667bbf489 --- /dev/null +++ b/graft/evm/firewood/triedb_test.go @@ -0,0 +1,274 @@ +// Copyright (C) 2019, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package firewood + +import ( + "testing" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/core/state" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/libevm/stateconf" + "github.com/ava-labs/libevm/triedb" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" +) + +func newTestDatabase(t *testing.T) state.Database { + t.Helper() + fwConfig := DefaultConfig(t.TempDir()) + triedbConfig := &triedb.Config{ + DBOverride: fwConfig.BackendConstructor, + } + internalState := state.NewDatabaseWithConfig( + rawdb.NewMemoryDatabase(), + triedbConfig, + ) + tdb := internalState.TrieDB().Backend().(*TrieDB) + t.Cleanup(func() { + require.NoError(t, tdb.Close()) + }) + + return NewStateAccessor(internalState, tdb) +} + +func TestCommitEmptyGenesis(t *testing.T) { + db := newTestDatabase(t) + triedb := db.TrieDB() + + tr, err := db.OpenTrie(types.EmptyRootHash) + require.NoErrorf(t, err, "%T.OpenTrie()", db) + + root := tr.Hash() + require.Equal(t, types.EmptyRootHash, root) + + root, _, err = tr.Commit(true) + require.NoErrorf(t, err, "%T.Commit()", tr) + require.Equal(t, types.EmptyRootHash, root) + + require.NoErrorf( + t, + triedb.Update( + types.EmptyRootHash, + types.EmptyRootHash, + 0, nil, nil, + stateconf.WithTrieDBUpdatePayload(common.Hash{}, common.Hash{1}), + ), + "%T.Update()", triedb, + ) + + require.NoErrorf(t, triedb.Commit(types.EmptyRootHash, true), "%T.Commit()", triedb) +} + +func generateAccount(addr common.Address) types.StateAccount { + return types.StateAccount{ + Balance: uint256.NewInt(0).SetBytes(addr[:]), + } +} + +func verifyAccount(t *testing.T, tr state.Trie, addr common.Address, expected types.StateAccount) { + t.Helper() + acct, err := tr.GetAccount(addr) + require.NoErrorf(t, err, "%T.GetAccount(%s)", tr, addr) + require.Equalf(t, 
expected.Balance, acct.Balance, "%T.GetAccount(%s) balance", tr, addr) + require.Equalf(t, expected.Nonce, acct.Nonce, "%T.GetAccount(%s) nonce", tr, addr) +} + +func TestAccountPersistence(t *testing.T) { + db := newTestDatabase(t) + triedb := db.TrieDB() + + tr, err := db.OpenTrie(types.EmptyRootHash) + require.NoErrorf(t, err, "%T.OpenTrie()", db) + require.NotNil(t, tr) + + addr := common.HexToAddress("1234") + acct := generateAccount(addr) + require.NoErrorf(t, tr.UpdateAccount(addr, &acct), "%T.UpdateAccount()", tr) + verifyAccount(t, tr, addr, acct) + + hash := tr.Hash() + require.NotEqual(t, types.EmptyRootHash, hash) + + root, _, err := tr.Commit(true) + require.NoErrorf(t, err, "%T.Commit()", tr) + require.Equal(t, hash, root) + + require.NoErrorf( + t, + triedb.Update( + hash, + types.EmptyRootHash, + 0, nil, nil, + stateconf.WithTrieDBUpdatePayload(common.Hash{}, common.Hash{1}), + ), + "%T.Update()", triedb, + ) + + // We should be able to read the account before and after committing. + tr, err = db.OpenTrie(hash) + require.NoErrorf(t, err, "%T.OpenTrie()", db) + verifyAccount(t, tr, addr, acct) + + require.NoErrorf(t, triedb.Commit(hash, true), "%T.Commit()", triedb) + tr, err = db.OpenTrie(hash) + require.NoErrorf(t, err, "%T.OpenTrie()", db) + verifyAccount(t, tr, addr, acct) +} + +func TestStoragePersistence(t *testing.T) { + db := newTestDatabase(t) + triedb := db.TrieDB() + + tr, err := db.OpenTrie(types.EmptyRootHash) + require.NoErrorf(t, err, "%T.OpenTrie()", db) + require.NotNil(t, tr) + + addr := common.HexToAddress("1234") + acct := generateAccount(addr) + require.NoErrorf(t, tr.UpdateAccount(addr, &acct), "%T.UpdateAccount()", tr) + + key := []byte{1} + value := []byte{2} + // A separate Storage Trie is expected to be used. + require.NoErrorf(t, tr.UpdateStorage(addr, key, value), "%T.UpdateStorage()", tr) + + // Retrievable before hashing + storedValue, err := tr.GetStorage(addr, key) + require.NoErrorf(t, err, "%T.GetStorage()", tr) + require.Equalf(t, value, storedValue, "%T.GetStorage() value", tr) + + hash := tr.Hash() + require.NotEqual(t, types.EmptyRootHash, hash) + + root, _, err := tr.Commit(true) + require.NoErrorf(t, err, "%T.Commit()", tr) + require.Equal(t, hash, root) + + require.NoErrorf( + t, + triedb.Update( + hash, + types.EmptyRootHash, + 0, nil, nil, + stateconf.WithTrieDBUpdatePayload(common.Hash{}, common.Hash{1}), + ), + "%T.Update()", triedb, + ) + + // We should be able to read the storage before and after committing. + tr, err = db.OpenTrie(hash) + require.NoErrorf(t, err, "%T.OpenTrie()", db) + verifyAccount(t, tr, addr, acct) + + storedValue, err = tr.GetStorage(addr, key) + require.NoErrorf(t, err, "%T.GetStorage()", tr) + require.Equalf(t, value, storedValue, "%T.GetStorage() value", tr) + + require.NoErrorf(t, triedb.Commit(hash, true), "%T.Commit()", triedb) + tr, err = db.OpenTrie(hash) + require.NoErrorf(t, err, "%T.OpenTrie()", db) + verifyAccount(t, tr, addr, acct) + + storedValue, err = tr.GetStorage(addr, key) + require.NoErrorf(t, err, "%T.GetStorage()", tr) + require.Equalf(t, value, storedValue, "%T.GetStorage() value", tr) +} + +// Ensure that even if other tries are hashed, others can still be persisted via Update. +// Additionally, the now stale tries should not be accessible. 
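+// Only the proposal persisted first via Update remains tracked; a subsequent Update for the other root fails with errNoProposalFound.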
+func TestParallelHashing(t *testing.T) { + db := newTestDatabase(t) + triedb := db.TrieDB() + + tr1, err := db.OpenTrie(types.EmptyRootHash) + require.NoErrorf(t, err, "%T.OpenTrie()", db) + require.NotNil(t, tr1) + + addr1 := common.HexToAddress("1234") + acct1 := generateAccount(addr1) + require.NoErrorf(t, tr1.UpdateAccount(addr1, &acct1), "%T.UpdateAccount()", tr1) + + tr2, err := db.OpenTrie(types.EmptyRootHash) + require.NoErrorf(t, err, "%T.OpenTrie()", db) + require.NotNil(t, tr2) + + addr2 := common.HexToAddress("5678") + acct2 := generateAccount(addr2) + require.NoErrorf(t, tr2.UpdateAccount(addr2, &acct2), "%T.UpdateAccount()", tr2) + + hash1 := tr1.Hash() + hash2 := tr2.Hash() + require.NotEqual(t, hash1, hash2) + + // Commit both tries + root1, _, err := tr1.Commit(true) + require.NoErrorf(t, err, "%T.Commit()", tr1) + require.Equal(t, hash1, root1) + root2, _, err := tr2.Commit(true) + require.NoErrorf(t, err, "%T.Commit()", tr2) + require.Equal(t, hash2, root2) + + require.NoErrorf( + t, + triedb.Update( + hash1, + types.EmptyRootHash, + 0, nil, nil, + stateconf.WithTrieDBUpdatePayload(common.Hash{}, common.Hash{1}), + ), + "%T.Update()", triedb, + ) + + err = triedb.Update( + hash2, + types.EmptyRootHash, + 0, nil, nil, + stateconf.WithTrieDBUpdatePayload(common.Hash{}, common.Hash{1}), + ) + require.ErrorIsf(t, err, errNoProposalFound, "%T.Update()", triedb) +} + +func TestUpdateWithWrongParameters(t *testing.T) { + db := newTestDatabase(t) + triedb := db.TrieDB() + + tr, err := db.OpenTrie(types.EmptyRootHash) + require.NoErrorf(t, err, "%T.OpenTrie()", db) + require.NotNil(t, tr) + + addr := common.HexToAddress("1234") + acct := generateAccount(addr) + require.NoErrorf(t, tr.UpdateAccount(addr, &acct), "%T.UpdateAccount()", tr) + + hash := tr.Hash() + require.NotEqual(t, types.EmptyRootHash, hash) + + root, _, err := tr.Commit(true) + require.NoErrorf(t, err, "%T.Commit()", tr) + require.Equal(t, hash, root) + + // "Accidentally" provide the wrong height + err = triedb.Update( + root, + types.EmptyRootHash, + 42, nil, nil, + stateconf.WithTrieDBUpdatePayload(common.Hash{}, common.Hash{1}), + ) + require.ErrorIsf(t, err, errUnexpectedProposalFound, "%T.Update()", triedb) + + // Providing the correct parameters can recover + require.NoErrorf( + t, + triedb.Update( + root, + types.EmptyRootHash, + 0, nil, nil, + stateconf.WithTrieDBUpdatePayload(common.Hash{}, common.Hash{1}), + ), + "%T.Update()", triedb, + ) + require.NoErrorf(t, triedb.Commit(root, true), "%T.Commit()", triedb) +} diff --git a/graft/subnet-evm/core/blockchain.go b/graft/subnet-evm/core/blockchain.go index 79d963adbcec..2e489d0076b8 100644 --- a/graft/subnet-evm/core/blockchain.go +++ b/graft/subnet-evm/core/blockchain.go @@ -1890,6 +1890,9 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error // If the state is already available and the acceptor tip is up to date, skip re-processing. if bc.HasState(current.Root()) && acceptorTipUpToDate { + if t, ok := bc.triedb.Backend().(*firewood.TrieDB); ok { + t.SetHashAndHeight(current.Hash(), current.NumberU64()) + } log.Info("Skipping state reprocessing", "root", current.Root()) return nil } @@ -1941,6 +1944,9 @@ func (bc *BlockChain) reprocessState(current *types.Block, reexec uint64) error ) // Note: we add 1 since in each iteration, we attempt to re-execute the next block. 
log.Info("Re-executing blocks to generate state for last accepted block", "from", current.NumberU64()+1, "to", origin) + if t, ok := bc.triedb.Backend().(*firewood.TrieDB); ok { + t.SetHashAndHeight(current.Hash(), current.NumberU64()) + } var roots []common.Hash for current.NumberU64() < origin { // TODO: handle canceled context