diff --git a/vms/evm/database/blockdb/database.go b/vms/evm/database/blockdb/database.go new file mode 100644 index 000000000000..eceaf688a66b --- /dev/null +++ b/vms/evm/database/blockdb/database.go @@ -0,0 +1,425 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package blockdb + +import ( + "encoding/binary" + "errors" + "fmt" + "path/filepath" + "slices" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/rlp" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/heightindexdb/meterdb" + "github.com/ava-labs/avalanchego/utils/logging" + + heightindexdb "github.com/ava-labs/avalanchego/x/blockdb" +) + +var ( + errAlreadyInitialized = errors.New("database already initialized") + errInvalidEncodedLength = errors.New("invalid encoded length") +) + +var _ ethdb.Database = (*Database)(nil) + +// Database wraps an [ethdb.Database] and routes block headers, bodies, and receipts +// to separate [database.HeightIndex] databases for blocks at or above the minimum height. +// All other data uses the underlying [ethdb.Database] directly. 
+type Database struct { + ethdb.Database + + // Databases + metaDB database.Database + headerDB database.HeightIndex + bodyDB database.HeightIndex + receiptsDB database.HeightIndex + + // Configuration + config heightindexdb.DatabaseConfig + dbPath string + minHeight uint64 + + heightDBsReady bool + + reg prometheus.Registerer + logger logging.Logger +} + +const blockNumberSize = 8 + +func encodeBlockNumber(number uint64) []byte { + enc := make([]byte, blockNumberSize) + binary.BigEndian.PutUint64(enc, number) + return enc +} + +func blockHeaderKey(num uint64, hash common.Hash) []byte { + return slices.Concat([]byte{evmHeaderPrefix}, encodeBlockNumber(num), hash.Bytes()) +} + +func blockBodyKey(num uint64, hash common.Hash) []byte { + return slices.Concat([]byte{evmBlockBodyPrefix}, encodeBlockNumber(num), hash.Bytes()) +} + +func receiptsKey(num uint64, hash common.Hash) []byte { + return slices.Concat([]byte{evmReceiptsPrefix}, encodeBlockNumber(num), hash.Bytes()) +} + +// blockDBMinHeightKey stores the minimum block height of the +// height-indexed block databases. +// It is set at initialization and cannot be changed without +// recreating the databases. +var blockDBMinHeightKey = []byte("blockdb_min_height") + +func databaseMinHeight(db database.KeyValueReader) (uint64, bool, error) { + minBytes, err := db.Get(blockDBMinHeightKey) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + return 0, false, nil + } + return 0, false, err + } + if len(minBytes) != blockNumberSize { + return 0, false, fmt.Errorf("%w: min height expected %d bytes, got %d", errInvalidEncodedLength, blockNumberSize, len(minBytes)) + } + return binary.BigEndian.Uint64(minBytes), true, nil +} + +// IsEnabled checks if blockdb has ever been initialized. +// It returns true if the minimum block height key exists, indicating the +// block databases have been created and initialized with a minimum height. 
+func IsEnabled(db database.KeyValueReader) (bool, error) { + has, err := db.Has(blockDBMinHeightKey) + if err != nil { + return false, err + } + return has, nil +} + +func (db *Database) newMeteredHeightDB( + namespace string, + minHeight uint64, +) (database.HeightIndex, error) { + path := filepath.Join(db.dbPath, namespace) + config := db.config.WithDir(path).WithMinimumHeight(minHeight) + ndb, err := heightindexdb.New(config, db.logger) + if err != nil { + return nil, fmt.Errorf("failed to create %s database at %s: %w", namespace, path, err) + } + + mdb, err := meterdb.New(db.reg, namespace, ndb) + if err != nil { + return nil, errors.Join( + fmt.Errorf("failed to create metered %s database: %w", namespace, err), + ndb.Close(), + ) + } + + return mdb, nil +} + +// New creates a new [Database] over the provided [ethdb.Database]. +// +// If allowDeferredInit is true and no minimum block height is known, +// New defers initializing the height-indexed block databases until +// [Database.InitBlockDBs] is called. +// +// The bool result is true if the block databases were initialized immediately, +// and false if initialization was deferred. +func New( + metaDB database.Database, + evmDB ethdb.Database, + dbPath string, + allowDeferredInit bool, + config heightindexdb.DatabaseConfig, + logger logging.Logger, + reg prometheus.Registerer, +) (*Database, bool, error) { + db := &Database{ + metaDB: metaDB, + Database: evmDB, + dbPath: dbPath, + config: config, + reg: reg, + logger: logger, + } + + minHeightFn := [](func() (uint64, bool, error)){ + func() (uint64, bool, error) { + // Load existing database min height. + return databaseMinHeight(db.metaDB) + }, + func() (uint64, bool, error) { + // Use min height 1 unless deferring initialization. 
+ return 1, !allowDeferredInit, nil + }, + } + for _, fn := range minHeightFn { + h, ok, err := fn() + if err != nil { + return nil, false, err + } + if !ok { + continue + } + if err := db.InitBlockDBs(h); err != nil { + return nil, false, err + } + return db, true, nil + } + + db.logger.Info( + "Deferring block database initialization until minimum height is known", + ) + return db, false, nil +} + +// InitBlockDBs initializes [database.HeightIndex] databases with the specified +// minimum height. +// Once initialized, the minimum height cannot be changed without recreating +// the databases. +// +// Returns an error if already initialized. +func (db *Database) InitBlockDBs(minHeight uint64) error { + if db.heightDBsReady { + return errAlreadyInitialized + } + + if err := db.metaDB.Put(blockDBMinHeightKey, encodeBlockNumber(minHeight)); err != nil { + return err + } + headerDB, err := db.newMeteredHeightDB("headerdb", minHeight) + if err != nil { + return err + } + bodyDB, err := db.newMeteredHeightDB("bodydb", minHeight) + if err != nil { + return errors.Join(err, headerDB.Close()) + } + receiptsDB, err := db.newMeteredHeightDB("receiptsdb", minHeight) + if err != nil { + return errors.Join(err, headerDB.Close(), bodyDB.Close()) + } + db.headerDB = headerDB + db.bodyDB = bodyDB + db.receiptsDB = receiptsDB + + db.heightDBsReady = true + db.minHeight = minHeight + + db.logger.Info( + "Initialized height-indexed block databases", + zap.Uint64("minHeight", db.minHeight), + ) + + return nil +} + +// Key prefixes for block data in [ethdb.Database]. +// This is copied from libevm because they are not exported. +// Since the prefixes should never be changed, we can avoid libevm changes by +// duplicating them here. 
+const ( + evmHeaderPrefix = 'h' + evmBlockBodyPrefix = 'b' + evmReceiptsPrefix = 'r' +) + +var blockPrefixes = []byte{evmBlockBodyPrefix, evmHeaderPrefix, evmReceiptsPrefix} + +func parseBlockKey(key []byte) (num uint64, hash common.Hash, ok bool) { + // Block keys should have 1 byte prefix + blockNumberSize + 32 bytes for the hash + if len(key) != 1+blockNumberSize+32 { + return 0, common.Hash{}, false + } + if !slices.Contains(blockPrefixes, key[0]) { + return 0, common.Hash{}, false + } + num = binary.BigEndian.Uint64(key[1 : 1+blockNumberSize]) + bytes := key[1+blockNumberSize:] + hash = common.BytesToHash(bytes) + return num, hash, true +} + +type parsedBlockKey struct { + db database.HeightIndex + num uint64 + hash common.Hash +} + +func (p *parsedBlockKey) writeHashAndData(data []byte) error { + return writeHashAndData(p.db, p.num, p.hash, data) +} + +func writeHashAndData( + db database.HeightIndex, + height uint64, + hash common.Hash, + data []byte, +) error { + encoded, err := rlp.EncodeToBytes([][]byte{hash.Bytes(), data}) + if err != nil { + return err + } + return db.Put(height, encoded) +} + +// parseKey parses a block key into a parsedBlockKey. +// It returns false if no block databases for the key prefix exist. 
+func (db *Database) parseKey(key []byte) (*parsedBlockKey, bool) { + if !db.heightDBsReady { + return nil, false + } + + var hdb database.HeightIndex + switch key[0] { + case evmBlockBodyPrefix: + hdb = db.bodyDB + case evmHeaderPrefix: + hdb = db.headerDB + case evmReceiptsPrefix: + hdb = db.receiptsDB + default: + return nil, false + } + + num, hash, ok := parseBlockKey(key) + if !ok { + return nil, false + } + + if num < db.minHeight { + return nil, false + } + + return &parsedBlockKey{ + db: hdb, + num: num, + hash: hash, + }, true +} + +func (*Database) readBlock(p *parsedBlockKey) ([]byte, error) { + data, err := p.db.Get(p.num) + if err != nil { + return nil, err + } + + var elems [][]byte + if err := rlp.DecodeBytes(data, &elems); err != nil { + return nil, err + } + if len(elems) != 2 { + err := fmt.Errorf( + "invalid hash+data format: expected 2 elements, got %d", len(elems), + ) + return nil, err + } + + // Hash mismatch means we are trying to read a different block at this height. + if common.BytesToHash(elems[0]) != p.hash { + return nil, database.ErrNotFound + } + + return elems[1], nil +} + +func (db *Database) Get(key []byte) ([]byte, error) { + if p, ok := db.parseKey(key); ok { + return db.readBlock(p) + } + return db.Database.Get(key) +} + +func (db *Database) Put(key []byte, value []byte) error { + if p, ok := db.parseKey(key); ok { + return p.writeHashAndData(value) + } + return db.Database.Put(key, value) +} + +func (db *Database) Has(key []byte) (bool, error) { + p, ok := db.parseKey(key) + if !ok { + return db.Database.Has(key) + } + + if _, err := db.readBlock(p); err != nil { + if errors.Is(err, database.ErrNotFound) { + return false, nil + } + return false, err + } + return true, nil +} + +// Delete removes the key from the underlying database for non-block data. +// Block data deletion is a no-op because [database.HeightIndex] does not support deletion. 
+func (db *Database) Delete(key []byte) error { + if p, ok := db.parseKey(key); ok { + db.logger.Debug( + "Deleting block data is a no-op", + zap.Uint64("height", p.num), + zap.Stringer("hash", p.hash), + ) + return nil + } + return db.Database.Delete(key) +} + +func (db *Database) Close() error { + if !db.heightDBsReady { + return db.Database.Close() + } + + // Don't close metaDB since the caller should be managing it. + return errors.Join( + db.headerDB.Close(), + db.bodyDB.Close(), + db.receiptsDB.Close(), + db.Database.Close(), + ) +} + +var _ ethdb.Batch = (*batch)(nil) + +type batch struct { + ethdb.Batch + db *Database +} + +func (db *Database) NewBatch() ethdb.Batch { + return &batch{ + db: db, + Batch: db.Database.NewBatch(), + } +} + +func (db *Database) NewBatchWithSize(size int) ethdb.Batch { + return &batch{ + db: db, + Batch: db.Database.NewBatchWithSize(size), + } +} + +func (b *batch) Put(key []byte, value []byte) error { + if p, ok := b.db.parseKey(key); ok { + return p.writeHashAndData(value) + } + return b.Batch.Put(key, value) +} + +func (b *batch) Delete(key []byte) error { + if _, ok := b.db.parseKey(key); ok { + return b.db.Delete(key) + } + return b.Batch.Delete(key) +} diff --git a/vms/evm/database/blockdb/database_test.go b/vms/evm/database/blockdb/database_test.go new file mode 100644 index 000000000000..9c6679afb7a6 --- /dev/null +++ b/vms/evm/database/blockdb/database_test.go @@ -0,0 +1,498 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
package blockdb

import (
	"testing"

	"github.com/ava-labs/libevm/core/rawdb"
	"github.com/ava-labs/libevm/core/types"
	"github.com/ava-labs/libevm/ethdb"
	"github.com/ava-labs/libevm/params"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/require"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/database/leveldb"
	"github.com/ava-labs/avalanchego/utils/logging"

	evmdb "github.com/ava-labs/avalanchego/vms/evm/database"
	heightindexdb "github.com/ava-labs/avalanchego/x/blockdb"
)

// Verifies blocks written through the wrapper round-trip via rawdb.ReadBlock.
func TestDatabaseWriteAndReadBlock(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 10)
	writeBlocks(db, blocks, receipts)

	for _, block := range blocks {
		actualBlock := rawdb.ReadBlock(db, block.Hash(), block.NumberU64())
		requireRLPEqual(t, block, actualBlock)
	}
}

// Verifies receipts written through the wrapper round-trip via rawdb.ReadReceipts.
func TestDatabaseWriteAndReadReceipts(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 10)
	writeBlocks(db, blocks, receipts)

	for i, block := range blocks {
		require.True(t, rawdb.HasReceipts(db, block.Hash(), block.NumberU64()))
		actualReceipts := rawdb.ReadReceipts(
			db, block.Hash(), block.NumberU64(), block.Time(), params.TestChainConfig,
		)
		requireRLPEqual(t, receipts[i], actualReceipts)
	}
}

// Verifies logs stored in receipts are readable via rawdb.ReadLogs.
func TestDatabaseReadLogs(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 10)
	writeBlocks(db, blocks, receipts)

	for i, block := range blocks {
		actualLogs := rawdb.ReadLogs(db, block.Hash(), block.NumberU64())
		recs := receipts[i]
		requireRLPEqual(t, logsFromReceipts(recs), actualLogs)
	}
}

func TestDatabaseDeleteBlocksNoOp(t *testing.T) {
	// Verifies that block header, body and receipts cannot be deleted (no-op),
	// but hash to height mapping should be deleted.
	tests := []struct {
		name      string
		useBatch  bool
		batchSize int
	}{
		{name: "delete block data is a no-op"},
		{name: "batch delete", useBatch: true},
		{name: "batch delete with size", useBatch: true, batchSize: 1024},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			dataDir := t.TempDir()
			db, _ := newDatabasesFromDir(t, dataDir)
			allBlocks, allReceipts := createBlocks(t, 4)
			blocks := allBlocks[1:] // skip genesis block
			receipts := allReceipts[1:]
			writeBlocks(db, blocks, receipts)

			// perform delete operations on all blocks
			if tc.useBatch {
				var batch ethdb.Batch
				if tc.batchSize > 0 {
					batch = db.NewBatchWithSize(tc.batchSize)
				} else {
					batch = db.NewBatch()
				}

				for _, block := range blocks {
					rawdb.DeleteBlock(batch, block.Hash(), block.NumberU64())
				}
				require.NoError(t, batch.Write())
			} else {
				for _, block := range blocks {
					rawdb.DeleteBlock(db, block.Hash(), block.NumberU64())
				}
			}

			// block data (header/body/receipts) must survive the delete
			for i, block := range blocks {
				actualBlock := rawdb.ReadBlock(db, block.Hash(), block.NumberU64())
				requireRLPEqual(t, block, actualBlock)
				require.True(t, rawdb.HasReceipts(db, block.Hash(), block.NumberU64()))
				expReceipts := receipts[i]
				logs := rawdb.ReadLogs(db, block.Hash(), block.NumberU64())
				requireRLPEqual(t, logsFromReceipts(expReceipts), logs)

				// hash -> number mapping should be deleted
				num := rawdb.ReadHeaderNumber(db, block.Hash())
				require.Nil(t, num)
			}
		})
	}
}

// Verifies block data lands in the height-indexed DBs, not the wrapped evmDB.
func TestDatabaseWriteToHeightIndexedDB(t *testing.T) {
	dataDir := t.TempDir()
	db, evmDB := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 2)
	writeBlocks(db, blocks, receipts)
	block := blocks[1]

	// verify no block data in evmDB
	require.False(t, rawdb.HasHeader(evmDB, block.Hash(), block.NumberU64()))
	require.False(t, rawdb.HasBody(evmDB, block.Hash(), block.NumberU64()))
	require.False(t, rawdb.HasReceipts(evmDB, block.Hash(), block.NumberU64()))

	// verify block data in height-indexed databases
	ok, err := db.headerDB.Has(block.NumberU64())
	require.NoError(t, err)
	require.True(t, ok)
	ok, err = db.bodyDB.Has(block.NumberU64())
	require.NoError(t, err)
	require.True(t, ok)
	ok, err = db.receiptsDB.Has(block.NumberU64())
	require.NoError(t, err)
	require.True(t, ok)
}

// Verifies batched block writes bypass the batch (visible immediately) while
// non-block keys stay buffered until Write.
func TestDatabaseNewBatch(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 2)
	block := blocks[1]
	batch := db.NewBatch()
	writeBlocks(batch, blocks, receipts)

	// after adding blocks to batch, blocks and receipts should be available immediately
	require.True(t, rawdb.HasBody(db, block.Hash(), block.NumberU64()))
	require.True(t, rawdb.HasHeader(db, block.Hash(), block.NumberU64()))
	require.True(t, rawdb.HasReceipts(db, block.Hash(), block.NumberU64()))

	// header number should not be available until batch is written
	require.Nil(t, rawdb.ReadHeaderNumber(db, block.Hash()))
	require.NoError(t, batch.Write())
	num := rawdb.ReadHeaderNumber(db, block.Hash())
	require.Equal(t, block.NumberU64(), *num)
}

// Verifies NewBatchWithSize behaves like NewBatch for block data.
func TestDatabaseNewBatchWithSize(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 2)
	batch := db.NewBatchWithSize(2048)
	writeBlocks(batch, blocks, receipts)
	require.NoError(t, batch.Write())

	for _, block := range blocks {
		require.True(t, rawdb.HasHeader(db, block.Hash(), block.NumberU64()))
		require.True(t, rawdb.HasBody(db, block.Hash(), block.NumberU64()))
		require.True(t, rawdb.HasReceipts(db, block.Hash(), block.NumberU64()))
	}
}

// Verifies duplicate writes of the same block are harmless.
func TestDatabaseWriteSameBlockTwice(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)
	blocks, _ := createBlocks(t, 2)
	block := blocks[1]

	// write same block twice
	rawdb.WriteBlock(db, block)
	rawdb.WriteBlock(db, block)

	// we should be able to read the block after duplicate writes
	actualBlock := rawdb.ReadBlock(db, block.Hash(), block.NumberU64())
	requireRLPEqual(t, block, actualBlock)
}

// Verifies the last write wins when two different blocks share a height, and
// that reads by the overwritten block's hash miss (hash check in readBlock).
func TestDatabaseWriteDifferentBlocksAtSameHeight(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 2)
	b1 := blocks[1]
	r1 := receipts[1]

	// create a second block with the same height but different tx
	to := addrFromTest(t, "different-to")
	blocks2, receipts2 := createBlocksToAddr(t, 2, to)
	b2 := blocks2[1]
	r2 := receipts2[1]

	// ensure both blocks have the same height but different hashes
	require.Equal(t, b1.NumberU64(), b2.NumberU64())
	require.NotEqual(t, b1.Hash(), b2.Hash())

	writeBlocks(db, []*types.Block{b1, b2}, []types.Receipts{r1, r2})

	// reading by the first block's hash should not return anything
	require.Nil(t, rawdb.ReadHeader(db, b1.Hash(), b1.NumberU64()))
	require.Nil(t, rawdb.ReadBody(db, b1.Hash(), b1.NumberU64()))
	require.Nil(t, rawdb.ReadReceipts(db, b1.Hash(), b1.NumberU64(), b1.Time(), params.TestChainConfig))

	// reading by the second block's hash returns second block data
	requireRLPEqual(t, b2, rawdb.ReadBlock(db, b2.Hash(), b2.NumberU64()))
	actualReceipts := rawdb.ReadReceipts(db, b2.Hash(), b2.NumberU64(), b2.Time(), params.TestChainConfig)
	requireRLPEqual(t, r2, actualReceipts)
}

// Verifies data persists across a close/reopen cycle.
func TestDatabaseReopen(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 2)
	writeBlocks(db, blocks, receipts)
	b1 := blocks[1]
	r1 := receipts[1]

	// close db and verify we can no longer read block data
	require.NoError(t, db.Close())
	block := rawdb.ReadBlock(db, b1.Hash(), b1.NumberU64())
	require.Nil(t, block)
	recs := rawdb.ReadReceipts(db, b1.Hash(), b1.NumberU64(), b1.Time(), params.TestChainConfig)
	require.Nil(t, recs)
	_, err := db.headerDB.Get(b1.NumberU64())
	require.ErrorIs(t, err, database.ErrClosed)

	// reopen the database and data can be read again
	db, _ = newDatabasesFromDir(t, dataDir)
	block = rawdb.ReadBlock(db, b1.Hash(), b1.NumberU64())
	requireRLPEqual(t, b1, block)
	actualReceipts := rawdb.ReadReceipts(db, b1.Hash(), b1.NumberU64(), b1.Time(), params.TestChainConfig)
	requireRLPEqual(t, r1, actualReceipts)
}

// Verifies New's initialization paths: immediate init, deferred init, and
// reloading an existing min height from the metadata database.
func TestDatabaseInitialization(t *testing.T) {
	blocks, _ := createBlocks(t, 10)

	tests := []struct {
		name          string
		deferInit     bool
		evmDBBlocks   []*types.Block
		dbMinHeight   uint64
		wantDBReady   bool
		wantMinHeight uint64
	}{
		{
			name:          "empty evmDB and no deferred init",
			wantDBReady:   true,
			wantMinHeight: 1,
		},
		{
			name:        "empty evmDB and deferred init",
			deferInit:   true,
			wantDBReady: false, // db should not be ready due to deferred init
		},
		{
			name:          "existing db created with min height",
			evmDBBlocks:   blocks[5:8],
			dbMinHeight:   2,
			wantDBReady:   true,
			wantMinHeight: 2,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			dataDir := t.TempDir()
			base, err := leveldb.New(dataDir, nil, logging.NoLog{}, prometheus.NewRegistry())
			require.NoError(t, err)
			evmDB := rawdb.NewDatabase(evmdb.New(base))

			// create block databases with existing min height if needed
			if tc.dbMinHeight > 0 {
				db := Database{
					metaDB:   base,
					Database: evmDB,
					dbPath:   dataDir,
					config:   heightindexdb.DefaultConfig(),
					reg:      prometheus.NewRegistry(),
					logger:   logging.NoLog{},
				}
				require.NoError(t, db.InitBlockDBs(tc.dbMinHeight))
				require.NoError(t, db.headerDB.Close())
				require.NoError(t, db.bodyDB.Close())
				require.NoError(t, db.receiptsDB.Close())
				minHeight, ok, err := databaseMinHeight(base)
				require.NoError(t, err)
				require.True(t, ok)
				require.Equal(t, tc.dbMinHeight, minHeight)
			}

			writeBlocks(evmDB, tc.evmDBBlocks, []types.Receipts{})
			db, _, err := New(
				base,
				evmDB,
				dataDir,
				tc.deferInit,
				heightindexdb.DefaultConfig(),
				logging.NoLog{},
				prometheus.NewRegistry(),
			)
			require.NoError(t, err)
			require.Equal(t, tc.wantDBReady, db.heightDBsReady, "database ready mismatch")
			require.Equal(t, tc.wantMinHeight, db.minHeight, "database min height mismatch")
		})
	}
}

func TestDatabaseGenesisBlockHandling(t *testing.T) {
	// Verifies that genesis blocks (block 0) only exist in evmDB and not
	// in the height-indexed databases.
	dataDir := t.TempDir()
	db, evmDB := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 1) // first block is genesis
	writeBlocks(db, blocks, receipts)

	// Validate genesis block can be retrieved and is stored in evmDB.
	hash := rawdb.ReadCanonicalHash(evmDB, 0)
	block := rawdb.ReadBlock(db, hash, 0)
	requireRLPEqual(t, blocks[0], block)
	_, err := db.headerDB.Get(0)
	require.ErrorIs(t, err, database.ErrNotFound)
	_, err = db.receiptsDB.Get(0)
	require.ErrorIs(t, err, database.ErrNotFound)
	require.Equal(t, uint64(1), db.minHeight)
}

// Verifies deferred initialization completes via InitBlockDBs.
func TestDatabaseInitBlockDBs(t *testing.T) {
	dataDir := t.TempDir()
	base, err := leveldb.New(dataDir, nil, logging.NoLog{}, prometheus.NewRegistry())
	require.NoError(t, err)
	evmDB := rawdb.NewDatabase(evmdb.New(base))

	db, initialized, err := New(
		base,
		evmDB,
		dataDir,
		true,
		heightindexdb.DefaultConfig(),
		logging.NoLog{},
		prometheus.NewRegistry(),
	)
	require.NoError(t, err)
	require.False(t, initialized)

	require.NoError(t, db.InitBlockDBs(10))
	require.Equal(t, uint64(10), db.minHeight)
}

func TestDatabaseMinHeightWrites(t *testing.T) {
	// Verifies writes are gated by minHeight: below threshold go to evmDB,
	// at/above threshold go to height-index DBs.
	dataDir := t.TempDir()
	base, err := leveldb.New(dataDir, nil, logging.NoLog{}, prometheus.NewRegistry())
	require.NoError(t, err)
	evmDB := rawdb.NewDatabase(evmdb.New(base))

	db, _, err := New(
		base,
		evmDB,
		dataDir,
		true,
		heightindexdb.DefaultConfig(),
		logging.NoLog{},
		prometheus.NewRegistry(),
	)
	require.NoError(t, err)
	require.NoError(t, db.InitBlockDBs(10))
	blocks, receipts := createBlocks(t, 11)

	// write block 9 (below minHeight) and block 10 (at minHeight)
	writeBlocks(db, blocks[9:11], receipts[9:11])

	// below threshold should not be in height DBs but in kvDB
	has, err := db.headerDB.Has(9)
	require.NoError(t, err)
	require.False(t, has)
	has, err = db.bodyDB.Has(9)
	require.NoError(t, err)
	require.False(t, has)
	has, err = db.receiptsDB.Has(9)
	require.NoError(t, err)
	require.False(t, has)
	require.True(t, rawdb.HasHeader(evmDB, blocks[9].Hash(), 9))
	require.True(t, rawdb.HasBody(evmDB, blocks[9].Hash(), 9))
	require.True(t, rawdb.HasReceipts(evmDB, blocks[9].Hash(), 9))

	// at/above threshold should be in height DBs
	_, err = db.bodyDB.Get(10)
	require.NoError(t, err)
	_, err = db.headerDB.Get(10)
	require.NoError(t, err)
	_, err = db.receiptsDB.Get(10)
	require.NoError(t, err)
	require.Nil(t, rawdb.ReadBlock(evmDB, blocks[10].Hash(), 10))
	require.False(t, rawdb.HasReceipts(evmDB, blocks[10].Hash(), 10))
}

// Verifies databaseMinHeight rejects a malformed stored value.
func TestDatabaseMinHeightReturnsErrorOnInvalidEncoding(t *testing.T) {
	dataDir := t.TempDir()
	db, err := leveldb.New(dataDir, nil, logging.NoLog{}, prometheus.NewRegistry())
	require.NoError(t, err)

	// write an incorrectly sized value for blockDBMinHeightKey
	require.NoError(t, db.Put(blockDBMinHeightKey, []byte{0x01}))

	_, ok, err := databaseMinHeight(db)
	require.ErrorIs(t, err, errInvalidEncodedLength)
	require.False(t, ok)
}

// Verifies Has returns false when the stored hash at a height differs from
// the queried hash.
func TestDatabaseHasReturnsFalseOnHashMismatch(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 3)
	writeBlocks(db, blocks[1:3], receipts[1:3])

	// fetch block 2 with block 1's hash
	require.False(t, rawdb.HasHeader(db, blocks[1].Hash(), blocks[2].NumberU64()))
	require.False(t, rawdb.HasBody(db, blocks[1].Hash(), blocks[2].NumberU64()))
	require.False(t, rawdb.HasReceipts(db, blocks[1].Hash(), blocks[2].NumberU64()))
}

// Verifies a second InitBlockDBs call fails without changing minHeight.
func TestDatabaseAlreadyInitializedError(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)

	err := db.InitBlockDBs(5)
	require.ErrorIs(t, err, errAlreadyInitialized)
	require.Equal(t, uint64(1), db.minHeight)
}

// Verifies Get reports ErrNotFound when the queried hash does not match the
// block stored at that height.
func TestDatabaseGetNotFoundOnHashMismatch(t *testing.T) {
	dataDir := t.TempDir()
	db, _ := newDatabasesFromDir(t, dataDir)
	blocks, receipts := createBlocks(t, 3)
	writeBlocks(db, blocks, receipts)

	// get block 1 with block 0's hash
	_, err := db.Get(blockHeaderKey(1, blocks[0].Hash()))
	require.ErrorIs(t, err, database.ErrNotFound)
	_, err = db.Get(blockBodyKey(1, blocks[0].Hash()))
	require.ErrorIs(t, err, database.ErrNotFound)
	_, err = db.Get(receiptsKey(1, blocks[0].Hash()))
	require.ErrorIs(t, err, database.ErrNotFound)
}

func TestIsEnabled(t *testing.T) {
	// Verifies database min height is set on first init.
	dataDir := t.TempDir()
	base, err := leveldb.New(dataDir, nil, logging.NoLog{}, prometheus.NewRegistry())
	require.NoError(t, err)
	evmDB := rawdb.NewDatabase(evmdb.New(base))

	// initially not enabled
	enabled, err := IsEnabled(base)
	require.NoError(t, err)
	require.False(t, enabled)

	// create db but don't initialize
	db, initialized, err := New(
		base,
		evmDB,
		dataDir,
		true,
		heightindexdb.DefaultConfig(),
		logging.NoLog{},
		prometheus.NewRegistry(),
	)
	require.NoError(t, err)
	require.False(t, initialized)

	// not enabled since InitBlockDBs was not called
	enabled, err = IsEnabled(base)
	require.NoError(t, err)
	require.False(t, enabled)

	// now enabled
	require.NoError(t, db.InitBlockDBs(10))
	enabled, err = IsEnabled(base)
	require.NoError(t, err)
	require.True(t, enabled)
}

// --- vms/evm/database/blockdb/helpers_test.go ---
// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
+ +package blockdb + +import ( + "math/big" + "testing" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/core/types" + "github.com/ava-labs/libevm/crypto" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/rlp" + "github.com/ava-labs/libevm/trie" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/leveldb" + "github.com/ava-labs/avalanchego/utils/logging" + + evmdb "github.com/ava-labs/avalanchego/vms/evm/database" + heightindexdb "github.com/ava-labs/avalanchego/x/blockdb" +) + +func newDatabasesFromDir(t *testing.T, dataDir string) (*Database, ethdb.Database) { + t.Helper() + + base, err := leveldb.New(dataDir, nil, logging.NoLog{}, prometheus.NewRegistry()) + require.NoError(t, err) + evmDB := rawdb.NewDatabase(evmdb.New(base)) + db, _, err := New( + base, + evmDB, + dataDir, + false, + heightindexdb.DefaultConfig(), + logging.NoLog{}, + prometheus.NewRegistry(), + ) + require.NoError(t, err) + return db, evmDB +} + +// addrFromTest returns a deterministic address derived from the test name and supplied salt. +func addrFromTest(t *testing.T, salt string) common.Address { + t.Helper() + h := crypto.Keccak256Hash([]byte(t.Name() + ":" + salt)) + return common.BytesToAddress(h.Bytes()[12:]) +} + +// createBlocksToAddr generates blocks with receipts containing a log to the provided address. 
+func createBlocksToAddr(t *testing.T, numBlocks int, to common.Address) ([]*types.Block, []types.Receipts) { + t.Helper() + + blocks := make([]*types.Block, numBlocks) + receipts := make([]types.Receipts, numBlocks) + parentHash := common.Hash{} + + for i := range numBlocks { + header := &types.Header{ + ParentHash: parentHash, + Number: big.NewInt(int64(i)), + Extra: crypto.Keccak256(to.Bytes(), []byte{byte(i)}), // unique hash per block/recipient + } + tx := types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &to}) + block := types.NewBlock(header, []*types.Transaction{tx}, nil, nil, trie.NewStackTrie(nil)) + blocks[i] = block + parentHash = block.Hash() + + receipt := &types.Receipt{TxHash: tx.Hash(), Logs: []*types.Log{{Address: to}}} + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + receipts[i] = types.Receipts{receipt} + } + + return blocks, receipts +} + +func createBlocks(t *testing.T, numBlocks int) ([]*types.Block, []types.Receipts) { + t.Helper() + to := addrFromTest(t, "default-to") + return createBlocksToAddr(t, numBlocks, to) +} + +func writeBlocks(db ethdb.KeyValueWriter, blocks []*types.Block, receipts []types.Receipts) { + for i, block := range blocks { + rawdb.WriteBlock(db, block) + if i < len(receipts) { + rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i]) + } + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + } +} + +func requireRLPEqual(t *testing.T, expected, actual interface{}) { + t.Helper() + expectedBytes, err := rlp.EncodeToBytes(expected) + require.NoError(t, err) + actualBytes, err := rlp.EncodeToBytes(actual) + require.NoError(t, err) + require.Equal(t, expectedBytes, actualBytes) +} + +func logsFromReceipts(receipts types.Receipts) [][]*types.Log { + logs := make([][]*types.Log, len(receipts)) + for i := range receipts { + logs[i] = receipts[i].Logs + } + return logs +}