diff --git a/cmd/util/cmd/root.go b/cmd/util/cmd/root.go index 902ba16cb46..b152c28f3e5 100644 --- a/cmd/util/cmd/root.go +++ b/cmd/util/cmd/root.go @@ -41,6 +41,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/snapshot" system_addresses "github.com/onflow/flow-go/cmd/util/cmd/system-addresses" truncate_database "github.com/onflow/flow-go/cmd/util/cmd/truncate-database" + verify_evm_offchain_replay "github.com/onflow/flow-go/cmd/util/cmd/verify-evm-offchain-replay" verify_execution_result "github.com/onflow/flow-go/cmd/util/cmd/verify_execution_result" "github.com/onflow/flow-go/cmd/util/cmd/version" "github.com/onflow/flow-go/module/profiler" @@ -128,6 +129,7 @@ func addCommands() { rootCmd.AddCommand(generate_authorization_fixes.Cmd) rootCmd.AddCommand(evm_state_exporter.Cmd) rootCmd.AddCommand(verify_execution_result.Cmd) + rootCmd.AddCommand(verify_evm_offchain_replay.Cmd) } func initConfig() { diff --git a/cmd/util/cmd/verify-evm-offchain-replay/main.go b/cmd/util/cmd/verify-evm-offchain-replay/main.go new file mode 100644 index 00000000000..d42c9841435 --- /dev/null +++ b/cmd/util/cmd/verify-evm-offchain-replay/main.go @@ -0,0 +1,88 @@ +package verify + +import ( + "fmt" + "strconv" + "strings" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/model/flow" +) + +var ( + flagDatadir string + flagExecutionDataDir string + flagEVMStateGobDir string + flagChain string + flagFromTo string + flagSaveEveryNBlocks uint64 +) + +// usage example +// +// ./util verify-evm-offchain-replay --chain flow-testnet --from_to 211176670-211177000 +// --datadir /var/flow/data/protocol --execution_data_dir /var/flow/data/execution_data +var Cmd = &cobra.Command{ + Use: "verify-evm-offchain-replay", + Short: "verify evm offchain replay with execution data", + Run: run, +} + +func init() { + Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name") + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().StringVar(&flagDatadir, "datadir", 
"/var/flow/data/protocol", + "directory that stores the protocol state") + + Cmd.Flags().StringVar(&flagExecutionDataDir, "execution_data_dir", "/var/flow/data/execution_data", + "directory that stores the execution state") + + Cmd.Flags().StringVar(&flagFromTo, "from_to", "", + "the flow height range to verify blocks, i.e, 1-1000, 1000-2000, 2000-3000, etc.") + + Cmd.Flags().StringVar(&flagEVMStateGobDir, "evm_state_gob_dir", "/var/flow/data/evm_state_gob", + "directory that stores the evm state gob files as checkpoint") + + Cmd.Flags().Uint64Var(&flagSaveEveryNBlocks, "save_every", uint64(1_000_000), + "save the evm state gob files every N blocks") +} + +func run(*cobra.Command, []string) { + chainID := flow.ChainID(flagChain) + + from, to, err := parseFromTo(flagFromTo) + if err != nil { + log.Fatal().Err(err).Msg("could not parse from_to") + } + + err = Verify(log.Logger, from, to, chainID, flagDatadir, flagExecutionDataDir, flagEVMStateGobDir, flagSaveEveryNBlocks) + if err != nil { + log.Fatal().Err(err).Msg("could not verify height") + } +} + +func parseFromTo(fromTo string) (from, to uint64, err error) { + parts := strings.Split(fromTo, "-") + if len(parts) != 2 { + return 0, 0, fmt.Errorf("invalid format: expected 'from-to', got '%s'", fromTo) + } + + from, err = strconv.ParseUint(strings.TrimSpace(parts[0]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'from' value: %w", err) + } + + to, err = strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'to' value: %w", err) + } + + if from > to { + return 0, 0, fmt.Errorf("'from' value (%d) must be less than or equal to 'to' value (%d)", from, to) + } + + return from, to, nil +} diff --git a/cmd/util/cmd/verify-evm-offchain-replay/verify.go b/cmd/util/cmd/verify-evm-offchain-replay/verify.go new file mode 100644 index 00000000000..47b34c72afa --- /dev/null +++ b/cmd/util/cmd/verify-evm-offchain-replay/verify.go @@ -0,0 +1,173 @@ +package 
verify + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/dgraph-io/badger/v2" + badgerds "github.com/ipfs/go-ds-badger2" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/fvm/evm/offchain/utils" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/storage" +) + +// Verify verifies the offchain replay of EVM blocks from the given height range +// and updates the EVM state gob files with the latest state +func Verify( + log zerolog.Logger, + from uint64, + to uint64, + chainID flow.ChainID, + dataDir string, + executionDataDir string, + evmStateGobDir string, + saveEveryNBlocks uint64, +) error { + lg := log.With(). + Uint64("from", from).Uint64("to", to). + Str("chain", chainID.String()). + Str("dataDir", dataDir). + Str("executionDataDir", executionDataDir). + Str("evmStateGobDir", evmStateGobDir). + Uint64("saveEveryNBlocks", saveEveryNBlocks). 
+ Logger() + + lg.Info().Msgf("verifying range from %d to %d", from, to) + + db, storages, executionDataStore, dsStore, err := initStorages(dataDir, executionDataDir) + if err != nil { + return fmt.Errorf("could not initialize storages: %w", err) + } + + defer db.Close() + defer dsStore.Close() + + var store *testutils.TestValueStore + + // root block require the account status registers to be saved + isRoot := utils.IsEVMRootHeight(chainID, from) + if isRoot { + store = testutils.GetSimpleValueStore() + } else { + prev := from - 1 + store, err = loadState(prev, evmStateGobDir) + if err != nil { + return fmt.Errorf("could not load EVM state from previous height %d: %w", prev, err) + } + } + + // save state every N blocks + onHeightReplayed := func(height uint64) error { + log.Info().Msgf("replayed height %d", height) + if height%saveEveryNBlocks == 0 { + err := saveState(store, height, evmStateGobDir) + if err != nil { + return err + } + } + return nil + } + + // replay blocks + err = utils.OffchainReplayBackwardCompatibilityTest( + log, + chainID, + from, + to, + storages.Headers, + storages.Results, + executionDataStore, + store, + onHeightReplayed, + ) + + if err != nil { + return err + } + + err = saveState(store, to, evmStateGobDir) + if err != nil { + return err + } + + lg.Info().Msgf("successfully verified range from %d to %d", from, to) + + return nil +} + +func saveState(store *testutils.TestValueStore, height uint64, gobDir string) error { + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(gobDir, height) + values, allocators := store.Dump() + err := testutils.SerializeState(valueFileName, values) + if err != nil { + return err + } + err = testutils.SerializeAllocator(allocatorFileName, allocators) + if err != nil { + return err + } + + log.Info().Msgf("saved EVM state to %s and %s", valueFileName, allocatorFileName) + + return nil +} + +func loadState(height uint64, gobDir string) (*testutils.TestValueStore, error) { + valueFileName, 
allocatorFileName := evmStateGobFileNamesByEndHeight(gobDir, height) + values, err := testutils.DeserializeState(valueFileName) + if err != nil { + return nil, fmt.Errorf("could not deserialize state %v: %w", valueFileName, err) + } + + allocators, err := testutils.DeserializeAllocator(allocatorFileName) + if err != nil { + return nil, fmt.Errorf("could not deserialize allocator %v: %w", allocatorFileName, err) + } + store := testutils.GetSimpleValueStorePopulated(values, allocators) + + log.Info().Msgf("loaded EVM state for height %d from gob file %v", height, valueFileName) + return store, nil +} + +func initStorages(dataDir string, executionDataDir string) ( + *badger.DB, + *storage.All, + execution_data.ExecutionDataGetter, + io.Closer, + error, +) { + db := common.InitStorage(dataDir) + + storages := common.InitStorages(db) + + datastoreDir := filepath.Join(executionDataDir, "blobstore") + err := os.MkdirAll(datastoreDir, 0700) + if err != nil { + return nil, nil, nil, nil, err + } + dsOpts := &badgerds.DefaultOptions + ds, err := badgerds.NewDatastore(datastoreDir, dsOpts) + if err != nil { + return nil, nil, nil, nil, err + } + + executionDataBlobstore := blobs.NewBlobstore(ds) + executionDataStore := execution_data.NewExecutionDataStore(executionDataBlobstore, execution_data.DefaultSerializer) + + return db, storages, executionDataStore, ds, nil +} + +func evmStateGobFileNamesByEndHeight(evmStateGobDir string, endHeight uint64) (string, string) { + valueFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("values-%d.gob", endHeight)) + allocatorFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("allocators-%d.gob", endHeight)) + return valueFileName, allocatorFileName +} diff --git a/fvm/evm/handler/blockHashList.go b/fvm/evm/handler/blockHashList.go index 91eefded24e..0db2aff73f9 100644 --- a/fvm/evm/handler/blockHashList.go +++ b/fvm/evm/handler/blockHashList.go @@ -3,6 +3,7 @@ package handler import ( "encoding/binary" "fmt" + "strings" gethCommon 
"github.com/onflow/go-ethereum/common" @@ -26,6 +27,14 @@ const ( heightEncodingSize ) +func IsBlockHashListBucketKeyFormat(id flow.RegisterID) bool { + return strings.HasPrefix(id.Key, "BlockHashListBucket") +} + +func IsBlockHashListMetaKey(id flow.RegisterID) bool { + return id.Key == blockHashListMetaKey +} + // BlockHashList stores the last `capacity` number of block hashes // // Under the hood it breaks the list of hashes into diff --git a/fvm/evm/offchain/blocks/block_proposal.go b/fvm/evm/offchain/blocks/block_proposal.go new file mode 100644 index 00000000000..cd1d68ed517 --- /dev/null +++ b/fvm/evm/offchain/blocks/block_proposal.go @@ -0,0 +1,34 @@ +package blocks + +import ( + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/types" +) + +func ReconstructProposal( + blockEvent *events.BlockEventPayload, + results []*types.Result, +) *types.BlockProposal { + receipts := make([]types.LightReceipt, 0, len(results)) + txHashes := make(types.TransactionHashes, 0, len(results)) + + for _, result := range results { + receipts = append(receipts, *result.LightReceipt()) + txHashes = append(txHashes, result.TxHash) + } + + return &types.BlockProposal{ + Block: types.Block{ + ParentBlockHash: blockEvent.ParentBlockHash, + Height: blockEvent.Height, + Timestamp: blockEvent.Timestamp, + TotalSupply: blockEvent.TotalSupply.Big(), + ReceiptRoot: blockEvent.ReceiptRoot, + TransactionHashRoot: blockEvent.TransactionHashRoot, + TotalGasUsed: blockEvent.TotalGasUsed, + PrevRandao: blockEvent.PrevRandao, + }, + Receipts: receipts, + TxHashes: txHashes, + } +} diff --git a/fvm/evm/offchain/blocks/provider.go b/fvm/evm/offchain/blocks/provider.go index 9111be4ac64..b9da39bd468 100644 --- a/fvm/evm/offchain/blocks/provider.go +++ b/fvm/evm/offchain/blocks/provider.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/handler" "github.com/onflow/flow-go/fvm/evm/types" 
"github.com/onflow/flow-go/model/flow" ) @@ -13,7 +14,10 @@ import ( // a OnBlockReceived call before block execution and // a follow up OnBlockExecuted call after block execution. type BasicProvider struct { + chainID flow.ChainID blks *Blocks + rootAddr flow.Address + storage types.BackendStorage latestBlockPayload *events.BlockEventPayload } @@ -28,7 +32,12 @@ func NewBasicProvider( if err != nil { return nil, err } - return &BasicProvider{blks: blks}, nil + return &BasicProvider{ + chainID: chainID, + blks: blks, + rootAddr: rootAddr, + storage: storage, + }, nil } // GetSnapshotAt returns a block snapshot at the given height @@ -61,14 +70,49 @@ func (p *BasicProvider) OnBlockReceived(blockEvent *events.BlockEventPayload) er // OnBlockExecuted should be called after executing blocks. func (p *BasicProvider) OnBlockExecuted( height uint64, - resCol types.ReplayResultCollector) error { + resCol types.ReplayResultCollector, + blockProposal *types.BlockProposal, +) error { // we push the block hash after execution, so the behaviour of the blockhash is // identical to the evm.handler. 
if p.latestBlockPayload.Height != height { return fmt.Errorf("active block height doesn't match expected: %d, got: %d", p.latestBlockPayload.Height, height) } + + blockBytes, err := blockProposal.Block.ToBytes() + if err != nil { + return types.NewFatalError(err) + } + + // do the same as handler.CommitBlockProposal + err = p.storage.SetValue( + p.rootAddr[:], + []byte(handler.BlockStoreLatestBlockKey), + blockBytes, + ) + if err != nil { + return err + } + + blockProposalBytes, err := blockProposal.ToBytes() + if err != nil { + return types.NewFatalError(err) + } + + hash := p.latestBlockPayload.Hash + // update block proposal + err = p.storage.SetValue( + p.rootAddr[:], + []byte(handler.BlockStoreLatestBlockProposalKey), + blockProposalBytes, + ) + if err != nil { + return err + } + + // update block hash list return p.blks.PushBlockHash( p.latestBlockPayload.Height, - p.latestBlockPayload.Hash, + hash, ) } diff --git a/fvm/evm/offchain/sync/replay.go b/fvm/evm/offchain/sync/replay.go index 4516f37007d..e85fc21658c 100644 --- a/fvm/evm/offchain/sync/replay.go +++ b/fvm/evm/offchain/sync/replay.go @@ -30,25 +30,26 @@ func ReplayBlockExecution( transactionEvents []events.TransactionEventPayload, blockEvent *events.BlockEventPayload, validateResults bool, -) error { +) ([]*types.Result, error) { // check the passed block event if blockEvent == nil { - return fmt.Errorf("nil block event has been passed") + return nil, fmt.Errorf("nil block event has been passed") } // create a base block context for all transactions // tx related context values will be replaced during execution ctx, err := blockSnapshot.BlockContext() if err != nil { - return err + return nil, err } // update the tracer ctx.Tracer = tracer gasConsumedSoFar := uint64(0) txHashes := make(types.TransactionHashes, len(transactionEvents)) + results := make([]*types.Result, 0, len(transactionEvents)) for idx, tx := range transactionEvents { - err = replayTransactionExecution( + result, err := 
replayTransactionExecution( rootAddr, ctx, uint(idx), @@ -58,28 +59,30 @@ func ReplayBlockExecution( validateResults, ) if err != nil { - return fmt.Errorf("transaction execution failed, txIndex: %d, err: %w", idx, err) + return nil, fmt.Errorf("transaction execution failed, txIndex: %d, err: %w", idx, err) } gasConsumedSoFar += tx.GasConsumed txHashes[idx] = tx.Hash + + results = append(results, result) } if validateResults { // check transaction inclusion txHashRoot := gethTypes.DeriveSha(txHashes, gethTrie.NewStackTrie(nil)) if txHashRoot != blockEvent.TransactionHashRoot { - return fmt.Errorf("transaction root hash doesn't match [%x] != [%x]", txHashRoot, blockEvent.TransactionHashRoot) + return nil, fmt.Errorf("transaction root hash doesn't match [%x] != [%x]", txHashRoot, blockEvent.TransactionHashRoot) } // check total gas used if blockEvent.TotalGasUsed != gasConsumedSoFar { - return fmt.Errorf("total gas used doesn't match [%d] != [%d]", gasConsumedSoFar, blockEvent.TotalGasUsed) + return nil, fmt.Errorf("total gas used doesn't match [%d] != [%d]", gasConsumedSoFar, blockEvent.TotalGasUsed) } // no need to check the receipt root hash given we have checked the logs and other // values during tx execution. 
} - return nil + return results, nil } func replayTransactionExecution( @@ -90,7 +93,7 @@ func replayTransactionExecution( ledger atree.Ledger, txEvent *events.TransactionEventPayload, validate bool, -) error { +) (*types.Result, error) { // create emulator em := emulator.NewEmulator(ledger, rootAddr) @@ -102,7 +105,7 @@ func replayTransactionExecution( if len(txEvent.PrecompiledCalls) > 0 { pcs, err := types.AggregatedPrecompileCallsFromEncoded(txEvent.PrecompiledCalls) if err != nil { - return fmt.Errorf("error decoding precompiled calls [%x]: %w", txEvent.Payload, err) + return nil, fmt.Errorf("error decoding precompiled calls [%x]: %w", txEvent.Payload, err) } ctx.ExtraPrecompiledContracts = precompiles.AggregatedPrecompiledCallsToPrecompiledContracts(pcs) } @@ -110,7 +113,7 @@ func replayTransactionExecution( // create a new block view bv, err := em.NewBlockView(ctx) if err != nil { - return err + return nil, err } var res *types.Result @@ -119,31 +122,31 @@ func replayTransactionExecution( if txEvent.TransactionType == types.DirectCallTxType { call, err := types.DirectCallFromEncoded(txEvent.Payload) if err != nil { - return fmt.Errorf("failed to RLP-decode direct call [%x]: %w", txEvent.Payload, err) + return nil, fmt.Errorf("failed to RLP-decode direct call [%x]: %w", txEvent.Payload, err) } res, err = bv.DirectCall(call) if err != nil { - return fmt.Errorf("failed to execute direct call [%x]: %w", txEvent.Hash, err) + return nil, fmt.Errorf("failed to execute direct call [%x]: %w", txEvent.Hash, err) } } else { gethTx := &gethTypes.Transaction{} if err := gethTx.UnmarshalBinary(txEvent.Payload); err != nil { - return fmt.Errorf("failed to RLP-decode transaction [%x]: %w", txEvent.Payload, err) + return nil, fmt.Errorf("failed to RLP-decode transaction [%x]: %w", txEvent.Payload, err) } res, err = bv.RunTransaction(gethTx) if err != nil { - return fmt.Errorf("failed to run transaction [%x]: %w", txEvent.Hash, err) + return nil, fmt.Errorf("failed to run 
transaction [%x]: %w", txEvent.Hash, err) } } // validate results if validate { if err := ValidateResult(res, txEvent); err != nil { - return fmt.Errorf("transaction replay failed (txHash %x): %w", txEvent.Hash, err) + return nil, fmt.Errorf("transaction replay failed (txHash %x): %w", txEvent.Hash, err) } } - return nil + return res, nil } func ValidateResult( diff --git a/fvm/evm/offchain/sync/replayer.go b/fvm/evm/offchain/sync/replayer.go index 25ccdc10cbf..96df01d58a0 100644 --- a/fvm/evm/offchain/sync/replayer.go +++ b/fvm/evm/offchain/sync/replayer.go @@ -45,22 +45,35 @@ func NewReplayer( } // ReplayBlock replays the execution of the transactions of an EVM block +func (cr *Replayer) ReplayBlock( + transactionEvents []events.TransactionEventPayload, + blockEvent *events.BlockEventPayload, +) (types.ReplayResultCollector, error) { + res, _, err := cr.ReplayBlockEvents(transactionEvents, blockEvent) + return res, err +} + +// ReplayBlockEvents replays the execution of the transactions of an EVM block // using the provided transactionEvents and blockEvents, -// which include all the context data for re-executing the transactions, and returns the replay result. +// which include all the context data for re-executing the transactions, and returns +// the replay result and the result of each transaction. +// the replay result contains the register updates, and the result of each transaction +// contains the execution result of each transaction, which is useful for recontstructing +// the EVM block proposal. // this method can be called concurrently if underlying storage // tracer and block snapshot provider support concurrency. // // Warning! the list of transaction events has to be sorted based on their // execution, sometimes the access node might return events out of order // it needs to be sorted by txIndex and eventIndex respectively. 
-func (cr *Replayer) ReplayBlock( +func (cr *Replayer) ReplayBlockEvents( transactionEvents []events.TransactionEventPayload, blockEvent *events.BlockEventPayload, -) (types.ReplayResultCollector, error) { +) (types.ReplayResultCollector, []*types.Result, error) { // prepare storage st, err := cr.storageProvider.GetSnapshotAt(blockEvent.Height) if err != nil { - return nil, err + return nil, nil, err } // create storage @@ -69,11 +82,11 @@ func (cr *Replayer) ReplayBlock( // get block snapshot bs, err := cr.blockProvider.GetSnapshotAt(blockEvent.Height) if err != nil { - return nil, err + return nil, nil, err } // replay transactions - err = ReplayBlockExecution( + results, err := ReplayBlockExecution( cr.chainID, cr.rootAddr, state, @@ -84,8 +97,8 @@ func (cr *Replayer) ReplayBlock( cr.validateResults, ) if err != nil { - return nil, err + return nil, nil, err } - return state, nil + return state, results, nil } diff --git a/fvm/evm/offchain/sync/replayer_test.go b/fvm/evm/offchain/sync/replayer_test.go index f7c05ab63b5..06262b5811e 100644 --- a/fvm/evm/offchain/sync/replayer_test.go +++ b/fvm/evm/offchain/sync/replayer_test.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/events" "github.com/onflow/flow-go/fvm/evm/offchain/blocks" + "github.com/onflow/flow-go/fvm/evm/offchain/storage" "github.com/onflow/flow-go/fvm/evm/offchain/sync" . 
"github.com/onflow/flow-go/fvm/evm/testutils" "github.com/onflow/flow-go/fvm/evm/types" @@ -154,7 +155,8 @@ func TestChainReplay(t *testing.T) { // check replay - bp, err := blocks.NewBasicProvider(chainID, snapshot, rootAddr) + bpStorage := storage.NewEphemeralStorage(snapshot) + bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) require.NoError(t, err) err = bp.OnBlockReceived(blockEventPayload) @@ -162,21 +164,15 @@ func TestChainReplay(t *testing.T) { sp := NewTestStorageProvider(snapshot, 1) cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true) - res, err := cr.ReplayBlock(txEventPayloads, blockEventPayload) + res, results, err := cr.ReplayBlockEvents(txEventPayloads, blockEventPayload) require.NoError(t, err) - err = bp.OnBlockExecuted(blockEventPayload.Height, res) - require.NoError(t, err) + require.Len(t, results, totalTxCount) + + proposal := blocks.ReconstructProposal(blockEventPayload, results) - // TODO: verify the state delta - // currently the backend storage doesn't work well with this - // changes needed to make this work, which is left for future PRs - // - // for k, v := range result.StorageRegisterUpdates() { - // ret, err := backend.GetValue([]byte(k.Owner), []byte(k.Key)) - // require.NoError(t, err) - // require.Equal(t, ret[:], v[:]) - // } + err = bp.OnBlockExecuted(blockEventPayload.Height, res, proposal) + require.NoError(t, err) }) }) }) diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go index a18ce4a81ac..5dad9b86658 100644 --- a/fvm/evm/offchain/utils/collection_test.go +++ b/fvm/evm/offchain/utils/collection_test.go @@ -2,7 +2,6 @@ package utils_test import ( "bufio" - "encoding/gob" "encoding/hex" "encoding/json" "fmt" @@ -11,7 +10,6 @@ import ( "strings" "testing" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" @@ -21,9 +19,6 @@ import ( "github.com/onflow/flow-go/fvm/environment" 
"github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/events" - "github.com/onflow/flow-go/fvm/evm/offchain/blocks" - "github.com/onflow/flow-go/fvm/evm/offchain/storage" - "github.com/onflow/flow-go/fvm/evm/offchain/sync" "github.com/onflow/flow-go/fvm/evm/offchain/utils" . "github.com/onflow/flow-go/fvm/evm/testutils" "github.com/onflow/flow-go/model/flow" @@ -41,7 +36,7 @@ func TestTestnetBackwardCompatibility(t *testing.T) { // > ~/Downloads/events_devnet51_1.jsonl // ... // - // 2) comment the above t.Skip, and update the events file paths and checkpoint dir + // 2) comment the above t.Skip, and update the events file paths and evmStateGob dir // to run the tests BackwardCompatibleSinceEVMGenesisBlock( t, flow.Testnet, []string{ @@ -65,47 +60,47 @@ func TestTestnetBackwardCompatibility(t *testing.T) { // --start 211176670 --end 211176770 --network testnet --host access-001.devnet51.nodes.onflow.org:9000 // // During the replay process, it will generate `values_.gob` and -// `allocators_.gob` checkpoint files for each height. If these checkpoint files exist, +// `allocators_.gob` checkpoint files for each height. If these checkpoint gob files exist, // the corresponding event JSON files will be skipped to optimize replay. 
func BackwardCompatibleSinceEVMGenesisBlock( t *testing.T, chainID flow.ChainID, eventsFilePaths []string, // ordered EVM events in JSONL format - checkpointDir string, - checkpointEndHeight uint64, // EVM height of an EVM state that a checkpoint was created for + evmStateGob string, + evmStateEndHeight uint64, // EVM height of an EVM state that a evmStateGob file was created for ) { // ensure that event files is not an empty array require.True(t, len(eventsFilePaths) > 0) - log.Info().Msgf("replaying EVM events from %v to %v, with checkpoints in %s, and checkpointEndHeight: %v", + log.Info().Msgf("replaying EVM events from %v to %v, with evmStateGob file in %s, and evmStateEndHeight: %v", eventsFilePaths[0], eventsFilePaths[len(eventsFilePaths)-1], - checkpointDir, checkpointEndHeight) + evmStateGob, evmStateEndHeight) - store, checkpointEndHeightOrZero := initStorageWithCheckpoints(t, chainID, checkpointDir, checkpointEndHeight) + store, evmStateEndHeightOrZero := initStorageWithEVMStateGob(t, chainID, evmStateGob, evmStateEndHeight) // the events to replay - nextHeight := checkpointEndHeightOrZero + 1 + nextHeight := evmStateEndHeightOrZero + 1 // replay each event files for _, eventsFilePath := range eventsFilePaths { log.Info().Msgf("replaying events from %v, nextHeight: %v", eventsFilePath, nextHeight) - checkpointEndHeight := replayEvents(t, chainID, store, eventsFilePath, checkpointDir, nextHeight) - nextHeight = checkpointEndHeight + 1 + evmStateEndHeight := replayEvents(t, chainID, store, eventsFilePath, evmStateGob, nextHeight) + nextHeight = evmStateEndHeight + 1 } log.Info(). Msgf("succhessfully replayed all events and state changes are consistent with onchain state change. 
nextHeight: %v", nextHeight) } -func initStorageWithCheckpoints(t *testing.T, chainID flow.ChainID, checkpointDir string, checkpointEndHeight uint64) ( +func initStorageWithEVMStateGob(t *testing.T, chainID flow.ChainID, evmStateGob string, evmStateEndHeight uint64) ( *TestValueStore, uint64, ) { rootAddr := evm.StorageAccountAddress(chainID) - // if there is no checkpoint, create a empty store and initialize the account status, + // if there is no evmStateGob file, create a empty store and initialize the account status, // return 0 as the genesis height - if checkpointEndHeight == 0 { + if evmStateEndHeight == 0 { store := GetSimpleValueStore() as := environment.NewAccountStatus() require.NoError(t, store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes())) @@ -113,26 +108,22 @@ func initStorageWithCheckpoints(t *testing.T, chainID flow.ChainID, checkpointDi return store, 0 } - valueFileName, allocatorFileName := checkpointFileNamesByEndHeight(checkpointDir, checkpointEndHeight) - values, err := deserialize(valueFileName) + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(evmStateGob, evmStateEndHeight) + values, err := DeserializeState(valueFileName) require.NoError(t, err) - allocators, err := deserializeAllocator(allocatorFileName) + allocators, err := DeserializeAllocator(allocatorFileName) require.NoError(t, err) store := GetSimpleValueStorePopulated(values, allocators) - return store, checkpointEndHeight + return store, evmStateEndHeight } func replayEvents( t *testing.T, chainID flow.ChainID, - store *TestValueStore, eventsFilePath string, checkpointDir string, initialNextHeight uint64) uint64 { + store *TestValueStore, eventsFilePath string, evmStateGob string, initialNextHeight uint64) uint64 { rootAddr := evm.StorageAccountAddress(chainID) - bpStorage := storage.NewEphemeralStorage(store) - bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) - require.NoError(t, err) - nextHeight := initialNextHeight 
scanEventFilesAndRun(t, eventsFilePath, @@ -143,55 +134,43 @@ func replayEvents( nextHeight, blockEventPayload.Height) } - err = bp.OnBlockReceived(blockEventPayload) - require.NoError(t, err) - - sp := NewTestStorageProvider(store, blockEventPayload.Height) - cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true) - res, err := cr.ReplayBlock(txEvents, blockEventPayload) - require.NoError(t, err) - - // commit all changes - for k, v := range res.StorageRegisterUpdates() { - err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) - require.NoError(t, err) + _, _, err := utils.ReplayEVMEventsToStore( + log.Logger, + store, + chainID, + rootAddr, + blockEventPayload, + txEvents, + ) + if err != nil { + return fmt.Errorf("fail to replay events: %w", err) } - - err = bp.OnBlockExecuted(blockEventPayload.Height, res) - require.NoError(t, err) - - // commit all block hash list changes - for k, v := range bpStorage.StorageRegisterUpdates() { - err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) - require.NoError(t, err) - } - // verify the block height is sequential without gap nextHeight++ return nil }) - checkpointEndHeight := nextHeight - 1 + evmStateEndHeight := nextHeight - 1 - log.Info().Msgf("finished replaying events from %v to %v, creating checkpoint", initialNextHeight, checkpointEndHeight) - valuesFile, allocatorsFile := dumpCheckpoint(t, store, checkpointDir, checkpointEndHeight) - log.Info().Msgf("checkpoint created: %v, %v", valuesFile, allocatorsFile) + log.Info().Msgf("finished replaying events from %v to %v, creating evm state gobs", initialNextHeight, evmStateEndHeight) + valuesFile, allocatorsFile := dumpEVMStateToGobFiles(t, store, evmStateGob, evmStateEndHeight) + log.Info().Msgf("evm state gobs created: %v, %v", valuesFile, allocatorsFile) - return checkpointEndHeight + return evmStateEndHeight } -func checkpointFileNamesByEndHeight(dir string, endHeight uint64) (string, string) { +func evmStateGobFileNamesByEndHeight(dir 
string, endHeight uint64) (string, string) { return filepath.Join(dir, fmt.Sprintf("values_%d.gob", endHeight)), filepath.Join(dir, fmt.Sprintf("allocators_%d.gob", endHeight)) } -func dumpCheckpoint(t *testing.T, store *TestValueStore, dir string, checkpointEndHeight uint64) (string, string) { - valuesFileName, allocatorsFileName := checkpointFileNamesByEndHeight(dir, checkpointEndHeight) +func dumpEVMStateToGobFiles(t *testing.T, store *TestValueStore, dir string, evmStateEndHeight uint64) (string, string) { + valuesFileName, allocatorsFileName := evmStateGobFileNamesByEndHeight(dir, evmStateEndHeight) values, allocators := store.Dump() - require.NoError(t, serialize(valuesFileName, values)) - require.NoError(t, serializeAllocator(allocatorsFileName, allocators)) + require.NoError(t, SerializeState(valuesFileName, values)) + require.NoError(t, SerializeAllocator(allocatorsFileName, allocators)) return valuesFileName, allocatorsFileName } @@ -244,85 +223,3 @@ func scanEventFilesAndRun( t.Fatal(err) } } - -// Serialize function: saves map data to a file -func serialize(filename string, data map[string][]byte) error { - // Create a file to save data - file, err := os.Create(filename) - if err != nil { - return err - } - defer file.Close() - - // Use gob to encode data - encoder := gob.NewEncoder(file) - err = encoder.Encode(data) - if err != nil { - return err - } - - return nil -} - -// Deserialize function: reads map data from a file -func deserialize(filename string) (map[string][]byte, error) { - // Open the file for reading - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - - // Prepare the map to store decoded data - var data map[string][]byte - - // Use gob to decode data - decoder := gob.NewDecoder(file) - err = decoder.Decode(&data) - if err != nil { - return nil, err - } - - return data, nil -} - -// Serialize function: saves map data to a file -func serializeAllocator(filename string, data map[string]uint64) 
error { - // Create a file to save data - file, err := os.Create(filename) - if err != nil { - return err - } - defer file.Close() - - // Use gob to encode data - encoder := gob.NewEncoder(file) - err = encoder.Encode(data) - if err != nil { - return err - } - - return nil -} - -// Deserialize function: reads map data from a file -func deserializeAllocator(filename string) (map[string]uint64, error) { - // Open the file for reading - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - - // Prepare the map to store decoded data - var data map[string]uint64 - - // Use gob to decode data - decoder := gob.NewDecoder(file) - err = decoder.Decode(&data) - if err != nil { - return nil, err - } - - return data, nil -} diff --git a/fvm/evm/offchain/utils/replay.go b/fvm/evm/offchain/utils/replay.go new file mode 100644 index 00000000000..5aba8affcd1 --- /dev/null +++ b/fvm/evm/offchain/utils/replay.go @@ -0,0 +1,104 @@ +package utils + +import ( + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/offchain/blocks" + evmStorage "github.com/onflow/flow-go/fvm/evm/offchain/storage" + "github.com/onflow/flow-go/fvm/evm/offchain/sync" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/model/flow" +) + +func ReplayEVMEventsToStore( + log zerolog.Logger, + store environment.ValueStore, + chainID flow.ChainID, + rootAddr flow.Address, + evmBlockEvent *events.BlockEventPayload, // EVM block event + evmTxEvents []events.TransactionEventPayload, // EVM transaction event +) ( + map[flow.RegisterID]flow.RegisterValue, // EVM state transition updates + map[flow.RegisterID]flow.RegisterValue, // block provider updates + error, +) { + + bpStorage := evmStorage.NewEphemeralStorage(store) + bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) + if err != nil { + return nil, nil, err + } + + err = 
bp.OnBlockReceived(evmBlockEvent) + if err != nil { + return nil, nil, err + } + + sp := testutils.NewTestStorageProvider(store, evmBlockEvent.Height) + cr := sync.NewReplayer(chainID, rootAddr, sp, bp, log, nil, true) + res, results, err := cr.ReplayBlockEvents(evmTxEvents, evmBlockEvent) + if err != nil { + return nil, nil, err + } + + // commit all register changes from the EVM state transition + for k, v := range res.StorageRegisterUpdates() { + err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) + if err != nil { + return nil, nil, err + } + } + + blockProposal := blocks.ReconstructProposal(evmBlockEvent, results) + + err = bp.OnBlockExecuted(evmBlockEvent.Height, res, blockProposal) + if err != nil { + return nil, nil, err + } + + // commit all register changes from non-EVM state transition, such + // as block hash list changes + for k, v := range bpStorage.StorageRegisterUpdates() { + // verify the block hash list changes are included in the trie update + + err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) + if err != nil { + return nil, nil, err + } + } + + return res.StorageRegisterUpdates(), bpStorage.StorageRegisterUpdates(), nil +} + +type EVMEventsAccumulator struct { + pendingEVMTxEvents []events.TransactionEventPayload +} + +func NewEVMEventsAccumulator() *EVMEventsAccumulator { + return &EVMEventsAccumulator{ + pendingEVMTxEvents: make([]events.TransactionEventPayload, 0), + } +} + +func (a *EVMEventsAccumulator) HasBlockEvent( + evmBlockEvent *events.BlockEventPayload, + evmTxEvents []events.TransactionEventPayload) ( + *events.BlockEventPayload, + []events.TransactionEventPayload, + bool, // true if there is an EVM block event +) { + a.pendingEVMTxEvents = append(a.pendingEVMTxEvents, evmTxEvents...) 
+ + // if there is no EVM block event, we will accumulate the pending txs + if evmBlockEvent == nil { + return evmBlockEvent, a.pendingEVMTxEvents, false + } + + pendingEVMTxEvents := a.pendingEVMTxEvents + // reset pending events + a.pendingEVMTxEvents = make([]events.TransactionEventPayload, 0) + // if there is an EVM block event, we return the EVM block and the accumulated tx events + return evmBlockEvent, pendingEVMTxEvents, true +} diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go new file mode 100644 index 00000000000..9335beb6230 --- /dev/null +++ b/fvm/evm/offchain/utils/verify.go @@ -0,0 +1,293 @@ +package utils + +import ( + "bytes" + "context" + "errors" + "fmt" + "strings" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm" + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/storage" +) + +// EVM Root Height is the first block that has EVM Block Event where the EVM block height is 1 +func IsEVMRootHeight(chainID flow.ChainID, flowHeight uint64) bool { + if chainID == flow.Testnet { + return flowHeight == 211176670 + } else if chainID == flow.Mainnet { + return flowHeight == 85981135 + } + return flowHeight == 1 +} + +// IsSporkHeight returns true if the given flow height is a spork height for the given chainID +// At spork height, there is no EVM events +func IsSporkHeight(chainID flow.ChainID, flowHeight uint64) bool { + if IsEVMRootHeight(chainID, flowHeight) { + return true + } + + if chainID == flow.Testnet { + return flowHeight == 218215349 // Testnet 52 + } else if chainID == flow.Mainnet { + return flowHeight == 88226267 // Mainnet 26 + } + return false +} + +// 
OffchainReplayBackwardCompatibilityTest replays the offchain EVM state transition for a given range of flow blocks,
+// the replay will also verify the StateUpdateChecksum of the EVM state transition from each transaction execution.
+// the updated register values will be saved to the given value store.
+func OffchainReplayBackwardCompatibilityTest(
+	log zerolog.Logger,
+	chainID flow.ChainID,
+	flowStartHeight uint64,
+	flowEndHeight uint64,
+	headers storage.Headers,
+	results storage.ExecutionResults,
+	executionDataStore execution_data.ExecutionDataGetter,
+	store environment.ValueStore,
+	onHeightReplayed func(uint64) error,
+) error {
+	rootAddr := evm.StorageAccountAddress(chainID)
+	rootAddrStr := string(rootAddr.Bytes())
+
+	// pendingEVMTxEvents are EVM tx events that were executed in a flow block which
+	// didn't emit an EVM block event, which happens when the system tx that emits the EVM block fails.
+	// we accumulate these pending txs, and replay them when we encounter a block with an EVM block event.
+ pendingEVMEvents := NewEVMEventsAccumulator() + + for height := flowStartHeight; height <= flowEndHeight; height++ { + // account status initialization for the root account at the EVM root height + if IsEVMRootHeight(chainID, height) { + log.Info().Msgf("initializing EVM state for root height %d", flowStartHeight) + + as := environment.NewAccountStatus() + rootAddr := evm.StorageAccountAddress(chainID) + err := store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes()) + if err != nil { + return err + } + + continue + } + + if IsSporkHeight(chainID, height) { + // spork root block has no EVM events + continue + } + + // get EVM events and register updates at the flow height + evmBlockEvent, evmTxEvents, registerUpdates, err := evmEventsAndRegisterUpdatesAtFlowHeight( + height, + headers, results, executionDataStore, rootAddrStr) + if err != nil { + return fmt.Errorf("failed to get EVM events and register updates at height %d: %w", height, err) + } + + blockEvent, txEvents, hasBlockEvent := pendingEVMEvents.HasBlockEvent(evmBlockEvent, evmTxEvents) + + if !hasBlockEvent { + log.Info().Msgf("block has no EVM block, height :%v, txEvents: %v", height, len(evmTxEvents)) + + err = onHeightReplayed(height) + if err != nil { + return err + } + continue + } + + evmUpdates, blockProviderUpdates, err := ReplayEVMEventsToStore( + log, + store, + chainID, + rootAddr, + blockEvent, + txEvents, + ) + if err != nil { + return fmt.Errorf("fail to replay events: %w", err) + } + + err = verifyEVMRegisterUpdates(registerUpdates, evmUpdates, blockProviderUpdates) + if err != nil { + return err + } + + err = onHeightReplayed(height) + if err != nil { + return err + } + } + + return nil +} + +func parseEVMEvents(evts flow.EventsList) (*events.BlockEventPayload, []events.TransactionEventPayload, error) { + var blockEvent *events.BlockEventPayload + txEvents := make([]events.TransactionEventPayload, 0) + + for _, e := range evts { + evtType := string(e.Type) + if 
strings.Contains(evtType, "BlockExecuted") { + if blockEvent != nil { + return nil, nil, errors.New("multiple block events in a single block") + } + + ev, err := ccf.Decode(nil, e.Payload) + if err != nil { + return nil, nil, err + } + + blockEventPayload, err := events.DecodeBlockEventPayload(ev.(cadence.Event)) + if err != nil { + return nil, nil, err + } + blockEvent = blockEventPayload + } else if strings.Contains(evtType, "TransactionExecuted") { + ev, err := ccf.Decode(nil, e.Payload) + if err != nil { + return nil, nil, err + } + txEv, err := events.DecodeTransactionEventPayload(ev.(cadence.Event)) + if err != nil { + return nil, nil, err + } + txEvents = append(txEvents, *txEv) + } + } + + return blockEvent, txEvents, nil +} + +func evmEventsAndRegisterUpdatesAtFlowHeight( + flowHeight uint64, + headers storage.Headers, + results storage.ExecutionResults, + executionDataStore execution_data.ExecutionDataGetter, + rootAddr string, +) ( + *events.BlockEventPayload, // EVM block event, might be nil if there is no block Event at this height + []events.TransactionEventPayload, // EVM transaction event + map[flow.RegisterID]flow.RegisterValue, // update registers + error, +) { + + blockID, err := headers.BlockIDByHeight(flowHeight) + if err != nil { + return nil, nil, nil, err + } + + result, err := results.ByBlockID(blockID) + if err != nil { + return nil, nil, nil, err + } + + executionData, err := executionDataStore.Get(context.Background(), result.ExecutionDataID) + if err != nil { + return nil, nil, nil, + fmt.Errorf("could not get execution data %v for block %d: %w", + result.ExecutionDataID, flowHeight, err) + } + + evts := flow.EventsList{} + payloads := []*ledger.Payload{} + + for _, chunkData := range executionData.ChunkExecutionDatas { + evts = append(evts, chunkData.Events...) + payloads = append(payloads, chunkData.TrieUpdate.Payloads...) 
+ } + + updates := make(map[flow.RegisterID]flow.RegisterValue, len(payloads)) + for i := len(payloads) - 1; i >= 0; i-- { + regID, regVal, err := convert.PayloadToRegister(payloads[i]) + if err != nil { + return nil, nil, nil, err + } + + // find the register updates for the root account + if regID.Owner == rootAddr { + updates[regID] = regVal + } + } + + // parse EVM events + evmBlockEvent, evmTxEvents, err := parseEVMEvents(evts) + if err != nil { + return nil, nil, nil, err + } + return evmBlockEvent, evmTxEvents, updates, nil +} + +func verifyEVMRegisterUpdates( + registerUpdates map[flow.RegisterID]flow.RegisterValue, + evmUpdates map[flow.RegisterID]flow.RegisterValue, + blockProviderUpdates map[flow.RegisterID]flow.RegisterValue, +) error { + // skip the register level validation + // since the register is not stored at the same slab id as the on-chain EVM + // instead, we will compare by exporting the logic EVM state, which contains + // accounts, codes and slots. + return nil +} + +func VerifyRegisterUpdates(expectedUpdates map[flow.RegisterID]flow.RegisterValue, actualUpdates map[flow.RegisterID]flow.RegisterValue) error { + missingUpdates := make(map[flow.RegisterID]flow.RegisterValue) + additionalUpdates := make(map[flow.RegisterID]flow.RegisterValue) + mismatchingUpdates := make(map[flow.RegisterID][2]flow.RegisterValue) + + for k, v := range expectedUpdates { + if actualVal, ok := actualUpdates[k]; !ok { + missingUpdates[k] = v + } else if !bytes.Equal(v, actualVal) { + mismatchingUpdates[k] = [2]flow.RegisterValue{v, actualVal} + } + + delete(actualUpdates, k) + } + + for k, v := range actualUpdates { + additionalUpdates[k] = v + } + + // Build a combined error message + var errorMessage strings.Builder + + if len(missingUpdates) > 0 { + errorMessage.WriteString("Missing register updates:\n") + for id, value := range missingUpdates { + errorMessage.WriteString(fmt.Sprintf(" RegisterKey: %v, ExpectedValue: %x\n", id.Key, value)) + } + } + + if 
len(additionalUpdates) > 0 { + errorMessage.WriteString("Additional register updates:\n") + for id, value := range additionalUpdates { + errorMessage.WriteString(fmt.Sprintf(" RegisterKey: %v, ActualValue: %x\n", id.Key, value)) + } + } + + if len(mismatchingUpdates) > 0 { + errorMessage.WriteString("Mismatching register updates:\n") + for id, values := range mismatchingUpdates { + errorMessage.WriteString(fmt.Sprintf(" RegisterKey: %v, ExpectedValue: %x, ActualValue: %x\n", id.Key, values[0], values[1])) + } + } + + if errorMessage.Len() > 0 { + return errors.New(errorMessage.String()) + } + + return nil +} diff --git a/fvm/evm/testutils/gob.go b/fvm/evm/testutils/gob.go new file mode 100644 index 00000000000..1c944a1e9e3 --- /dev/null +++ b/fvm/evm/testutils/gob.go @@ -0,0 +1,88 @@ +package testutils + +import ( + "encoding/gob" + "os" +) + +// Serialize function: saves map data to a file +func SerializeState(filename string, data map[string][]byte) error { + // Create a file to save data + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + + // Use gob to encode data + encoder := gob.NewEncoder(file) + err = encoder.Encode(data) + if err != nil { + return err + } + + return nil +} + +// Deserialize function: reads map data from a file +func DeserializeState(filename string) (map[string][]byte, error) { + // Open the file for reading + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + // Prepare the map to store decoded data + var data map[string][]byte + + // Use gob to decode data + decoder := gob.NewDecoder(file) + err = decoder.Decode(&data) + if err != nil { + return nil, err + } + + return data, nil +} + +// Serialize function: saves map data to a file +func SerializeAllocator(filename string, data map[string]uint64) error { + // Create a file to save data + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + + // Use gob to encode 
data + encoder := gob.NewEncoder(file) + err = encoder.Encode(data) + if err != nil { + return err + } + + return nil +} + +// Deserialize function: reads map data from a file +func DeserializeAllocator(filename string) (map[string]uint64, error) { + // Open the file for reading + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + // Prepare the map to store decoded data + var data map[string]uint64 + + // Use gob to decode data + decoder := gob.NewDecoder(file) + err = decoder.Decode(&data) + if err != nil { + return nil, err + } + + return data, nil +}