diff --git a/Makefile b/Makefile index 724f84d5ed7..7ba432f6bb6 100644 --- a/Makefile +++ b/Makefile @@ -157,7 +157,6 @@ generate-mocks: install-mock-generators mockery --name '(Connector|PingInfoProvider)' --dir=network/p2p --case=underscore --output="./network/mocknetwork" --outpkg="mocknetwork" CGO_CFLAGS=$(CRYPTO_FLAG) mockgen -destination=storage/mocks/storage.go -package=mocks github.com/onflow/flow-go/storage Blocks,Headers,Payloads,Collections,Commits,Events,ServiceEvents,TransactionResults CGO_CFLAGS=$(CRYPTO_FLAG) mockgen -destination=network/mocknetwork/mock_network.go -package=mocknetwork github.com/onflow/flow-go/network EngineRegistry - mockery --name='.*' --dir=integration/benchmark/mocksiface --case=underscore --output="integration/benchmark/mock" --outpkg="mock" mockery --name=ExecutionDataStore --dir=module/executiondatasync/execution_data --case=underscore --output="./module/executiondatasync/execution_data/mock" --outpkg="mock" mockery --name=Downloader --dir=module/executiondatasync/execution_data --case=underscore --output="./module/executiondatasync/execution_data/mock" --outpkg="mock" mockery --name '(ExecutionDataRequester|IndexReporter)' --dir=module/state_synchronization --case=underscore --output="./module/state_synchronization/mock" --outpkg="state_synchronization" diff --git a/access/api.go b/access/api.go index 3201796c6ed..4a5bcbc7de3 100644 --- a/access/api.go +++ b/access/api.go @@ -197,7 +197,7 @@ type API interface { // SubscribeTransactionStatuses streams transaction statuses starting from the reference block saved in the // transaction itself until the block containing the transaction becomes sealed or expired. When the transaction // status becomes TransactionStatusSealed or TransactionStatusExpired, the subscription will automatically shut down. - SubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody) subscription.Subscription + SubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody, requiredEventEncodingVersion entities.EventEncodingVersion) subscription.Subscription } // TODO: Combine this with flow.TransactionResult? diff --git a/access/handler.go b/access/handler.go index 71e48511aca..559cb7b5096 100644 --- a/access/handler.go +++ b/access/handler.go @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" @@ -708,9 +709,9 @@ func (h *Handler) GetExecutionResultForBlockID(ctx context.Context, req *access. 
func (h *Handler) GetExecutionResultByID(ctx context.Context, req *access.GetExecutionResultByIDRequest) (*access.ExecutionResultByIDResponse, error) { metadata := h.buildMetadataResponse() - blockID := convert.MessageToIdentifier(req.GetId()) + resultID := convert.MessageToIdentifier(req.GetId()) - result, err := h.api.GetExecutionResultByID(ctx, blockID) + result, err := h.api.GetExecutionResultByID(ctx, resultID) if err != nil { return nil, err } @@ -1112,11 +1113,23 @@ func (h *Handler) SendAndSubscribeTransactionStatuses( return err } - sub := h.api.SubscribeTransactionStatuses(ctx, &tx) - return subscription.HandleSubscription(sub, func(txSubInfo *convert.TransactionSubscribeInfo) error { - err = stream.Send(convert.TransactionSubscribeInfoToMessage(txSubInfo)) - if err != nil { - return rpc.ConvertError(err, "could not send response", codes.Internal) + sub := h.api.SubscribeTransactionStatuses(ctx, &tx, request.GetEventEncodingVersion()) + + messageIndex := counters.NewMonotonousCounter(0) + return subscription.HandleSubscription(sub, func(txResults []*TransactionResult) error { + for i := range txResults { + value := messageIndex.Value() + if ok := messageIndex.Set(value + 1); !ok { + return status.Errorf(codes.Internal, "the message index has already been incremented to %d", messageIndex.Value()) + } + + err = stream.Send(&access.SendAndSubscribeTransactionStatusesResponse{ + TransactionResults: TransactionResultToMessage(txResults[i]), + MessageIndex: value, + }) + if err != nil { + return rpc.ConvertError(err, "could not send response", codes.Internal) + } } return nil diff --git a/access/mock/api.go b/access/mock/api.go index b27e8a03580..8e6f8e53936 100644 --- a/access/mock/api.go +++ b/access/mock/api.go @@ -977,13 +977,13 @@ func (_m *API) SubscribeBlocksFromStartHeight(ctx context.Context, startHeight u return r0 } -// SubscribeTransactionStatuses provides a mock function with given fields: ctx, tx -func (_m *API) SubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody) subscription.Subscription { - ret := _m.Called(ctx, tx) +// SubscribeTransactionStatuses provides a mock function with given fields: ctx, tx, requiredEventEncodingVersion +func (_m *API) SubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody, requiredEventEncodingVersion entities.EventEncodingVersion) subscription.Subscription { + ret := _m.Called(ctx, tx, requiredEventEncodingVersion) var r0 subscription.Subscription - if rf, ok := ret.Get(0).(func(context.Context, *flow.TransactionBody) subscription.Subscription); ok { - r0 = rf(ctx, tx) + if rf, ok := ret.Get(0).(func(context.Context, *flow.TransactionBody, entities.EventEncodingVersion) subscription.Subscription); ok { + r0 = rf(ctx, tx, requiredEventEncodingVersion) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(subscription.Subscription) diff --git a/admin/commands/storage/read_protocol_snapshot.go b/admin/commands/storage/read_protocol_snapshot.go index 8b87164f230..738e6409936 100644 --- a/admin/commands/storage/read_protocol_snapshot.go +++ b/admin/commands/storage/read_protocol_snapshot.go @@ -57,7 +57,7 @@ func (s *ProtocolSnapshotCommand) Handler(_ context.Context, req *admin.CommandR s.logger.Info().Uint("blocksToSkip", blocksToSkip).Msgf("admintool: generating protocol snapshot") - snapshot, sealedHeight, commit, err := common.GenerateProtocolSnapshotForCheckpoint( + snapshot, sealedHeight, commit, checkpointFile, err := common.GenerateProtocolSnapshotForCheckpoint( s.logger, s.state, s.headers, s.seals, 
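The new `SendAndSubscribeTransactionStatuses` body above tags every streamed result with a strictly increasing message index through `messageIndex.Value()` and `messageIndex.Set(value + 1)`. The `counters` package itself is not shown in this diff, so the following is only a self-contained sketch of the Value/Set contract the handler appears to rely on; the CAS-loop implementation is an assumption, not the package's confirmed code:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// monotonousCounter sketches the Value/Set contract assumed above:
// Set succeeds only if the new value is strictly greater than the
// current one, so concurrent senders can never reuse an index.
type monotonousCounter struct {
	value uint64
}

func (c *monotonousCounter) Value() uint64 {
	return atomic.LoadUint64(&c.value)
}

// Set returns false when another writer has already advanced the
// counter to newValue or beyond, mirroring the `!ok` branch above.
func (c *monotonousCounter) Set(newValue uint64) bool {
	for {
		old := atomic.LoadUint64(&c.value)
		if newValue <= old {
			return false
		}
		if atomic.CompareAndSwapUint64(&c.value, old, newValue) {
			return true
		}
	}
}

func main() {
	var idx monotonousCounter
	for i := 0; i < 3; i++ {
		value := idx.Value()
		if ok := idx.Set(value + 1); !ok {
			fmt.Printf("index already incremented to %d\n", idx.Value())
			continue
		}
		fmt.Printf("sent message with index %d\n", value) // prints 0, 1, 2
	}
}
```

Under this contract a failed `Set` means a concurrent writer already advanced the index, which is exactly the condition the handler converts into an Internal gRPC error.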
s.checkpointDir, blocksToSkip) if err != nil { return nil, fmt.Errorf("could not generate protocol snapshot for checkpoint, checkpointDir %v: %w", @@ -79,10 +79,19 @@ func (s *ProtocolSnapshotCommand) Handler(_ context.Context, req *admin.CommandR Hex("finalized_block_id", logging.Entity(header)). Uint64("sealed_height", sealedHeight). Hex("sealed_commit", commit[:]). // not the commit for the finalized height, but for the sealed height + Str("checkpoint_file", checkpointFile). Uint("blocks_to_skip", blocksToSkip). Msgf("admintool: protocol snapshot generated successfully") - return commands.ConvertToMap(serializable.Encodable()) + return commands.ConvertToMap(protocolSnapshotResponse{ + Snapshot: serializable.Encodable(), + Checkpoint: checkpointFile, + }) +} + +type protocolSnapshotResponse struct { + Snapshot inmem.EncodableSnapshot `json:"snapshot"` + Checkpoint string `json:"checkpoint"` } func (s *ProtocolSnapshotCommand) Validator(req *admin.CommandRequest) error { diff --git a/cmd/Dockerfile b/cmd/Dockerfile index ba858d12b9c..5a746b3bd70 100644 --- a/cmd/Dockerfile +++ b/cmd/Dockerfile @@ -37,7 +37,7 @@ WORKDIR /app ARG GOARCH=amd64 # TAGS can be overriden to modify the go build tags (e.g. build without netgo) -ARG TAGS="netgo" +ARG TAGS="netgo,osusergo" # CC flag can be overwritten to specify a C compiler ARG CC="" # CGO_FLAG uses ADX instructions by default, flag can be overwritten to build without ADX diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 3c8e868afe8..03f942481ca 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -1315,7 +1315,7 @@ func (builder *FlowAccessNodeBuilder) InitIDProviders() { filter.And( filter.HasRole[flow.Identity](flow.RoleConsensus), filter.Not(filter.HasNodeID[flow.Identity](node.Me.NodeID())), - underlay.NotEjectedFilter, + filter.NotEjectedFilter, ), builder.IdentityProvider, ) diff --git a/cmd/bootstrap/cmd/check_machine_account.go b/cmd/bootstrap/cmd/check_machine_account.go index e2261012219..5594f483060 100644 --- a/cmd/bootstrap/cmd/check_machine_account.go +++ b/cmd/bootstrap/cmd/check_machine_account.go @@ -13,6 +13,7 @@ import ( sdk "github.com/onflow/flow-go-sdk" client "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/module/epochs" ) @@ -44,7 +45,10 @@ func checkMachineAccountRun(_ *cobra.Command, _ []string) { // read the private node information - used to get the role var nodeInfoPriv model.NodeInfoPriv - readJSON(filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeInfoPriv, nodeID)), &nodeInfoPriv) + err = common.ReadJSON(filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeInfoPriv, nodeID)), &nodeInfoPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } // read the machine account info file machineAccountInfo := readMachineAccountInfo(nodeID) @@ -97,7 +101,10 @@ func readMachineAccountInfo(nodeID string) model.NodeMachineAccountInfo { var machineAccountInfo model.NodeMachineAccountInfo path := filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID)) - readJSON(path, &machineAccountInfo) + err := common.ReadJSON(path, &machineAccountInfo) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } return machineAccountInfo } diff --git a/cmd/bootstrap/cmd/clusters.go 
b/cmd/bootstrap/cmd/clusters.go deleted file mode 100644 index 27ab1c52605..00000000000 --- a/cmd/bootstrap/cmd/clusters.go +++ /dev/null @@ -1,125 +0,0 @@ -package cmd - -import ( - "errors" - - "github.com/onflow/flow-go/cmd/bootstrap/run" - model "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/cluster" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/assignment" - "github.com/onflow/flow-go/model/flow/factory" - "github.com/onflow/flow-go/model/flow/filter" -) - -// Construct random cluster assignment with internal and partner nodes. -// The number of clusters is read from the `flagCollectionClusters` flag. -// The number of nodes in each cluster is deterministic and only depends on the number of clusters -// and the number of nodes. The repartition of internal and partner nodes is also deterministic -// and only depends on the number of clusters and nodes. -// The identity of internal and partner nodes in each cluster is the non-deterministic and is randomized -// using the system entropy. -// The function guarantees a specific constraint when partitioning the nodes into clusters: -// Each cluster must contain strictly more than 2/3 of internal nodes. If the constraint can't be -// satisfied, an exception is returned. -// Note that if an exception is returned with a certain number of internal/partner nodes, there is no chance -// of succeeding the assignment by re-running the function without increasing the internal nodes ratio. -func constructClusterAssignment(partnerNodes, internalNodes []model.NodeInfo) (flow.AssignmentList, flow.ClusterList, error) { - - partners := model.ToIdentityList(partnerNodes).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) - internals := model.ToIdentityList(internalNodes).Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) - nClusters := int(flagCollectionClusters) - nCollectors := len(partners) + len(internals) - - // ensure we have at least as many collection nodes as clusters - if nCollectors < int(flagCollectionClusters) { - log.Fatal().Msgf("network bootstrap is configured with %d collection nodes, but %d clusters - must have at least one collection node per cluster", - nCollectors, flagCollectionClusters) - } - - // shuffle both collector lists based on a non-deterministic algorithm - partners, err := partners.Shuffle() - if err != nil { - log.Fatal().Err(err).Msg("could not shuffle partners") - } - internals, err = internals.Shuffle() - if err != nil { - log.Fatal().Err(err).Msg("could not shuffle internals") - } - - identifierLists := make([]flow.IdentifierList, nClusters) - // array to track the 2/3 internal-nodes constraint (internal_nodes > 2 * partner_nodes) - constraint := make([]int, nClusters) - - // first, round-robin internal nodes into each cluster - for i, node := range internals { - identifierLists[i%nClusters] = append(identifierLists[i%nClusters], node.NodeID) - constraint[i%nClusters] += 1 - } - - // next, round-robin partner nodes into each cluster - for i, node := range partners { - identifierLists[i%len(identifierLists)] = append(identifierLists[i%len(identifierLists)], node.NodeID) - constraint[i%nClusters] -= 2 - } - - // check the 2/3 constraint: for every cluster `i`, constraint[i] must be strictly positive - for i := 0; i < nClusters; i++ { - if constraint[i] <= 0 { - return nil, nil, errors.New("there isn't enough internal nodes to have at least 2/3 internal nodes in each cluster") - } - } - - assignments := 
assignment.FromIdentifierLists(identifierLists) - - collectors := append(partners, internals...) - clusters, err := factory.NewClusterList(assignments, collectors.ToSkeleton()) - if err != nil { - log.Fatal().Err(err).Msg("could not create cluster list") - } - - return assignments, clusters, nil -} - -func constructRootQCsForClusters( - clusterList flow.ClusterList, - nodeInfos []model.NodeInfo, - clusterBlocks []*cluster.Block, -) []*flow.QuorumCertificate { - - if len(clusterBlocks) != len(clusterList) { - log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)). - Msg("number of clusters needs to equal number of cluster blocks") - } - - qcs := make([]*flow.QuorumCertificate, len(clusterBlocks)) - for i, cluster := range clusterList { - signers := filterClusterSigners(cluster, nodeInfos) - - qc, err := run.GenerateClusterRootQC(signers, cluster, clusterBlocks[i]) - if err != nil { - log.Fatal().Err(err).Int("cluster index", i).Msg("generating collector cluster root QC failed") - } - qcs[i] = qc - } - - return qcs -} - -// Filters a list of nodes to include only nodes that will sign the QC for the -// given cluster. The resulting list of nodes is only nodes that are in the -// given cluster AND are not partner nodes (ie. we have the private keys). -func filterClusterSigners(cluster flow.IdentitySkeletonList, nodeInfos []model.NodeInfo) []model.NodeInfo { - - var filtered []model.NodeInfo - for _, node := range nodeInfos { - _, isInCluster := cluster.ByNodeID(node.NodeID) - isNotPartner := node.Type() == model.NodeInfoTypePrivate - - if isInCluster && isNotPartner { - filtered = append(filtered, node) - } - } - - return filtered -} diff --git a/cmd/bootstrap/cmd/db_encryption_key.go b/cmd/bootstrap/cmd/db_encryption_key.go index c99843e859b..897a7099c90 100644 --- a/cmd/bootstrap/cmd/db_encryption_key.go +++ b/cmd/bootstrap/cmd/db_encryption_key.go @@ -7,6 +7,7 @@ import ( "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" ) @@ -35,7 +36,7 @@ func dbEncryptionKeyRun(_ *cobra.Command, _ []string) { log = log.With().Str("path", dbEncryptionKeyPath).Logger() // check if the key already exists - exists, err := pathExists(path.Join(flagOutdir, dbEncryptionKeyPath)) + exists, err := common.PathExists(path.Join(flagOutdir, dbEncryptionKeyPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if db encryption key already exists") } @@ -50,5 +51,10 @@ func dbEncryptionKeyRun(_ *cobra.Command, _ []string) { } log.Info().Msg("generated db encryption key") - writeText(dbEncryptionKeyPath, dbEncryptionKey) + err = common.WriteText(dbEncryptionKeyPath, flagOutdir, dbEncryptionKey) + if err != nil { + log.Fatal().Err(err).Msg("failed to write file") + } + + log.Info().Msgf("wrote file %s/%s", flagOutdir, dbEncryptionKeyPath) } diff --git a/cmd/bootstrap/cmd/dkg.go b/cmd/bootstrap/cmd/dkg.go index 42d5d84d838..44805407e4e 100644 --- a/cmd/bootstrap/cmd/dkg.go +++ b/cmd/bootstrap/cmd/dkg.go @@ -6,6 +6,7 @@ import ( "github.com/onflow/crypto" bootstrapDKG "github.com/onflow/flow-go/cmd/bootstrap/dkg" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" "github.com/onflow/flow-go/model/encodable" @@ -38,17 +39,25 @@ func runBeaconKG(nodes []model.NodeInfo) dkg.DKGData { encKey := encodable.RandomBeaconPrivKey{PrivateKey: privKey} privKeyShares = 
append(privKeyShares, encKey) - writeJSON(fmt.Sprintf(model.PathRandomBeaconPriv, nodeID), encKey) + err = common.WriteJSON(fmt.Sprintf(model.PathRandomBeaconPriv, nodeID), flagOutdir, encKey) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, fmt.Sprintf(model.PathRandomBeaconPriv, nodeID)) } // write full DKG info that will be used to construct QC - writeJSON(model.PathRootDKGData, inmem.EncodableFullDKG{ + err = common.WriteJSON(model.PathRootDKGData, flagOutdir, inmem.EncodableFullDKG{ GroupKey: encodable.RandomBeaconPubKey{ PublicKey: dkgData.PubGroupKey, }, PubKeyShares: pubKeyShares, PrivKeyShares: privKeyShares, }) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathRootDKGData) return dkgData } diff --git a/cmd/bootstrap/cmd/final_list.go b/cmd/bootstrap/cmd/final_list.go index ac1b000876b..ca34739de2a 100644 --- a/cmd/bootstrap/cmd/final_list.go +++ b/cmd/bootstrap/cmd/final_list.go @@ -1,9 +1,12 @@ package cmd import ( + "fmt" + "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ) @@ -65,7 +68,11 @@ func finalList(cmd *cobra.Command, args []string) { validateNodes(localNodes, registeredNodes) // write node-config.json with the new list of nodes to be used for the `finalize` command - writeJSON(model.PathFinallist, model.ToPublicNodeInfoList(localNodes)) + err := common.WriteJSON(model.PathFinallist, flagOutdir, model.ToPublicNodeInfoList(localNodes)) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathFinallist) } func validateNodes(localNodes []model.NodeInfo, registeredNodes []model.NodeInfo) { @@ -229,18 +236,25 @@ func checkMismatchingNodes(localNodes []model.NodeInfo, registeredNodes []model. 
} func assembleInternalNodesWithoutWeight() []model.NodeInfo { - privInternals := readInternalNodes() + privInternals, err := common.ReadInternalNodeInfos(flagInternalNodePrivInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("failed to read internal node infos") + } log.Info().Msgf("read %v internal private node-info files", len(privInternals)) var nodes []model.NodeInfo for _, internal := range privInternals { // check if address is valid format - validateAddressFormat(internal.Address) + common.ValidateAddressFormat(log, internal.Address) // validate every single internal node - nodeID := validateNodeID(internal.NodeID) + err := common.ValidateNodeID(internal.NodeID) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("invalid node ID: %s", internal.NodeID)) + } + node := model.NewPrivateNodeInfo( - nodeID, + internal.NodeID, internal.Role, internal.Address, flow.DefaultInitialWeight, @@ -255,35 +269,50 @@ func assembleInternalNodesWithoutWeight() []model.NodeInfo { } func assemblePartnerNodesWithoutWeight() []model.NodeInfo { - partners := readPartnerNodes() + partners, err := common.ReadPartnerNodeInfos(flagPartnerNodeInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("failed to read partner node infos") + } log.Info().Msgf("read %v partner node configuration files", len(partners)) return createPublicNodeInfo(partners) } func readStakingContractDetails() []model.NodeInfo { var stakingNodes []model.NodeInfoPub - readJSON(flagStakingNodesPath, &stakingNodes) + err := common.ReadJSON(flagStakingNodesPath, &stakingNodes) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } return createPublicNodeInfo(stakingNodes) } func createPublicNodeInfo(nodes []model.NodeInfoPub) []model.NodeInfo { var publicInfoNodes []model.NodeInfo for _, n := range nodes { - validateAddressFormat(n.Address) + common.ValidateAddressFormat(log, n.Address) // validate every single partner node - nodeID := validateNodeID(n.NodeID) - networkPubKey := validateNetworkPubKey(n.NetworkPubKey) - stakingPubKey := validateStakingPubKey(n.StakingPubKey) + err := common.ValidateNodeID(n.NodeID) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("invalid node ID: %s", n.NodeID)) + } + err = common.ValidateNetworkPubKey(n.NetworkPubKey) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("invalid network public key: %s", n.NetworkPubKey)) + } + err = common.ValidateStakingPubKey(n.StakingPubKey) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("invalid staking public key: %s", n.StakingPubKey)) + } - // all nodes should have equal weight + // all nodes should have equal weight (this might change in the future) node := model.NewPublicNodeInfo( - nodeID, + n.NodeID, n.Role, n.Address, flow.DefaultInitialWeight, - networkPubKey, - stakingPubKey, + n.NetworkPubKey, + n.StakingPubKey, ) publicInfoNodes = append(publicInfoNodes, node) diff --git a/cmd/bootstrap/cmd/finalize.go b/cmd/bootstrap/cmd/finalize.go index 7faee8967f4..62bc9213006 100644 --- a/cmd/bootstrap/cmd/finalize.go +++ b/cmd/bootstrap/cmd/finalize.go @@ -15,11 +15,11 @@ import ( "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/run" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/fvm" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" - "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" 
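The `constructClusterAssignment` logic deleted from `clusters.go` above (and, judging by the test changes later in this diff, re-homed as `common.ConstructClusterAssignment`) tracks its two-thirds-internal requirement with one counter per cluster: +1 for each internal node, -2 for each partner node, and every counter must end strictly positive, which is equivalent to each cluster holding more than twice as many internal nodes as partner nodes. A small self-contained sketch of just that arithmetic, with made-up node counts:

```go
package main

import "fmt"

// checkTwoThirdsConstraint mirrors the deleted bookkeeping: each
// internal node adds +1 to its round-robin cluster, each partner
// node subtracts 2, and every cluster must stay strictly positive.
func checkTwoThirdsConstraint(nClusters, nInternals, nPartners int) bool {
	constraint := make([]int, nClusters)
	for i := 0; i < nInternals; i++ {
		constraint[i%nClusters]++
	}
	for i := 0; i < nPartners; i++ {
		constraint[i%nClusters] -= 2
	}
	for _, c := range constraint {
		if c <= 0 {
			return false
		}
	}
	return true
}

func main() {
	// 7 internals, 2 partners, 2 clusters: the clusters hold 4 and 3
	// internals and one partner each, so 4-2 > 0 and 3-2 > 0 hold.
	fmt.Println(checkTwoThirdsConstraint(2, 7, 2)) // true

	// 5 internals, 3 partners, 2 clusters: cluster 0 ends up with 3
	// internals and 2 partners, and 3 - 2*2 <= 0, so it fails.
	fmt.Println(checkTwoThirdsConstraint(2, 5, 3)) // false
}
```

This also explains the comment in the deleted code: re-running with the same internal/partner counts can never succeed, because the counters depend only on the counts, not on the shuffle.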
"github.com/onflow/flow-go/module/epochs" "github.com/onflow/flow-go/state/protocol" @@ -45,10 +45,7 @@ var ( flagGenesisTokenSupply string ) -// PartnerWeights is the format of the JSON file specifying partner node weights. -type PartnerWeights map[flow.Identifier]uint64 - -// finalizeCmd represents the finalize command +// finalizeCmd represents the finalize command` var finalizeCmd = &cobra.Command{ Use: "finalize", Short: "Finalize the bootstrapping process", @@ -115,11 +112,18 @@ func finalize(cmd *cobra.Command, args []string) { } log.Info().Msg("collecting partner network and staking keys") - partnerNodes := readPartnerNodeInfos() + partnerNodes, err := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full partner node infos") + } log.Info().Msg("") log.Info().Msg("generating internal private networking and staking keys") - internalNodes := readInternalNodeInfos() + internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full internal node infos") + } + log.Info().Msg("") log.Info().Msg("checking constraints on consensus nodes") @@ -195,7 +199,11 @@ func finalize(cmd *cobra.Command, args []string) { } // write snapshot to disk - writeJSON(model.PathRootProtocolStateSnapshot, snapshot.Encodable()) + err = common.WriteJSON(model.PathRootProtocolStateSnapshot, flagOutdir, snapshot.Encodable()) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathRootProtocolStateSnapshot) log.Info().Msg("") // read snapshot and verify consistency @@ -250,7 +258,7 @@ func finalize(cmd *cobra.Command, args []string) { log.Info().Msg("") // print count of all nodes - roleCounts := nodeCountByRole(stakingNodes) + roleCounts := common.NodeCountByRole(stakingNodes) log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", roleCounts[flow.RoleConsensus], flow.RoleConsensus.String())) log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", roleCounts[flow.RoleCollection], flow.RoleCollection.String())) log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", roleCounts[flow.RoleVerification], flow.RoleVerification.String())) @@ -263,7 +271,7 @@ func finalize(cmd *cobra.Command, args []string) { // readRootBlockVotes reads votes for root block func readRootBlockVotes() []*hotstuff.Vote { var votes []*hotstuff.Vote - files, err := filesInDir(flagRootBlockVotesDir) + files, err := common.FilesInDir(flagRootBlockVotesDir) if err != nil { log.Fatal().Err(err).Msg("could not read root block votes") } @@ -275,159 +283,17 @@ func readRootBlockVotes() []*hotstuff.Vote { // read file and append to partners var vote hotstuff.Vote - readJSON(f, &vote) + err = common.ReadJSON(f, &vote) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } + votes = append(votes, &vote) log.Info().Msgf("read vote %v for block %v from signerID %v", vote.ID(), vote.BlockID, vote.SignerID) } return votes } -// readPartnerNodeInfos returns a list of partner nodes after gathering weights -// and public key information from configuration files -func readPartnerNodeInfos() []model.NodeInfo { - partners := readPartnerNodes() - log.Info().Msgf("read %d partner node configuration files", len(partners)) - - var weights PartnerWeights - readJSON(flagPartnerWeights, &weights) - log.Info().Msgf("read %d weights for partner nodes", len(weights)) - - var 
nodes []model.NodeInfo - for _, partner := range partners { - // validate every single partner node - nodeID := validateNodeID(partner.NodeID) - networkPubKey := validateNetworkPubKey(partner.NetworkPubKey) - stakingPubKey := validateStakingPubKey(partner.StakingPubKey) - weight, valid := validateWeight(weights[partner.NodeID]) - if !valid { - log.Error().Msgf("weights: %v", weights) - log.Fatal().Msgf("partner node id %x has no weight", nodeID) - } - if weight != flow.DefaultInitialWeight { - log.Warn().Msgf("partner node (id=%x) has non-default weight (%d != %d)", partner.NodeID, weight, flow.DefaultInitialWeight) - } - - node := model.NewPublicNodeInfo( - nodeID, - partner.Role, - partner.Address, - weight, - networkPubKey.PublicKey, - stakingPubKey.PublicKey, - ) - nodes = append(nodes, node) - } - - return nodes -} - -// readPartnerNodes reads the partner node information -func readPartnerNodes() []model.NodeInfoPub { - var partners []model.NodeInfoPub - files, err := filesInDir(flagPartnerNodeInfoDir) - if err != nil { - log.Fatal().Err(err).Msg("could not read partner node infos") - } - for _, f := range files { - // skip files that do not include node-infos - if !strings.Contains(f, model.PathPartnerNodeInfoPrefix) { - continue - } - - // read file and append to partners - var p model.NodeInfoPub - readJSON(f, &p) - partners = append(partners, p) - } - return partners -} - -// readInternalNodeInfos returns a list of internal nodes after collecting weights -// from configuration files -func readInternalNodeInfos() []model.NodeInfo { - privInternals := readInternalNodes() - log.Info().Msgf("read %v internal private node-info files", len(privInternals)) - - weights := internalWeightsByAddress() - log.Info().Msgf("read %d weights for internal nodes", len(weights)) - - var nodes []model.NodeInfo - for _, internal := range privInternals { - // check if address is valid format - validateAddressFormat(internal.Address) - - // validate every single internal node - nodeID := validateNodeID(internal.NodeID) - weight, valid := validateWeight(weights[internal.Address]) - if !valid { - log.Error().Msgf("weights: %v", weights) - log.Fatal().Msgf("internal node %v has no weight. 
Did you forget to update the node address?", internal) - } - if weight != flow.DefaultInitialWeight { - log.Warn().Msgf("internal node (id=%x) has non-default weight (%d != %d)", internal.NodeID, weight, flow.DefaultInitialWeight) - } - - node := model.NewPrivateNodeInfo( - nodeID, - internal.Role, - internal.Address, - weight, - internal.NetworkPrivKey, - internal.StakingPrivKey, - ) - - nodes = append(nodes, node) - } - - return nodes -} - -// readInternalNodes reads our internal node private infos generated by -// `keygen` command and returns it -func readInternalNodes() []model.NodeInfoPriv { - var internalPrivInfos []model.NodeInfoPriv - - // get files in internal priv node infos directory - files, err := filesInDir(flagInternalNodePrivInfoDir) - if err != nil { - log.Fatal().Err(err).Msg("could not read partner node infos") - } - - // for each of the files - for _, f := range files { - // skip files that do not include node-infos - if !strings.Contains(f, model.PathPrivNodeInfoPrefix) { - continue - } - - // read file and append to partners - var p model.NodeInfoPriv - readJSON(f, &p) - internalPrivInfos = append(internalPrivInfos, p) - } - - return internalPrivInfos -} - -// internalWeightsByAddress returns a mapping of node address by weight for internal nodes -func internalWeightsByAddress() map[string]uint64 { - // read json - var configs []model.NodeConfig - readJSON(flagConfig, &configs) - log.Info().Interface("config", configs).Msgf("read internal node configurations") - - weights := make(map[string]uint64) - for _, config := range configs { - if _, ok := weights[config.Address]; !ok { - weights[config.Address] = config.Weight - } else { - log.Error().Msgf("duplicate internal node address %s", config.Address) - } - } - - return weights -} - // mergeNodeInfos merges the internal and partner nodes and checks if there are no // duplicate addresses or node Ids. 
// @@ -495,31 +361,6 @@ func readDKGData() dkg.DKGData { // Validation utility methods ------------------------------------------------ -func validateNodeID(nodeID flow.Identifier) flow.Identifier { - if nodeID == flow.ZeroID { - log.Fatal().Msg("NodeID must not be zero") - } - return nodeID -} - -func validateNetworkPubKey(key encodable.NetworkPubKey) encodable.NetworkPubKey { - if key.PublicKey == nil { - log.Fatal().Msg("NetworkPubKey must not be nil") - } - return key -} - -func validateStakingPubKey(key encodable.StakingPubKey) encodable.StakingPubKey { - if key.PublicKey == nil { - log.Fatal().Msg("StakingPubKey must not be nil") - } - return key -} - -func validateWeight(weight uint64) (uint64, bool) { - return weight, weight > 0 -} - // loadRootProtocolSnapshot loads the root protocol snapshot from disk func loadRootProtocolSnapshot(path string) (*inmem.Snapshot, error) { data, err := io.ReadFile(filepath.Join(flagOutdir, path)) diff --git a/cmd/bootstrap/cmd/finalize_test.go b/cmd/bootstrap/cmd/finalize_test.go index 47d892350f1..89088898c7d 100644 --- a/cmd/bootstrap/cmd/finalize_test.go +++ b/cmd/bootstrap/cmd/finalize_test.go @@ -2,17 +2,18 @@ package cmd import ( "encoding/hex" - "fmt" "math/rand" "path/filepath" "regexp" "strings" "testing" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" utils "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -91,7 +92,6 @@ func TestFinalize_HappyPath(t *testing.T) { log = log.Hook(hook) finalize(nil, nil) - fmt.Println(hook.logs.String()) assert.Regexp(t, finalizeHappyPathRegex, hook.logs.String()) hook.logs.Reset() @@ -110,15 +110,16 @@ func TestClusterAssignment(t *testing.T) { partners := unittest.NodeInfosFixture(partnersLen, unittest.WithRole(flow.RoleCollection)) internals := unittest.NodeInfosFixture(internalLen, unittest.WithRole(flow.RoleCollection)) + log := zerolog.Nop() // should not error - _, clusters, err := constructClusterAssignment(partners, internals) + _, clusters, err := common.ConstructClusterAssignment(log, model.ToIdentityList(partners), model.ToIdentityList(internals), int(flagCollectionClusters)) require.NoError(t, err) require.True(t, checkClusterConstraint(clusters, partners, internals)) // unhappy Path internals = internals[:21] // reduce one internal node // should error - _, _, err = constructClusterAssignment(partners, internals) + _, _, err = common.ConstructClusterAssignment(log, model.ToIdentityList(partners), model.ToIdentityList(internals), int(flagCollectionClusters)) require.Error(t, err) // revert the flag value flagCollectionClusters = tmp diff --git a/cmd/bootstrap/cmd/genconfig.go b/cmd/bootstrap/cmd/genconfig.go index ccf66104ecc..f1902778f3a 100644 --- a/cmd/bootstrap/cmd/genconfig.go +++ b/cmd/bootstrap/cmd/genconfig.go @@ -5,6 +5,7 @@ import ( "github.com/spf13/cobra" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ) @@ -56,7 +57,11 @@ func genconfigCmdRun(_ *cobra.Command, _ []string) { configs = append(configs, createConf(flow.RoleVerification, i)) } - writeJSON(flagConfig, configs) + err := common.WriteJSON(flagConfig, flagOutdir, configs) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, flagConfig) 
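The Fatal-style validators deleted above returned their inputs and aborted the process on bad data; the rewritten call sites in `final_list.go` earlier in this diff instead check an `error` returned from `common.ValidateNodeID` and friends. The `common` implementations are not included in this diff, so the sketch below only illustrates the likely shape, with simplified stand-in types in place of `flow.Identifier` and the project's encodable key types:

```go
package common

import (
	"crypto/ecdsa" // stand-in for the project's crypto public key types
	"errors"
)

// Identifier is a simplified stand-in for flow.Identifier.
type Identifier [32]byte

// ZeroID is a stand-in for flow.ZeroID.
var ZeroID = Identifier{}

// ValidateNodeID keeps the zero-ID check from the deleted
// validateNodeID, but reports the problem as an error instead of
// terminating the process with log.Fatal.
func ValidateNodeID(nodeID Identifier) error {
	if nodeID == ZeroID {
		return errors.New("NodeID must not be zero")
	}
	return nil
}

// ValidateNetworkPubKey keeps the nil-key check from the deleted
// validateNetworkPubKey.
func ValidateNetworkPubKey(key *ecdsa.PublicKey) error {
	if key == nil {
		return errors.New("NetworkPubKey must not be nil")
	}
	return nil
}

// ValidateWeight keeps the deleted validateWeight semantics: a
// weight is valid iff it is strictly positive.
func ValidateWeight(weight uint64) (uint64, bool) {
	return weight, weight > 0
}
```

Returning errors lets each command decide whether to abort (as the bootstrap CLI still does via `log.Fatal`) or to surface the failure differently, which the old process-killing helpers made impossible to test.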
} // genconfigCmd represents the genconfig command diff --git a/cmd/bootstrap/cmd/key.go b/cmd/bootstrap/cmd/key.go index d8cdc46afa1..7ef97a19a8e 100644 --- a/cmd/bootstrap/cmd/key.go +++ b/cmd/bootstrap/cmd/key.go @@ -2,18 +2,15 @@ package cmd import ( "fmt" - "net" - "strconv" - "github.com/multiformats/go-multiaddr" "github.com/onflow/crypto" "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" - p2putils "github.com/onflow/flow-go/network/p2p/utils" ) var ( @@ -75,7 +72,7 @@ func keyCmdRun(_ *cobra.Command, _ []string) { // validate inputs role := validateRole(flagRole) - validateAddressFormat(flagAddress) + common.ValidateAddressFormat(log, flagAddress) // generate staking and network keys networkKey, stakingKey, secretsDBKey, err := generateKeys() @@ -97,10 +94,29 @@ func keyCmdRun(_ *cobra.Command, _ []string) { } // write files - writeText(model.PathNodeID, []byte(nodeInfo.NodeID.String())) - writeJSON(fmt.Sprintf(model.PathNodeInfoPriv, nodeInfo.NodeID), private) - writeText(fmt.Sprintf(model.PathSecretsEncryptionKey, nodeInfo.NodeID), secretsDBKey) - writeJSON(fmt.Sprintf(model.PathNodeInfoPub, nodeInfo.NodeID), nodeInfo.Public()) + err = common.WriteText(model.PathNodeID, flagOutdir, []byte(nodeInfo.NodeID.String())) + if err != nil { + log.Fatal().Err(err).Msg("failed to write file") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeID) + + err = common.WriteJSON(fmt.Sprintf(model.PathNodeInfoPriv, nodeInfo.NodeID), flagOutdir, private) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfoPriv) + + err = common.WriteText(fmt.Sprintf(model.PathSecretsEncryptionKey, nodeInfo.NodeID), flagOutdir, secretsDBKey) + if err != nil { + log.Fatal().Err(err).Msg("failed to write file") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathSecretsEncryptionKey) + + err = common.WriteJSON(fmt.Sprintf(model.PathNodeInfoPub, nodeInfo.NodeID), flagOutdir, nodeInfo.Public()) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfoPub) // write machine account info if role == flow.RoleCollection || role == flow.RoleConsensus { @@ -114,7 +130,11 @@ func keyCmdRun(_ *cobra.Command, _ []string) { log.Debug().Str("address", flagAddress).Msg("assembling machine account information") // write the public key to terminal for entry in Flow Port machineAccountPriv := assembleNodeMachineAccountKey(machineKey) - writeJSON(fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeInfo.NodeID), machineAccountPriv) + err = common.WriteJSON(fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeInfo.NodeID), flagOutdir, machineAccountPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeMachineAccountPrivateKey) } } @@ -164,27 +184,3 @@ func validateRole(role string) flow.Role { } return parsed } - -// validateAddressFormat validates the address provided by pretty much doing what the network layer would do before -// starting the node -func validateAddressFormat(address string) { - checkErr := func(err error) { - if err != nil { - log.Fatal().Err(err).Str("address", address).Msg("invalid address format.\n" + - `Address needs to be in the 
format hostname:port or ip:port e.g. "flow.com:3569"`) - } - } - - // split address into ip/hostname and port - ip, port, err := net.SplitHostPort(address) - checkErr(err) - - // check that port number is indeed a number - _, err = strconv.Atoi(port) - checkErr(err) - - // create a libp2p address from the ip and port - lp2pAddr := p2putils.MultiAddressStr(ip, port) - _, err = multiaddr.NewMultiaddr(lp2pAddr) - checkErr(err) -} diff --git a/cmd/bootstrap/cmd/keygen.go b/cmd/bootstrap/cmd/keygen.go index 62457fe4b56..43da4d6cf90 100644 --- a/cmd/bootstrap/cmd/keygen.go +++ b/cmd/bootstrap/cmd/keygen.go @@ -5,11 +5,11 @@ import ( "io" "os" - "github.com/onflow/flow-go/cmd" - "github.com/onflow/flow-go/cmd/bootstrap/utils" - "github.com/spf13/cobra" + "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" ) @@ -22,7 +22,7 @@ var keygenCmd = &cobra.Command{ Long: `Generate Staking and Networking keys for a list of nodes provided by the flag '--config'`, Run: func(cmd *cobra.Command, args []string) { // check if out directory exists - exists, err := pathExists(flagOutdir) + exists, err := common.PathExists(flagOutdir) if err != nil { log.Error().Msg("could not check if directory exists") return @@ -49,12 +49,10 @@ var keygenCmd = &cobra.Command{ // write key files writeJSONFile := func(relativePath string, val interface{}) error { - writeJSON(relativePath, val) - return nil + return common.WriteJSON(relativePath, flagOutdir, val) } writeFile := func(relativePath string, data []byte) error { - writeText(relativePath, data) - return nil + return common.WriteText(relativePath, flagOutdir, data) } log.Info().Msg("writing internal private key files") @@ -85,7 +83,7 @@ var keygenCmd = &cobra.Command{ } // count roles - roleCounts := nodeCountByRole(nodes) + roleCounts := common.NodeCountByRole(nodes) for role, count := range roleCounts { log.Info().Msg(fmt.Sprintf("created keys for %d %s nodes", count, role.String())) } @@ -127,5 +125,9 @@ func genNodePubInfo(nodes []model.NodeInfo) { for _, node := range nodes { pubNodes = append(pubNodes, node.Public()) } - writeJSON(model.PathInternalNodeInfosPub, pubNodes) + err := common.WriteJSON(model.PathInternalNodeInfosPub, flagOutdir, pubNodes) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathInternalNodeInfosPub) } diff --git a/cmd/bootstrap/cmd/keys.go b/cmd/bootstrap/cmd/keys.go index b9d32528727..f33b5f28241 100644 --- a/cmd/bootstrap/cmd/keys.go +++ b/cmd/bootstrap/cmd/keys.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/crypto" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/encodable" "github.com/onflow/flow-go/model/flow" @@ -20,7 +21,11 @@ import ( func genNetworkAndStakingKeys() []model.NodeInfo { var nodeConfigs []model.NodeConfig - readJSON(flagConfig, &nodeConfigs) + err := common.ReadJSON(flagConfig, &nodeConfigs) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } + nodes := len(nodeConfigs) log.Info().Msgf("read %v node configurations", nodes) @@ -62,8 +67,8 @@ func assembleNodeInfo(nodeConfig model.NodeConfig, networkKey, stakingKey crypto } log.Debug(). - Str("networkPubKey", pubKeyToString(networkKey.PublicKey())). - Str("stakingPubKey", pubKeyToString(stakingKey.PublicKey())). 
+ Str("networkPubKey", networkKey.PublicKey().String()). + Str("stakingPubKey", stakingKey.PublicKey().String()). Msg("encoded public staking and network keys") nodeInfo := model.NewPrivateNodeInfo( diff --git a/cmd/bootstrap/cmd/machine_account.go b/cmd/bootstrap/cmd/machine_account.go index a1305ae1035..bdaa7a08922 100644 --- a/cmd/bootstrap/cmd/machine_account.go +++ b/cmd/bootstrap/cmd/machine_account.go @@ -9,6 +9,7 @@ import ( "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ioutils "github.com/onflow/flow-go/utils/io" @@ -52,7 +53,7 @@ func machineAccountRun(_ *cobra.Command, _ []string) { // check if node-machine-account-key.priv.json path exists machineAccountKeyPath := fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeID) - keyExists, err := pathExists(filepath.Join(flagOutdir, machineAccountKeyPath)) + keyExists, err := common.PathExists(filepath.Join(flagOutdir, machineAccountKeyPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if node-machine-account-key.priv.json exists") } @@ -63,7 +64,7 @@ func machineAccountRun(_ *cobra.Command, _ []string) { // check if node-machine-account-info.priv.json file exists in boostrap dir machineAccountInfoPath := fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID) - infoExists, err := pathExists(filepath.Join(flagOutdir, machineAccountInfoPath)) + infoExists, err := common.PathExists(filepath.Join(flagOutdir, machineAccountInfoPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if node-machine-account-info.priv.json exists") } @@ -80,7 +81,11 @@ func machineAccountRun(_ *cobra.Command, _ []string) { machineAccountInfo := assembleNodeMachineAccountInfo(machinePrivKey, flagMachineAccountAddress) // write machine account info - writeJSON(fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID), machineAccountInfo) + err = common.WriteJSON(fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID), flagOutdir, machineAccountInfo) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, fmt.Sprintf(model.PathNodeMachineAccountInfoPriv, nodeID)) } // readMachineAccountPriv reads the machine account private key files in the bootstrap dir @@ -88,7 +93,10 @@ func readMachineAccountKey(nodeID string) crypto.PrivateKey { var machineAccountPriv model.NodeMachineAccountKey path := filepath.Join(flagOutdir, fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeID)) - readJSON(path, &machineAccountPriv) + err := common.ReadJSON(path, &machineAccountPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to read json") + } return machineAccountPriv.PrivateKey.PrivateKey } diff --git a/cmd/bootstrap/cmd/machine_account_key.go b/cmd/bootstrap/cmd/machine_account_key.go index 9ec26c68520..14bdef868df 100644 --- a/cmd/bootstrap/cmd/machine_account_key.go +++ b/cmd/bootstrap/cmd/machine_account_key.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" ) @@ -37,7 +38,7 @@ func machineAccountKeyRun(_ *cobra.Command, _ []string) { // check if node-machine-account-key.priv.json path exists machineAccountKeyPath := fmt.Sprintf(model.PathNodeMachineAccountPrivateKey, nodeID) - keyExists, err := pathExists(path.Join(flagOutdir, machineAccountKeyPath)) + keyExists, err := 
common.PathExists(path.Join(flagOutdir, machineAccountKeyPath)) if err != nil { log.Fatal().Err(err).Msg("could not check if node-machine-account-key.priv.json exists") } @@ -56,5 +57,9 @@ func machineAccountKeyRun(_ *cobra.Command, _ []string) { // also write the public key to terminal for entry in Flow Port machineAccountPriv := assembleNodeMachineAccountKey(machineKey) - writeJSON(machineAccountKeyPath, machineAccountPriv) + err = common.WriteJSON(machineAccountKeyPath, flagOutdir, machineAccountPriv) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msg(fmt.Sprintf("wrote file %s/%s", flagOutdir, machineAccountKeyPath)) } diff --git a/cmd/bootstrap/cmd/machine_account_key_test.go b/cmd/bootstrap/cmd/machine_account_key_test.go index adcf45ea4b2..dfd93fcd5f6 100644 --- a/cmd/bootstrap/cmd/machine_account_key_test.go +++ b/cmd/bootstrap/cmd/machine_account_key_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" ioutils "github.com/onflow/flow-go/utils/io" @@ -49,7 +50,7 @@ func TestMachineAccountKeyFileExists(t *testing.T) { // read file priv key file before command var machineAccountPrivBefore model.NodeMachineAccountKey - readJSON(machineKeyFilePath, &machineAccountPrivBefore) + require.NoError(t, common.ReadJSON(machineKeyFilePath, &machineAccountPrivBefore)) // run command with flags machineAccountKeyRun(nil, nil) @@ -59,7 +60,7 @@ func TestMachineAccountKeyFileExists(t *testing.T) { // read machine account key file again var machineAccountPrivAfter model.NodeMachineAccountKey - readJSON(machineKeyFilePath, &machineAccountPrivAfter) + require.NoError(t, common.ReadJSON(machineKeyFilePath, &machineAccountPrivAfter)) // check if key was modified assert.Equal(t, machineAccountPrivBefore, machineAccountPrivAfter) diff --git a/cmd/bootstrap/cmd/machine_account_test.go b/cmd/bootstrap/cmd/machine_account_test.go index 7a1627ca3ac..27631a3bddc 100644 --- a/cmd/bootstrap/cmd/machine_account_test.go +++ b/cmd/bootstrap/cmd/machine_account_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/bootstrap" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" @@ -115,14 +116,14 @@ func TestMachineAccountInfoFileExists(t *testing.T) { // read in info file var machineAccountInfoBefore model.NodeMachineAccountInfo - readJSON(machineInfoFilePath, &machineAccountInfoBefore) + require.NoError(t, common.ReadJSON(machineInfoFilePath, &machineAccountInfoBefore)) // run again and make sure info file was not changed machineAccountRun(nil, nil) require.Regexp(t, regex, hook.logs.String()) var machineAccountInfoAfter model.NodeMachineAccountInfo - readJSON(machineInfoFilePath, &machineAccountInfoAfter) + require.NoError(t, common.ReadJSON(machineInfoFilePath, &machineAccountInfoAfter)) assert.Equal(t, machineAccountInfoBefore, machineAccountInfoAfter) }) diff --git a/cmd/bootstrap/cmd/observer_network_key.go b/cmd/bootstrap/cmd/observer_network_key.go index 330b2cad47e..dfb6a2f609e 100644 --- a/cmd/bootstrap/cmd/observer_network_key.go +++ b/cmd/bootstrap/cmd/observer_network_key.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/utils" + 
"github.com/onflow/flow-go/cmd/util/cmd/common" ) var ( @@ -47,7 +48,7 @@ func observerNetworkKeyRun(_ *cobra.Command, _ []string) { } // if the file already exists, exit - keyExists, err := pathExists(flagOutputFile) + keyExists, err := common.PathExists(flagOutputFile) if err != nil { log.Fatal().Err(err).Msgf("could not check if %s exists", flagOutputFile) } diff --git a/cmd/bootstrap/cmd/partner_infos.go b/cmd/bootstrap/cmd/partner_infos.go index 05db3192609..653ee861ff7 100644 --- a/cmd/bootstrap/cmd/partner_infos.go +++ b/cmd/bootstrap/cmd/partner_infos.go @@ -64,7 +64,7 @@ func populatePartnerInfosRun(_ *cobra.Command, _ []string) { flowClient := getFlowClient() - partnerWeights := make(PartnerWeights) + partnerWeights := make(common.PartnerWeights) skippedNodes := 0 numOfPartnerNodesByRole := map[flow.Role]int{ flow.RoleCollection: 0, @@ -203,12 +203,20 @@ func validateANNetworkKey(key string) error { // writeNodePubInfoFile writes the node-pub-info file func writeNodePubInfoFile(info *bootstrap.NodeInfoPub) { fileOutputPath := fmt.Sprintf(bootstrap.PathNodeInfoPub, info.NodeID) - writeJSON(fileOutputPath, info) + err := common.WriteJSON(fileOutputPath, flagOutdir, info) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, fileOutputPath) } // writePartnerWeightsFile writes the partner weights file -func writePartnerWeightsFile(partnerWeights PartnerWeights) { - writeJSON(bootstrap.FileNamePartnerWeights, partnerWeights) +func writePartnerWeightsFile(partnerWeights common.PartnerWeights) { + err := common.WriteJSON(bootstrap.FileNamePartnerWeights, flagOutdir, partnerWeights) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, bootstrap.FileNamePartnerWeights) } func printNodeCounts(numOfNodesByType map[flow.Role]int, totalNumOfPartnerNodes, skippedNodes int) { diff --git a/cmd/bootstrap/cmd/qc.go b/cmd/bootstrap/cmd/qc.go index 6e97363051b..22474ed1d19 100644 --- a/cmd/bootstrap/cmd/qc.go +++ b/cmd/bootstrap/cmd/qc.go @@ -5,6 +5,7 @@ import ( "path/filepath" "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/dkg" @@ -48,6 +49,10 @@ func constructRootVotes(block *flow.Block, allNodes, internalNodes []bootstrap.N for _, vote := range votes { path := filepath.Join(bootstrap.DirnameRootBlockVotes, fmt.Sprintf(bootstrap.FilenameRootBlockVote, vote.SignerID)) - writeJSON(path, vote) + err = common.WriteJSON(path, flagOutdir, vote) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, path) } } diff --git a/cmd/bootstrap/cmd/rootblock.go b/cmd/bootstrap/cmd/rootblock.go index e32b9d95811..9810834c2e4 100644 --- a/cmd/bootstrap/cmd/rootblock.go +++ b/cmd/bootstrap/cmd/rootblock.go @@ -19,6 +19,7 @@ import ( "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" model "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/flow" ) @@ -144,11 +145,18 @@ func rootBlock(cmd *cobra.Command, args []string) { } log.Info().Msg("collecting partner network and staking keys") - partnerNodes := readPartnerNodeInfos() + partnerNodes, err := common.ReadFullPartnerNodeInfos(log, flagPartnerWeights, flagPartnerNodeInfoDir) + if err != 
nil { + log.Fatal().Err(err).Msg("failed to read full partner node infos") + } log.Info().Msg("") log.Info().Msg("generating internal private networking and staking keys") - internalNodes := readInternalNodeInfos() + internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagConfig) + if err != nil { + log.Fatal().Err(err).Msg("failed to read full internal node infos") + } + log.Info().Msg("") log.Info().Msg("checking constraints on consensus nodes") @@ -157,7 +165,11 @@ func rootBlock(cmd *cobra.Command, args []string) { log.Info().Msg("assembling network and staking keys") stakingNodes := mergeNodeInfos(internalNodes, partnerNodes) - writeJSON(model.PathNodeInfosPub, model.ToPublicNodeInfoList(stakingNodes)) + err = common.WriteJSON(model.PathNodeInfosPub, flagOutdir, model.ToPublicNodeInfoList(stakingNodes)) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathNodeInfosPub) log.Info().Msg("") log.Info().Msg("running DKG for consensus nodes") @@ -168,7 +180,7 @@ func rootBlock(cmd *cobra.Command, args []string) { participants := model.ToIdentityList(stakingNodes).Sort(flow.Canonical[flow.Identity]) log.Info().Msg("computing collection node clusters") - assignments, clusters, err := constructClusterAssignment(partnerNodes, internalNodes) + assignments, clusters, err := common.ConstructClusterAssignment(log, model.ToIdentityList(partnerNodes), model.ToIdentityList(internalNodes), int(flagCollectionClusters)) if err != nil { log.Fatal().Err(err).Msg("unable to generate cluster assignment") } @@ -179,7 +191,7 @@ func rootBlock(cmd *cobra.Command, args []string) { log.Info().Msg("") log.Info().Msg("constructing root QCs for collection node clusters") - clusterQCs := constructRootQCsForClusters(clusters, internalNodes, clusterBlocks) + clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) log.Info().Msg("") log.Info().Msg("constructing root header") @@ -206,12 +218,20 @@ func rootBlock(cmd *cobra.Command, args []string) { IntermediaryEpochData: intermediaryEpochData, IntermediaryParamsData: intermediaryParamsData, } - writeJSON(model.PathIntermediaryBootstrappingData, intermediaryData) + err = common.WriteJSON(model.PathIntermediaryBootstrappingData, flagOutdir, intermediaryData) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathIntermediaryBootstrappingData) log.Info().Msg("") log.Info().Msg("constructing root block") block := constructRootBlock(header, epochSetup, epochCommit) - writeJSON(model.PathRootBlockData, block) + err = common.WriteJSON(model.PathRootBlockData, flagOutdir, block) + if err != nil { + log.Fatal().Err(err).Msg("failed to write json") + } + log.Info().Msgf("wrote file %s/%s", flagOutdir, model.PathRootBlockData) log.Info().Msg("") log.Info().Msg("constructing and writing votes") diff --git a/cmd/bootstrap/cmd/util.go b/cmd/bootstrap/cmd/util.go index 38bdc481c8a..ea89d1d2db6 100644 --- a/cmd/bootstrap/cmd/util.go +++ b/cmd/bootstrap/cmd/util.go @@ -2,16 +2,6 @@ package cmd import ( "crypto/rand" - "encoding/json" - "fmt" - "os" - "path/filepath" - - "github.com/onflow/crypto" - - model "github.com/onflow/flow-go/model/bootstrap" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/utils/io" ) func GenerateRandomSeeds(n int, seedLen int) [][]byte { @@ -29,90 +19,3 @@ func GenerateRandomSeed(seedLen int) []byte { } return 
seed } - -func readJSON(path string, target interface{}) { - dat, err := io.ReadFile(path) - if err != nil { - log.Fatal().Err(err).Msg("cannot read json") - } - err = json.Unmarshal(dat, target) - if err != nil { - log.Fatal().Err(err).Msgf("cannot unmarshal json in file %s", path) - } -} - -func writeJSON(path string, data interface{}) { - bz, err := json.MarshalIndent(data, "", " ") - if err != nil { - log.Fatal().Err(err).Msg("cannot marshal json") - } - - writeText(path, bz) -} - -func writeText(path string, data []byte) { - path = filepath.Join(flagOutdir, path) - - err := os.MkdirAll(filepath.Dir(path), 0755) - if err != nil { - log.Fatal().Err(err).Msg("could not create output dir") - } - - err = os.WriteFile(path, data, 0644) - if err != nil { - log.Fatal().Err(err).Msg("could not write file") - } - - log.Info().Msgf("wrote file %v", path) -} - -func pubKeyToString(key crypto.PublicKey) string { - return fmt.Sprintf("%x", key.Encode()) -} - -func filesInDir(dir string) ([]string, error) { - exists, err := pathExists(dir) - if err != nil { - return nil, fmt.Errorf("could not check if dir exists: %w", err) - } - - if !exists { - return nil, fmt.Errorf("dir %v does not exist", dir) - } - - var files []string - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if !info.IsDir() { - files = append(files, path) - } - return nil - }) - return files, err -} - -// pathExists -func pathExists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func nodeCountByRole(nodes []model.NodeInfo) map[flow.Role]uint16 { - roleCounts := map[flow.Role]uint16{ - flow.RoleCollection: 0, - flow.RoleConsensus: 0, - flow.RoleExecution: 0, - flow.RoleVerification: 0, - flow.RoleAccess: 0, - } - for _, node := range nodes { - roleCounts[node.Role] = roleCounts[node.Role] + 1 - } - - return roleCounts -} diff --git a/cmd/dynamic_startup.go b/cmd/dynamic_startup.go index 49ccd3dcb7a..616773c1e00 100644 --- a/cmd/dynamic_startup.go +++ b/cmd/dynamic_startup.go @@ -3,116 +3,21 @@ package cmd import ( "context" "encoding/hex" - "encoding/json" "fmt" "path/filepath" "strconv" "strings" - "time" "github.com/onflow/crypto" - "github.com/rs/zerolog" - "github.com/sethvargo/go-retry" - client "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" badgerstate "github.com/onflow/flow-go/state/protocol/badger" utilsio "github.com/onflow/flow-go/utils/io" - - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/state/protocol/inmem" ) -const getSnapshotTimeout = 30 * time.Second - -// GetProtocolSnapshot callback that will get latest finalized protocol snapshot -type GetProtocolSnapshot func(ctx context.Context) (protocol.Snapshot, error) - -// GetSnapshot will attempt to get the latest finalized protocol snapshot with the given flow configs -func GetSnapshot(ctx context.Context, client *client.Client) (*inmem.Snapshot, error) { - ctx, cancel := context.WithTimeout(ctx, getSnapshotTimeout) - defer cancel() - - b, err := client.GetLatestProtocolStateSnapshot(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get latest finalized protocol state snapshot during pre-initialization: %w", err) - } - - var snapshotEnc inmem.EncodableSnapshot - err = json.Unmarshal(b, &snapshotEnc) - if err != nil 
{ - return nil, fmt.Errorf("failed to unmarshal protocol state snapshot: %w", err) - } - - snapshot := inmem.SnapshotFromEncodable(snapshotEnc) - return snapshot, nil -} - -// GetSnapshotAtEpochAndPhase will get the latest finalized protocol snapshot and check the current epoch and epoch phase. -// If we are past the target epoch and epoch phase we exit the retry mechanism immediately. -// If not check the snapshot at the specified interval until we reach the target epoch and phase. -func GetSnapshotAtEpochAndPhase(ctx context.Context, log zerolog.Logger, startupEpoch uint64, startupEpochPhase flow.EpochPhase, retryInterval time.Duration, getSnapshot GetProtocolSnapshot) (protocol.Snapshot, error) { - start := time.Now() - - log = log.With(). - Uint64("target_epoch_counter", startupEpoch). - Str("target_epoch_phase", startupEpochPhase.String()). - Logger() - - log.Info().Msg("starting dynamic startup - waiting until target epoch/phase to start...") - - var snapshot protocol.Snapshot - var err error - - backoff := retry.NewConstant(retryInterval) - err = retry.Do(ctx, backoff, func(ctx context.Context) error { - snapshot, err = getSnapshot(ctx) - if err != nil { - err = fmt.Errorf("failed to get protocol snapshot: %w", err) - log.Error().Err(err).Msg("could not get protocol snapshot") - return retry.RetryableError(err) - } - - // if we encounter any errors interpreting the snapshot something went wrong stop retrying - currEpochCounter, err := snapshot.Epochs().Current().Counter() - if err != nil { - return fmt.Errorf("failed to get the current epoch counter: %w", err) - } - - currEpochPhase, err := snapshot.Phase() - if err != nil { - return fmt.Errorf("failed to get the current epoch phase: %w", err) - } - - // check if we are in or past the target epoch and phase - if currEpochCounter > startupEpoch || (currEpochCounter == startupEpoch && currEpochPhase >= startupEpochPhase) { - log.Info(). - Dur("time-waiting", time.Since(start)). - Uint64("current-epoch", currEpochCounter). - Str("current-epoch-phase", currEpochPhase.String()). - Msg("finished dynamic startup - reached desired epoch and phase") - - return nil - } - - // wait then poll for latest snapshot again - log.Info(). - Dur("time-waiting", time.Since(start)). - Uint64("current-epoch", currEpochCounter). - Str("current-epoch-phase", currEpochPhase.String()). 
- Msgf("waiting for epoch %d and phase %s", startupEpoch, startupEpochPhase.String()) - - return retry.RetryableError(fmt.Errorf("dynamic startup epoch and epoch phase not reached")) - }) - if err != nil { - return nil, fmt.Errorf("failed to wait for target epoch and phase: %w", err) - } - - return snapshot, nil -} - // ValidateDynamicStartupFlags will validate flags necessary for dynamic node startup // - assert dynamic-startup-access-publickey is valid ECDSA_P256 public key hex // - assert dynamic-startup-access-address is not empty @@ -182,7 +87,7 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { } getSnapshotFunc := func(ctx context.Context) (protocol.Snapshot, error) { - return GetSnapshot(ctx, flowClient) + return common.GetSnapshot(ctx, flowClient) } // validate dynamic startup epoch flag @@ -199,7 +104,7 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { return err } - snapshot, err := GetSnapshotAtEpochAndPhase( + snapshot, err := common.GetSnapshotAtEpochAndPhase( ctx, log, startupEpoch, @@ -218,7 +123,7 @@ func DynamicStartPreInit(nodeConfig *NodeConfig) error { // validateDynamicStartEpochFlags parse the start epoch flag and return the uin64 value, // if epoch = current return the current epoch counter -func validateDynamicStartEpochFlags(ctx context.Context, getSnapshot GetProtocolSnapshot, flagEpoch string) (uint64, error) { +func validateDynamicStartEpochFlags(ctx context.Context, getSnapshot common.GetProtocolSnapshot, flagEpoch string) (uint64, error) { // if flag is not `current` sentinel, it must be a specific epoch counter (uint64) if flagEpoch != "current" { diff --git a/cmd/dynamic_startup_test.go b/cmd/dynamic_startup_test.go index 775e8221fbf..27da13fca72 100644 --- a/cmd/dynamic_startup_test.go +++ b/cmd/dynamic_startup_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol" protocolmock "github.com/onflow/flow-go/state/protocol/mock" @@ -87,7 +88,7 @@ func TestGetSnapshotAtEpochAndPhase(t *testing.T) { _, _, targetPhase, targetEpoch := dynamicJoinFlagsFixture() // get snapshot - actualSnapshot, err := GetSnapshotAtEpochAndPhase( + actualSnapshot, err := common.GetSnapshotAtEpochAndPhase( context.Background(), unittest.Logger(), targetEpoch, @@ -113,7 +114,7 @@ func TestGetSnapshotAtEpochAndPhase(t *testing.T) { _, _, targetPhase, targetEpoch := dynamicJoinFlagsFixture() // get snapshot - actualSnapshot, err := GetSnapshotAtEpochAndPhase( + actualSnapshot, err := common.GetSnapshotAtEpochAndPhase( context.Background(), unittest.Logger(), targetEpoch, @@ -143,7 +144,7 @@ func TestGetSnapshotAtEpochAndPhase(t *testing.T) { _, _, targetPhase, _ := dynamicJoinFlagsFixture() // get snapshot - actualSnapshot, err := GetSnapshotAtEpochAndPhase( + actualSnapshot, err := common.GetSnapshotAtEpochAndPhase( context.Background(), unittest.Logger(), 5, diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index 8aee600f60e..b50aab6144d 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -1660,6 +1660,19 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() { ), } + broadcaster := engine.NewBroadcaster() + // create BlockTracker that will track for new blocks (finalized and sealed) and + // handles block-related operations. 
+	blockTracker, err := subscription.NewBlockTracker(
+		node.State,
+		builder.FinalizedRootBlock.Header.Height,
+		node.Storage.Headers,
+		broadcaster,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize block tracker: %w", err)
+	}
+
 	backendParams := backend.Params{
 		State:  node.State,
 		Blocks: node.Storage.Blocks,
@@ -1678,6 +1691,14 @@ func (builder *ObserverServiceBuilder) enqueueRPCServer() {
 		Log:                  node.Logger,
 		SnapshotHistoryLimit: backend.DefaultSnapshotHistoryLimit,
 		Communicator:         backend.NewNodeCommunicator(backendConfig.CircuitBreakerConfig.Enabled),
+		BlockTracker:         blockTracker,
+		SubscriptionHandler: subscription.NewSubscriptionHandler(
+			builder.Logger,
+			broadcaster,
+			builder.stateStreamConf.ClientSendTimeout,
+			builder.stateStreamConf.ResponseLimit,
+			builder.stateStreamConf.ClientSendBufferSize,
+		),
 	}
 
 	if builder.localServiceAPIEnabled {
diff --git a/cmd/scaffold.go b/cmd/scaffold.go
index 1086314265b..d645dc9cf85 100644
--- a/cmd/scaffold.go
+++ b/cmd/scaffold.go
@@ -1253,7 +1253,7 @@ func (fnb *FlowNodeBuilder) InitIDProviders() {
 			filter.And(
 				filter.HasRole[flow.Identity](flow.RoleConsensus),
 				filter.Not(filter.HasNodeID[flow.Identity](node.Me.NodeID())),
-				underlay.NotEjectedFilter,
+				filter.NotEjectedFilter,
 			),
 			node.IdentityProvider,
 		)
diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go
new file mode 100644
index 00000000000..3e912b6d224
--- /dev/null
+++ b/cmd/util/cmd/common/clusters.go
@@ -0,0 +1,198 @@
+package common
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/cadence"
+
+	"github.com/onflow/flow-go/cmd/bootstrap/run"
+	"github.com/onflow/flow-go/model/bootstrap"
+	model "github.com/onflow/flow-go/model/bootstrap"
+	"github.com/onflow/flow-go/model/cluster"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/assignment"
+	"github.com/onflow/flow-go/model/flow/factory"
+	"github.com/onflow/flow-go/model/flow/filter"
+	"github.com/onflow/flow-go/module/signature"
+)
+
+// ConstructClusterAssignment generates a partially randomized collector cluster assignment with internal and partner nodes.
+// The number of nodes in each cluster is deterministic and only depends on the number of clusters
+// and the number of nodes. The split between internal and partner nodes in each cluster is also
+// deterministic and only depends on the number of clusters and nodes.
+// The identity of the internal and partner nodes in each cluster is non-deterministic and is randomized
+// using the system entropy.
+// The function guarantees a specific constraint when partitioning the nodes into clusters:
+// each cluster must contain strictly more than 2/3 internal nodes. If the constraint can't be
+// satisfied, an exception is returned.
+// Note that if an exception is returned for a certain number of internal/partner nodes, re-running
+// the function with the same inputs cannot succeed; the ratio of internal nodes must be increased.
+// Args:
+// - log: the logger instance.
+// - partnerNodes: identity list of partner nodes.
+// - internalNodes: identity list of internal nodes.
+// - numCollectionClusters: the number of clusters to generate.
+// Returns:
+// - flow.AssignmentList: the generated assignment list.
+// - flow.ClusterList: the generated collection cluster list.
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
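+//
+// For illustration (hypothetical counts, not taken from this change): with 6 internal
+// and 2 partner collectors and numCollectionClusters=2, each cluster receives 3
+// internal and 1 partner node, and the per-cluster balance 3*(+1) + 1*(-2) = 1 is
+// strictly positive, so the constraint holds. With 5 internal and 2 partner
+// collectors, one cluster would receive 2 internal and 1 partner node (2 - 2 = 0),
+// and the constraint check below would reject the assignment.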
+func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes flow.IdentityList, numCollectionClusters int) (flow.AssignmentList, flow.ClusterList, error) {
+
+	partners := partnerNodes.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
+	internals := internalNodes.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
+	nCollectors := len(partners) + len(internals)
+
+	// ensure we have at least as many collection nodes as clusters
+	if nCollectors < int(numCollectionClusters) {
+		log.Fatal().Msgf("network bootstrap is configured with %d collection nodes, but %d clusters - must have at least one collection node per cluster",
+			nCollectors, numCollectionClusters)
+	}
+
+	// shuffle both collector lists based on a non-deterministic algorithm
+	partners, err := partners.Shuffle()
+	if err != nil {
+		log.Fatal().Err(err).Msg("could not shuffle partners")
+	}
+	internals, err = internals.Shuffle()
+	if err != nil {
+		log.Fatal().Err(err).Msg("could not shuffle internals")
+	}
+
+	// The following is a heuristic for distributing the internal collector nodes (private staking key available
+	// to generate the QC for the cluster root block) and partner nodes (private staking key unknown). We need
+	// internal nodes to control strictly more than 2/3 of the cluster's total weight. The heuristic distributes
+	// collectors round-robin across the specified number of clusters.
+	// This heuristic only works when all collectors have equal weight! The following sanity check enforces a
+	// sufficient fraction of internal nodes:
+	if len(partnerNodes) > 0 && len(partnerNodes) > 2*len(internalNodes) {
+		return nil, nil, fmt.Errorf("requiring at least x>0 number of partner nodes and y > 2x number of internal nodes, but got x,y=%d,%d", len(partnerNodes), len(internalNodes))
+	}
+	// sanity check ^ enforces that there is at least one internal node, hence `internalNodes[0].InitialWeight` is always a valid reference weight
+	refWeight := internalNodes[0].InitialWeight
+
+	identifierLists := make([]flow.IdentifierList, numCollectionClusters)
+	// array to track the 2/3 internal-nodes constraint (internal_nodes > 2 * partner_nodes)
+	constraint := make([]int, numCollectionClusters)
+
+	// first, round-robin internal nodes into each cluster
+	for i, node := range internals {
+		if node.InitialWeight != refWeight {
+			return nil, nil, fmt.Errorf("current implementation requires all collectors (partner & internal nodes) to have equal weight")
+		}
+		clusterIndex := i % numCollectionClusters
+		identifierLists[clusterIndex] = append(identifierLists[clusterIndex], node.NodeID)
+		constraint[clusterIndex] += 1
+	}
+
+	// next, round-robin partner nodes into each cluster
+	for i, node := range partners {
+		if node.InitialWeight != refWeight {
+			return nil, nil, fmt.Errorf("current implementation requires all collectors (partner & internal nodes) to have equal weight")
+		}
+		clusterIndex := i % numCollectionClusters
+		identifierLists[clusterIndex] = append(identifierLists[clusterIndex], node.NodeID)
+		constraint[clusterIndex] -= 2
+	}
+
+	// check the 2/3 constraint: for every cluster `i`, constraint[i] must be strictly positive
+	for i := 0; i < numCollectionClusters; i++ {
+		if constraint[i] <= 0 {
+			return nil, nil, errors.New("there are not enough internal nodes to have strictly more than 2/3 internal nodes in each cluster")
+		}
+	}
+
+	assignments := assignment.FromIdentifierLists(identifierLists)
+
+	collectors := append(partners, internals...)
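+	// build the canonical cluster list from the assignments; the combined collector
+	// identities are reduced to their skeleton representation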
+	clusters, err := factory.NewClusterList(assignments, collectors.ToSkeleton())
+	if err != nil {
+		log.Fatal().Err(err).Msg("could not create cluster list")
+	}
+
+	return assignments, clusters, nil
+}
+
+// ConstructRootQCsForClusters constructs a root QC for each cluster in the list.
+// Args:
+// - log: the logger instance.
+// - clusterList: list of clusters.
+// - nodeInfos: list of NodeInfos (must contain all internal nodes).
+// - clusterBlocks: list of root blocks, one for each cluster.
+// Returns:
+// - []*flow.QuorumCertificate: the constructed root QCs, ordered by cluster index.
+func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate {
+
+	if len(clusterBlocks) != len(clusterList) {
+		log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)).
+			Msg("number of clusters needs to equal number of cluster blocks")
+	}
+
+	qcs := make([]*flow.QuorumCertificate, len(clusterBlocks))
+	for i, cluster := range clusterList {
+		signers := filterClusterSigners(cluster, nodeInfos)
+
+		qc, err := run.GenerateClusterRootQC(signers, cluster, clusterBlocks[i])
+		if err != nil {
+			log.Fatal().Err(err).Int("cluster index", i).Msg("generating collector cluster root QC failed")
+		}
+		qcs[i] = qc
+	}
+
+	return qcs
+}
+
+// ConvertClusterAssignmentsCdc converts the Go cluster assignment list to the Cadence type `[[String]]`.
+func ConvertClusterAssignmentsCdc(assignments flow.AssignmentList) cadence.Array {
+	assignmentsCdc := make([]cadence.Value, len(assignments))
+	for i, asmt := range assignments {
+		vals := make([]cadence.Value, asmt.Len())
+		for j, nodeID := range asmt {
+			vals[j] = cadence.String(nodeID.String())
+		}
+		assignmentsCdc[i] = cadence.NewArray(vals).WithType(cadence.NewVariableSizedArrayType(cadence.StringType{}))
+	}
+
+	return cadence.NewArray(assignmentsCdc).WithType(cadence.NewVariableSizedArrayType(cadence.NewVariableSizedArrayType(cadence.StringType{})))
+}
+
+// ConvertClusterQcsCdc converts cluster QCs from the `QuorumCertificate` type to the `ClusterQCVoteData` type.
+func ConvertClusterQcsCdc(qcs []*flow.QuorumCertificate, clusterList flow.ClusterList) ([]*flow.ClusterQCVoteData, error) {
+	voteData := make([]*flow.ClusterQCVoteData, len(qcs))
+	for i, qc := range qcs {
+		c, ok := clusterList.ByIndex(uint(i))
+		if !ok {
+			return nil, fmt.Errorf("could not get cluster list for cluster index %v", i)
+		}
+		voterIds, err := signature.DecodeSignerIndicesToIdentifiers(c.NodeIDs(), qc.SignerIndices)
+		if err != nil {
+			return nil, fmt.Errorf("could not decode signer indices: %w", err)
+		}
+		voteData[i] = &flow.ClusterQCVoteData{
+			SigData:  qc.SigData,
+			VoterIDs: voterIds,
+		}
+	}
+
+	return voteData, nil
+}
+
+// filterClusterSigners filters a list of nodes to include only nodes that will sign the QC for the
+// given cluster. The resulting list contains only nodes that are in the given cluster AND are not
+// partner nodes (i.e. we hold their private keys).
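+//
+// For example (hypothetical node): a node listed in nodeInfos with type
+// bootstrap.NodeInfoTypePublic is skipped even if it is in the cluster, since
+// we do not hold its private staking key and cannot sign on its behalf.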
+func filterClusterSigners(cluster flow.IdentitySkeletonList, nodeInfos []model.NodeInfo) []model.NodeInfo {
+
+	var filtered []model.NodeInfo
+	for _, node := range nodeInfos {
+		_, isInCluster := cluster.ByNodeID(node.NodeID)
+		isNotPartner := node.Type() == model.NodeInfoTypePrivate
+
+		if isInCluster && isNotPartner {
+			filtered = append(filtered, node)
+		}
+	}
+
+	return filtered
+}
diff --git a/cmd/util/cmd/common/node_info.go b/cmd/util/cmd/common/node_info.go
new file mode 100644
index 00000000000..061741d0955
--- /dev/null
+++ b/cmd/util/cmd/common/node_info.go
@@ -0,0 +1,226 @@
+package common
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/model/bootstrap"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+// ReadFullPartnerNodeInfos reads partner node info and partner weight information from the specified paths and constructs
+// a list of full bootstrap.NodeInfo for each partner node.
+// Args:
+// - log: logger used to log debug information.
+// - partnerWeightsPath: path to partner weights configuration file.
+// - partnerNodeInfoDir: path to partner nodes configuration file.
+// Returns:
+// - []bootstrap.NodeInfo: the generated node info list. (public information, private keys not set)
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func ReadFullPartnerNodeInfos(log zerolog.Logger, partnerWeightsPath, partnerNodeInfoDir string) ([]bootstrap.NodeInfo, error) {
+	partners, err := ReadPartnerNodeInfos(partnerNodeInfoDir)
+	if err != nil {
+		return nil, err
+	}
+	log.Info().Msgf("read %d partner node configuration files", len(partners))
+
+	weights, err := ReadPartnerWeights(partnerWeightsPath)
+	if err != nil {
+		return nil, err
+	}
+	log.Info().Msgf("read %d weights for partner nodes", len(weights))
+
+	var nodes []bootstrap.NodeInfo
+	for _, partner := range partners {
+		// validate every single partner node
+		err = ValidateNodeID(partner.NodeID)
+		if err != nil {
+			return nil, fmt.Errorf("invalid node ID: %s", partner.NodeID)
+		}
+		err = ValidateNetworkPubKey(partner.NetworkPubKey)
+		if err != nil {
+			return nil, fmt.Errorf("invalid network public key: %s", partner.NetworkPubKey)
+		}
+		err = ValidateStakingPubKey(partner.StakingPubKey)
+		if err != nil {
+			return nil, fmt.Errorf("invalid staking public key: %s", partner.StakingPubKey)
+		}
+
+		weight := weights[partner.NodeID]
+		if valid := ValidateWeight(weight); !valid {
+			return nil, fmt.Errorf("invalid partner weight: %d", weight)
+		}
+
+		if weight != flow.DefaultInitialWeight {
+			log.Warn().Msgf("partner node (id=%x) has non-default weight (%d != %d)", partner.NodeID, weight, flow.DefaultInitialWeight)
+		}
+
+		node := bootstrap.NewPublicNodeInfo(
+			partner.NodeID,
+			partner.Role,
+			partner.Address,
+			weight,
+			partner.NetworkPubKey.PublicKey,
+			partner.StakingPubKey.PublicKey,
+		)
+		nodes = append(nodes, node)
+	}
+
+	return nodes, nil
+}
+
+// ReadPartnerWeights reads the partner weights configuration file and returns the PartnerWeights map.
+// Args:
+// - partnerWeightsPath: path to partner weights configuration file.
+// Returns:
+// - PartnerWeights: map from NodeID → node's weight.
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
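+//
+// The expected file contents follow from the PartnerWeights type (declared in
+// utils.go): a single JSON object mapping hex-encoded node IDs to weights,
+// e.g. (hypothetical ID) {"0f1e...9a": 100}.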
+func ReadPartnerWeights(partnerWeightsPath string) (PartnerWeights, error) {
+	var weights PartnerWeights
+
+	err := ReadJSON(partnerWeightsPath, &weights)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read partner weights json: %w", err)
+	}
+	return weights, nil
+}
+
+// ReadPartnerNodeInfos reads the partner node info from the configuration files and returns a list of bootstrap.NodeInfoPub.
+// Args:
+// - partnerNodeInfoDir: path to partner nodes configuration file.
+// Returns:
+// - []bootstrap.NodeInfoPub: the generated partner node info list.
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func ReadPartnerNodeInfos(partnerNodeInfoDir string) ([]bootstrap.NodeInfoPub, error) {
+	var partners []bootstrap.NodeInfoPub
+	files, err := FilesInDir(partnerNodeInfoDir)
+	if err != nil {
+		return nil, fmt.Errorf("could not read partner node infos: %w", err)
+	}
+	for _, f := range files {
+		// skip files that do not include node-infos
+		if !strings.Contains(f, bootstrap.PathPartnerNodeInfoPrefix) {
+			continue
+		}
+		// read file and append to partners
+		var p bootstrap.NodeInfoPub
+		err = ReadJSON(f, &p)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read node info: %w", err)
+		}
+		partners = append(partners, p)
+	}
+	return partners, nil
+}
+
+// ReadFullInternalNodeInfos reads internal node info and internal node weight information from the specified paths and constructs
+// a list of full bootstrap.NodeInfo for each internal node.
+// Args:
+// - log: logger used to log debug information.
+// - internalNodePrivInfoDir: path to internal nodes private info.
+// - internalWeightsConfig: path to internal weights configuration file.
+// Returns:
+// - []bootstrap.NodeInfo: the generated node info list. Caution: contains private keys!
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func ReadFullInternalNodeInfos(log zerolog.Logger, internalNodePrivInfoDir, internalWeightsConfig string) ([]bootstrap.NodeInfo, error) {
+	privInternals, err := ReadInternalNodeInfos(internalNodePrivInfoDir)
+	if err != nil {
+		return nil, err
+	}
+
+	log.Info().Msgf("read %v internal private node-info files", len(privInternals))
+
+	weights := internalWeightsByAddress(log, internalWeightsConfig)
+	log.Info().Msgf("read %d weights for internal nodes", len(weights))
+
+	var nodes []bootstrap.NodeInfo
+	for _, internal := range privInternals {
+		// check if address is valid format
+		ValidateAddressFormat(log, internal.Address)
+
+		// validate every single internal node
+		err := ValidateNodeID(internal.NodeID)
+		if err != nil {
+			return nil, fmt.Errorf("invalid internal node ID: %s", internal.NodeID)
+		}
+		weight := weights[internal.Address]
+
+		if valid := ValidateWeight(weight); !valid {
+			return nil, fmt.Errorf("invalid internal node weight: %d", weight)
+		}
+		if weight != flow.DefaultInitialWeight {
+			log.Warn().Msgf("internal node (id=%x) has non-default weight (%d != %d)", internal.NodeID, weight, flow.DefaultInitialWeight)
+		}
+
+		node := bootstrap.NewPrivateNodeInfo(
+			internal.NodeID,
+			internal.Role,
+			internal.Address,
+			weight,
+			internal.NetworkPrivKey,
+			internal.StakingPrivKey,
+		)
+
+		nodes = append(nodes, node)
+	}
+
+	return nodes, nil
+}
+
+// ReadInternalNodeInfos reads our internal node private infos generated by the `keygen` command and returns them.
+// Args:
+// - internalNodePrivInfoDir: path to internal nodes private info.
+// Returns:
+// - []bootstrap.NodeInfoPriv: the generated private node info list. Caution: contains private keys!
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func ReadInternalNodeInfos(internalNodePrivInfoDir string) ([]bootstrap.NodeInfoPriv, error) {
+	var internalPrivInfos []bootstrap.NodeInfoPriv
+
+	// get files in internal priv node infos directory
+	files, err := FilesInDir(internalNodePrivInfoDir)
+	if err != nil {
+		return nil, fmt.Errorf("could not read internal node infos: %w", err)
+	}
+
+	// for each of the files
+	for _, f := range files {
+		// skip files that do not include node-infos
+		if !strings.Contains(f, bootstrap.PathPrivNodeInfoPrefix) {
+			continue
+		}
+
+		// read file and append to internal private node infos
+		var p bootstrap.NodeInfoPriv
+		err = ReadJSON(f, &p)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read json: %w", err)
+		}
+		internalPrivInfos = append(internalPrivInfos, p)
+	}
+
+	return internalPrivInfos, nil
+}
+
+// internalWeightsByAddress returns a mapping from node address to weight for internal nodes.
+func internalWeightsByAddress(log zerolog.Logger, config string) map[string]uint64 {
+	// read json
+	var configs []bootstrap.NodeConfig
+	err := ReadJSON(config, &configs)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to read json")
+	}
+	log.Info().Interface("config", configs).Msgf("read internal node configurations")
+
+	weights := make(map[string]uint64)
+	for _, config := range configs {
+		if _, ok := weights[config.Address]; !ok {
+			weights[config.Address] = config.Weight
+		} else {
+			log.Error().Msgf("duplicate internal node address %s", config.Address)
+		}
+	}
+
+	return weights
+}
diff --git a/cmd/util/cmd/common/snapshot.go b/cmd/util/cmd/common/snapshot.go
new file mode 100644
index 00000000000..5d73895d5ff
--- /dev/null
+++ b/cmd/util/cmd/common/snapshot.go
@@ -0,0 +1,114 @@
+package common
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/rs/zerolog"
+	"github.com/sethvargo/go-retry"
+
+	"github.com/onflow/flow-go-sdk/access/grpc"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol"
+	"github.com/onflow/flow-go/state/protocol/inmem"
+)
+
+const getSnapshotTimeout = 30 * time.Second
+
+// GetProtocolSnapshot is a callback that gets the latest finalized protocol snapshot.
+type GetProtocolSnapshot func(ctx context.Context) (protocol.Snapshot, error)
+
+// GetSnapshot will attempt to get the latest finalized protocol snapshot using the given Flow client.
+func GetSnapshot(ctx context.Context, client *grpc.Client) (*inmem.Snapshot, error) {
+	ctx, cancel := context.WithTimeout(ctx, getSnapshotTimeout)
+	defer cancel()
+
+	b, err := client.GetLatestProtocolStateSnapshot(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get latest finalized protocol state snapshot during pre-initialization: %w", err)
+	}
+
+	var snapshotEnc inmem.EncodableSnapshot
+	err = json.Unmarshal(b, &snapshotEnc)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal protocol state snapshot: %w", err)
+	}
+
+	snapshot := inmem.SnapshotFromEncodable(snapshotEnc)
+	return snapshot, nil
+}
+
+// GetSnapshotAtEpochAndPhase will get the latest finalized protocol snapshot and check the current epoch and epoch phase.
+// If we are past the target epoch and epoch phase, we exit the retry mechanism immediately.
+// If not, we check the snapshot at the specified interval until we reach the target epoch and phase.
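+// For example (hypothetical values): with startupEpoch=100 and
+// startupEpochPhase=EpochPhaseSetup, polling continues until a snapshot reports
+// either epoch 100 in the setup (or a later) phase, or any epoch counter above
+// 100; that snapshot is then returned.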
+// Args:
+// - ctx: context used when getting the snapshot from the network.
+// - log: the logger instance.
+// - startupEpoch: the desired epoch in which to take a snapshot for startup.
+// - startupEpochPhase: the desired epoch phase in which to take a snapshot for startup.
+// - retryInterval: sleep interval used between retries of getting the snapshot from the network in our desired epoch and epoch phase.
+// - getSnapshot: func used to get the snapshot.
+// Returns:
+// - protocol.Snapshot: the protocol snapshot.
+// - error: if any error occurs. Any error returned from this function is irrecoverable.
+func GetSnapshotAtEpochAndPhase(ctx context.Context, log zerolog.Logger, startupEpoch uint64, startupEpochPhase flow.EpochPhase, retryInterval time.Duration, getSnapshot GetProtocolSnapshot) (protocol.Snapshot, error) {
+	start := time.Now()
+
+	log = log.With().
+		Uint64("target_epoch_counter", startupEpoch).
+		Str("target_epoch_phase", startupEpochPhase.String()).
+		Logger()
+
+	log.Info().Msg("starting dynamic startup - waiting until target epoch/phase to start...")
+
+	var snapshot protocol.Snapshot
+	var err error
+
+	backoff := retry.NewConstant(retryInterval)
+	err = retry.Do(ctx, backoff, func(ctx context.Context) error {
+		snapshot, err = getSnapshot(ctx)
+		if err != nil {
+			err = fmt.Errorf("failed to get protocol snapshot: %w", err)
+			log.Error().Err(err).Msg("could not get protocol snapshot")
+			return retry.RetryableError(err)
+		}
+
+		// if we encounter any errors interpreting the snapshot, something went wrong; stop retrying
+		currEpochCounter, err := snapshot.Epochs().Current().Counter()
+		if err != nil {
+			return fmt.Errorf("failed to get the current epoch counter: %w", err)
+		}
+
+		currEpochPhase, err := snapshot.Phase()
+		if err != nil {
+			return fmt.Errorf("failed to get the current epoch phase: %w", err)
+		}
+
+		// check if we are in or past the target epoch and phase
+		if currEpochCounter > startupEpoch || (currEpochCounter == startupEpoch && currEpochPhase >= startupEpochPhase) {
+			log.Info().
+				Dur("time-waiting", time.Since(start)).
+				Uint64("current-epoch", currEpochCounter).
+				Str("current-epoch-phase", currEpochPhase.String()).
+				Msg("finished dynamic startup - reached desired epoch and phase")
+
+			return nil
+		}
+
+		// wait then poll for latest snapshot again
+		log.Info().
+			Dur("time-waiting", time.Since(start)).
+			Uint64("current-epoch", currEpochCounter).
+			Str("current-epoch-phase", currEpochPhase.String()).
+ Msgf("waiting for epoch %d and phase %s", startupEpoch, startupEpochPhase.String()) + + return retry.RetryableError(fmt.Errorf("dynamic startup epoch and epoch phase not reached")) + }) + if err != nil { + return nil, fmt.Errorf("failed to wait for target epoch and phase: %w", err) + } + + return snapshot, nil +} diff --git a/cmd/util/cmd/common/utils.go b/cmd/util/cmd/common/utils.go new file mode 100644 index 00000000000..f5b9570071e --- /dev/null +++ b/cmd/util/cmd/common/utils.go @@ -0,0 +1,180 @@ +package common + +import ( + "encoding/json" + "fmt" + "net" + "os" + "path/filepath" + "strconv" + + "github.com/multiformats/go-multiaddr" + "github.com/rs/zerolog" + + "github.com/onflow/crypto" + + "github.com/onflow/flow-go/model/bootstrap" + "github.com/onflow/flow-go/model/encodable" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/network/p2p/utils" + "github.com/onflow/flow-go/utils/io" +) + +func FilesInDir(dir string) ([]string, error) { + exists, err := PathExists(dir) + if err != nil { + return nil, fmt.Errorf("could not check if dir exists: %w", err) + } + + if !exists { + return nil, fmt.Errorf("dir %v does not exist", dir) + } + + var files []string + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if !info.IsDir() { + files = append(files, path) + } + return nil + }) + return files, err +} + +// PathExists +func PathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func ReadJSON(path string, target interface{}) error { + dat, err := io.ReadFile(path) + if err != nil { + return fmt.Errorf("cannot read json: %w", err) + } + err = json.Unmarshal(dat, target) + if err != nil { + return fmt.Errorf("cannot unmarshal json in file %s: %w", path, err) + } + return nil +} + +func WriteJSON(path string, out string, data interface{}) error { + bz, err := json.MarshalIndent(data, "", " ") + if err != nil { + return fmt.Errorf("cannot marshal json: %w", err) + } + + return WriteText(path, out, bz) +} + +func WriteText(path string, out string, data []byte) error { + path = filepath.Join(out, path) + + err := os.MkdirAll(filepath.Dir(path), 0755) + if err != nil { + return fmt.Errorf("could not create output dir: %w", err) + } + + err = os.WriteFile(path, data, 0644) + if err != nil { + return fmt.Errorf("could not write file: %w", err) + } + return nil +} + +func PubKeyToString(key crypto.PublicKey) string { + return fmt.Sprintf("%x", key.Encode()) +} + +func NodeCountByRole(nodes []bootstrap.NodeInfo) map[flow.Role]uint16 { + roleCounts := map[flow.Role]uint16{ + flow.RoleCollection: 0, + flow.RoleConsensus: 0, + flow.RoleExecution: 0, + flow.RoleVerification: 0, + flow.RoleAccess: 0, + } + for _, node := range nodes { + roleCounts[node.Role] = roleCounts[node.Role] + 1 + } + + return roleCounts +} + +// ValidateAddressFormat validates the address provided by pretty much doing what the network layer would do before +// starting the node +func ValidateAddressFormat(log zerolog.Logger, address string) { + checkErr := func(err error) { + if err != nil { + log.Fatal().Err(err).Str("address", address).Msg("invalid address format.\n" + + `Address needs to be in the format hostname:port or ip:port e.g. 
"flow.com:3569"`) + } + } + + // split address into ip/hostname and port + ip, port, err := net.SplitHostPort(address) + checkErr(err) + + // check that port number is indeed a number + _, err = strconv.Atoi(port) + checkErr(err) + + // create a libp2p address from the ip and port + lp2pAddr := utils.MultiAddressStr(ip, port) + _, err = multiaddr.NewMultiaddr(lp2pAddr) + checkErr(err) +} + +// ValidateNodeID returns an error if node ID is non-zero. +// Args: +// - nodeID: the node ID to validate. +// Returns: +// - error: if node id is the zero value. +func ValidateNodeID(nodeID flow.Identifier) error { + if nodeID == flow.ZeroID { + return fmt.Errorf("NodeID must not be zero") + } + return nil +} + +// ValidateNetworkPubKey returns an error if network public key is nil. +// Args: +// - key: the public key. +// Returns: +// - error: if the network key is nil. +func ValidateNetworkPubKey(key encodable.NetworkPubKey) error { + if key.PublicKey == nil { + return fmt.Errorf("network public key must not be nil") + } + return nil +} + +// ValidateStakingPubKey returns an error if the staking key is nil. +// Args: +// - key: the public key. +// Returns: +// - error: if the staking key is nil. +func ValidateStakingPubKey(key encodable.StakingPubKey) error { + if key.PublicKey == nil { + return fmt.Errorf("staking public key must not be nil") + } + return nil +} + +// ValidateWeight returns true if weight is greater than 0. +// Args: +// - weight: the weight to check. +// Returns: +// - bool: true if weight is greater than 0. +func ValidateWeight(weight uint64) bool { + return weight > 0 +} + +// PartnerWeights is the format of the JSON file specifying partner node weights. +type PartnerWeights map[flow.Identifier]uint64 diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go new file mode 100644 index 00000000000..049a8657910 --- /dev/null +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -0,0 +1,249 @@ +package cmd + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + "github.com/onflow/cadence" + + "github.com/onflow/flow-go/cmd/bootstrap/run" + "github.com/onflow/flow-go/cmd/util/cmd/common" + epochcmdutil "github.com/onflow/flow-go/cmd/util/cmd/epochs/utils" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" + "github.com/onflow/flow-go/state/protocol/inmem" +) + +// generateRecoverEpochTxArgsCmd represents a command to generate the data needed to submit an epoch-recovery transaction +// to the network when it is in EFM (epoch fallback mode). +// EFM can be exited only by a special service event, EpochRecover, which initially originates from a manual service account transaction. +// The full epoch data must be generated manually and submitted with this transaction in order for an +// EpochRecover event to be emitted. This command retrieves the current protocol state identities, computes the cluster assignment using those +// identities, generates the cluster QCs and retrieves the DKG key vector of the last successful epoch. +// This recovery process has some constraints: +// - The RecoveryEpoch must have exactly the same consensus committee as participated in the most recent successful DKG. 
+// - The RecoveryEpoch must contain enough "internal" collection nodes so that all clusters contain a supermajority of "internal" collection nodes (same constraint as sporks)
+var (
+	generateRecoverEpochTxArgsCmd = &cobra.Command{
+		Use:   "efm-recover-tx-args",
+		Short: "Generates recover epoch transaction arguments",
+		Long: `
+Generates transaction arguments for the epoch recovery transaction.
+The epoch recovery transaction is used to recover from any failure in the epoch transition process without requiring a spork.
+This recovery process has some constraints:
+- The RecoveryEpoch must have exactly the same consensus committee as participated in the most recent successful DKG.
+- The RecoveryEpoch must contain enough "internal" collection nodes so that all clusters contain a supermajority of "internal" collection nodes (same constraint as sporks)
+`,
+		Run: generateRecoverEpochTxArgs(getSnapshot),
+	}
+
+	flagAnAddress                string
+	flagAnPubkey                 string
+	flagInternalNodePrivInfoDir  string
+	flagNodeConfigJson           string
+	flagCollectionClusters       int
+	flagNumViewsInEpoch          uint64
+	flagNumViewsInStakingAuction uint64
+	flagEpochCounter             uint64
)

+func init() {
+	rootCmd.AddCommand(generateRecoverEpochTxArgsCmd)
+	err := addGenerateRecoverEpochTxArgsCmdFlags()
+	if err != nil {
+		panic(err)
+	}
+}
+
+func addGenerateRecoverEpochTxArgsCmdFlags() error {
+	generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 0,
+		"number of collection clusters")
+	// required parameters for network configuration and generation of root node identities
+	generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagNodeConfigJson, "config", "",
+		"path to a JSON file containing multiple node configurations (fields Role, Address, Weight)")
+	generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagInternalNodePrivInfoDir, "internal-priv-dir", "", "path to directory "+
+		"containing the output from the `keygen` command for internal nodes")
+	generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 0, "length of each epoch measured in views")
+	generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 0, "length of the epoch staking phase measured in views")
+	generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "the epoch counter used to generate the root cluster block")
+
+	err := generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-length")
+	if err != nil {
+		return fmt.Errorf("failed to mark epoch-length flag as required")
+	}
+	err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-staking-phase-length")
+	if err != nil {
+		return fmt.Errorf("failed to mark epoch-staking-phase-length flag as required")
+	}
+	err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-counter")
+	if err != nil {
+		return fmt.Errorf("failed to mark epoch-counter flag as required")
+	}
+	err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("collection-clusters")
+	if err != nil {
+		return fmt.Errorf("failed to mark collection-clusters flag as required")
+	}
+	return nil
+}
+
+func getSnapshot() *inmem.Snapshot {
+	// get flow client with secure client connection to download protocol snapshot from access node
+	config, err := common.NewFlowClientConfig(flagAnAddress, flagAnPubkey, flow.ZeroID, false)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to create flow client config")
+	}
+
+	flowClient, err := common.FlowClient(config)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to create flow client")
+	}
+
+	snapshot, err := common.GetSnapshot(context.Background(), flowClient)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to get protocol snapshot from access node")
+	}
+
+	return snapshot
+}
+
+// generateRecoverEpochTxArgs generates the recover epoch transaction arguments from a root protocol state snapshot
+// and writes them, JSON-encoded, to the command's output (stdout by default).
+func generateRecoverEpochTxArgs(getSnapshot func() *inmem.Snapshot) func(cmd *cobra.Command, args []string) {
+	return func(cmd *cobra.Command, args []string) {
+		stdout := cmd.OutOrStdout()
+
+		// extract the arguments for the recover epoch tx from the snapshot
+		txArgs := extractRecoverEpochArgs(getSnapshot())
+
+		// encode to JSON
+		encodedTxArgs, err := epochcmdutil.EncodeArgs(txArgs)
+		if err != nil {
+			log.Fatal().Err(err).Msg("could not encode recover epoch transaction arguments")
+		}
+
+		// write JSON args to stdout
+		_, err = stdout.Write(encodedTxArgs)
+		if err != nil {
+			log.Fatal().Err(err).Msg("could not write jsoncdc encoded arguments")
+		}
+	}
+}
+
+// extractRecoverEpochArgs extracts the required transaction arguments for the `recoverEpoch` transaction.
+func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value {
+	epoch := snapshot.Epochs().Current()
+
+	currentEpochIdentities, err := snapshot.Identities(filter.IsValidProtocolParticipant)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to get valid protocol participants from snapshot")
+	}
+
+	// separate collector nodes by internal and partner nodes
+	collectors := currentEpochIdentities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
+	internalCollectors := make(flow.IdentityList, 0)
+	partnerCollectors := make(flow.IdentityList, 0)
+
+	log.Info().Msg("collecting internal node network and staking keys")
+	internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagNodeConfigJson)
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to read full internal node infos")
+	}
+
+	internalNodesMap := make(map[flow.Identifier]struct{})
+	for _, node := range internalNodes {
+		if !currentEpochIdentities.Exists(node.Identity()) {
+			log.Fatal().Msg(fmt.Sprintf("node ID found in internal node infos missing from protocol snapshot identities: %s", node.NodeID))
+		}
+		internalNodesMap[node.NodeID] = struct{}{}
+	}
+	log.Info().Msg("")
+
+	for _, collector := range collectors {
+		if _, ok := internalNodesMap[collector.NodeID]; ok {
+			internalCollectors = append(internalCollectors, collector)
+		} else {
+			partnerCollectors = append(partnerCollectors, collector)
+		}
+	}
+
+	currentEpochDKG, err := epoch.DKG()
+	if err != nil {
+		log.Fatal().Err(err).Msg("failed to get DKG for current epoch")
+	}
+
+	log.Info().Msg("computing collection node clusters")
+
+	assignments, clusters, err := common.ConstructClusterAssignment(log, partnerCollectors, internalCollectors, flagCollectionClusters)
+	if err != nil {
+		log.Fatal().Err(err).Msg("unable to generate cluster assignment")
+	}
+	log.Info().Msg("")
+
+	log.Info().Msg("constructing root blocks for collection node clusters")
+	clusterBlocks := run.GenerateRootClusterBlocks(flagEpochCounter, clusters)
+	log.Info().Msg("")
+
+	log.Info().Msg("constructing root QCs for collection node clusters")
+	clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks)
+	log.Info().Msg("")
+
+	dkgPubKeys := make([]cadence.Value, 0)
+	nodeIds := make([]cadence.Value, 0)
+
+	// NOTE: The RecoveryEpoch will re-use the last successful DKG output.
This means that the consensus + // committee in the RecoveryEpoch must be identical to the committee which participated in that DKG. + dkgGroupKeyCdc, cdcErr := cadence.NewString(currentEpochDKG.GroupKey().String()) + if cdcErr != nil { + log.Fatal().Err(cdcErr).Msg("failed to get dkg group key cadence string") + } + dkgPubKeys = append(dkgPubKeys, dkgGroupKeyCdc) + for _, id := range currentEpochIdentities { + if id.GetRole() == flow.RoleConsensus { + dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(id.GetNodeID()) + if keyShareErr != nil { + log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", id.GetNodeID())) + } + dkgPubKeyCdc, cdcErr := cadence.NewString(dkgPubKey.String()) + if cdcErr != nil { + log.Fatal().Err(cdcErr).Msg(fmt.Sprintf("failed to get dkg pub key cadence string for node: %s", id.GetNodeID())) + } + dkgPubKeys = append(dkgPubKeys, dkgPubKeyCdc) + } + nodeIdCdc, err := cadence.NewString(id.GetNodeID().String()) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("failed to convert node ID to cadence string: %s", id.GetNodeID())) + } + nodeIds = append(nodeIds, nodeIdCdc) + } + + // @TODO: cluster qcs are converted into flow.ClusterQCVoteData types, + // we need a corresponding type in cadence on the FlowClusterQC contract + // to store this struct. + _, err = common.ConvertClusterQcsCdc(clusterQCs, clusters) + if err != nil { + log.Fatal().Err(err).Msg("failed to convert cluster qcs to cadence type") + } + + currEpochFinalView, err := epoch.FinalView() + if err != nil { + log.Fatal().Err(err).Msg("failed to get final view of current epoch") + } + + args := []cadence.Value{ + // epoch start view + cadence.NewUInt64(currEpochFinalView + 1), + // staking phase end view + cadence.NewUInt64(currEpochFinalView + flagNumViewsInStakingAuction), + // epoch end view + cadence.NewUInt64(currEpochFinalView + flagNumViewsInEpoch), + // dkg pub keys + cadence.NewArray(dkgPubKeys), + // node ids + cadence.NewArray(nodeIds), + // clusters, + common.ConvertClusterAssignmentsCdc(assignments), + } + + return args +} diff --git a/cmd/util/cmd/epochs/cmd/recover_test.go b/cmd/util/cmd/epochs/cmd/recover_test.go new file mode 100644 index 00000000000..980a9788a55 --- /dev/null +++ b/cmd/util/cmd/epochs/cmd/recover_test.go @@ -0,0 +1,63 @@ +package cmd + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/model/flow" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/state/protocol/inmem" + "github.com/onflow/flow-go/utils/unittest" +) + +// TestRecoverEpochHappyPath ensures recover epoch transaction arguments are generated as expected. +func TestRecoverEpochHappyPath(t *testing.T) { + // tests that given the root snapshot, the command + // writes the expected arguments to stdout. 
+ utils.RunWithSporkBootstrapDir(t, func(bootDir, partnerDir, partnerWeights, internalPrivDir, configPath string) { + internalNodes, err := common.ReadFullInternalNodeInfos(log, internalPrivDir, configPath) + require.NoError(t, err) + partnerNodes, err := common.ReadFullPartnerNodeInfos(log, partnerWeights, partnerDir) + require.NoError(t, err) + + allNodeIds := make(flow.IdentityList, 0) + for _, node := range internalNodes { + allNodeIds = append(allNodeIds, node.Identity()) + } + for _, node := range partnerNodes { + allNodeIds = append(allNodeIds, node.Identity()) + } + + // create a root snapshot + rootSnapshot := unittest.RootSnapshotFixture(allNodeIds) + + snapshotFn := func() *inmem.Snapshot { return rootSnapshot } + + // run command with overwritten stdout + stdout := bytes.NewBuffer(nil) + generateRecoverEpochTxArgsCmd.SetOut(stdout) + + flagInternalNodePrivInfoDir = internalPrivDir + flagNodeConfigJson = configPath + flagCollectionClusters = 2 + flagNumViewsInEpoch = 4000 + flagNumViewsInStakingAuction = 100 + flagEpochCounter = 2 + + generateRecoverEpochTxArgs(snapshotFn)(generateRecoverEpochTxArgsCmd, nil) + + // read output from stdout + var outputTxArgs []interface{} + err = json.NewDecoder(stdout).Decode(&outputTxArgs) + require.NoError(t, err) + // compare to expected values + expectedArgs := extractRecoverEpochArgs(rootSnapshot) + unittest.VerifyCdcArguments(t, expectedArgs[:len(expectedArgs)-1], outputTxArgs[:len(expectedArgs)-1]) + // @TODO validate cadence values for generated cluster assignments and clusters + }) +} diff --git a/cmd/util/cmd/epochs/cmd/reset_test.go b/cmd/util/cmd/epochs/cmd/reset_test.go index 25983e5cf61..30e7d0178f2 100644 --- a/cmd/util/cmd/epochs/cmd/reset_test.go +++ b/cmd/util/cmd/epochs/cmd/reset_test.go @@ -11,9 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/cadence" - jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/utils/unittest" @@ -50,7 +47,7 @@ func TestReset_LocalSnapshot(t *testing.T) { // compare to expected values expectedArgs := extractResetEpochArgs(rootSnapshot) - verifyArguments(t, expectedArgs, outputTxArgs) + unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) }) }) @@ -98,7 +95,7 @@ func TestReset_BucketSnapshot(t *testing.T) { rootSnapshot, err := getSnapshotFromBucket(fmt.Sprintf(rootSnapshotBucketURL, flagBucketNetworkName)) require.NoError(t, err) expectedArgs := extractResetEpochArgs(rootSnapshot) - verifyArguments(t, expectedArgs, outputTxArgs) + unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) }) // should output arguments to stdout, including specified payout @@ -120,7 +117,7 @@ func TestReset_BucketSnapshot(t *testing.T) { rootSnapshot, err := getSnapshotFromBucket(fmt.Sprintf(rootSnapshotBucketURL, flagBucketNetworkName)) require.NoError(t, err) expectedArgs := extractResetEpochArgs(rootSnapshot) - verifyArguments(t, expectedArgs, outputTxArgs) + unittest.VerifyCdcArguments(t, expectedArgs, outputTxArgs) }) // with a missing snapshot, should log an error @@ -139,22 +136,6 @@ func TestReset_BucketSnapshot(t *testing.T) { }) } -func verifyArguments(t *testing.T, expected []cadence.Value, actual []interface{}) { - - for index, arg := range actual { - - // marshal to bytes - bz, err := json.Marshal(arg) - require.NoError(t, err) - - // parse cadence value - decoded, err := jsoncdc.Decode(nil, bz) - require.NoError(t, 
err) - - assert.Equal(t, expected[index], decoded) - } -} - func writeRootSnapshot(bootDir string, snapshot *inmem.Snapshot) error { rootSnapshotPath := filepath.Join(bootDir, bootstrap.PathRootProtocolStateSnapshot) return writeJSON(rootSnapshotPath, snapshot.Encodable()) diff --git a/cmd/util/cmd/execution-state-extract/cmd.go b/cmd/util/cmd/execution-state-extract/cmd.go index f2d2cab7c76..7cd4ed7bdeb 100644 --- a/cmd/util/cmd/execution-state-extract/cmd.go +++ b/cmd/util/cmd/execution-state-extract/cmd.go @@ -34,6 +34,8 @@ var ( flagLogVerboseValidationError bool flagAllowPartialStateFromPayloads bool flagContinueMigrationOnValidationError bool + flagCheckStorageHealthBeforeMigration bool + flagCheckStorageHealthAfterMigration bool flagInputPayloadFileName string flagOutputPayloadFileName string flagOutputPayloadByAddresses string @@ -82,6 +84,12 @@ func init() { Cmd.Flags().BoolVar(&flagAllowPartialStateFromPayloads, "allow-partial-state-from-payload-file", false, "allow input payload file containing partial state (e.g. not all accounts)") + Cmd.Flags().BoolVar(&flagCheckStorageHealthBeforeMigration, "check-storage-health-before", false, + "check (atree) storage health before migration") + + Cmd.Flags().BoolVar(&flagCheckStorageHealthAfterMigration, "check-storage-health-after", false, + "check (atree) storage health after migration") + Cmd.Flags().BoolVar(&flagContinueMigrationOnValidationError, "continue-migration-on-validation-errors", false, "continue migration even if validation fails") @@ -248,6 +256,14 @@ func run(*cobra.Command, []string) { log.Warn().Msgf("atree migration has verbose validation error logging enabled which may increase size of log") } + if flagCheckStorageHealthBeforeMigration { + log.Warn().Msgf("--check-storage-health-before flag is enabled and will increase duration of migration") + } + + if flagCheckStorageHealthAfterMigration { + log.Warn().Msgf("--check-storage-health-after flag is enabled and will increase duration of migration") + } + var inputMsg string if len(flagInputPayloadFileName) > 0 { // Input is payloads diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract.go b/cmd/util/cmd/execution-state-extract/execution_state_extract.go index 7277a1f110a..4b991936ae7 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract.go @@ -365,12 +365,13 @@ func newMigrations( flagValidateMigration, flagLogVerboseValidationError, flagContinueMigrationOnValidationError, + flagCheckStorageHealthBeforeMigration, + flagCheckStorageHealthAfterMigration, ), &migrators.DeduplicateContractNamesMigration{}, - // This will fix storage used discrepancies caused by the - // DeduplicateContractNamesMigration. 
+ // This will fix storage used discrepancies caused by the previous migrations &migrators.AccountUsageMigrator{}, }), } diff --git a/cmd/util/cmd/read-protocol-state/cmd/snapshot.go b/cmd/util/cmd/read-protocol-state/cmd/snapshot.go index 77a9d77777f..765a55fd02f 100644 --- a/cmd/util/cmd/read-protocol-state/cmd/snapshot.go +++ b/cmd/util/cmd/read-protocol-state/cmd/snapshot.go @@ -70,13 +70,14 @@ func runSnapshot(*cobra.Command, []string) { var protocolSnapshot protocol.Snapshot var sealedHeight uint64 var sealedCommit flow.StateCommitment + var checkpointFile string if flagCheckpointScanEndHeight < 0 { // using default end height which is the last sealed height - protocolSnapshot, sealedHeight, sealedCommit, err = commonFuncs.GenerateProtocolSnapshotForCheckpoint( + protocolSnapshot, sealedHeight, sealedCommit, checkpointFile, err = commonFuncs.GenerateProtocolSnapshotForCheckpoint( log.Logger, state, storages.Headers, storages.Seals, flagCheckpointDir, flagCheckpointScanStep) } else { // using customized end height - protocolSnapshot, sealedHeight, sealedCommit, err = commonFuncs.GenerateProtocolSnapshotForCheckpointWithHeights( + protocolSnapshot, sealedHeight, sealedCommit, checkpointFile, err = commonFuncs.GenerateProtocolSnapshotForCheckpointWithHeights( log.Logger, state, storages.Headers, storages.Seals, flagCheckpointDir, flagCheckpointScanStep, uint64(flagCheckpointScanEndHeight)) } @@ -85,7 +86,7 @@ func runSnapshot(*cobra.Command, []string) { } snapshot = protocolSnapshot - log.Info().Msgf("snapshot found, sealed height %v, commit %x", sealedHeight, sealedCommit) + log.Info().Msgf("snapshot found for checkpoint file %v, sealed height %v, commit %x", checkpointFile, sealedHeight, sealedCommit) } head, err := snapshot.Head() diff --git a/cmd/util/common/checkpoint.go b/cmd/util/common/checkpoint.go index bddcead9e49..098db2cc096 100644 --- a/cmd/util/common/checkpoint.go +++ b/cmd/util/common/checkpoint.go @@ -113,12 +113,12 @@ func GenerateProtocolSnapshotForCheckpoint( seals storage.Seals, checkpointDir string, blocksToSkip uint, -) (protocol.Snapshot, uint64, flow.StateCommitment, error) { +) (protocol.Snapshot, uint64, flow.StateCommitment, string, error) { // skip X blocks (i.e. 10) each time to find the block that produces the state commitment in the checkpoint file // since a checkpoint file contains 500 tries, this allows us to find the block more efficiently sealed, err := state.Sealed().Head() if err != nil { - return nil, 0, flow.DummyStateCommitment, err + return nil, 0, flow.DummyStateCommitment, "", err } endHeight := sealed.Height @@ -156,7 +156,7 @@ func GenerateProtocolSnapshotForCheckpointWithHeights( checkpointDir string, blocksToSkip uint, endHeight uint64, -) (protocol.Snapshot, uint64, flow.StateCommitment, error) { +) (protocol.Snapshot, uint64, flow.StateCommitment, string, error) { // Stop searching after 10,000 iterations or upon reaching the minimum height, whichever comes first. startHeight := uint64(0) // preventing startHeight from being negative @@ -167,7 +167,7 @@ func GenerateProtocolSnapshotForCheckpointWithHeights( checkpointFilePath, err := findLatestCheckpointFilePath(checkpointDir) if err != nil { - return nil, 0, flow.DummyStateCommitment, fmt.Errorf("could not find latest checkpoint file in directory %v: %w", checkpointDir, err) + return nil, 0, flow.DummyStateCommitment, "", fmt.Errorf("could not find latest checkpoint file in directory %v: %w", checkpointDir, err) } log.Info(). 
@@ -178,7 +178,7 @@ func GenerateProtocolSnapshotForCheckpointWithHeights( // find the height of the finalized block that produces the state commitment contained in the checkpoint file sealedHeight, commit, finalizedHeight, err := FindHeightsByCheckpoints(logger, headers, seals, checkpointFilePath, blocksToSkip, startHeight, endHeight) if err != nil { - return nil, 0, flow.DummyStateCommitment, fmt.Errorf("could not find sealed height in range [%v:%v] (blocksToSkip: %v) by checkpoints: %w", + return nil, 0, flow.DummyStateCommitment, "", fmt.Errorf("could not find sealed height in range [%v:%v] (blocksToSkip: %v) by checkpoints: %w", startHeight, endHeight, blocksToSkip, err) } @@ -186,10 +186,10 @@ func GenerateProtocolSnapshotForCheckpointWithHeights( snapshot := state.AtHeight(finalizedHeight) validSnapshot, err := snapshots.GetDynamicBootstrapSnapshot(state, snapshot) if err != nil { - return nil, 0, flow.DummyStateCommitment, fmt.Errorf("could not get dynamic bootstrap snapshot: %w", err) + return nil, 0, flow.DummyStateCommitment, "", fmt.Errorf("could not get dynamic bootstrap snapshot: %w", err) } - return validSnapshot, sealedHeight, commit, nil + return validSnapshot, sealedHeight, commit, checkpointFilePath, nil } // hashesToCommits converts a list of ledger.RootHash to a list of flow.StateCommitment diff --git a/cmd/util/ledger/migrations/atree_register_migration.go b/cmd/util/ledger/migrations/atree_register_migration.go index 97b17aca5a8..222217572ff 100644 --- a/cmd/util/ledger/migrations/atree_register_migration.go +++ b/cmd/util/ledger/migrations/atree_register_migration.go @@ -8,13 +8,12 @@ import ( runtime2 "runtime" "time" + "github.com/onflow/atree" "github.com/rs/zerolog" - "github.com/onflow/atree" "github.com/onflow/cadence/runtime" "github.com/onflow/cadence/runtime/common" "github.com/onflow/cadence/runtime/interpreter" - "github.com/onflow/cadence/runtime/stdlib" "github.com/onflow/flow-go/cmd/util/ledger/reporters" "github.com/onflow/flow-go/fvm/environment" @@ -32,13 +31,14 @@ type AtreeRegisterMigrator struct { sampler zerolog.Sampler rw reporters.ReportWriter - rwf reporters.ReportWriterFactory nWorkers int validateMigratedValues bool logVerboseValidationError bool continueMigrationOnValidationError bool + checkStorageHealthBeforeMigration bool + checkStorageHealthAfterMigration bool } var _ AccountBasedMigration = (*AtreeRegisterMigrator)(nil) @@ -49,17 +49,20 @@ func NewAtreeRegisterMigrator( validateMigratedValues bool, logVerboseValidationError bool, continueMigrationOnValidationError bool, + checkStorageHealthBeforeMigration bool, + checkStorageHealthAfterMigration bool, ) *AtreeRegisterMigrator { sampler := util2.NewTimedSampler(30 * time.Second) migrator := &AtreeRegisterMigrator{ sampler: sampler, - rwf: rwf, rw: rwf.ReportWriter("atree-register-migrator"), validateMigratedValues: validateMigratedValues, logVerboseValidationError: logVerboseValidationError, continueMigrationOnValidationError: continueMigrationOnValidationError, + checkStorageHealthBeforeMigration: checkStorageHealthBeforeMigration, + checkStorageHealthAfterMigration: checkStorageHealthAfterMigration, } return migrator @@ -89,11 +92,22 @@ func (m *AtreeRegisterMigrator) MigrateAccount( oldPayloads []*ledger.Payload, ) ([]*ledger.Payload, error) { // create all the runtime components we need for the migration - mr, err := newMigratorRuntime(address, oldPayloads) + mr, err := NewAtreeRegisterMigratorRuntime(address, oldPayloads) if err != nil { return nil, fmt.Errorf("failed to create 
migrator runtime: %w", err) } + // Check storage health before migration, if enabled. + if m.checkStorageHealthBeforeMigration { + err = checkStorageHealth(address, mr.Storage, oldPayloads) + if err != nil { + m.log.Warn(). + Err(err). + Str("account", address.Hex()). + Msg("storage health check before migration failed") + } + } + // keep track of all storage maps that were accessed // if they are empty they won't be changed, but we still need to copy them over storageMapIds := make(map[string]struct{}) @@ -144,16 +158,32 @@ func (m *AtreeRegisterMigrator) MigrateAccount( }) } + // Check storage health after migration, if enabled. + if m.checkStorageHealthAfterMigration { + mr, err := NewAtreeRegisterMigratorRuntime(address, newPayloads) + if err != nil { + return nil, fmt.Errorf("failed to create migrator runtime: %w", err) + } + + err = checkStorageHealth(address, mr.Storage, newPayloads) + if err != nil { + m.log.Warn(). + Err(err). + Str("account", address.Hex()). + Msg("storage health check after migration failed") + } + } + return newPayloads, nil } func (m *AtreeRegisterMigrator) migrateAccountStorage( - mr *migratorRuntime, + mr *AtreeRegisterMigratorRuntime, storageMapIds map[string]struct{}, ) (map[flow.RegisterID]flow.RegisterValue, error) { // iterate through all domains and migrate them - for _, domain := range domains { + for _, domain := range allStorageMapDomains { err := m.convertStorageDomain(mr, storageMapIds, domain) if err != nil { return nil, fmt.Errorf("failed to convert storage domain %s : %w", domain, err) @@ -175,7 +205,7 @@ func (m *AtreeRegisterMigrator) migrateAccountStorage( } func (m *AtreeRegisterMigrator) convertStorageDomain( - mr *migratorRuntime, + mr *AtreeRegisterMigratorRuntime, storageMapIds map[string]struct{}, domain string, ) error { @@ -253,7 +283,7 @@ func (m *AtreeRegisterMigrator) convertStorageDomain( } func (m *AtreeRegisterMigrator) validateChangesAndCreateNewRegisters( - mr *migratorRuntime, + mr *AtreeRegisterMigratorRuntime, changes map[flow.RegisterID]flow.RegisterValue, storageMapIds map[string]struct{}, ) ([]*ledger.Payload, error) { @@ -328,7 +358,7 @@ func (m *AtreeRegisterMigrator) validateChangesAndCreateNewRegisters( continue } - if _, isADomainKey := domainsLookupMap[id.Key]; isADomainKey { + if _, isADomainKey := allStorageMapDomainsSet[id.Key]; isADomainKey { // this is expected. 
Move it to the new payloads newPayloads = append(newPayloads, value) continue @@ -388,7 +418,7 @@ func (m *AtreeRegisterMigrator) validateChangesAndCreateNewRegisters( } func (m *AtreeRegisterMigrator) cloneValue( - mr *migratorRuntime, + mr *AtreeRegisterMigratorRuntime, value interpreter.Value, ) (interpreter.Value, error) { @@ -426,25 +456,6 @@ func capturePanic(f func()) (err error) { return } -// convert all domains -var domains = []string{ - common.PathDomainStorage.Identifier(), - common.PathDomainPrivate.Identifier(), - common.PathDomainPublic.Identifier(), - runtime.StorageDomainContract, - stdlib.InboxStorageDomain, - stdlib.CapabilityControllerStorageDomain, -} - -var domainsLookupMap = map[string]struct{}{ - common.PathDomainStorage.Identifier(): {}, - common.PathDomainPrivate.Identifier(): {}, - common.PathDomainPublic.Identifier(): {}, - runtime.StorageDomainContract: {}, - stdlib.InboxStorageDomain: {}, - stdlib.CapabilityControllerStorageDomain: {}, -} - // migrationProblem is a struct for reporting errors type migrationProblem struct { Address string diff --git a/cmd/util/ledger/migrations/atree_register_migration_test.go b/cmd/util/ledger/migrations/atree_register_migration_test.go index 9f4017a5a57..d593e67b4b3 100644 --- a/cmd/util/ledger/migrations/atree_register_migration_test.go +++ b/cmd/util/ledger/migrations/atree_register_migration_test.go @@ -35,6 +35,8 @@ func TestAtreeRegisterMigration(t *testing.T) { true, false, false, + false, + false, ), }, ), diff --git a/cmd/util/ledger/migrations/atree_register_migrator_runtime.go b/cmd/util/ledger/migrations/atree_register_migrator_runtime.go new file mode 100644 index 00000000000..77f52d9198f --- /dev/null +++ b/cmd/util/ledger/migrations/atree_register_migrator_runtime.go @@ -0,0 +1,64 @@ +package migrations + +import ( + "fmt" + + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/runtime/interpreter" + + "github.com/onflow/flow-go/cmd/util/ledger/util" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/storage/state" + "github.com/onflow/flow-go/ledger" +) + +// NewAtreeRegisterMigratorRuntime returns a new runtime to be used with the AtreeRegisterMigrator. 
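+//
+// A minimal usage sketch (illustrative only; it assumes the account's payloads
+// have already been loaded, and error handling is elided):
+//
+//	mr, err := NewAtreeRegisterMigratorRuntime(address, payloads)
+//	if err != nil {
+//		return nil, err
+//	}
+//	// Read (but do not create) the account's storage map for a domain.
+//	m := mr.Storage.GetStorageMap(address, common.PathDomainStorage.Identifier(), false)
+//	_ = m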
+func NewAtreeRegisterMigratorRuntime( + address common.Address, + payloads []*ledger.Payload, +) ( + *AtreeRegisterMigratorRuntime, + error, +) { + snapshot, err := util.NewPayloadSnapshot(payloads) + if err != nil { + return nil, fmt.Errorf("failed to create payload snapshot: %w", err) + } + transactionState := state.NewTransactionState(snapshot, state.DefaultParameters()) + accounts := environment.NewAccounts(transactionState) + + accountsAtreeLedger := util.NewAccountsAtreeLedger(accounts) + storage := runtime.NewStorage(accountsAtreeLedger, nil) + + inter, err := interpreter.NewInterpreter( + nil, + nil, + &interpreter.Config{ + Storage: storage, + }, + ) + if err != nil { + return nil, err + } + + return &AtreeRegisterMigratorRuntime{ + Address: address, + Payloads: payloads, + Snapshot: snapshot, + TransactionState: transactionState, + Interpreter: inter, + Storage: storage, + AccountsAtreeLedger: accountsAtreeLedger, + }, nil +} + +type AtreeRegisterMigratorRuntime struct { + Snapshot *util.PayloadSnapshot + TransactionState state.NestedTransactionPreparer + Interpreter *interpreter.Interpreter + Storage *runtime.Storage + Payloads []*ledger.Payload + Address common.Address + AccountsAtreeLedger *util.AccountsAtreeLedger +} diff --git a/cmd/util/ledger/migrations/cadence_value_validation.go b/cmd/util/ledger/migrations/cadence_value_validation.go index e72985dac95..6850a1e6b13 100644 --- a/cmd/util/ledger/migrations/cadence_value_validation.go +++ b/cmd/util/ledger/migrations/cadence_value_validation.go @@ -3,15 +3,11 @@ package migrations import ( "fmt" "strings" - "time" - "github.com/onflow/atree" - "github.com/onflow/cadence" "github.com/onflow/cadence/runtime" "github.com/onflow/cadence/runtime/common" "github.com/onflow/cadence/runtime/interpreter" "github.com/rs/zerolog" - "go.opentelemetry.io/otel/attribute" "github.com/onflow/flow-go/cmd/util/ledger/util" "github.com/onflow/flow-go/ledger" @@ -37,7 +33,7 @@ func validateCadenceValues( } // Iterate through all domains and compare cadence values. - for _, domain := range domains { + for _, domain := range allStorageMapDomains { err := validateStorageDomain(address, oldRuntime, newRuntime, domain, log, verboseLogging) if err != nil { return err @@ -99,7 +95,12 @@ func validateStorageDomain( newValue := newStorageMap.ReadValue(nil, mapKey) - err := cadenceValueEqual(oldRuntime.Interpreter, oldValue, newRuntime.Interpreter, newValue) + err := cadenceValueEqual( + oldRuntime.Interpreter, + oldValue, + newRuntime.Interpreter, + newValue, + ) if err != nil { if verboseLogging { log.Info(). 
@@ -112,7 +113,13 @@ func validateStorageDomain( Msgf("failed to validate value") } - return fmt.Errorf("failed to validate value for address %s, domain %s, key %s: %s", address.Hex(), domain, key, err.Error()) + return fmt.Errorf( + "failed to validate value for address %s, domain %s, key %s: %s", + address.Hex(), + domain, + key, + err.Error(), + ) } } @@ -380,22 +387,13 @@ func newReadonlyStorageRuntime(payloads []*ledger.Payload) ( storage := runtime.NewStorage(readonlyLedger, nil) - env := runtime.NewBaseInterpreterEnvironment(runtime.Config{ - AccountLinkingEnabled: true, - // Attachments are enabled everywhere except for Mainnet - AttachmentsEnabled: true, - // Capability Controllers are enabled everywhere except for Mainnet - CapabilityControllersEnabled: true, - }) - - env.Configure( - &NoopRuntimeInterface{}, - runtime.NewCodesAndPrograms(), - storage, + inter, err := interpreter.NewInterpreter( nil, + nil, + &interpreter.Config{ + Storage: storage, + }, ) - - inter, err := interpreter.NewInterpreter(nil, nil, env.InterpreterConfig) if err != nil { return nil, err } @@ -405,203 +403,3 @@ func newReadonlyStorageRuntime(payloads []*ledger.Payload) ( Storage: storage, }, nil } - -// NoopRuntimeInterface is a runtime interface that can be used in migrations. -type NoopRuntimeInterface struct { -} - -func (NoopRuntimeInterface) ResolveLocation(_ []runtime.Identifier, _ runtime.Location) ([]runtime.ResolvedLocation, error) { - panic("unexpected ResolveLocation call") -} - -func (NoopRuntimeInterface) GetCode(_ runtime.Location) ([]byte, error) { - panic("unexpected GetCode call") -} - -func (NoopRuntimeInterface) GetAccountContractCode(_ common.AddressLocation) ([]byte, error) { - panic("unexpected GetAccountContractCode call") -} - -func (NoopRuntimeInterface) GetOrLoadProgram(_ runtime.Location, _ func() (*interpreter.Program, error)) (*interpreter.Program, error) { - panic("unexpected GetOrLoadProgram call") -} - -func (NoopRuntimeInterface) MeterMemory(_ common.MemoryUsage) error { - return nil -} - -func (NoopRuntimeInterface) MeterComputation(_ common.ComputationKind, _ uint) error { - return nil -} - -func (NoopRuntimeInterface) GetValue(_, _ []byte) (value []byte, err error) { - panic("unexpected GetValue call") -} - -func (NoopRuntimeInterface) SetValue(_, _, _ []byte) (err error) { - panic("unexpected SetValue call") -} - -func (NoopRuntimeInterface) CreateAccount(_ runtime.Address) (address runtime.Address, err error) { - panic("unexpected CreateAccount call") -} - -func (NoopRuntimeInterface) AddEncodedAccountKey(_ runtime.Address, _ []byte) error { - panic("unexpected AddEncodedAccountKey call") -} - -func (NoopRuntimeInterface) RevokeEncodedAccountKey(_ runtime.Address, _ int) (publicKey []byte, err error) { - panic("unexpected RevokeEncodedAccountKey call") -} - -func (NoopRuntimeInterface) AddAccountKey(_ runtime.Address, _ *runtime.PublicKey, _ runtime.HashAlgorithm, _ int) (*runtime.AccountKey, error) { - panic("unexpected AddAccountKey call") -} - -func (NoopRuntimeInterface) GetAccountKey(_ runtime.Address, _ int) (*runtime.AccountKey, error) { - panic("unexpected GetAccountKey call") -} - -func (NoopRuntimeInterface) RevokeAccountKey(_ runtime.Address, _ int) (*runtime.AccountKey, error) { - panic("unexpected RevokeAccountKey call") -} - -func (NoopRuntimeInterface) UpdateAccountContractCode(_ common.AddressLocation, _ []byte) (err error) { - panic("unexpected UpdateAccountContractCode call") -} - -func (NoopRuntimeInterface) 
RemoveAccountContractCode(common.AddressLocation) (err error) { - panic("unexpected RemoveAccountContractCode call") -} - -func (NoopRuntimeInterface) GetSigningAccounts() ([]runtime.Address, error) { - panic("unexpected GetSigningAccounts call") -} - -func (NoopRuntimeInterface) ProgramLog(_ string) error { - panic("unexpected ProgramLog call") -} - -func (NoopRuntimeInterface) EmitEvent(_ cadence.Event) error { - panic("unexpected EmitEvent call") -} - -func (NoopRuntimeInterface) ValueExists(_, _ []byte) (exists bool, err error) { - panic("unexpected ValueExists call") -} - -func (NoopRuntimeInterface) GenerateUUID() (uint64, error) { - panic("unexpected GenerateUUID call") -} - -func (NoopRuntimeInterface) GetComputationLimit() uint64 { - panic("unexpected GetComputationLimit call") -} - -func (NoopRuntimeInterface) SetComputationUsed(_ uint64) error { - panic("unexpected SetComputationUsed call") -} - -func (NoopRuntimeInterface) DecodeArgument(_ []byte, _ cadence.Type) (cadence.Value, error) { - panic("unexpected DecodeArgument call") -} - -func (NoopRuntimeInterface) GetCurrentBlockHeight() (uint64, error) { - panic("unexpected GetCurrentBlockHeight call") -} - -func (NoopRuntimeInterface) GetBlockAtHeight(_ uint64) (block runtime.Block, exists bool, err error) { - panic("unexpected GetBlockAtHeight call") -} - -func (NoopRuntimeInterface) ReadRandom([]byte) error { - panic("unexpected ReadRandom call") -} - -func (NoopRuntimeInterface) VerifySignature(_ []byte, _ string, _ []byte, _ []byte, _ runtime.SignatureAlgorithm, _ runtime.HashAlgorithm) (bool, error) { - panic("unexpected VerifySignature call") -} - -func (NoopRuntimeInterface) Hash(_ []byte, _ string, _ runtime.HashAlgorithm) ([]byte, error) { - panic("unexpected Hash call") -} - -func (NoopRuntimeInterface) GetAccountBalance(_ common.Address) (value uint64, err error) { - panic("unexpected GetAccountBalance call") -} - -func (NoopRuntimeInterface) GetAccountAvailableBalance(_ common.Address) (value uint64, err error) { - panic("unexpected GetAccountAvailableBalance call") -} - -func (NoopRuntimeInterface) GetStorageUsed(_ runtime.Address) (value uint64, err error) { - panic("unexpected GetStorageUsed call") -} - -func (NoopRuntimeInterface) GetStorageCapacity(_ runtime.Address) (value uint64, err error) { - panic("unexpected GetStorageCapacity call") -} - -func (NoopRuntimeInterface) ImplementationDebugLog(_ string) error { - panic("unexpected ImplementationDebugLog call") -} - -func (NoopRuntimeInterface) ValidatePublicKey(_ *runtime.PublicKey) error { - panic("unexpected ValidatePublicKey call") -} - -func (NoopRuntimeInterface) GetAccountContractNames(_ runtime.Address) ([]string, error) { - panic("unexpected GetAccountContractNames call") -} - -func (NoopRuntimeInterface) AllocateStorageIndex(_ []byte) (atree.StorageIndex, error) { - panic("unexpected AllocateStorageIndex call") -} - -func (NoopRuntimeInterface) ComputationUsed() (uint64, error) { - panic("unexpected ComputationUsed call") -} - -func (NoopRuntimeInterface) MemoryUsed() (uint64, error) { - panic("unexpected MemoryUsed call") -} - -func (NoopRuntimeInterface) InteractionUsed() (uint64, error) { - panic("unexpected InteractionUsed call") -} - -func (NoopRuntimeInterface) SetInterpreterSharedState(_ *interpreter.SharedState) { - panic("unexpected SetInterpreterSharedState call") -} - -func (NoopRuntimeInterface) GetInterpreterSharedState() *interpreter.SharedState { - panic("unexpected GetInterpreterSharedState call") -} - -func (NoopRuntimeInterface) 
AccountKeysCount(_ runtime.Address) (uint64, error) { - panic("unexpected AccountKeysCount call") -} - -func (NoopRuntimeInterface) BLSVerifyPOP(_ *runtime.PublicKey, _ []byte) (bool, error) { - panic("unexpected BLSVerifyPOP call") -} - -func (NoopRuntimeInterface) BLSAggregateSignatures(_ [][]byte) ([]byte, error) { - panic("unexpected BLSAggregateSignatures call") -} - -func (NoopRuntimeInterface) BLSAggregatePublicKeys(_ []*runtime.PublicKey) (*runtime.PublicKey, error) { - panic("unexpected BLSAggregatePublicKeys call") -} - -func (NoopRuntimeInterface) ResourceOwnerChanged(_ *interpreter.Interpreter, _ *interpreter.CompositeValue, _ common.Address, _ common.Address) { - panic("unexpected ResourceOwnerChanged call") -} - -func (NoopRuntimeInterface) GenerateAccountID(_ common.Address) (uint64, error) { - panic("unexpected GenerateAccountID call") -} - -func (NoopRuntimeInterface) RecordTrace(_ string, _ runtime.Location, _ time.Duration, _ []attribute.KeyValue) { - panic("unexpected RecordTrace call") -} diff --git a/cmd/util/ledger/migrations/cadence_value_validation_test.go b/cmd/util/ledger/migrations/cadence_value_validation_test.go index 6ba449b987d..117e27ea761 100644 --- a/cmd/util/ledger/migrations/cadence_value_validation_test.go +++ b/cmd/util/ledger/migrations/cadence_value_validation_test.go @@ -52,7 +52,7 @@ func TestValidateCadenceValues(t *testing.T) { accountStatus.ToBytes(), ) - mr, err := newMigratorRuntime(address, []*ledger.Payload{accountStatusPayload}) + mr, err := NewAtreeRegisterMigratorRuntime(address, []*ledger.Payload{accountStatusPayload}) require.NoError(t, err) // Create new storage map @@ -140,7 +140,7 @@ func createTestPayloads(t *testing.T, address common.Address, domain string) []* accountStatus.ToBytes(), ) - mr, err := newMigratorRuntime(address, []*ledger.Payload{accountStatusPayload}) + mr, err := NewAtreeRegisterMigratorRuntime(address, []*ledger.Payload{accountStatusPayload}) require.NoError(t, err) // Create new storage map diff --git a/cmd/util/ledger/migrations/change_contract_code_migration.go b/cmd/util/ledger/migrations/change_contract_code_migration.go index c2715bdc8d0..f21f3b43c4f 100644 --- a/cmd/util/ledger/migrations/change_contract_code_migration.go +++ b/cmd/util/ledger/migrations/change_contract_code_migration.go @@ -326,7 +326,11 @@ func SystemContractChanges(chainID flow.ChainID) []SystemContractChange { // EVM related contracts NewSystemContractChange( systemContracts.EVMContract, - evm.ContractCode(systemContracts.FlowToken.Address), + evm.ContractCode( + systemContracts.NonFungibleToken.Address, + systemContracts.FungibleToken.Address, + systemContracts.FlowToken.Address, + ), ), } } diff --git a/cmd/util/ledger/migrations/migrator_runtime.go b/cmd/util/ledger/migrations/migrator_runtime.go deleted file mode 100644 index 940a0074933..00000000000 --- a/cmd/util/ledger/migrations/migrator_runtime.go +++ /dev/null @@ -1,84 +0,0 @@ -package migrations - -import ( - "fmt" - - "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" - - "github.com/onflow/flow-go/cmd/util/ledger/util" - "github.com/onflow/flow-go/fvm/environment" - "github.com/onflow/flow-go/fvm/storage/state" - "github.com/onflow/flow-go/ledger" -) - -// migratorRuntime is a runtime that can be used to run a migration on a single account -func newMigratorRuntime( - address common.Address, - payloads []*ledger.Payload, -) ( - *migratorRuntime, - error, -) { - snapshot, err := 
util.NewPayloadSnapshot(payloads) - if err != nil { - return nil, fmt.Errorf("failed to create payload snapshot: %w", err) - } - transactionState := state.NewTransactionState(snapshot, state.DefaultParameters()) - accounts := environment.NewAccounts(transactionState) - - accountsAtreeLedger := util.NewAccountsAtreeLedger(accounts) - storage := runtime.NewStorage(accountsAtreeLedger, nil) - - ri := &util.MigrationRuntimeInterface{ - Accounts: accounts, - } - - env := runtime.NewBaseInterpreterEnvironment(runtime.Config{ - AccountLinkingEnabled: true, - // Attachments are enabled everywhere except for Mainnet - AttachmentsEnabled: true, - // Capability Controllers are enabled everywhere except for Mainnet - CapabilityControllersEnabled: true, - }) - - env.Configure( - ri, - runtime.NewCodesAndPrograms(), - storage, - runtime.NewCoverageReport(), - ) - - inter, err := interpreter.NewInterpreter( - nil, - nil, - env.InterpreterConfig) - if err != nil { - return nil, err - } - - return &migratorRuntime{ - Address: address, - Payloads: payloads, - Snapshot: snapshot, - TransactionState: transactionState, - Interpreter: inter, - Storage: storage, - Accounts: accountsAtreeLedger, - }, nil -} - -type migratorRuntime struct { - Snapshot *util.PayloadSnapshot - TransactionState state.NestedTransactionPreparer - Interpreter *interpreter.Interpreter - Storage *runtime.Storage - Payloads []*ledger.Payload - Address common.Address - Accounts *util.AccountsAtreeLedger -} - -func (mr *migratorRuntime) GetReadOnlyStorage() *runtime.Storage { - return runtime.NewStorage(util.NewPayloadsReadonlyLedger(mr.Snapshot), nil) -} diff --git a/cmd/util/ledger/migrations/utils.go b/cmd/util/ledger/migrations/utils.go index e747b3dc508..f9ce19b84e8 100644 --- a/cmd/util/ledger/migrations/utils.go +++ b/cmd/util/ledger/migrations/utils.go @@ -4,58 +4,62 @@ import ( "fmt" "github.com/onflow/atree" + "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/runtime/stdlib" - "github.com/onflow/flow-go/fvm/environment" - "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" ) -type AccountsAtreeLedger struct { - Accounts environment.Accounts -} +func checkStorageHealth( + address common.Address, + storage *runtime.Storage, + payloads []*ledger.Payload, +) error { -func NewAccountsAtreeLedger(accounts environment.Accounts) *AccountsAtreeLedger { - return &AccountsAtreeLedger{Accounts: accounts} -} + for _, payload := range payloads { + registerID, _, err := convert.PayloadToRegister(payload) + if err != nil { + return fmt.Errorf("failed to convert payload to register: %w", err) + } -var _ atree.Ledger = &AccountsAtreeLedger{} + if !registerID.IsSlabIndex() { + continue + } -func (a *AccountsAtreeLedger) GetValue(owner, key []byte) ([]byte, error) { - v, err := a.Accounts.GetValue( - flow.NewRegisterID( - flow.BytesToAddress(owner), - string(key))) - if err != nil { - return nil, fmt.Errorf("getting value failed: %w", err) - } - return v, nil -} + // Convert the register ID to a storage ID. + slabID := atree.NewStorageID( + atree.Address([]byte(registerID.Owner)), + atree.StorageIndex([]byte(registerID.Key[1:]))) -func (a *AccountsAtreeLedger) SetValue(owner, key, value []byte) error { - err := a.Accounts.SetValue( - flow.NewRegisterID( - flow.BytesToAddress(owner), - string(key)), - value) - if err != nil { - return fmt.Errorf("setting value failed: %w", err) + // Retrieve the slab. 
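+ // Retrieving every slab referenced by the account's registers loads it into
+ // the slab storage, so that the storage.CheckHealth() call at the end of this
+ // function can verify each slab is reachable from an account storage map.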
+ _, _, err = storage.Retrieve(slabID) + if err != nil { + return fmt.Errorf("failed to retrieve slab %s: %w", slabID, err) + } } - return nil -} -func (a *AccountsAtreeLedger) ValueExists(owner, key []byte) (exists bool, err error) { - v, err := a.GetValue(owner, key) - if err != nil { - return false, fmt.Errorf("checking value existence failed: %w", err) + for _, domain := range allStorageMapDomains { + _ = storage.GetStorageMap(address, domain, false) } - return len(v) > 0, nil + return storage.CheckHealth() +} + +var allStorageMapDomains = []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPrivate.Identifier(), + common.PathDomainPublic.Identifier(), + runtime.StorageDomainContract, + stdlib.InboxStorageDomain, + stdlib.CapabilityControllerStorageDomain, } -// AllocateStorageIndex allocates new storage index under the owner accounts to store a new register -func (a *AccountsAtreeLedger) AllocateStorageIndex(owner []byte) (atree.StorageIndex, error) { - v, err := a.Accounts.AllocateStorageIndex(flow.BytesToAddress(owner)) - if err != nil { - return atree.StorageIndex{}, fmt.Errorf("storage address allocation failed: %w", err) +var allStorageMapDomainsSet = map[string]struct{}{} + +func init() { + for _, domain := range allStorageMapDomains { + allStorageMapDomainsSet[domain] = struct{}{} } - return v, nil } diff --git a/cmd/util/ledger/util/migration_runtime_interface.go b/cmd/util/ledger/util/migration_runtime_interface.go deleted file mode 100644 index c72d8493095..00000000000 --- a/cmd/util/ledger/util/migration_runtime_interface.go +++ /dev/null @@ -1,295 +0,0 @@ -package util - -import ( - "fmt" - "time" - - "go.opentelemetry.io/otel/attribute" - - "github.com/onflow/atree" - "github.com/onflow/cadence" - "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" - - "github.com/onflow/flow-go/fvm/environment" - "github.com/onflow/flow-go/model/flow" -) - -// MigrationRuntimeInterface is a runtime interface that can be used in migrations. -type MigrationRuntimeInterface struct { - Accounts environment.Accounts - Programs *environment.Programs - - // GetOrLoadProgramFunc allows for injecting extra logic - GetOrLoadProgramFunc func(location runtime.Location, load func() (*interpreter.Program, error)) (*interpreter.Program, error) -} - -func (m MigrationRuntimeInterface) ResolveLocation( - identifiers []runtime.Identifier, - location runtime.Location, -) ([]runtime.ResolvedLocation, error) { - - addressLocation, isAddress := location.(common.AddressLocation) - - // if the location is not an address location, e.g. an identifier location (`import Crypto`), - // then return a single resolved location which declares all identifiers. 
- if !isAddress { - return []runtime.ResolvedLocation{ - { - Location: location, - Identifiers: identifiers, - }, - }, nil - } - - // if the location is an address, - // and no specific identifiers where requested in the import statement, - // then fetch all identifiers at this address - if len(identifiers) == 0 { - address := flow.Address(addressLocation.Address) - - contractNames, err := m.Accounts.GetContractNames(address) - if err != nil { - return nil, fmt.Errorf("ResolveLocation failed: %w", err) - } - - // if there are no contractNames deployed, - // then return no resolved locations - if len(contractNames) == 0 { - return nil, nil - } - - identifiers = make([]runtime.Identifier, len(contractNames)) - - for i := range identifiers { - identifiers[i] = runtime.Identifier{ - Identifier: contractNames[i], - } - } - } - - // return one resolved location per identifier. - // each resolved location is an address contract location - resolvedLocations := make([]runtime.ResolvedLocation, len(identifiers)) - for i := range resolvedLocations { - identifier := identifiers[i] - resolvedLocations[i] = runtime.ResolvedLocation{ - Location: common.AddressLocation{ - Address: addressLocation.Address, - Name: identifier.Identifier, - }, - Identifiers: []runtime.Identifier{identifier}, - } - } - - return resolvedLocations, nil -} - -func (m MigrationRuntimeInterface) GetCode(location runtime.Location) ([]byte, error) { - contractLocation, ok := location.(common.AddressLocation) - if !ok { - return nil, fmt.Errorf("GetCode failed: expected AddressLocation") - } - - add, err := m.Accounts.GetContract(contractLocation.Name, flow.Address(contractLocation.Address)) - if err != nil { - return nil, fmt.Errorf("GetCode failed: %w", err) - } - - return add, nil -} - -func (m MigrationRuntimeInterface) GetAccountContractCode( - l common.AddressLocation, -) (code []byte, err error) { - return m.Accounts.GetContract(l.Name, flow.Address(l.Address)) -} - -func (m MigrationRuntimeInterface) GetOrLoadProgram(location runtime.Location, load func() (*interpreter.Program, error)) (*interpreter.Program, error) { - if m.GetOrLoadProgramFunc != nil { - return m.GetOrLoadProgramFunc(location, load) - } - - return m.Programs.GetOrLoadProgram(location, load) -} - -func (m MigrationRuntimeInterface) MeterMemory(_ common.MemoryUsage) error { - return nil -} - -func (m MigrationRuntimeInterface) MeterComputation(_ common.ComputationKind, _ uint) error { - return nil -} - -func (m MigrationRuntimeInterface) GetValue(_, _ []byte) (value []byte, err error) { - panic("unexpected GetValue call") -} - -func (m MigrationRuntimeInterface) SetValue(_, _, _ []byte) (err error) { - panic("unexpected SetValue call") -} - -func (m MigrationRuntimeInterface) CreateAccount(_ runtime.Address) (address runtime.Address, err error) { - panic("unexpected CreateAccount call") -} - -func (m MigrationRuntimeInterface) AddEncodedAccountKey(_ runtime.Address, _ []byte) error { - panic("unexpected AddEncodedAccountKey call") -} - -func (m MigrationRuntimeInterface) RevokeEncodedAccountKey(_ runtime.Address, _ int) (publicKey []byte, err error) { - panic("unexpected RevokeEncodedAccountKey call") -} - -func (m MigrationRuntimeInterface) AddAccountKey(_ runtime.Address, _ *runtime.PublicKey, _ runtime.HashAlgorithm, _ int) (*runtime.AccountKey, error) { - panic("unexpected AddAccountKey call") -} - -func (m MigrationRuntimeInterface) GetAccountKey(_ runtime.Address, _ int) (*runtime.AccountKey, error) { - panic("unexpected GetAccountKey call") -} - -func 
(m MigrationRuntimeInterface) RevokeAccountKey(_ runtime.Address, _ int) (*runtime.AccountKey, error) { - panic("unexpected RevokeAccountKey call") -} - -func (m MigrationRuntimeInterface) UpdateAccountContractCode(_ common.AddressLocation, _ []byte) (err error) { - panic("unexpected UpdateAccountContractCode call") -} - -func (m MigrationRuntimeInterface) RemoveAccountContractCode(common.AddressLocation) (err error) { - panic("unexpected RemoveAccountContractCode call") -} - -func (m MigrationRuntimeInterface) GetSigningAccounts() ([]runtime.Address, error) { - panic("unexpected GetSigningAccounts call") -} - -func (m MigrationRuntimeInterface) ProgramLog(_ string) error { - panic("unexpected ProgramLog call") -} - -func (m MigrationRuntimeInterface) EmitEvent(_ cadence.Event) error { - panic("unexpected EmitEvent call") -} - -func (m MigrationRuntimeInterface) ValueExists(_, _ []byte) (exists bool, err error) { - panic("unexpected ValueExists call") -} - -func (m MigrationRuntimeInterface) GenerateUUID() (uint64, error) { - panic("unexpected GenerateUUID call") -} - -func (m MigrationRuntimeInterface) GetComputationLimit() uint64 { - panic("unexpected GetComputationLimit call") -} - -func (m MigrationRuntimeInterface) SetComputationUsed(_ uint64) error { - panic("unexpected SetComputationUsed call") -} - -func (m MigrationRuntimeInterface) DecodeArgument(_ []byte, _ cadence.Type) (cadence.Value, error) { - panic("unexpected DecodeArgument call") -} - -func (m MigrationRuntimeInterface) GetCurrentBlockHeight() (uint64, error) { - panic("unexpected GetCurrentBlockHeight call") -} - -func (m MigrationRuntimeInterface) GetBlockAtHeight(_ uint64) (block runtime.Block, exists bool, err error) { - panic("unexpected GetBlockAtHeight call") -} - -func (m MigrationRuntimeInterface) ReadRandom([]byte) error { - panic("unexpected ReadRandom call") -} - -func (m MigrationRuntimeInterface) VerifySignature(_ []byte, _ string, _ []byte, _ []byte, _ runtime.SignatureAlgorithm, _ runtime.HashAlgorithm) (bool, error) { - panic("unexpected VerifySignature call") -} - -func (m MigrationRuntimeInterface) Hash(_ []byte, _ string, _ runtime.HashAlgorithm) ([]byte, error) { - panic("unexpected Hash call") -} - -func (m MigrationRuntimeInterface) GetAccountBalance(_ common.Address) (value uint64, err error) { - panic("unexpected GetAccountBalance call") -} - -func (m MigrationRuntimeInterface) GetAccountAvailableBalance(_ common.Address) (value uint64, err error) { - panic("unexpected GetAccountAvailableBalance call") -} - -func (m MigrationRuntimeInterface) GetStorageUsed(_ runtime.Address) (value uint64, err error) { - panic("unexpected GetStorageUsed call") -} - -func (m MigrationRuntimeInterface) GetStorageCapacity(_ runtime.Address) (value uint64, err error) { - panic("unexpected GetStorageCapacity call") -} - -func (m MigrationRuntimeInterface) ImplementationDebugLog(_ string) error { - panic("unexpected ImplementationDebugLog call") -} - -func (m MigrationRuntimeInterface) ValidatePublicKey(_ *runtime.PublicKey) error { - panic("unexpected ValidatePublicKey call") -} - -func (m MigrationRuntimeInterface) GetAccountContractNames(_ runtime.Address) ([]string, error) { - panic("unexpected GetAccountContractNames call") -} - -func (m MigrationRuntimeInterface) AllocateStorageIndex(_ []byte) (atree.StorageIndex, error) { - panic("unexpected AllocateStorageIndex call") -} - -func (m MigrationRuntimeInterface) ComputationUsed() (uint64, error) { - panic("unexpected ComputationUsed call") -} - -func (m 
MigrationRuntimeInterface) MemoryUsed() (uint64, error) { - panic("unexpected MemoryUsed call") -} - -func (m MigrationRuntimeInterface) InteractionUsed() (uint64, error) { - panic("unexpected InteractionUsed call") -} - -func (m MigrationRuntimeInterface) SetInterpreterSharedState(_ *interpreter.SharedState) { - panic("unexpected SetInterpreterSharedState call") -} - -func (m MigrationRuntimeInterface) GetInterpreterSharedState() *interpreter.SharedState { - panic("unexpected GetInterpreterSharedState call") -} - -func (m MigrationRuntimeInterface) AccountKeysCount(_ runtime.Address) (uint64, error) { - panic("unexpected AccountKeysCount call") -} - -func (m MigrationRuntimeInterface) BLSVerifyPOP(_ *runtime.PublicKey, _ []byte) (bool, error) { - panic("unexpected BLSVerifyPOP call") -} - -func (m MigrationRuntimeInterface) BLSAggregateSignatures(_ [][]byte) ([]byte, error) { - panic("unexpected BLSAggregateSignatures call") -} - -func (m MigrationRuntimeInterface) BLSAggregatePublicKeys(_ []*runtime.PublicKey) (*runtime.PublicKey, error) { - panic("unexpected BLSAggregatePublicKeys call") -} - -func (m MigrationRuntimeInterface) ResourceOwnerChanged(_ *interpreter.Interpreter, _ *interpreter.CompositeValue, _ common.Address, _ common.Address) { - panic("unexpected ResourceOwnerChanged call") -} - -func (m MigrationRuntimeInterface) GenerateAccountID(_ common.Address) (uint64, error) { - panic("unexpected GenerateAccountID call") -} - -func (m MigrationRuntimeInterface) RecordTrace(_ string, _ runtime.Location, _ time.Duration, _ []attribute.KeyValue) { - panic("unexpected RecordTrace call") -} diff --git a/engine/access/apiproxy/access_api_proxy.go b/engine/access/apiproxy/access_api_proxy.go index d57f1681700..689d91de6cd 100644 --- a/engine/access/apiproxy/access_api_proxy.go +++ b/engine/access/apiproxy/access_api_proxy.go @@ -197,12 +197,6 @@ func (h *FlowAccessAPIRouter) GetSystemTransaction(context context.Context, req } func (h *FlowAccessAPIRouter) GetSystemTransactionResult(context context.Context, req *access.GetSystemTransactionResultRequest) (*access.TransactionResultResponse, error) { - if h.useIndex { - res, err := h.local.GetSystemTransactionResult(context, req) - h.log(LocalApiService, "GetSystemTransactionResult", err) - return res, err - } - res, err := h.upstream.GetSystemTransactionResult(context, req) h.log(UpstreamApiService, "GetSystemTransactionResult", err) return res, err @@ -347,48 +341,57 @@ func (h *FlowAccessAPIRouter) GetExecutionResultByID(context context.Context, re } func (h *FlowAccessAPIRouter) SubscribeBlocksFromStartBlockID(req *access.SubscribeBlocksFromStartBlockIDRequest, server access.AccessAPI_SubscribeBlocksFromStartBlockIDServer) error { - // SubscribeBlocksFromStartBlockID is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlocksFromStartBlockID not implemented") + err := h.local.SubscribeBlocksFromStartBlockID(req, server) + h.log(LocalApiService, "SubscribeBlocksFromStartBlockID", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlocksFromStartHeight(req *access.SubscribeBlocksFromStartHeightRequest, server access.AccessAPI_SubscribeBlocksFromStartHeightServer) error { - // SubscribeBlocksFromStartHeight is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlocksFromStartHeight not implemented") + err := h.local.SubscribeBlocksFromStartHeight(req, server) + h.log(LocalApiService, "SubscribeBlocksFromStartHeight", err) + return err } func (h 
*FlowAccessAPIRouter) SubscribeBlocksFromLatest(req *access.SubscribeBlocksFromLatestRequest, server access.AccessAPI_SubscribeBlocksFromLatestServer) error { - // SubscribeBlocksFromLatest is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlocksFromLatest not implemented") + err := h.local.SubscribeBlocksFromLatest(req, server) + h.log(LocalApiService, "SubscribeBlocksFromLatest", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockHeadersFromStartBlockID(req *access.SubscribeBlockHeadersFromStartBlockIDRequest, server access.AccessAPI_SubscribeBlockHeadersFromStartBlockIDServer) error { - // SubscribeBlockHeadersFromStartBlockID is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockHeadersFromStartBlockID not implemented") + err := h.local.SubscribeBlockHeadersFromStartBlockID(req, server) + h.log(LocalApiService, "SubscribeBlockHeadersFromStartBlockID", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockHeadersFromStartHeight(req *access.SubscribeBlockHeadersFromStartHeightRequest, server access.AccessAPI_SubscribeBlockHeadersFromStartHeightServer) error { - // SubscribeBlockHeadersFromStartHeight is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockHeadersFromStartHeight not implemented") + err := h.local.SubscribeBlockHeadersFromStartHeight(req, server) + h.log(LocalApiService, "SubscribeBlockHeadersFromStartHeight", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockHeadersFromLatest(req *access.SubscribeBlockHeadersFromLatestRequest, server access.AccessAPI_SubscribeBlockHeadersFromLatestServer) error { - // SubscribeBlockHeadersFromLatest is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockHeadersFromLatest not implemented") + err := h.local.SubscribeBlockHeadersFromLatest(req, server) + h.log(LocalApiService, "SubscribeBlockHeadersFromLatest", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockDigestsFromStartBlockID(req *access.SubscribeBlockDigestsFromStartBlockIDRequest, server access.AccessAPI_SubscribeBlockDigestsFromStartBlockIDServer) error { - // SubscribeBlockDigestsFromStartBlockID is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockDigestsFromStartBlockID not implemented") + err := h.local.SubscribeBlockDigestsFromStartBlockID(req, server) + h.log(LocalApiService, "SubscribeBlockDigestsFromStartBlockID", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockDigestsFromStartHeight(req *access.SubscribeBlockDigestsFromStartHeightRequest, server access.AccessAPI_SubscribeBlockDigestsFromStartHeightServer) error { - // SubscribeBlockDigestsFromStartHeight is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockDigestsFromStartHeight not implemented") + err := h.local.SubscribeBlockDigestsFromStartHeight(req, server) + h.log(LocalApiService, "SubscribeBlockDigestsFromStartHeight", err) + return err } func (h *FlowAccessAPIRouter) SubscribeBlockDigestsFromLatest(req *access.SubscribeBlockDigestsFromLatestRequest, server access.AccessAPI_SubscribeBlockDigestsFromLatestServer) error { - // SubscribeBlockDigestsFromLatest is not implemented for observer yet - return status.Errorf(codes.Unimplemented, "method SubscribeBlockDigestsFromLatest not implemented") + err := h.local.SubscribeBlockDigestsFromLatest(req, server) + 
h.log(LocalApiService, "SubscribeBlockDigestsFromLatest", err) + return err } func (h *FlowAccessAPIRouter) SendAndSubscribeTransactionStatuses(req *access.SendAndSubscribeTransactionStatusesRequest, server access.AccessAPI_SendAndSubscribeTransactionStatusesServer) error { diff --git a/engine/access/rpc/backend/backend.go b/engine/access/rpc/backend/backend.go index 5c27d1e9577..1b7ef03ddba 100644 --- a/engine/access/rpc/backend/backend.go +++ b/engine/access/rpc/backend/backend.go @@ -162,15 +162,6 @@ func New(params Params) (*Backend, error) { // initialize node version info nodeInfo := getNodeVersionInfo(params.State.Params()) - transactionsLocalDataProvider := &TransactionsLocalDataProvider{ - state: params.State, - collections: params.Collections, - blocks: params.Blocks, - eventsIndex: params.EventsIndex, - txResultsIndex: params.TxResultsIndex, - systemTxID: systemTxID, - } - b := &Backend{ state: params.State, BlockTracker: params.BlockTracker, @@ -187,25 +178,6 @@ func New(params Params) (*Backend, error) { scriptExecutor: params.ScriptExecutor, scriptExecMode: params.ScriptExecutionMode, }, - backendTransactions: backendTransactions{ - TransactionsLocalDataProvider: transactionsLocalDataProvider, - log: params.Log, - staticCollectionRPC: params.CollectionRPC, - chainID: params.ChainID, - transactions: params.Transactions, - executionReceipts: params.ExecutionReceipts, - transactionValidator: configureTransactionValidator(params.State, params.ChainID), - transactionMetrics: params.AccessMetrics, - retry: retry, - connFactory: params.ConnFactory, - previousAccessNodes: params.HistoricalAccessNodes, - nodeCommunicator: params.Communicator, - txResultCache: txResCache, - txErrorMessagesCache: txErrorMessagesCache, - txResultQueryMode: params.TxResultQueryMode, - systemTx: systemTx, - systemTxID: systemTxID, - }, backendEvents: backendEvents{ log: params.Log, chain: params.ChainID.Chain(), @@ -253,13 +225,7 @@ func New(params Params) (*Backend, error) { subscriptionHandler: params.SubscriptionHandler, blockTracker: params.BlockTracker, }, - backendSubscribeTransactions: backendSubscribeTransactions{ - txLocalDataProvider: transactionsLocalDataProvider, - log: params.Log, - executionResults: params.ExecutionResults, - subscriptionHandler: params.SubscriptionHandler, - blockTracker: params.BlockTracker, - }, + collections: params.Collections, executionReceipts: params.ExecutionReceipts, connFactory: params.ConnFactory, @@ -267,8 +233,47 @@ func New(params Params) (*Backend, error) { nodeInfo: nodeInfo, } + transactionsLocalDataProvider := &TransactionsLocalDataProvider{ + state: params.State, + collections: params.Collections, + blocks: params.Blocks, + eventsIndex: params.EventsIndex, + txResultsIndex: params.TxResultsIndex, + systemTxID: systemTxID, + } + + b.backendTransactions = backendTransactions{ + TransactionsLocalDataProvider: transactionsLocalDataProvider, + log: params.Log, + staticCollectionRPC: params.CollectionRPC, + chainID: params.ChainID, + transactions: params.Transactions, + executionReceipts: params.ExecutionReceipts, + transactionValidator: configureTransactionValidator(params.State, params.ChainID), + transactionMetrics: params.AccessMetrics, + retry: retry, + connFactory: params.ConnFactory, + previousAccessNodes: params.HistoricalAccessNodes, + nodeCommunicator: params.Communicator, + txResultCache: txResCache, + txErrorMessagesCache: txErrorMessagesCache, + txResultQueryMode: params.TxResultQueryMode, + systemTx: systemTx, + systemTxID: systemTxID, + } + + // 
TODO: The TransactionErrorMessage interface should be reorganized in the future, as it is implemented in backendTransactions but used in TransactionsLocalDataProvider, and its initialization is somewhat quirky. b.backendTransactions.txErrorMessages = b + b.backendSubscribeTransactions = backendSubscribeTransactions{ + txLocalDataProvider: transactionsLocalDataProvider, + backendTransactions: &b.backendTransactions, + log: params.Log, + executionResults: params.ExecutionResults, + subscriptionHandler: params.SubscriptionHandler, + blockTracker: params.BlockTracker, + } + retry.SetBackend(b) preferredENIdentifiers, err = identifierList(params.PreferredExecutionNodeIDs) diff --git a/engine/access/rpc/backend/backend_stream_transactions.go b/engine/access/rpc/backend/backend_stream_transactions.go index e2759194c8f..a82b365240e 100644 --- a/engine/access/rpc/backend/backend_stream_transactions.go +++ b/engine/access/rpc/backend/backend_stream_transactions.go @@ -5,26 +5,26 @@ import ( "errors" "fmt" - "github.com/onflow/flow-go/module/irrecoverable" - "github.com/onflow/flow-go/state" - - "github.com/onflow/flow-go/engine/common/rpc/convert" - "github.com/onflow/flow-go/module/counters" - "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/rs/zerolog" + "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine/access/subscription" "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/irrecoverable" + "github.com/onflow/flow-go/state" "github.com/onflow/flow-go/storage" + + "github.com/onflow/flow/protobuf/go/flow/entities" ) // backendSubscribeTransactions handles transaction subscriptions. type backendSubscribeTransactions struct { txLocalDataProvider *TransactionsLocalDataProvider + backendTransactions *backendTransactions executionResults storage.ExecutionResults log zerolog.Logger @@ -34,30 +34,34 @@ type backendSubscribeTransactions struct { // TransactionSubscriptionMetadata holds data representing the status state for each transaction subscription. type TransactionSubscriptionMetadata struct { - txID flow.Identifier - txReferenceBlockID flow.Identifier - messageIndex counters.StrictMonotonousCounter - blockWithTx *flow.Header - blockID flow.Identifier - txExecuted bool - lastTxStatus flow.TransactionStatus + *access.TransactionResult + txReferenceBlockID flow.Identifier + blockWithTx *flow.Header + txExecuted bool + eventEncodingVersion entities.EventEncodingVersion } // SubscribeTransactionStatuses subscribes to transaction status changes starting from the transaction reference block ID. // If invalid transaction parameters are supplied, SubscribeTransactionStatuses will return a failed subscription. 
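+// The requiredEventEncodingVersion parameter determines the encoding (e.g. CCF or JSON-CDC) of the events contained in the transaction results delivered to the subscriber.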
-func (b *backendSubscribeTransactions) SubscribeTransactionStatuses(ctx context.Context, tx *flow.TransactionBody) subscription.Subscription { +func (b *backendSubscribeTransactions) SubscribeTransactionStatuses( + ctx context.Context, + tx *flow.TransactionBody, + requiredEventEncodingVersion entities.EventEncodingVersion, +) subscription.Subscription { nextHeight, err := b.blockTracker.GetStartHeightFromBlockID(tx.ReferenceBlockID) if err != nil { return subscription.NewFailedSubscription(err, "could not get start height") } txInfo := TransactionSubscriptionMetadata{ - txID: tx.ID(), - txReferenceBlockID: tx.ReferenceBlockID, - messageIndex: counters.NewMonotonousCounter(0), - blockWithTx: nil, - blockID: flow.ZeroID, - lastTxStatus: flow.TransactionStatusUnknown, + TransactionResult: &access.TransactionResult{ + TransactionID: tx.ID(), + BlockID: flow.ZeroID, + Status: flow.TransactionStatusUnknown, + }, + txReferenceBlockID: tx.ReferenceBlockID, + blockWithTx: nil, + eventEncodingVersion: requiredEventEncodingVersion, } return b.subscriptionHandler.Subscribe(ctx, nextHeight, b.getTransactionStatusResponse(&txInfo)) @@ -67,25 +71,25 @@ func (b *backendSubscribeTransactions) SubscribeTransactionStatuses(ctx context. // subscription responses based on new blocks. func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *TransactionSubscriptionMetadata) func(context.Context, uint64) (interface{}, error) { return func(ctx context.Context, height uint64) (interface{}, error) { - highestHeight, err := b.blockTracker.GetHighestHeight(flow.BlockStatusFinalized) + err := b.checkBlockReady(height) if err != nil { - return nil, fmt.Errorf("could not get highest height for block %d: %w", height, err) - } - - // Fail early if no block finalized notification has been received for the given height. - // Note: It's possible that the block is locally finalized before the notification is - // received. This ensures a consistent view is available to all streams. - if height > highestHeight { - return nil, fmt.Errorf("block %d is not available yet: %w", height, subscription.ErrBlockNotReady) + return nil, err } - if txInfo.lastTxStatus == flow.TransactionStatusSealed || txInfo.lastTxStatus == flow.TransactionStatusExpired { - return nil, fmt.Errorf("transaction final status %s was already reported: %w", txInfo.lastTxStatus.String(), subscription.ErrEndOfData) + // If the final transaction status has already been reported, return with no data available + if txInfo.Status == flow.TransactionStatusSealed || txInfo.Status == flow.TransactionStatusExpired { + return nil, fmt.Errorf("transaction final status %s was already reported: %w", txInfo.Status.String(), subscription.ErrEndOfData) } + // If the transaction's block is not yet known at this point, search for it. if txInfo.blockWithTx == nil { - // Check if block contains transaction. - txInfo.blockWithTx, txInfo.blockID, err = b.searchForTransactionBlock(height, txInfo) + // Search for the transaction's block information. + txInfo.blockWithTx, + txInfo.BlockID, + txInfo.BlockHeight, + txInfo.CollectionID, + err = b.searchForTransactionBlockInfo(height, txInfo) + if err != nil { if errors.Is(err, storage.ErrNotFound) { return nil, fmt.Errorf("could not find block %d in storage: %w", height, subscription.ErrBlockNotReady) @@ -97,20 +101,32 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran } } - // Find the transaction status. 
- var txStatus flow.TransactionStatus - if txInfo.blockWithTx == nil { - txStatus, err = b.txLocalDataProvider.DeriveUnknownTransactionStatus(txInfo.txReferenceBlockID) - } else { - if !txInfo.txExecuted { - // Check if transaction was executed. - txInfo.txExecuted, err = b.searchForExecutionResult(txInfo.blockID) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get execution result for block %s: %v", txInfo.blockID, err) - } + // Capture the old status here, as it may be replaced by the status from the found tx result + prevTxStatus := txInfo.Status + + // Check whether the transaction has been executed and its result is already available + if txInfo.blockWithTx != nil && !txInfo.txExecuted { + txResult, err := b.searchForTransactionResult(ctx, txInfo) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to get execution result for block %s: %v", txInfo.BlockID, err) } - txStatus, err = b.txLocalDataProvider.DeriveTransactionStatus(txInfo.blockWithTx.Height, txInfo.txExecuted) + // If a transaction result was found, replace the one in the metadata entirely; the new transaction status is already included in the result. + if txResult != nil { + txInfo.TransactionResult = txResult + // Record the execution status for future iterations + txInfo.txExecuted = true + } + } + + // If the block containing the transaction was not found, derive the transaction status to check whether it differs from the last status + if txInfo.blockWithTx == nil { + txInfo.Status, err = b.txLocalDataProvider.DeriveUnknownTransactionStatus(txInfo.txReferenceBlockID) + } else if txInfo.Status == prevTxStatus { + // When a block with the transaction is available, it is possible to receive a new transaction status while + // searching for the transaction result. Otherwise, it remains unchanged. So, if the old and new transaction + // statuses are the same, the current transaction status should be derived. + txInfo.Status, err = b.txLocalDataProvider.DeriveTransactionStatus(txInfo.blockWithTx.Height, txInfo.txExecuted) } if err != nil { if !errors.Is(err, state.ErrUnknownSnapshotReference) { @@ -119,64 +135,150 @@ func (b *backendSubscribeTransactions) getTransactionStatusResponse(txInfo *Tran return nil, rpc.ConvertStorageError(err) } - // The same transaction status should not be reported, so return here with no response - if txInfo.lastTxStatus == txStatus { + // If the old and new transaction statuses are still the same, the status change should not be reported, so + // return here with no response. + if prevTxStatus == txInfo.Status { return nil, nil } - txInfo.lastTxStatus = txStatus - messageIndex := txInfo.messageIndex.Value() - if ok := txInfo.messageIndex.Set(messageIndex + 1); !ok { - return nil, status.Errorf(codes.Internal, "the message index has already been incremented to %d", txInfo.messageIndex.Value()) + return b.generateResultsWithMissingStatuses(txInfo, prevTxStatus) + } +} + +// generateResultsWithMissingStatuses checks if the current result differs from the previous result by more than one step. +// If yes, it generates results for the missing transaction statuses. This is done because the subscription should send +// responses for each status in the transaction lifecycle, and the messages should be sent in the order of the transaction statuses. +// Possible orders of transaction statuses: +// 1. pending(1) -> finalized(2) -> executed(3) -> sealed(4) +// 2. pending(1) -> expired(5) +// No errors expected during normal operations. 
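+//
+// For example (illustrative), if the previously reported status was pending(1)
+// and the newly derived status on txInfo is sealed(4), the gap is backfilled so
+// the subscriber observes every intermediate status exactly once:
+//
+//	results, _ := b.generateResultsWithMissingStatuses(txInfo, flow.TransactionStatusPending)
+//	// results carry, in order: finalized(2), executed(3), sealed(4)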
+func (b *backendSubscribeTransactions) generateResultsWithMissingStatuses( + txInfo *TransactionSubscriptionMetadata, + prevTxStatus flow.TransactionStatus, +) ([]*access.TransactionResult, error) { + // If the previous status is pending and the new status is expired, which is the last status, return its result. + // If the previous status is anything other than pending, return an error, as this transition is unexpected. + if txInfo.Status == flow.TransactionStatusExpired { + if prevTxStatus == flow.TransactionStatusPending { + return []*access.TransactionResult{ + txInfo.TransactionResult, + }, nil + } else { + return nil, fmt.Errorf("unexpected transition from %s to %s transaction status", prevTxStatus.String(), txInfo.Status.String()) + } + } + + var results []*access.TransactionResult + + // If the difference between statuses' values is more than one step, fill in the missing results. + if (txInfo.Status - prevTxStatus) > 1 { + for missingStatus := prevTxStatus + 1; missingStatus < txInfo.Status; missingStatus++ { + switch missingStatus { + case flow.TransactionStatusPending: + results = append(results, &access.TransactionResult{ + Status: missingStatus, + TransactionID: txInfo.TransactionID, + }) + case flow.TransactionStatusFinalized: + results = append(results, &access.TransactionResult{ + Status: missingStatus, + TransactionID: txInfo.TransactionID, + BlockID: txInfo.BlockID, + BlockHeight: txInfo.BlockHeight, + CollectionID: txInfo.CollectionID, + }) + case flow.TransactionStatusExecuted: + missingTxResult := *txInfo.TransactionResult + missingTxResult.Status = missingStatus + results = append(results, &missingTxResult) + default: + return nil, fmt.Errorf("unexpected missing transaction status") + } } + } + + results = append(results, txInfo.TransactionResult) + return results, nil +} + +// checkBlockReady checks if the given block height is valid and available based on the expected block status. +// Expected errors during normal operation: +// - subscription.ErrBlockNotReady: block for the given block height is not available. +func (b *backendSubscribeTransactions) checkBlockReady(height uint64) error { + // Get the highest available finalized block height + highestHeight, err := b.blockTracker.GetHighestHeight(flow.BlockStatusFinalized) + if err != nil { + return fmt.Errorf("could not get highest height for block %d: %w", height, err) + } - return &convert.TransactionSubscribeInfo{ - ID: txInfo.txID, - Status: txInfo.lastTxStatus, - MessageIndex: messageIndex, - }, nil + // Fail early if no block finalized notification has been received for the given height. + // Note: It's possible that the block is locally finalized before the notification is + // received. This ensures a consistent view is available to all streams. + if height > highestHeight { + return fmt.Errorf("block %d is not available yet: %w", height, subscription.ErrBlockNotReady) } + + return nil } -// searchForTransactionBlock searches for the block containing the specified transaction. +// searchForTransactionBlockInfo searches for the block containing the specified transaction. // It retrieves the block at the given height and checks if the transaction is included in that block. 
// Expected errors: -// - subscription.ErrBlockNotReady when unable to retrieve the block or collection ID +// - ErrTransactionNotInBlock when unable to retrieve the collection // - codes.Internal when other errors occur during block or collection lookup -func (b *backendSubscribeTransactions) searchForTransactionBlock( +func (b *backendSubscribeTransactions) searchForTransactionBlockInfo( height uint64, txInfo *TransactionSubscriptionMetadata, -) (*flow.Header, flow.Identifier, error) { +) (*flow.Header, flow.Identifier, uint64, flow.Identifier, error) { block, err := b.txLocalDataProvider.blocks.ByHeight(height) if err != nil { - return nil, flow.ZeroID, fmt.Errorf("error looking up block: %w", err) + return nil, flow.ZeroID, 0, flow.ZeroID, fmt.Errorf("error looking up block: %w", err) } - collectionID, err := b.txLocalDataProvider.LookupCollectionIDInBlock(block, txInfo.txID) + collectionID, err := b.txLocalDataProvider.LookupCollectionIDInBlock(block, txInfo.TransactionID) if err != nil { - return nil, flow.ZeroID, fmt.Errorf("error looking up transaction in block: %w", err) + return nil, flow.ZeroID, 0, flow.ZeroID, fmt.Errorf("error looking up transaction in block: %w", err) } if collectionID != flow.ZeroID { - return block.Header, block.ID(), nil + return block.Header, block.ID(), height, collectionID, nil } - return nil, flow.ZeroID, nil + return nil, flow.ZeroID, 0, flow.ZeroID, nil } -// searchForExecutionResult searches for the execution result of a block. It retrieves the execution result for the specified block ID. +// searchForTransactionResult searches for the transaction result of a block. It retrieves the execution result for the specified block ID. // Expected errors: // - codes.Internal if an internal error occurs while retrieving execution result. 
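+// A nil transaction result together with a nil error indicates that the transaction has not been executed yet (either no execution result exists for the block, or the transaction result is not yet available).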
-func (b *backendSubscribeTransactions) searchForExecutionResult( - blockID flow.Identifier, -) (bool, error) { - _, err := b.executionResults.ByBlockID(blockID) +func (b *backendSubscribeTransactions) searchForTransactionResult( + ctx context.Context, + txInfo *TransactionSubscriptionMetadata, +) (*access.TransactionResult, error) { + _, err := b.executionResults.ByBlockID(txInfo.BlockID) if err != nil { if errors.Is(err, storage.ErrNotFound) { - return false, nil + return nil, nil + } + return nil, fmt.Errorf("failed to get execution result for block %s: %w", txInfo.BlockID, err) + } + + txResult, err := b.backendTransactions.GetTransactionResult( + ctx, + txInfo.TransactionID, + txInfo.BlockID, + txInfo.CollectionID, + txInfo.eventEncodingVersion, + ) + + if err != nil { + // if either the storage or execution node reported no results or there were not enough execution results + if status.Code(err) == codes.NotFound { + // No result yet, indicate that it has not been executed + return nil, nil } - return false, fmt.Errorf("failed to get execution result for block %s: %w", blockID, err) + // Other Error trying to retrieve the result, return with err + return nil, err } - return true, nil + return txResult, nil } diff --git a/engine/access/rpc/backend/backend_stream_transactions_test.go b/engine/access/rpc/backend/backend_stream_transactions_test.go index cf7438bf605..598105bf7bd 100644 --- a/engine/access/rpc/backend/backend_stream_transactions_test.go +++ b/engine/access/rpc/backend/backend_stream_transactions_test.go @@ -6,6 +6,10 @@ import ( "testing" "time" + "github.com/onflow/flow/protobuf/go/flow/entities" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + protocolint "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/engine/access/index" @@ -20,15 +24,14 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + accessapi "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine" access "github.com/onflow/flow-go/engine/access/mock" backendmock "github.com/onflow/flow-go/engine/access/rpc/backend/mock" connectionmock "github.com/onflow/flow-go/engine/access/rpc/connection/mock" "github.com/onflow/flow-go/engine/access/subscription" subscriptionmock "github.com/onflow/flow-go/engine/access/subscription/mock" - "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/metrics" protocol "github.com/onflow/flow-go/state/protocol/mock" storagemock "github.com/onflow/flow-go/storage/mock" @@ -135,7 +138,6 @@ func (s *TransactionStatusSuite) SetupTest() { s.reporter = syncmock.NewIndexReporter(s.T()) s.blocks.On("ByHeight", mock.AnythingOfType("uint64")).Return(mocks.StorageMapGetter(s.blockMap)) - s.state.On("Final").Return(s.finalSnapshot, nil) s.state.On("AtBlockID", mock.AnythingOfType("flow.Identifier")).Return(func(blockID flow.Identifier) protocolint.Snapshot { s.tempSnapshot.On("Head").Unset() @@ -167,7 +169,9 @@ func (s *TransactionStatusSuite) SetupTest() { }, nil) backendParams := s.backendParams() - err := backendParams.TxResultsIndex.Initialize(s.reporter) + err := backendParams.EventsIndex.Initialize(s.reporter) + require.NoError(s.T(), err) + err = backendParams.TxResultsIndex.Initialize(s.reporter) require.NoError(s.T(), err) s.backend, err = New(backendParams) @@ -201,8 +205,10 @@ func (s *TransactionStatusSuite) backendParams() Params { subscription.DefaultResponseLimit, 
subscription.DefaultSendBufferSize, ), - TxResultsIndex: index.NewTransactionResultsIndex(s.transactionResults), - EventsIndex: index.NewEventsIndex(s.events), + TxResultsIndex: index.NewTransactionResultsIndex(s.transactionResults), + EventQueryMode: IndexQueryModeLocalOnly, + TxResultQueryMode: IndexQueryModeLocalOnly, + EventsIndex: index.NewEventsIndex(s.events), } } @@ -225,6 +231,20 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + s.reporter.On("LowestIndexedHeight").Return(s.rootBlock.Header.Height, nil) + s.reporter.On("HighestIndexedHeight").Return(func() (uint64, error) { + finalizedHeader := s.finalizedBlock.Header + return finalizedHeader.Height, nil + }, nil) + s.blocks.On("ByID", mock.AnythingOfType("flow.Identifier")).Return(func(blockID flow.Identifier) (*flow.Block, error) { + for _, block := range s.blockMap { + if block.ID() == blockID { + return block, nil + } + } + + return nil, nil + }, nil) s.sealedSnapshot.On("Head").Return(func() *flow.Header { return s.sealedBlock.Header }, nil) @@ -234,12 +254,35 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { // Generate sent transaction with ref block of the current finalized block transaction := unittest.TransactionFixture() transaction.SetReferenceBlockID(s.finalizedBlock.ID()) + s.transactions.On("ByID", mock.AnythingOfType("flow.Identifier")).Return(&transaction.TransactionBody, nil) + col := flow.CollectionFromTransactions([]*flow.Transaction{&transaction}) guarantee := col.Guarantee() light := col.Light() txId := transaction.ID() + txResult := flow.LightTransactionResult{ + TransactionID: txId, + Failed: false, + ComputationUsed: 0, + } + + eventsForTx := unittest.EventsFixture(1, flow.EventAccountCreated) + eventMessages := make([]*entities.Event, 1) + for j, event := range eventsForTx { + eventMessages[j] = convert.EventToMessage(event) + } - expectedMsgIndexCounter := counters.NewMonotonousCounter(0) + s.events.On( + "ByBlockIDTransactionID", + mock.AnythingOfType("flow.Identifier"), + mock.AnythingOfType("flow.Identifier"), + ).Return(eventsForTx, nil) + + s.transactionResults.On( + "ByBlockIDTransactionID", + mock.AnythingOfType("flow.Identifier"), + mock.AnythingOfType("flow.Identifier"), + ).Return(&txResult, nil) // Create a special common function to read subscription messages from the channel and check converting it to transaction info // and check results for correctness @@ -250,21 +293,18 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { "channel closed while waiting for transaction info:\n\t- txID %x\n\t- blockID: %x \n\t- err: %v", txId, s.finalizedBlock.ID(), sub.Err()) - txInfo, ok := v.(*convert.TransactionSubscribeInfo) + txResults, ok := v.([]*accessapi.TransactionResult) require.True(s.T(), ok, "unexpected response type: %T", v) + require.Len(s.T(), txResults, 1) - assert.Equal(s.T(), txId, txInfo.ID) - assert.Equal(s.T(), expectedTxStatus, txInfo.Status) - - expectedMsgIndex := expectedMsgIndexCounter.Value() - assert.Equal(s.T(), expectedMsgIndex, txInfo.MessageIndex) - wasSet := expectedMsgIndexCounter.Set(expectedMsgIndex + 1) - require.True(s.T(), wasSet) + result := txResults[0] + assert.Equal(s.T(), txId, result.TransactionID) + assert.Equal(s.T(), expectedTxStatus, result.Status) }, time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) } // 1. 
Subscribe to transaction status and receive the first message with pending status - sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody) + sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody, entities.EventEncodingVersion_CCF_V0) checkNewSubscriptionMessage(sub, flow.TransactionStatusPending) // 2. Make transaction reference block sealed, and add a new finalized block that includes the transaction @@ -278,7 +318,6 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusHappyCase() { // 3. Add one more finalized block on top of the transaction block and add execution results to storage finalizedResult := unittest.ExecutionResultFixture(unittest.WithBlock(s.finalizedBlock)) s.resultsMap[s.finalizedBlock.ID()] = finalizedResult - s.addNewFinalizedBlock(s.finalizedBlock.Header, true) checkNewSubscriptionMessage(sub, flow.TransactionStatusExecuted) @@ -315,8 +354,6 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusExpired() { transaction.SetReferenceBlockID(s.finalizedBlock.ID()) txId := transaction.ID() - expectedMsgIndexCounter := counters.NewMonotonousCounter(0) - // Create a special common function to read subscription messages from the channel and check converting it to transaction info // and check results for correctness checkNewSubscriptionMessage := func(sub subscription.Subscription, expectedTxStatus flow.TransactionStatus) { @@ -326,21 +363,18 @@ func (s *TransactionStatusSuite) TestSubscribeTransactionStatusExpired() { "channel closed while waiting for transaction info:\n\t- txID %x\n\t- blockID: %x \n\t- err: %v", txId, s.finalizedBlock.ID(), sub.Err()) - txInfo, ok := v.(*convert.TransactionSubscribeInfo) + txResults, ok := v.([]*accessapi.TransactionResult) require.True(s.T(), ok, "unexpected response type: %T", v) + require.Len(s.T(), txResults, 1) - assert.Equal(s.T(), txId, txInfo.ID) - assert.Equal(s.T(), expectedTxStatus, txInfo.Status) - - expectedMsgIndex := expectedMsgIndexCounter.Value() - assert.Equal(s.T(), expectedMsgIndex, txInfo.MessageIndex) - wasSet := expectedMsgIndexCounter.Set(expectedMsgIndex + 1) - require.True(s.T(), wasSet) + result := txResults[0] + assert.Equal(s.T(), txId, result.TransactionID) + assert.Equal(s.T(), expectedTxStatus, result.Status) }, time.Second, fmt.Sprintf("timed out waiting for transaction info:\n\t- txID: %x\n\t- blockID: %x", txId, s.finalizedBlock.ID())) } // Subscribe to transaction status and receive the first message with pending status - sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody) + sub := s.backend.SubscribeTransactionStatuses(ctx, &transaction.TransactionBody, entities.EventEncodingVersion_CCF_V0) checkNewSubscriptionMessage(sub, flow.TransactionStatusPending) // Generate 600 blocks without transaction included and check, that transaction still pending diff --git a/engine/common/rpc/convert/transactions.go b/engine/common/rpc/convert/transactions.go index 6b92f419fdd..221f41b0936 100644 --- a/engine/common/rpc/convert/transactions.go +++ b/engine/common/rpc/convert/transactions.go @@ -1,29 +1,11 @@ package convert import ( - "github.com/onflow/flow/protobuf/go/flow/access" "github.com/onflow/flow/protobuf/go/flow/entities" "github.com/onflow/flow-go/model/flow" ) -// TransactionSubscribeInfo represents information about a subscribed transaction. -// It contains the ID of the transaction, its status, and the index of the associated message. 
-type TransactionSubscribeInfo struct {
-	ID           flow.Identifier
-	Status       flow.TransactionStatus
-	MessageIndex uint64
-}
-
-// TransactionSubscribeInfoToMessage converts a TransactionSubscribeInfo struct to a protobuf message
-func TransactionSubscribeInfoToMessage(data *TransactionSubscribeInfo) *access.SendAndSubscribeTransactionStatusesResponse {
-	return &access.SendAndSubscribeTransactionStatusesResponse{
-		Id:           data.ID[:],
-		Status:       entities.TransactionStatus(data.Status),
-		MessageIndex: data.MessageIndex,
-	}
-}
-
 // TransactionToMessage converts a flow.TransactionBody to a protobuf message
 func TransactionToMessage(tb flow.TransactionBody) *entities.Transaction {
 	proposalKeyMessage := &entities.Transaction_ProposalKey{
diff --git a/engine/execution/ingestion/core.go b/engine/execution/ingestion/core.go
new file mode 100644
index 00000000000..ae53a0a14ba
--- /dev/null
+++ b/engine/execution/ingestion/core.go
@@ -0,0 +1,444 @@
+package ingestion
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/flow-go/engine"
+	"github.com/onflow/flow-go/engine/execution"
+	"github.com/onflow/flow-go/engine/execution/ingestion/block_queue"
+	"github.com/onflow/flow-go/engine/execution/ingestion/stop"
+	"github.com/onflow/flow-go/engine/execution/state"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/mempool/entity"
+	"github.com/onflow/flow-go/storage"
+	"github.com/onflow/flow-go/utils/logging"
+)
+
+// Core connects the execution components:
+// - when it receives blocks and collections, it forwards them to the block queue;
+// - when the block queue decides to execute blocks, it forwards them to the executor for execution;
+// - when the block queue decides to fetch missing collections, it forwards them to the collection fetcher;
+// - when a block is executed, it notifies the block queue and forwards the results to the execution state to be saved.
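+//
+// In outline (a sketch derived from the methods in this file):
+//
+//	OnBlock -> throttle.OnBlock -> processables channel -> blockQueue.HandleBlock
+//	OnCollection -> blockQueue.HandleCollection
+//	blockQueue -> executables -> executor.ExecuteBlock -> execState.SaveExecutionResults
+//	blockQueue -> missing collections -> collectionFetcher.FetchCollection
+//	after a result is saved -> blockQueue.OnBlockExecuted, which may release
+//	child blocks as new executables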
+type Core struct {
+	unit *engine.Unit // for async block execution
+
+	log zerolog.Logger
+
+	// state machine
+	blockQueue  *block_queue.BlockQueue
+	throttle    Throttle // for throttling blocks to be added to the block queue
+	execState   state.ExecutionState
+	stopControl *stop.StopControl // decide whether to execute a block or not
+
+	// data storage
+	headers     storage.Headers
+	blocks      storage.Blocks
+	collections storage.Collections
+
+	// computation, data fetching, events
+	executor          BlockExecutor
+	collectionFetcher CollectionFetcher
+	eventConsumer     EventConsumer
+}
+
+type Throttle interface {
+	Init(processables chan<- flow.Identifier) error
+	OnBlock(blockID flow.Identifier) error
+	OnBlockExecuted(blockID flow.Identifier, height uint64) error
+}
+
+type BlockExecutor interface {
+	ExecuteBlock(ctx context.Context, block *entity.ExecutableBlock) (*execution.ComputationResult, error)
+}
+
+type EventConsumer interface {
+	BeforeComputationResultSaved(ctx context.Context, result *execution.ComputationResult)
+	OnComputationResultSaved(ctx context.Context, result *execution.ComputationResult) string
+}
+
+func NewCore(
+	logger zerolog.Logger,
+	throttle Throttle,
+	execState state.ExecutionState,
+	stopControl *stop.StopControl,
+	headers storage.Headers,
+	blocks storage.Blocks,
+	collections storage.Collections,
+	executor BlockExecutor,
+	collectionFetcher CollectionFetcher,
+	eventConsumer EventConsumer,
+) *Core {
+	return &Core{
+		log:               logger.With().Str("engine", "ingestion_core").Logger(),
+		unit:              engine.NewUnit(),
+		throttle:          throttle,
+		execState:         execState,
+		blockQueue:        block_queue.NewBlockQueue(logger),
+		stopControl:       stopControl,
+		headers:           headers,
+		blocks:            blocks,
+		collections:       collections,
+		executor:          executor,
+		collectionFetcher: collectionFetcher,
+		eventConsumer:     eventConsumer,
+	}
+}
+
+func (e *Core) Ready() <-chan struct{} {
+	if e.stopControl.IsExecutionStopped() {
+		return e.unit.Ready()
+	}
+
+	e.launchWorkerToConsumeThrottledBlocks()
+
+	return e.unit.Ready()
+}
+
+func (e *Core) Done() <-chan struct{} {
+	return e.unit.Done()
+}
+
+func (e *Core) OnBlock(header *flow.Header, qc *flow.QuorumCertificate) {
+	// qc.BlockID is equivalent to header.ID()
+	err := e.throttle.OnBlock(qc.BlockID)
+	if err != nil {
+		e.log.Fatal().Err(err).Msgf("error processing block %v (qc.BlockID: %v, blockID: %v)",
+			header.Height, qc.BlockID, header.ID())
+	}
+}
+
+func (e *Core) OnCollection(col *flow.Collection) {
+	err := e.onCollection(col)
+	if err != nil {
+		e.log.Fatal().Err(err).Msgf("error processing collection: %v", col.ID())
+	}
+}
+
+func (e *Core) launchWorkerToConsumeThrottledBlocks() {
+	// processables are throttled blocks
+	processables := make(chan flow.Identifier, 10000)
+
+	// run a worker in the background to consume the processable (throttled) blocks
+	// and forward them to the block queue for processing
+	e.unit.Launch(func() {
+		e.log.Info().Msgf("starting worker to consume throttled blocks")
+		err := e.forwardProcessableToHandler(processables)
+		if err != nil {
+			e.log.Fatal().Err(err).Msg("failed to process block")
+		}
+	})
+
+	e.log.Info().Msg("initializing throttle engine")
+
+	err := e.throttle.Init(processables)
+	if err != nil {
+		e.log.Fatal().Err(err).Msg("failed to initialize throttle engine")
+	}
+
+	e.log.Info().Msgf("throttle engine initialized")
+}
+
+func (e *Core) forwardProcessableToHandler(
+	processables <-chan flow.Identifier,
+) error {
+	for blockID := range processables {
+		err := e.onProcessableBlock(blockID)
+		if err != nil {
+			return fmt.Errorf("could not process block: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (e *Core) onProcessableBlock(blockID flow.Identifier) error {
+	header, err := e.headers.ByBlockID(blockID)
+	if err != nil {
+		return fmt.Errorf("could not get block: %w", err)
+	}
+
+	// skip the block if the stop control says it should not be executed
+	if !e.stopControl.ShouldExecuteBlock(header) {
+		return nil
+	}
+
+	executed, err := e.execState.IsBlockExecuted(header.Height, blockID)
+	if err != nil {
+		return fmt.Errorf("could not check whether block %v is executed: %w", blockID, err)
+	}
+
+	if executed {
+		e.log.Debug().Msg("block has been executed already")
+		return nil
+	}
+
+	block, err := e.blocks.ByID(blockID)
+	if err != nil {
+		return fmt.Errorf("failed to get block %s: %w", blockID, err)
+	}
+
+	missingColls, executables, err := e.enqueueBlock(block, blockID)
+	if err != nil {
+		return fmt.Errorf("failed to enqueue block %v: %w", blockID, err)
+	}
+
+	e.executeConcurrently(executables)
+
+	err = e.fetch(missingColls)
+	if err != nil {
+		return fmt.Errorf("failed to fetch missing collections: %w", err)
+	}
+
+	return nil
+}
+
+func (e *Core) enqueueBlock(block *flow.Block, blockID flow.Identifier) (
+	[]*block_queue.MissingCollection,
+	[]*entity.ExecutableBlock,
+	error,
+) {
+	lg := e.log.With().
+		Hex("block_id", blockID[:]).
+		Uint64("height", block.Header.Height).
+		Logger()
+
+	lg.Info().Msg("handling new block")
+
+	parentCommitment, err := e.execState.StateCommitmentByBlockID(block.Header.ParentID)
+
+	if err == nil {
+		// the parent block is an executed block.
+		missingColls, executables, err := e.blockQueue.HandleBlock(block, &parentCommitment)
+		if err != nil {
+			return nil, nil, fmt.Errorf("unexpected error while adding block to block queue: %w", err)
+		}
+
+		lg.Info().Bool("parent_is_executed", true).
+			Int("missing_col", len(missingColls)).
+			Int("executables", len(executables)).
+			Msgf("block is enqueued")
+
+		return missingColls, executables, nil
+	}
+
+	// handle exception
+	if !errors.Is(err, storage.ErrNotFound) {
+		return nil, nil, fmt.Errorf("failed to get state commitment for parent block %v of block %v (height: %v): %w",
+			block.Header.ParentID, blockID, block.Header.Height, err)
+	}
+
+	// the parent block is an unexecuted block.
+	// we can enqueue the block without providing the state commitment
+	missingColls, executables, err := e.blockQueue.HandleBlock(block, nil)
+	if err != nil {
+		if !errors.Is(err, block_queue.ErrMissingParent) {
+			return nil, nil, fmt.Errorf("unexpected error while adding block to block queue: %w", err)
+		}
+
+		// if the parent is missing, there are two possibilities:
+		// 1) the parent was never enqueued to the block queue, or
+		// 2) the parent was enqueued, but it has since been executed and removed from the block queue.
+		// Only 2) is actually possible here: forwardProcessableToHandler guarantees that a block is
+		// always enqueued before its child, so by the time HandleBlock is called with a block,
+		// HandleBlock must already have been called with its parent, ruling out 1).
+		// 2) can happen because our observation that the parent commitment is missing may be
+		// outdated: OnBlockExecuted may run concurrently in a different thread, after we read
+		// the parent commit and before HandleBlock was called. Therefore, we should re-enqueue
+		// the block with the parent commit.
+		// In other words, after the HandleBlock call we must check again whether the
+		// parent block has been executed.
+		lg.Warn().Msgf(
+			"block is missing parent block, re-enqueueing %v (parent: %v)",
+			blockID, block.Header.ParentID,
+		)
+
+		parentCommitment, err := e.execState.StateCommitmentByBlockID(block.Header.ParentID)
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to get parent state commitment when re-enqueueing block %v (parent: %v): %w",
+				blockID, block.Header.ParentID, err)
+		}
+
+		// now re-enqueue the block with the parent commit
+		missing, execs, err := e.blockQueue.HandleBlock(block, &parentCommitment)
+		if err != nil {
+			return nil, nil, fmt.Errorf("unexpected error while re-enqueueing block to block queue: %w", err)
+		}
+
+		missingColls = flow.Deduplicate(append(missingColls, missing...))
+		executables = flow.Deduplicate(append(executables, execs...))
+	}
+
+	lg.Info().Bool("parent_is_executed", false).
+		Int("missing_col", len(missingColls)).
+		Int("executables", len(executables)).
+		Msgf("block is enqueued")
+
+	return missingColls, executables, nil
+}
+
+func (e *Core) onBlockExecuted(
+	block *entity.ExecutableBlock,
+	computationResult *execution.ComputationResult,
+	startedAt time.Time,
+) error {
+	commit := computationResult.CurrentEndState()
+
+	wg := sync.WaitGroup{}
+	wg.Add(1)
+	defer wg.Wait()
+
+	go func() {
+		defer wg.Done()
+		e.eventConsumer.BeforeComputationResultSaved(e.unit.Ctx(), computationResult)
+	}()
+
+	err := e.execState.SaveExecutionResults(e.unit.Ctx(), computationResult)
+	if err != nil {
+		return fmt.Errorf("cannot persist execution state: %w", err)
+	}
+
+	// must call OnBlockExecuted AFTER saving the execution result to storage,
+	// because when enqueuing a block, we rely on execState.StateCommitmentByBlockID
+	// to determine whether a block has been executed or not.
+	executables, err := e.blockQueue.OnBlockExecuted(block.ID(), commit)
+	if err != nil {
+		return fmt.Errorf("unexpected error while marking block as executed: %w", err)
+	}
+
+	e.stopControl.OnBlockExecuted(block.Block.Header)
+
+	// notify the event consumer so that it can do tasks
+	// such as broadcasting or uploading the result
+	logs := e.eventConsumer.OnComputationResultSaved(e.unit.Ctx(), computationResult)
+
+	receipt := computationResult.ExecutionReceipt
+	e.log.Info().
+		Hex("block_id", logging.Entity(block)).
+		Uint64("height", block.Block.Header.Height).
+		Int("collections", len(block.CompleteCollections)).
+		Hex("parent_block", block.Block.Header.ParentID[:]).
+		Int("guarantees", len(block.Block.Payload.Guarantees)).
+		Hex("start_state", block.StartState[:]).
+		Hex("final_state", commit[:]).
+		Hex("receipt_id", logging.Entity(receipt)).
+		Hex("result_id", logging.Entity(receipt.ExecutionResult)).
+		Hex("execution_data_id", receipt.ExecutionResult.ExecutionDataID[:]).
+		Bool("state_changed", commit != *block.StartState).
+		Uint64("num_txs", nonSystemTransactionCount(receipt.ExecutionResult)).
+		Int64("timeSpentInMS", time.Since(startedAt).Milliseconds()).
+		Str("logs", logs). // broadcasted
+		Msgf("block executed")
+
+	// we ensure that child blocks are only executed after the execution result of
+	// their parent block has been successfully saved to storage.
+	// this guarantees OnBlockExecuted is not called with blocks out of order, e.g.
+	// OnBlockExecuted(childBlock) being called before OnBlockExecuted(parentBlock).
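+	// For example, if blockQueue.OnBlockExecuted(parent) were called before the result
+	// was saved, the parent would already be removed from the queue while its commitment
+	// is not yet readable via execState.StateCommitmentByBlockID, so a newly arriving
+	// child would take the ErrMissingParent re-enqueue path in enqueueBlock and fail to
+	// find the parent commit.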
+	e.executeConcurrently(executables)
+
+	return nil
+}
+
+func (e *Core) onCollection(col *flow.Collection) error {
+	// the EN might request a collection from multiple collection nodes,
+	// and therefore might receive multiple copies of the same collection.
+	// we only need to store it once.
+	err := storeCollectionIfMissing(e.collections, col)
+	if err != nil {
+		return fmt.Errorf("failed to store collection %v: %w", col.ID(), err)
+	}
+
+	// even if the collection is a duplicate, it's still good to add it to the block queue,
+	// because chances are the collection was stored before a restart and
+	// is not in the queue after the restart.
+	// adding it to the queue ensures we don't miss any collection.
+	// since the queue's state is in memory, processing a duplicate collection should be
+	// a fast no-op, and won't return any executable blocks.
+	executables, err := e.blockQueue.HandleCollection(col)
+	if err != nil {
+		return fmt.Errorf("unexpected error while adding collection to block queue: %w", err)
+	}
+
+	e.executeConcurrently(executables)
+
+	return nil
+}
+
+func storeCollectionIfMissing(collections storage.Collections, col *flow.Collection) error {
+	_, err := collections.ByID(col.ID())
+	if err != nil {
+		if !errors.Is(err, storage.ErrNotFound) {
+			return fmt.Errorf("failed to get collection %v: %w", col.ID(), err)
+		}
+
+		err := collections.Store(col)
+		if err != nil {
+			return fmt.Errorf("failed to store collection %v: %w", col.ID(), err)
+		}
+	}
+
+	return nil
+}
+
+// executeConcurrently executes the given blocks concurrently
+func (e *Core) executeConcurrently(executables []*entity.ExecutableBlock) {
+	for _, executable := range executables {
+		// capture the loop variable so each launched goroutine executes its own block
+		func(executable *entity.ExecutableBlock) {
+			e.unit.Launch(func() {
+				e.log.Info().Msgf("starting worker to execute block")
+				err := e.execute(executable)
+				if err != nil {
+					e.log.Error().Err(err).Msgf("failed to execute block %v", executable.Block.ID())
+				}
+			})
+		}(executable)
+	}
+}
+
+func (e *Core) execute(executable *entity.ExecutableBlock) error {
+	if !e.stopControl.ShouldExecuteBlock(executable.Block.Header) {
+		return nil
+	}
+
+	e.log.Info().
+		Hex("block_id", logging.Entity(executable)).
+		Uint64("height", executable.Block.Header.Height).
+		Int("collections", len(executable.CompleteCollections)).
+ Msgf("executing block") + + startedAt := time.Now() + + result, err := e.executor.ExecuteBlock(e.unit.Ctx(), executable) + if err != nil { + return fmt.Errorf("failed to execute block %v: %w", executable.Block.ID(), err) + } + + err = e.onBlockExecuted(executable, result, startedAt) + if err != nil { + return fmt.Errorf("failed to handle execution result of block %v: %w", executable.Block.ID(), err) + } + + return nil +} + +func (e *Core) fetch(missingColls []*block_queue.MissingCollection) error { + for _, col := range missingColls { + err := e.collectionFetcher.FetchCollection(col.BlockID, col.Height, col.Guarantee) + if err != nil { + return fmt.Errorf("failed to fetch collection %v for block %v (height: %v): %w", + col.Guarantee.ID(), col.BlockID, col.Height, err) + } + } + + if len(missingColls) > 0 { + e.collectionFetcher.Force() + } + + return nil +} diff --git a/engine/execution/ingestion/throttle.go b/engine/execution/ingestion/throttle.go new file mode 100644 index 00000000000..a92566b6660 --- /dev/null +++ b/engine/execution/ingestion/throttle.go @@ -0,0 +1,249 @@ +package ingestion + +import ( + "fmt" + "sync" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine/execution/state" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// DefaultCatchUpThreshold is the number of blocks that if the execution is far behind +// the finalization then we will only lazy load the next unexecuted finalized +// blocks until the execution has caught up +const DefaultCatchUpThreshold = 500 + +// BlockThrottle is a helper struct that helps throttle the unexecuted blocks to be sent +// to the block queue for execution. +// It is useful for case when execution is falling far behind the finalization, in which case +// we want to throttle the blocks to be sent to the block queue for fetching data to execute +// them. Without throttle, the block queue will be flooded with blocks, and the network +// will be flooded with requests fetching collections, and the EN might quickly run out of memory. 
+type BlockThrottle struct { + // config + threshold int // catch up threshold + + // state + mu sync.Mutex + executed uint64 + finalized uint64 + + // notifier + processables chan<- flow.Identifier + + // dependencies + log zerolog.Logger + state protocol.State + headers storage.Headers +} + +func NewBlockThrottle( + log zerolog.Logger, + state protocol.State, + execState state.ExecutionState, + headers storage.Headers, + catchupThreshold int, +) (*BlockThrottle, error) { + finalizedHead, err := state.Final().Head() + if err != nil { + return nil, fmt.Errorf("could not get finalized head: %w", err) + } + + finalized := finalizedHead.Height + // TODO: implement GetHighestFinalizedExecuted for execution state when storehouse + // is not used + executed := execState.GetHighestFinalizedExecuted() + + if executed > finalized { + return nil, fmt.Errorf("executed finalized %v is greater than finalized %v", executed, finalized) + } + + return &BlockThrottle{ + threshold: catchupThreshold, + executed: executed, + finalized: finalized, + + log: log.With().Str("component", "block_throttle").Logger(), + state: state, + headers: headers, + }, nil +} + +// inited returns true if the throttle has been inited +func (c *BlockThrottle) inited() bool { + return c.processables != nil +} + +func (c *BlockThrottle) Init(processables chan<- flow.Identifier) error { + c.mu.Lock() + defer c.mu.Unlock() + if c.inited() { + return fmt.Errorf("throttle already inited") + } + + c.processables = processables + + var unexecuted []flow.Identifier + var err error + if caughtUp(c.executed, c.finalized, c.threshold) { + unexecuted, err = findAllUnexecutedBlocks(c.state, c.headers, c.executed, c.finalized) + if err != nil { + return err + } + c.log.Info().Msgf("loaded %d unexecuted blocks", len(unexecuted)) + } else { + unexecuted, err = findFinalized(c.state, c.headers, c.executed, c.executed+uint64(c.threshold)) + if err != nil { + return err + } + c.log.Info().Msgf("loaded %d unexecuted finalized blocks", len(unexecuted)) + } + + c.log.Info().Msgf("throttle initializing with %d unexecuted blocks", len(unexecuted)) + + // the ingestion core engine must have initialized the 'processables' with 10000 (default) buffer size, + // and the 'unexecuted' will only contain up to DefaultCatchUpThreshold (500) blocks, + // so pushing all the unexecuted to processables won't be blocked. + for _, id := range unexecuted { + c.processables <- id + } + + c.log.Info().Msgf("throttle initialized with %d unexecuted blocks", len(unexecuted)) + + return nil +} + +func (c *BlockThrottle) OnBlockExecuted(_ flow.Identifier, executed uint64) error { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.inited() { + return fmt.Errorf("throttle not inited") + } + + // we have already caught up, ignore + if c.caughtUp() { + return nil + } + + // the execution is still far behind from finalization + c.executed = executed + if !c.caughtUp() { + return nil + } + + c.log.Info().Uint64("executed", executed).Uint64("finalized", c.finalized). 
+ Msgf("execution has caught up, processing remaining unexecuted blocks") + + // if the execution have just caught up close enough to the latest finalized blocks, + // then process all unexecuted blocks, including finalized unexecuted and pending unexecuted + unexecuted, err := findAllUnexecutedBlocks(c.state, c.headers, c.executed, c.finalized) + if err != nil { + return fmt.Errorf("could not find unexecuted blocks for processing: %w", err) + } + + c.log.Info().Int("unexecuted", len(unexecuted)).Msgf("forwarding unexecuted blocks") + + for _, id := range unexecuted { + c.processables <- id + } + + c.log.Info().Msgf("all unexecuted blocks have been processed") + + return nil +} + +func (c *BlockThrottle) OnBlock(blockID flow.Identifier) error { + c.mu.Lock() + defer c.mu.Unlock() + c.log.Debug().Msgf("recieved block (%v)", blockID) + + if !c.inited() { + return fmt.Errorf("throttle not inited") + } + + // ignore the block if has not caught up. + if !c.caughtUp() { + return nil + } + + // if has caught up, then process the block + c.processables <- blockID + c.log.Debug().Msgf("processed block (%v)", blockID) + + return nil +} + +func (c *BlockThrottle) OnBlockFinalized(lastFinalized *flow.Header) { + c.mu.Lock() + defer c.mu.Unlock() + if !c.inited() { + return + } + + if c.caughtUp() { + return + } + + if lastFinalized.Height <= c.finalized { + return + } + + c.finalized = lastFinalized.Height +} + +func (c *BlockThrottle) caughtUp() bool { + return caughtUp(c.executed, c.finalized, c.threshold) +} + +func caughtUp(executed, finalized uint64, threshold int) bool { + return finalized <= executed+uint64(threshold) +} + +func findFinalized(state protocol.State, headers storage.Headers, lastExecuted, finalizedHeight uint64) ([]flow.Identifier, error) { + // get finalized height + finalized := state.AtHeight(finalizedHeight) + final, err := finalized.Head() + if err != nil { + return nil, fmt.Errorf("could not get finalized block: %w", err) + } + + // dynamically bootstrapped execution node will have highest finalized executed as sealed root, + // which is lower than finalized root. so we will reload blocks from + // [sealedRoot.Height + 1, finalizedRoot.Height] and execute them on startup. + unexecutedFinalized := make([]flow.Identifier, 0) + + // starting from the first unexecuted block, go through each unexecuted and finalized block + // reload its block to execution queues + // loading finalized blocks + for height := lastExecuted + 1; height <= final.Height; height++ { + finalizedID, err := headers.BlockIDByHeight(height) + if err != nil { + return nil, fmt.Errorf("could not get block ID by height %v: %w", height, err) + } + + unexecutedFinalized = append(unexecutedFinalized, finalizedID) + } + + return unexecutedFinalized, nil +} + +func findAllUnexecutedBlocks(state protocol.State, headers storage.Headers, lastExecuted, finalizedHeight uint64) ([]flow.Identifier, error) { + unexecutedFinalized, err := findFinalized(state, headers, lastExecuted, finalizedHeight) + if err != nil { + return nil, fmt.Errorf("could not find finalized unexecuted blocks: %w", err) + } + + // loaded all pending blocks + pendings, err := state.AtHeight(finalizedHeight).Descendants() + if err != nil { + return nil, fmt.Errorf("could not get descendants of finalized block: %w", err) + } + + unexecuted := append(unexecutedFinalized, pendings...) 
+ return unexecuted, nil +} diff --git a/engine/execution/ingestion/throttle_test.go b/engine/execution/ingestion/throttle_test.go new file mode 100644 index 00000000000..a2d8911b109 --- /dev/null +++ b/engine/execution/ingestion/throttle_test.go @@ -0,0 +1,16 @@ +package ingestion + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCaughtUp(t *testing.T) { + require.True(t, caughtUp(100, 200, 500)) + require.True(t, caughtUp(100, 100, 500)) + require.True(t, caughtUp(100, 600, 500)) + + require.False(t, caughtUp(100, 601, 500)) + require.False(t, caughtUp(100, 602, 500)) +} diff --git a/fvm/bootstrap.go b/fvm/bootstrap.go index 7ce37e0828b..858fea9d9b6 100644 --- a/fvm/bootstrap.go +++ b/fvm/bootstrap.go @@ -392,7 +392,7 @@ func (b *bootstrapExecutor) Execute() error { b.setStakingAllowlist(service, b.identities.NodeIDs()) // sets up the EVM environment - b.setupEVM(service, fungibleToken, flowToken) + b.setupEVM(service, nonFungibleToken, fungibleToken, flowToken) return nil } @@ -806,7 +806,7 @@ func (b *bootstrapExecutor) setStakingAllowlist( panicOnMetaInvokeErrf("failed to set staking allow-list: %s", txError, err) } -func (b *bootstrapExecutor) setupEVM(serviceAddress, fungibleTokenAddress, flowTokenAddress flow.Address) { +func (b *bootstrapExecutor) setupEVM(serviceAddress, nonFungibleTokenAddress, fungibleTokenAddress, flowTokenAddress flow.Address) { if b.setupEVMEnabled { // account for storage // we dont need to deploy anything to this account, but it needs to exist @@ -817,7 +817,7 @@ func (b *bootstrapExecutor) setupEVM(serviceAddress, fungibleTokenAddress, flowT // deploy the EVM contract to the service account tx := blueprints.DeployContractTransaction( serviceAddress, - stdlib.ContractCode(flowTokenAddress), + stdlib.ContractCode(nonFungibleTokenAddress, fungibleTokenAddress, flowTokenAddress), stdlib.ContractName, ) // WithEVMEnabled should only be used after we create an account for storage diff --git a/fvm/evm/emulator/emulator.go b/fvm/evm/emulator/emulator.go index feff252deaa..9e165c05c0b 100644 --- a/fvm/evm/emulator/emulator.go +++ b/fvm/evm/emulator/emulator.go @@ -39,6 +39,7 @@ func newConfig(ctx types.BlockContext) *Config { return NewConfig( WithChainID(ctx.ChainID), WithBlockNumber(new(big.Int).SetUint64(ctx.BlockNumber)), + WithBlockTime(ctx.BlockTimestamp), WithCoinbase(ctx.GasFeeCollector.ToCommon()), WithDirectCallBaseGasUsage(ctx.DirectCallBaseGasUsage), WithExtraPrecompiles(ctx.ExtraPrecompiles), diff --git a/fvm/evm/evm.go b/fvm/evm/evm.go index 44983524fcb..a29ab9f67cf 100644 --- a/fvm/evm/evm.go +++ b/fvm/evm/evm.go @@ -27,7 +27,6 @@ func SetupEnvironment( chainID flow.ChainID, fvmEnv environment.Environment, runtimeEnv runtime.Environment, - service flow.Address, flowToken flow.Address, ) error { evmStorageAccountAddress, err := StorageAccountAddress(chainID) @@ -42,15 +41,27 @@ func SetupEnvironment( backend := backends.NewWrappedEnvironment(fvmEnv) - em := evm.NewEmulator(backend, evmStorageAccountAddress) + emulator := evm.NewEmulator(backend, evmStorageAccountAddress) - bs := handler.NewBlockStore(backend, evmStorageAccountAddress) + blockStore := handler.NewBlockStore(backend, evmStorageAccountAddress) - aa := handler.NewAddressAllocator() + addressAllocator := handler.NewAddressAllocator() - contractHandler := handler.NewContractHandler(chainID, evmContractAccountAddress, common.Address(flowToken), bs, aa, backend, em) + contractHandler := handler.NewContractHandler( + chainID, + evmContractAccountAddress, + 
common.Address(flowToken), + blockStore, + addressAllocator, + backend, + emulator, + ) - stdlib.SetupEnvironment(runtimeEnv, contractHandler, evmContractAccountAddress) + stdlib.SetupEnvironment( + runtimeEnv, + contractHandler, + evmContractAccountAddress, + ) return nil } diff --git a/fvm/evm/evm_test.go b/fvm/evm/evm_test.go index 7d5b0bdc817..ecbd3b88133 100644 --- a/fvm/evm/evm_test.go +++ b/fvm/evm/evm_test.go @@ -249,6 +249,69 @@ func TestEVMRun(t *testing.T) { }) } +func TestEVMBlockData(t *testing.T) { + t.Parallel() + chain := flow.Emulator.Chain() + sc := systemcontracts.SystemContractsForChain(chain.ChainID()) + RunWithNewEnvironment(t, + chain, func( + ctx fvm.Context, + vm fvm.VM, + snapshot snapshot.SnapshotTree, + testContract *TestContract, + testAccount *EOATestAccount, + ) { + + // query the block timestamp + code := []byte(fmt.Sprintf( + ` + import EVM from %s + access(all) + fun main(tx: [UInt8], coinbaseBytes: [UInt8; 20]): EVM.Result { + let coinbase = EVM.EVMAddress(bytes: coinbaseBytes) + return EVM.run(tx: tx, coinbase: coinbase) + } + `, + sc.EVMContract.Address.HexWithPrefix(), + )) + + innerTxBytes := testAccount.PrepareSignAndEncodeTx(t, + testContract.DeployedAt.ToCommon(), + testContract.MakeCallData(t, "blockTime"), + big.NewInt(0), + uint64(100_000), + big.NewInt(0), + ) + + coinbase := cadence.NewArray( + ConvertToCadence(testAccount.Address().Bytes()), + ).WithType(stdlib.EVMAddressBytesCadenceType) + + innerTx := cadence.NewArray( + ConvertToCadence(innerTxBytes), + ).WithType(stdlib.EVMTransactionBytesCadenceType) + + script := fvm.Script(code).WithArguments( + json.MustEncode(innerTx), + json.MustEncode(coinbase), + ) + + _, output, err := vm.Run( + ctx, + script, + snapshot) + require.NoError(t, err) + require.NoError(t, output.Err) + + res, err := stdlib.ResultSummaryFromEVMResultValue(output.Value) + require.NoError(t, err) + require.Equal(t, types.StatusSuccessful, res.Status) + require.Equal(t, types.ErrCodeNoError, res.ErrorCode) + require.Equal(t, ctx.BlockHeader.Timestamp.Unix(), new(big.Int).SetBytes(res.ReturnedValue).Int64()) + + }) +} + func TestEVMAddressDeposit(t *testing.T) { t.Parallel() @@ -1058,6 +1121,7 @@ func RunWithNewEnvironment( fvm.WithAuthorizationChecksEnabled(false), fvm.WithSequenceNumberCheckAndIncrementEnabled(false), fvm.WithEntropyProvider(testutil.EntropyProviderFixture(nil)), + fvm.WithBlocks(blocks), } ctx := fvm.NewContext(opts...) 
@@ -1077,7 +1141,13 @@ func RunWithNewEnvironment( snapshotTree = snapshotTree.Append(executionSnapshot) - f(fvm.NewContextFromParent(ctx, fvm.WithEVMEnabled(true)), vm, snapshotTree, testContract, testAccount) + f( + fvm.NewContextFromParent(ctx, fvm.WithEVMEnabled(true)), + vm, + snapshotTree, + testContract, + testAccount, + ) }) }) }) diff --git a/fvm/evm/handler/blockstore.go b/fvm/evm/handler/blockstore.go index a93fc9bec88..32c282a0d84 100644 --- a/fvm/evm/handler/blockstore.go +++ b/fvm/evm/handler/blockstore.go @@ -1,6 +1,9 @@ package handler import ( + "fmt" + "time" + gethCommon "github.com/onflow/go-ethereum/common" "github.com/onflow/flow-go/fvm/evm/types" @@ -35,6 +38,19 @@ func (bs *BlockStore) BlockProposal() (*types.Block, error) { return bs.blockProposal, nil } + cadenceHeight, err := bs.backend.GetCurrentBlockHeight() + if err != nil { + return nil, err + } + + cadenceBlock, found, err := bs.backend.GetBlockAtHeight(cadenceHeight) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("cadence block not found") + } + lastExecutedBlock, err := bs.LatestBlock() if err != nil { return nil, err @@ -45,8 +61,14 @@ func (bs *BlockStore) BlockProposal() (*types.Block, error) { return nil, err } - bs.blockProposal = types.NewBlock(parentHash, + // cadence block timestamp is unix nanoseconds but evm blocks + // expect timestamps in unix seconds so we convert here + timestamp := uint64(cadenceBlock.Timestamp / int64(time.Second)) + + bs.blockProposal = types.NewBlock( + parentHash, lastExecutedBlock.Height+1, + timestamp, lastExecutedBlock.TotalSupply, gethCommon.Hash{}, make([]gethCommon.Hash, 0), diff --git a/fvm/evm/handler/handler.go b/fvm/evm/handler/handler.go index 111376f223d..663c71c8c03 100644 --- a/fvm/evm/handler/handler.go +++ b/fvm/evm/handler/handler.go @@ -253,6 +253,7 @@ func (h *ContractHandler) getBlockContext() (types.BlockContext, error) { return types.BlockContext{ ChainID: types.EVMChainIDFromFlowChainID(h.flowChainID), BlockNumber: bp.Height, + BlockTimestamp: bp.Timestamp, DirectCallBaseGasUsage: types.DefaultDirectCallBaseGasUsage, GetHashFunc: func(n uint64) gethCommon.Hash { hash, err := h.blockStore.BlockHash(n) diff --git a/fvm/evm/stdlib/contract.cdc b/fvm/evm/stdlib/contract.cdc index c3b7fb3a52f..048c4f7ee7c 100644 --- a/fvm/evm/stdlib/contract.cdc +++ b/fvm/evm/stdlib/contract.cdc @@ -1,4 +1,6 @@ import Crypto +import "NonFungibleToken" +import "FungibleToken" import "FlowToken" access(all) @@ -19,6 +21,19 @@ contract EVM { access(all) event FLOWTokensWithdrawn(addressBytes: [UInt8; 20], amount: UFix64) + /// BridgeAccessorUpdated is emitted when the BridgeAccessor Capability + /// is updated in the stored BridgeRouter along with identifying + /// information about both. + access(all) + event BridgeAccessorUpdated( + routerType: Type, + routerUUID: UInt64, + routerAddress: Address, + accessorType: Type, + accessorUUID: UInt64, + accessorAddress: Address + ) + /// EVMAddress is an EVM-compatible address access(all) struct EVMAddress { @@ -288,6 +303,59 @@ contract EVM { value: value.attoflow ) as! 
Result } + + /// Bridges the given NFT to the EVM environment, requiring a Provider from which to withdraw a fee to fulfill + /// the bridge request + access(all) + fun depositNFT( + nft: @NonFungibleToken.NFT, + feeProvider: &{FungibleToken.Provider} + ) { + EVM.borrowBridgeAccessor().depositNFT(nft: <-nft, to: self.address(), feeProvider: feeProvider) + } + + /// Bridges the given NFT from the EVM environment, requiring a Provider from which to withdraw a fee to fulfill + /// the bridge request. Note: the caller should own the requested NFT in EVM + access(all) + fun withdrawNFT( + type: Type, + id: UInt256, + feeProvider: &{FungibleToken.Provider} + ): @NonFungibleToken.NFT { + return <- EVM.borrowBridgeAccessor().withdrawNFT( + caller: &self as &CadenceOwnedAccount, + type: type, + id: id, + feeProvider: feeProvider + ) + } + + /// Bridges the given Vault to the EVM environment, requiring a Provider from which to withdraw a fee to fulfill + /// the bridge request + access(all) + fun depositTokens( + vault: @FungibleToken.Vault, + feeProvider: &{FungibleToken.Provider} + ) { + EVM.borrowBridgeAccessor().depositTokens(vault: <-vault, to: self.address(), feeProvider: feeProvider) + } + + /// Bridges the given fungible tokens from the EVM environment, requiring a Provider from which to withdraw a + /// fee to fulfill the bridge request. Note: the caller should own the requested tokens & sufficient balance of + /// requested tokens in EVM + access(all) + fun withdrawTokens( + type: Type, + amount: UInt256, + feeProvider: &{FungibleToken.Provider} + ): @FungibleToken.Vault { + return <- EVM.borrowBridgeAccessor().withdrawTokens( + caller: &self as &CadenceOwnedAccount, + type: type, + amount: amount, + feeProvider: feeProvider + ) + } } /// Creates a new cadence owned account @@ -476,10 +544,14 @@ contract EVM { access(all) let totalSupply: Int - init(height: UInt64, hash: String, totalSupply: Int) { + access(all) + let timestamp: UInt64 + + init(height: UInt64, hash: String, totalSupply: Int, timestamp: UInt64) { self.height = height self.hash = hash self.totalSupply = totalSupply + self.timestamp = timestamp } } @@ -488,4 +560,66 @@ contract EVM { fun getLatestBlock(): EVMBlock { return InternalEVM.getLatestBlock() as! 
EVMBlock } + + /// Interface for a resource which acts as an entrypoint to the VM bridge + access(all) + resource interface BridgeAccessor { + + /// Endpoint enabling the bridging of an NFT to EVM + access(all) + fun depositNFT( + nft: @NonFungibleToken.NFT, + to: EVMAddress, + feeProvider: &{FungibleToken.Provider} + ) + + /// Endpoint enabling the bridging of an NFT from EVM + access(all) + fun withdrawNFT( + caller: &CadenceOwnedAccount, + type: Type, + id: UInt256, + feeProvider: &{FungibleToken.Provider} + ): @NonFungibleToken.NFT + + /// Endpoint enabling the bridging of a fungible token vault to EVM + access(all) + fun depositTokens( + vault: @FungibleToken.Vault, + to: EVMAddress, + feeProvider: &{FungibleToken.Provider} + ) + + /// Endpoint enabling the bridging of fungible tokens from EVM + access(all) + fun withdrawTokens( + caller: &CadenceOwnedAccount, + type: Type, + amount: UInt256, + feeProvider: &{FungibleToken.Provider} + ): @FungibleToken.Vault + } + + /// Interface which captures a Capability to the bridge Accessor, saving it within the BridgeRouter resource + access(all) + resource interface BridgeRouter { + + /// Returns a reference to the BridgeAccessor designated for internal bridge requests + access(all) view fun borrowBridgeAccessor(): &{BridgeAccessor} + + /// Sets the BridgeAccessor Capability in the BridgeRouter + access(all) fun setBridgeAccessor(_ accessor: Capability<&{BridgeAccessor}>) { + pre { + accessor.check(): "Invalid BridgeAccessor Capability provided" + } + } + } + + /// Returns a reference to the BridgeAccessor designated for internal bridge requests + access(self) + view fun borrowBridgeAccessor(): &{BridgeAccessor} { + return self.account.borrow<&{BridgeRouter}>(from: /storage/evmBridgeRouter) + ?.borrowBridgeAccessor() + ?? 
panic("Could not borrow reference to the EVM bridge") + } } diff --git a/fvm/evm/stdlib/contract.go b/fvm/evm/stdlib/contract.go index c9e62317a8b..75736781890 100644 --- a/fvm/evm/stdlib/contract.go +++ b/fvm/evm/stdlib/contract.go @@ -27,13 +27,24 @@ import ( //go:embed contract.cdc var contractCode string -var flowTokenImportPattern = regexp.MustCompile(`(?m)^import "FlowToken"\n`) +var nftImportPattern = regexp.MustCompile(`(?m)^import "NonFungibleToken"`) +var fungibleTokenImportPattern = regexp.MustCompile(`(?m)^import "FungibleToken"`) +var flowTokenImportPattern = regexp.MustCompile(`(?m)^import "FlowToken"`) -func ContractCode(flowTokenAddress flow.Address) []byte { - return []byte(flowTokenImportPattern.ReplaceAllString( +func ContractCode(nonFungibleTokenAddress, fungibleTokenAddress, flowTokenAddress flow.Address) []byte { + evmContract := nftImportPattern.ReplaceAllString( contractCode, + fmt.Sprintf("import NonFungibleToken from %s", nonFungibleTokenAddress.HexWithPrefix()), + ) + evmContract = fungibleTokenImportPattern.ReplaceAllString( + evmContract, + fmt.Sprintf("import FungibleToken from %s", fungibleTokenAddress.HexWithPrefix()), + ) + evmContract = flowTokenImportPattern.ReplaceAllString( + evmContract, fmt.Sprintf("import FlowToken from %s", flowTokenAddress.HexWithPrefix()), - )) + ) + return []byte(evmContract) } const ContractName = "EVM" @@ -1773,6 +1784,10 @@ func NewEVMBlockValue( }, ), }, + { + Name: "timestamp", + Value: interpreter.UInt64Value(block.Timestamp), + }, }, common.ZeroAddress, ) @@ -1938,8 +1953,12 @@ var internalEVMStandardLibraryType = stdlib.StandardLibraryType{ Kind: common.DeclarationKindContract, } -func SetupEnvironment(env runtime.Environment, handler types.ContractHandler, service flow.Address) { - location := common.NewAddressLocation(nil, common.Address(service), ContractName) +func SetupEnvironment( + env runtime.Environment, + handler types.ContractHandler, + contractAddress flow.Address, +) { + location := common.NewAddressLocation(nil, common.Address(contractAddress), ContractName) env.DeclareType( internalEVMStandardLibraryType, @@ -2044,6 +2063,10 @@ func NewEVMBlockCadenceType(address common.Address) *cadence.StructType { Identifier: "totalSupply", Type: cadence.IntType{}, }, + { + Identifier: "timestamp", + Type: cadence.UInt64Type{}, + }, }, nil, ) diff --git a/fvm/evm/stdlib/contract_test.go b/fvm/evm/stdlib/contract_test.go index f92cb52ab68..4bb9e4103bf 100644 --- a/fvm/evm/stdlib/contract_test.go +++ b/fvm/evm/stdlib/contract_test.go @@ -223,7 +223,7 @@ func deployContracts( }, { name: stdlib.ContractName, - code: stdlib.ContractCode(contractsAddress), + code: stdlib.ContractCode(contractsAddress, contractsAddress, contractsAddress), }, } @@ -253,25 +253,25 @@ func deployContracts( } -func newEVMTransactionEnvironment(handler types.ContractHandler, service flow.Address) runtime.Environment { +func newEVMTransactionEnvironment(handler types.ContractHandler, contractAddress flow.Address) runtime.Environment { transactionEnvironment := runtime.NewBaseInterpreterEnvironment(runtime.Config{}) stdlib.SetupEnvironment( transactionEnvironment, handler, - service, + contractAddress, ) return transactionEnvironment } -func newEVMScriptEnvironment(handler types.ContractHandler, service flow.Address) runtime.Environment { +func newEVMScriptEnvironment(handler types.ContractHandler, contractAddress flow.Address) runtime.Environment { scriptEnvironment := runtime.NewScriptInterpreterEnvironment(runtime.Config{}) stdlib.SetupEnvironment( 
scriptEnvironment, handler, - service, + contractAddress, ) return scriptEnvironment @@ -4131,6 +4131,7 @@ func TestEVMGetLatestBlock(t *testing.T) { latestBlock := &types.Block{ Height: uint64(2), TotalSupply: big.NewInt(1500000000000000000), + Timestamp: uint64(1337), } handler := &testContractHandler{ evmContractAddress: common.Address(contractsAddress), @@ -4158,6 +4159,7 @@ func TestEVMGetLatestBlock(t *testing.T) { blockHash, err := cadence.NewString(hash.Hex()) require.NoError(t, err) blockTotalSupply := cadence.NewIntFromBig(latestBlock.TotalSupply) + timestamp := cadence.NewUInt64(latestBlock.Timestamp) expectedEVMBlock := cadence.Struct{ StructType: evmBlockCadenceType, @@ -4165,6 +4167,7 @@ func TestEVMGetLatestBlock(t *testing.T) { blockHeight, blockHash, blockTotalSupply, + timestamp, }, } diff --git a/fvm/evm/testutils/backend.go b/fvm/evm/testutils/backend.go index a8c831ca7d4..472d38201f4 100644 --- a/fvm/evm/testutils/backend.go +++ b/fvm/evm/testutils/backend.go @@ -6,6 +6,8 @@ import ( "fmt" "testing" + "github.com/onflow/cadence/runtime/stdlib" + "github.com/onflow/atree" "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" @@ -35,7 +37,7 @@ func RunWithTestBackend(t testing.TB, f func(*TestBackend)) { TestValueStore: GetSimpleValueStore(), testEventEmitter: getSimpleEventEmitter(), testMeter: getSimpleMeter(), - TestBlockInfo: &TestBlockInfo{}, + TestBlockInfo: getSimpleBlockStore(), TestRandomGenerator: getSimpleRandomGenerator(), TestContractFunctionInvoker: &TestContractFunctionInvoker{}, } @@ -157,6 +159,24 @@ func getSimpleMeter() *testMeter { } } +func getSimpleBlockStore() *TestBlockInfo { + var index int64 = 1 + return &TestBlockInfo{ + GetCurrentBlockHeightFunc: func() (uint64, error) { + index++ + return uint64(index), nil + }, + GetBlockAtHeightFunc: func(height uint64) (runtime.Block, bool, error) { + return runtime.Block{ + Height: height, + View: 0, + Hash: stdlib.BlockHash{}, + Timestamp: int64(height), + }, true, nil + }, + } +} + type TestBackend struct { *TestValueStore *testMeter diff --git a/fvm/evm/types/block.go b/fvm/evm/types/block.go index a7f10938b67..6c70903bbea 100644 --- a/fvm/evm/types/block.go +++ b/fvm/evm/types/block.go @@ -19,6 +19,10 @@ type Block struct { // Height returns the height of this block Height uint64 + // Timestamp is a Unix timestamp in seconds at which the block was created + // Note that this value must be provided from the FVM Block + Timestamp uint64 + // holds the total amount of the native token deposited in the evm side. 
(in attoflow) TotalSupply *big.Int @@ -66,6 +70,7 @@ func (b *Block) AppendTxHash(txHash gethCommon.Hash) { func NewBlock( parentBlockHash gethCommon.Hash, height uint64, + timestamp uint64, totalSupply *big.Int, receiptRoot gethCommon.Hash, txHashes []gethCommon.Hash, @@ -73,6 +78,7 @@ func NewBlock( return &Block{ ParentBlockHash: parentBlockHash, Height: height, + Timestamp: timestamp, TotalSupply: totalSupply, ReceiptRoot: receiptRoot, TransactionHashes: txHashes, diff --git a/fvm/evm/types/emulator.go b/fvm/evm/types/emulator.go index 3d684d525b1..b0118df2719 100644 --- a/fvm/evm/types/emulator.go +++ b/fvm/evm/types/emulator.go @@ -26,6 +26,7 @@ type Precompile interface { type BlockContext struct { ChainID *big.Int BlockNumber uint64 + BlockTimestamp uint64 DirectCallBaseGasUsage uint64 DirectCallGasPrice uint64 GasFeeCollector Address diff --git a/fvm/evm/types/events.go b/fvm/evm/types/events.go index 920f6216a2c..15a061bcea9 100644 --- a/fvm/evm/types/events.go +++ b/fvm/evm/types/events.go @@ -96,7 +96,8 @@ func init() { ) } -// we might break this event into two (tx included /tx executed) if size becomes an issue +// todo we might have to break this event into two (tx included /tx executed) if size becomes an issue + type TransactionExecutedPayload struct { BlockHeight uint64 TxEncoded []byte @@ -175,6 +176,7 @@ var blockExecutedEventCadenceType = &cadence.EventType{ Fields: []cadence.Field{ cadence.NewField("height", cadence.UInt64Type{}), cadence.NewField("hash", cadence.StringType{}), + cadence.NewField("timestamp", cadence.UInt64Type{}), cadence.NewField("totalSupply", cadence.IntType{}), cadence.NewField("parentHash", cadence.StringType{}), cadence.NewField("receiptRoot", cadence.StringType{}), @@ -203,6 +205,7 @@ func (p *BlockExecutedEventPayload) CadenceEvent() (cadence.Event, error) { fields := []cadence.Value{ cadence.NewUInt64(p.Block.Height), cadence.String(blockHash.String()), + cadence.NewUInt64(p.Block.Timestamp), cadence.NewIntFromBig(p.Block.TotalSupply), cadence.String(p.Block.ParentBlockHash.String()), cadence.String(p.Block.ReceiptRoot.String()), diff --git a/fvm/evm/types/events_test.go b/fvm/evm/types/events_test.go index 0904afc1fda..cdea389c0d9 100644 --- a/fvm/evm/types/events_test.go +++ b/fvm/evm/types/events_test.go @@ -22,6 +22,7 @@ import ( type blockEventPayload struct { Height uint64 `cadence:"height"` Hash string `cadence:"hash"` + Timestamp uint64 `cadence:"timestamp"` TotalSupply cadence.Int `cadence:"totalSupply"` ParentBlockHash string `cadence:"parentHash"` ReceiptRoot string `cadence:"receiptRoot"` @@ -47,6 +48,7 @@ func TestEVMBlockExecutedEventCCFEncodingDecoding(t *testing.T) { block := &types.Block{ Height: 2, + Timestamp: 100, TotalSupply: big.NewInt(1500), ParentBlockHash: gethCommon.HexToHash("0x2813452cff514c3054ac9f40cd7ce1b016cc78ab7f99f1c6d49708837f6e06d1"), ReceiptRoot: gethCommon.Hash{}, @@ -70,6 +72,7 @@ func TestEVMBlockExecutedEventCCFEncodingDecoding(t *testing.T) { assert.Equal(t, bep.Hash, blockHash.Hex()) assert.Equal(t, bep.TotalSupply.Value, block.TotalSupply) + assert.Equal(t, bep.Timestamp, block.Timestamp) assert.Equal(t, bep.ParentBlockHash, block.ParentBlockHash.Hex()) assert.Equal(t, bep.ReceiptRoot, block.ReceiptRoot.Hex()) diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 900a9f7a56f..4cc9059cbdf 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -8,6 +8,8 @@ import ( "strings" "testing" + envMock "github.com/onflow/flow-go/fvm/environment/mock" + "github.com/onflow/cadence" 
"github.com/onflow/cadence/encoding/ccf" jsoncdc "github.com/onflow/cadence/encoding/json" @@ -3061,12 +3063,27 @@ func TestTransientNetworkCoreContractAddresses(t *testing.T) { } func TestEVM(t *testing.T) { + blocks := new(envMock.Blocks) + block1 := unittest.BlockFixture() + blocks.On("ByHeightFrom", + block1.Header.Height, + block1.Header, + ).Return(block1.Header, nil) + + ctxOpts := []fvm.Option{ + // default is testnet, but testnet has a special EVM storage contract location + // so we have to use emulator here so that the EVM storage contract is deployed + // to the 5th address + fvm.WithChain(flow.Emulator.Chain()), + fvm.WithEVMEnabled(true), + fvm.WithBlocks(blocks), + fvm.WithBlockHeader(block1.Header), + fvm.WithCadenceLogging(true), + } + t.Run("successful transaction", newVMTest(). withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). - withContextOptions( - fvm.WithEVMEnabled(true), - fvm.WithCadenceLogging(true), - ). + withContextOptions(ctxOpts...). run(func( t *testing.T, vm fvm.VM, @@ -3122,10 +3139,7 @@ func TestEVM(t *testing.T) { // this test makes sure the execution error is correctly handled and returned as a correct type t.Run("execution reverted", newVMTest(). withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). - withContextOptions( - fvm.WithChain(flow.Emulator.Chain()), - fvm.WithEVMEnabled(true), - ). + withContextOptions(ctxOpts...). run(func( t *testing.T, vm fvm.VM, @@ -3162,10 +3176,7 @@ func TestEVM(t *testing.T) { // we have implemented a snapshot wrapper to return an error from the EVM t.Run("internal evm error handling", newVMTest(). withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). - withContextOptions( - fvm.WithChain(flow.Emulator.Chain()), - fvm.WithEVMEnabled(true), - ). + withContextOptions(ctxOpts...). run(func( t *testing.T, vm fvm.VM, @@ -3218,15 +3229,8 @@ func TestEVM(t *testing.T) { ) t.Run("deploy contract code", newVMTest(). - withBootstrapProcedureOptions( - fvm.WithSetupEVMEnabled(true), - ). - withContextOptions( - // default is testnet, but testnet has a special EVM storage contract location - // so we have to use emulator here so that the EVM storage contract is deployed - // to the 5th address - fvm.WithChain(flow.Emulator.Chain()), - ). + withBootstrapProcedureOptions(fvm.WithSetupEVMEnabled(true)). + withContextOptions(ctxOpts...). 
run(func( t *testing.T, vm fvm.VM, diff --git a/fvm/script.go b/fvm/script.go index c310c73ba00..28067cfc1bd 100644 --- a/fvm/script.go +++ b/fvm/script.go @@ -207,7 +207,6 @@ func (executor *scriptExecutor) executeScript() error { chain.ChainID(), executor.env, rt.ScriptRuntimeEnv, - chain.ServiceAddress(), sc.FlowToken.Address, ) if err != nil { diff --git a/fvm/transactionInvoker.go b/fvm/transactionInvoker.go index 85d7375a0d3..5e05b9016d3 100644 --- a/fvm/transactionInvoker.go +++ b/fvm/transactionInvoker.go @@ -190,7 +190,6 @@ func (executor *transactionExecutor) preprocessTransactionBody() error { chain.ChainID(), executor.env, executor.cadenceRuntime.TxRuntimeEnv, - chain.ServiceAddress(), sc.FlowToken.Address, ) if err != nil { @@ -250,7 +249,6 @@ func (executor *transactionExecutor) ExecuteTransactionBody() error { chain.ChainID(), executor.env, executor.cadenceRuntime.TxRuntimeEnv, - chain.ServiceAddress(), sc.FlowToken.Address, ) if err != nil { diff --git a/go.mod b/go.mod index ce632fb3e6b..636f9cda7d6 100644 --- a/go.mod +++ b/go.mod @@ -51,13 +51,13 @@ require ( github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multihash v0.2.3 github.com/onflow/atree v0.6.0 - github.com/onflow/cadence v0.42.9 + github.com/onflow/cadence v0.42.10 github.com/onflow/crypto v0.25.1 github.com/onflow/flow v0.3.4 github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1 github.com/onflow/flow-go-sdk v0.44.0 - github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e + github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pierrec/lz4 v2.6.1+incompatible github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 32e504a9426..17f4ba300d5 100644 --- a/go.sum +++ b/go.sum @@ -1351,8 +1351,8 @@ github.com/onflow/atree v0.6.0/go.mod h1:gBHU0M05qCbv9NN0kijLWMgC47gHVNBIp4KmsVF github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483 h1:LpiQhTAfM9CAmNVEs0n//cBBgCg+vJSiIxTHYUklZ84= github.com/onflow/boxo v0.0.0-20240201202436-f2477b92f483/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80= github.com/onflow/cadence v0.20.1/go.mod h1:7mzUvPZUIJztIbr9eTvs+fQjWWHTF8veC+yk4ihcNIA= -github.com/onflow/cadence v0.42.9 h1:EX+eak/Jjy9PyKcAEmOViGOHMyP/nCOwJO+deodZlJE= -github.com/onflow/cadence v0.42.9/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= +github.com/onflow/cadence v0.42.10 h1:3oC5ceeXhdCrhHcf9H0yYXQKW3Tw/vkSXLe+PUZa4i0= +github.com/onflow/cadence v0.42.10/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A= github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onflow/flow v0.3.4 h1:FXUWVdYB90f/rjNcY0Owo30gL790tiYff9Pb/sycXYE= @@ -1370,8 +1370,8 @@ github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8 github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e h1:r4+gVDDMOOc04Y1qjCZULAdgoaxSMsqSdE1EyviG76U= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e/go.mod 
h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 h1:I+aosSiJny88O4p3nPbCiUcp/UqN6AepvO6uj82bjH0= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8= diff --git a/insecure/go.mod b/insecure/go.mod index 60e6b23d162..5d2b0cba848 100644 --- a/insecure/go.mod +++ b/insecure/go.mod @@ -204,13 +204,13 @@ require ( github.com/multiformats/go-varint v0.0.7 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onflow/atree v0.6.0 // indirect - github.com/onflow/cadence v0.42.9 // indirect + github.com/onflow/cadence v0.42.10 // indirect github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 // indirect github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1 // indirect github.com/onflow/flow-ft/lib/go/contracts v0.7.1-0.20230711213910-baad011d2b13 // indirect github.com/onflow/flow-go-sdk v0.46.0 // indirect github.com/onflow/flow-nft/lib/go/contracts v1.1.0 // indirect - github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e // indirect + github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 // indirect github.com/onflow/go-ethereum v1.13.4 // indirect github.com/onflow/sdks v0.5.0 // indirect github.com/onflow/wal v0.0.0-20240208022732-d756cd497d3b // indirect diff --git a/insecure/go.sum b/insecure/go.sum index f584e0922cf..7c356c47ac0 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -1316,8 +1316,8 @@ github.com/onflow/atree v0.1.0-beta1.0.20211027184039-559ee654ece9/go.mod h1:+6x github.com/onflow/atree v0.6.0 h1:j7nQ2r8npznx4NX39zPpBYHmdy45f4xwoi+dm37Jk7c= github.com/onflow/atree v0.6.0/go.mod h1:gBHU0M05qCbv9NN0kijLWMgC47gHVNBIp4KmsVFi0tc= github.com/onflow/cadence v0.20.1/go.mod h1:7mzUvPZUIJztIbr9eTvs+fQjWWHTF8veC+yk4ihcNIA= -github.com/onflow/cadence v0.42.9 h1:EX+eak/Jjy9PyKcAEmOViGOHMyP/nCOwJO+deodZlJE= -github.com/onflow/cadence v0.42.9/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= +github.com/onflow/cadence v0.42.10 h1:3oC5ceeXhdCrhHcf9H0yYXQKW3Tw/vkSXLe+PUZa4i0= +github.com/onflow/cadence v0.42.10/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A= github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 h1:xF5wHug6H8vKfz7p1LYy9jck6eD9K1HLjTdi6o4kg1k= @@ -1333,8 +1333,8 @@ github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8 github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e h1:r4+gVDDMOOc04Y1qjCZULAdgoaxSMsqSdE1EyviG76U= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 
h1:I+aosSiJny88O4p3nPbCiUcp/UqN6AepvO6uj82bjH0= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/sdks v0.5.0 h1:2HCRibwqDaQ1c9oUApnkZtEAhWiNY2GTpRD5+ftdkN8= diff --git a/integration/benchmark/cmd/ci/main.go b/integration/benchmark/cmd/ci/main.go index b5f68cc0ec7..c3917f5b161 100644 --- a/integration/benchmark/cmd/ci/main.go +++ b/integration/benchmark/cmd/ci/main.go @@ -3,25 +3,22 @@ package main import ( "context" "flag" - "net" "os" "strings" "time" - "github.com/onflow/flow-go/integration/benchmark/load" - "github.com/prometheus/client_golang/prometheus" - "github.com/rs/zerolog" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" + "gopkg.in/yaml.v3" flowsdk "github.com/onflow/flow-go-sdk" "github.com/onflow/flow-go-sdk/access" client "github.com/onflow/flow-go-sdk/access/grpc" "github.com/onflow/flow-go/integration/benchmark" - pb "github.com/onflow/flow-go/integration/benchmark/proto" + "github.com/onflow/flow-go/integration/benchmark/load" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" @@ -33,7 +30,7 @@ type BenchmarkInfo struct { // Hardcoded CI values const ( - defaultLoadType = "token-transfer" + defaultLoadType = load.TokenTransferLoadType metricport = uint(8080) accessNodeAddress = "127.0.0.1:4001" pushgateway = "127.0.0.1:9091" @@ -45,35 +42,43 @@ const ( defaultMetricCollectionInterval = 20 * time.Second // gRPC constants - defaultMaxMsgSize = 1024 * 1024 * 16 // 16 MB - defaultGRPCAddress = "127.0.0.1:4777" + defaultMaxMsgSize = 1024 * 1024 * 16 // 16 MB ) func main() { logLvl := flag.String("log-level", "info", "set log level") // CI relevant flags - grpcAddressFlag := flag.String("grpc-address", defaultGRPCAddress, "listen address for gRPC server") initialTPSFlag := flag.Int("tps-initial", 10, "starting transactions per second") maxTPSFlag := flag.Int("tps-max", *initialTPSFlag, "maximum transactions per second allowed") minTPSFlag := flag.Int("tps-min", *initialTPSFlag, "minimum transactions per second allowed") + loadTypeFlag := flag.String("load-type", string(defaultLoadType), "load type (token-transfer / const-exec / evm) from the load config file") + loadConfigFileLocationFlag := flag.String("load-config", "", "load config file location. 
If not provided, default config will be used.") + adjustIntervalFlag := flag.Duration("tps-adjust-interval", defaultAdjustInterval, "interval for adjusting TPS") adjustDelayFlag := flag.Duration("tps-adjust-delay", 120*time.Second, "delay before adjusting TPS") - statIntervalFlag := flag.Duration("stat-interval", defaultMetricCollectionInterval, "") durationFlag := flag.Duration("duration", 10*time.Minute, "test duration") + + statIntervalFlag := flag.Duration("stat-interval", defaultMetricCollectionInterval, "") gitRepoPathFlag := flag.String("git-repo-path", "../..", "git repo path of the filesystem") gitRepoURLFlag := flag.String("git-repo-url", "https://github.com/onflow/flow-go.git", "git repo URL") bigQueryUpload := flag.Bool("bigquery-upload", true, "whether to upload results to BigQuery (true / false)") bigQueryProjectFlag := flag.String("bigquery-project", "dapperlabs-data", "project name for the bigquery uploader") bigQueryDatasetFlag := flag.String("bigquery-dataset", "dev_src_flow_tps_metrics", "dataset name for the bigquery uploader") bigQueryRawTableFlag := flag.String("bigquery-raw-table", "rawResults", "table name for the bigquery raw results") - loadTypeFlag := flag.String("load-type", defaultLoadType, "load type (token-transfer / const-exec / evm)") flag.Parse() - loadType := *loadTypeFlag - log := setupLogger(logLvl) + loadConfig := getLoadConfig( + log, + *loadConfigFileLocationFlag, + *loadTypeFlag, + *minTPSFlag, + *maxTPSFlag, + *initialTPSFlag, + ) + if *gitRepoPathFlag == "" { flag.PrintDefaults() log.Fatal().Msg("git repo path is required") @@ -86,26 +91,6 @@ func main() { <-server.Ready() loaderMetrics := metrics.NewLoaderCollector() - grpcServerOptions := []grpc.ServerOption{ - grpc.MaxRecvMsgSize(defaultMaxMsgSize), - grpc.MaxSendMsgSize(defaultMaxMsgSize), - } - grpcServer := grpc.NewServer(grpcServerOptions...) - defer grpcServer.Stop() - - pb.RegisterBenchmarkServer(grpcServer, &benchmarkServer{}) - - grpcListener, err := net.Listen("tcp", *grpcAddressFlag) - if err != nil { - log.Fatal().Err(err).Str("address", *grpcAddressFlag).Msg("failed to listen") - } - - go func() { - if err := grpcServer.Serve(grpcListener); err != nil { - log.Fatal().Err(err).Msg("failed to serve") - } - }() - sp := benchmark.NewStatsPusher(ctx, log, pushgateway, "loader", prometheus.DefaultGatherer) defer sp.Stop() @@ -136,10 +121,7 @@ func main() { // prepare load generator log.Info(). - Str("load_type", loadType). - Int("initialTPS", *initialTPSFlag). - Int("minTPS", *minTPSFlag). - Int("maxTPS", *maxTPSFlag). + Interface("loadConfig", loadConfig). Dur("duration", *durationFlag). 
Msg("Running load case") @@ -164,7 +146,7 @@ func main() { }, benchmark.LoadParams{ NumberOfAccounts: maxInflight, - LoadType: load.LoadType(loadType), + LoadConfig: loadConfig, FeedbackEnabled: feedbackEnabled, }, ) @@ -187,9 +169,9 @@ func main() { AdjusterParams{ Delay: *adjustDelayFlag, Interval: *adjustIntervalFlag, - InitialTPS: uint(*initialTPSFlag), - MinTPS: uint(*minTPSFlag), - MaxTPS: uint(*maxTPSFlag), + InitialTPS: uint(loadConfig.TPSInitial), + MinTPS: uint(loadConfig.TpsMin), + MaxTPS: uint(loadConfig.TpsMax), MaxInflight: uint(maxInflight / 2), }, ) @@ -218,7 +200,7 @@ func main() { // only upload valid data if *bigQueryUpload { repoInfo := MustGetRepoInfo(log, *gitRepoURLFlag, *gitRepoPathFlag) - mustUploadData(ctx, log, recorder, repoInfo, *bigQueryProjectFlag, *bigQueryDatasetFlag, *bigQueryRawTableFlag, loadType) + mustUploadData(ctx, log, recorder, repoInfo, *bigQueryProjectFlag, *bigQueryDatasetFlag, *bigQueryRawTableFlag, loadConfig.LoadName) } else { log.Info().Int("raw_tps_size", len(recorder.BenchmarkResults.RawTPS)).Msg("logging tps results locally") // log results locally when not uploading to BigQuery @@ -228,6 +210,55 @@ func main() { } } +func getLoadConfig( + log zerolog.Logger, + loadConfigLocation string, + load string, + minTPS int, + maxTPS int, + initialTPS int, +) benchmark.LoadConfig { + if loadConfigLocation == "" { + lc := benchmark.LoadConfig{ + LoadName: load, + LoadType: load, + TpsMax: maxTPS, + TpsMin: minTPS, + TPSInitial: initialTPS, + } + + log.Info(). + Interface("loadConfig", lc). + Msg("Load config file not provided, using parameters supplied in TPS flags") + return lc + } + + var loadConfigs map[string]benchmark.LoadConfig + + // check if the file exists + if _, err := os.Stat(loadConfigLocation); os.IsNotExist(err) { + log.Fatal().Err(err).Str("loadConfigLocation", loadConfigLocation).Msg("load config file not found") + } + + yamlFile, err := os.ReadFile(loadConfigLocation) + if err != nil { + log.Fatal().Err(err).Str("loadConfigLocation", loadConfigLocation).Msg("failed to read load config file") + } + + err = yaml.Unmarshal(yamlFile, &loadConfigs) + if err != nil { + log.Fatal().Err(err).Str("loadConfigLocation", loadConfigLocation).Msg("failed to unmarshal load config file") + } + + lc, ok := loadConfigs[load] + if !ok { + log.Fatal().Str("load", load).Msg("load not found in load config file") + } + lc.LoadName = load + + return lc +} + // setupLogger parses log level and apply to logger func setupLogger(logLvl *string) zerolog.Logger { log := zerolog.New(os.Stderr). 
@@ -252,7 +283,7 @@ func mustUploadData( bigQueryProject string, bigQueryDataset string, bigQueryRawTable string, - loadType string, + loadName string, ) { log.Info().Msg("Initializing BigQuery") db, err := NewDB(ctx, log, bigQueryProject) @@ -278,7 +309,7 @@ func mustUploadData( bigQueryRawTable, recorder.BenchmarkResults, *repoInfo, - BenchmarkInfo{BenchmarkType: loadType}, + BenchmarkInfo{BenchmarkType: loadName}, MustGetDefaultEnvironment(), ) if err != nil { diff --git a/integration/benchmark/cmd/ci/server.go b/integration/benchmark/cmd/ci/server.go deleted file mode 100644 index ba72e856ed4..00000000000 --- a/integration/benchmark/cmd/ci/server.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "context" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" - - pb "github.com/onflow/flow-go/integration/benchmark/proto" -) - -type benchmarkServer struct { - pb.UnimplementedBenchmarkServer -} - -func (s *benchmarkServer) StartMacroBenchmark(*pb.StartMacroBenchmarkRequest, pb.Benchmark_StartMacroBenchmarkServer) error { - return status.Errorf(codes.Unimplemented, "method StartMacroBenchmark not implemented") -} -func (s *benchmarkServer) GetMacroBenchmark(context.Context, *pb.GetMacroBenchmarkRequest) (*pb.GetMacroBenchmarkResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMacroBenchmark not implemented") -} -func (s *benchmarkServer) ListMacroBenchmarks(context.Context, *emptypb.Empty) (*pb.ListMacroBenchmarksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListMacroBenchmarks not implemented") -} -func (s *benchmarkServer) Status(context.Context, *emptypb.Empty) (*pb.StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} diff --git a/integration/benchmark/cmd/manual/main.go b/integration/benchmark/cmd/manual/main.go index f42e21ef894..ffaa9615570 100644 --- a/integration/benchmark/cmd/manual/main.go +++ b/integration/benchmark/cmd/manual/main.go @@ -9,8 +9,6 @@ import ( "strings" "time" - "github.com/onflow/flow-go/integration/benchmark/load" - "github.com/prometheus/client_golang/prometheus" "github.com/rs/zerolog" "google.golang.org/grpc" @@ -132,8 +130,14 @@ func main() { }, benchmark.LoadParams{ NumberOfAccounts: int(maxTPS) * *accountMultiplierFlag, - LoadType: load.LoadType(*loadTypeFlag), - FeedbackEnabled: *feedbackEnabled, + LoadConfig: benchmark.LoadConfig{ + LoadName: *loadTypeFlag, + LoadType: *loadTypeFlag, + TpsMax: int(maxTPS), + TpsMin: int(maxTPS), + TPSInitial: int(maxTPS), + }, + FeedbackEnabled: *feedbackEnabled, }, ) if err != nil { diff --git a/integration/benchmark/contLoadGenerator.go b/integration/benchmark/contLoadGenerator.go index df78ca74c0a..4b5c147b8ff 100644 --- a/integration/benchmark/contLoadGenerator.go +++ b/integration/benchmark/contLoadGenerator.go @@ -46,9 +46,20 @@ type NetworkParams struct { ChainId flow.ChainID } +type LoadConfig struct { + // LoadName is the name of the load. This can be different from the LoadType + // and is used to identify the load in the results. The use case is when a single + // load type is used to run multiple loads with different parameters. 
+ LoadName string `yaml:"-"` + LoadType string `yaml:"load_type"` + TpsMax int `yaml:"tps_max"` + TpsMin int `yaml:"tps_min"` + TPSInitial int `yaml:"tps_initial"` +} + type LoadParams struct { NumberOfAccounts int - LoadType load.LoadType + LoadConfig LoadConfig // TODO(rbtz): inject a TxFollower FeedbackEnabled bool @@ -157,7 +168,7 @@ func New( Proposer: servAcc, } - l := load.CreateLoadType(log, loadParams.LoadType) + l := load.CreateLoadType(log, load.LoadType(loadParams.LoadConfig.LoadType)) err = l.Setup(log, lc) if err != nil { diff --git a/integration/benchmark/load/load_type_test.go b/integration/benchmark/load/load_type_test.go index 1517924a7e5..fee4c2b118f 100644 --- a/integration/benchmark/load/load_type_test.go +++ b/integration/benchmark/load/load_type_test.go @@ -19,6 +19,7 @@ import ( "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/environment" + envMock "github.com/onflow/flow-go/fvm/environment/mock" "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/integration/benchmark/account" "github.com/onflow/flow-go/integration/benchmark/common" @@ -119,12 +120,21 @@ func testLoad(log zerolog.Logger, l load.Load) func(t *testing.T) { func bootstrapVM(t *testing.T, chain flow.Chain) (*fvm.VirtualMachine, fvm.Context, snapshot.SnapshotTree) { source := testutil.EntropyProviderFixture(nil) + blocks := new(envMock.Blocks) + block1 := unittest.BlockFixture() + blocks.On("ByHeightFrom", + block1.Header.Height, + block1.Header, + ).Return(block1.Header, nil) + opts := computation.DefaultFVMOptions(chain.ChainID(), false, false) opts = append(opts, fvm.WithTransactionFeesEnabled(true), fvm.WithAccountStorageLimit(true), fvm.WithContractDeploymentRestricted(false), fvm.WithEntropyProvider(source), + fvm.WithBlocks(blocks), + fvm.WithBlockHeader(block1.Header), ) ctx := fvm.NewContext(opts...) diff --git a/integration/benchmark/mocksiface/mocks.go b/integration/benchmark/mocksiface/mocks.go deleted file mode 100644 index 0068b5676c2..00000000000 --- a/integration/benchmark/mocksiface/mocks.go +++ /dev/null @@ -1,10 +0,0 @@ -package mocksiface_test - -import ( - "github.com/onflow/flow-go-sdk/access" -) - -// This is a proxy for the real access.Client for mockery to use. -type Client interface { - access.Client -} diff --git a/integration/benchmark/proto/generate.go b/integration/benchmark/proto/generate.go deleted file mode 100644 index b36797e4592..00000000000 --- a/integration/benchmark/proto/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative macro_benchmark.proto - -package proto diff --git a/integration/benchmark/proto/macro_benchmark.pb.go b/integration/benchmark/proto/macro_benchmark.pb.go deleted file mode 100644 index 15fdb7b4cf9..00000000000 --- a/integration/benchmark/proto/macro_benchmark.pb.go +++ /dev/null @@ -1,435 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 -// source: macro_benchmark.proto - -package proto - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type StartMacroBenchmarkRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StartMacroBenchmarkRequest) Reset() { - *x = StartMacroBenchmarkRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartMacroBenchmarkRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartMacroBenchmarkRequest) ProtoMessage() {} - -func (x *StartMacroBenchmarkRequest) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartMacroBenchmarkRequest.ProtoReflect.Descriptor instead. -func (*StartMacroBenchmarkRequest) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{0} -} - -type StartMacroBenchmarkResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StartMacroBenchmarkResponse) Reset() { - *x = StartMacroBenchmarkResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartMacroBenchmarkResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartMacroBenchmarkResponse) ProtoMessage() {} - -func (x *StartMacroBenchmarkResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartMacroBenchmarkResponse.ProtoReflect.Descriptor instead. -func (*StartMacroBenchmarkResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{1} -} - -type GetMacroBenchmarkRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetMacroBenchmarkRequest) Reset() { - *x = GetMacroBenchmarkRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetMacroBenchmarkRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetMacroBenchmarkRequest) ProtoMessage() {} - -func (x *GetMacroBenchmarkRequest) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetMacroBenchmarkRequest.ProtoReflect.Descriptor instead. 
-func (*GetMacroBenchmarkRequest) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{2} -} - -type GetMacroBenchmarkResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetMacroBenchmarkResponse) Reset() { - *x = GetMacroBenchmarkResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetMacroBenchmarkResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetMacroBenchmarkResponse) ProtoMessage() {} - -func (x *GetMacroBenchmarkResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetMacroBenchmarkResponse.ProtoReflect.Descriptor instead. -func (*GetMacroBenchmarkResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{3} -} - -type ListMacroBenchmarksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ListMacroBenchmarksResponse) Reset() { - *x = ListMacroBenchmarksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListMacroBenchmarksResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListMacroBenchmarksResponse) ProtoMessage() {} - -func (x *ListMacroBenchmarksResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListMacroBenchmarksResponse.ProtoReflect.Descriptor instead. -func (*ListMacroBenchmarksResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{4} -} - -type StatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StatusResponse) Reset() { - *x = StatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_macro_benchmark_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StatusResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatusResponse) ProtoMessage() {} - -func (x *StatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_macro_benchmark_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. 
-func (*StatusResponse) Descriptor() ([]byte, []int) { - return file_macro_benchmark_proto_rawDescGZIP(), []int{5} -} - -var File_macro_benchmark_proto protoreflect.FileDescriptor - -var file_macro_benchmark_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, - 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, - 0x72, 0x6b, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x1c, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1d, 0x0a, - 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, - 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x0a, 0x18, - 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1b, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x4d, - 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, - 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xef, 0x02, 0x0a, 0x09, 0x42, 0x65, 0x6e, 0x63, 0x68, - 0x6d, 0x61, 0x72, 0x6b, 0x12, 0x68, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, - 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x12, 0x25, 0x2e, 0x62, 0x65, - 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, - 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, - 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x60, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, - 0x61, 0x72, 0x6b, 0x12, 0x23, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, - 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, - 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x57, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x26, 0x2e, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x06, 0x53, 
0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x62, 0x65, - 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x66, 0x6c, - 0x6f, 0x77, 0x2d, 0x67, 0x6f, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x62, 0x65, 0x63, 0x6e, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_macro_benchmark_proto_rawDescOnce sync.Once - file_macro_benchmark_proto_rawDescData = file_macro_benchmark_proto_rawDesc -) - -func file_macro_benchmark_proto_rawDescGZIP() []byte { - file_macro_benchmark_proto_rawDescOnce.Do(func() { - file_macro_benchmark_proto_rawDescData = protoimpl.X.CompressGZIP(file_macro_benchmark_proto_rawDescData) - }) - return file_macro_benchmark_proto_rawDescData -} - -var file_macro_benchmark_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_macro_benchmark_proto_goTypes = []interface{}{ - (*StartMacroBenchmarkRequest)(nil), // 0: benchmark.StartMacroBenchmarkRequest - (*StartMacroBenchmarkResponse)(nil), // 1: benchmark.StartMacroBenchmarkResponse - (*GetMacroBenchmarkRequest)(nil), // 2: benchmark.GetMacroBenchmarkRequest - (*GetMacroBenchmarkResponse)(nil), // 3: benchmark.GetMacroBenchmarkResponse - (*ListMacroBenchmarksResponse)(nil), // 4: benchmark.ListMacroBenchmarksResponse - (*StatusResponse)(nil), // 5: benchmark.StatusResponse - (*emptypb.Empty)(nil), // 6: google.protobuf.Empty -} -var file_macro_benchmark_proto_depIdxs = []int32{ - 0, // 0: benchmark.Benchmark.StartMacroBenchmark:input_type -> benchmark.StartMacroBenchmarkRequest - 2, // 1: benchmark.Benchmark.GetMacroBenchmark:input_type -> benchmark.GetMacroBenchmarkRequest - 6, // 2: benchmark.Benchmark.ListMacroBenchmarks:input_type -> google.protobuf.Empty - 6, // 3: benchmark.Benchmark.Status:input_type -> google.protobuf.Empty - 1, // 4: benchmark.Benchmark.StartMacroBenchmark:output_type -> benchmark.StartMacroBenchmarkResponse - 3, // 5: benchmark.Benchmark.GetMacroBenchmark:output_type -> benchmark.GetMacroBenchmarkResponse - 4, // 6: benchmark.Benchmark.ListMacroBenchmarks:output_type -> benchmark.ListMacroBenchmarksResponse - 5, // 7: benchmark.Benchmark.Status:output_type -> benchmark.StatusResponse - 4, // [4:8] is the sub-list for method output_type - 0, // [0:4] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_macro_benchmark_proto_init() } -func file_macro_benchmark_proto_init() { - if File_macro_benchmark_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_macro_benchmark_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartMacroBenchmarkRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartMacroBenchmarkResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: 
- return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMacroBenchmarkRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMacroBenchmarkResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListMacroBenchmarksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_macro_benchmark_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_macro_benchmark_proto_rawDesc, - NumEnums: 0, - NumMessages: 6, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_macro_benchmark_proto_goTypes, - DependencyIndexes: file_macro_benchmark_proto_depIdxs, - MessageInfos: file_macro_benchmark_proto_msgTypes, - }.Build() - File_macro_benchmark_proto = out.File - file_macro_benchmark_proto_rawDesc = nil - file_macro_benchmark_proto_goTypes = nil - file_macro_benchmark_proto_depIdxs = nil -} diff --git a/integration/benchmark/proto/macro_benchmark.proto b/integration/benchmark/proto/macro_benchmark.proto deleted file mode 100644 index e461ea81892..00000000000 --- a/integration/benchmark/proto/macro_benchmark.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package benchmark; -option go_package = "github.com/onflow/flow-go/integration/becnhmark/proto"; - -import "google/protobuf/empty.proto"; - -message StartMacroBenchmarkRequest {} -message StartMacroBenchmarkResponse {} - -message GetMacroBenchmarkRequest {} -message GetMacroBenchmarkResponse {} - -message ListMacroBenchmarksResponse {} - -message StatusResponse {} - -service Benchmark { - rpc StartMacroBenchmark(StartMacroBenchmarkRequest) - returns (stream StartMacroBenchmarkResponse) {} - rpc GetMacroBenchmark(GetMacroBenchmarkRequest) - returns (GetMacroBenchmarkResponse) {} - rpc ListMacroBenchmarks(google.protobuf.Empty) - returns (ListMacroBenchmarksResponse) {} - - rpc Status(google.protobuf.Empty) returns (StatusResponse) {} -} - diff --git a/integration/benchmark/proto/macro_benchmark_grpc.pb.go b/integration/benchmark/proto/macro_benchmark_grpc.pb.go deleted file mode 100644 index 065a26fcb39..00000000000 --- a/integration/benchmark/proto/macro_benchmark_grpc.pb.go +++ /dev/null @@ -1,243 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.9 -// source: macro_benchmark.proto - -package proto - -import ( - context "context" - - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// BenchmarkClient is the client API for Benchmark service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type BenchmarkClient interface { - StartMacroBenchmark(ctx context.Context, in *StartMacroBenchmarkRequest, opts ...grpc.CallOption) (Benchmark_StartMacroBenchmarkClient, error) - GetMacroBenchmark(ctx context.Context, in *GetMacroBenchmarkRequest, opts ...grpc.CallOption) (*GetMacroBenchmarkResponse, error) - ListMacroBenchmarks(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListMacroBenchmarksResponse, error) - Status(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StatusResponse, error) -} - -type benchmarkClient struct { - cc grpc.ClientConnInterface -} - -func NewBenchmarkClient(cc grpc.ClientConnInterface) BenchmarkClient { - return &benchmarkClient{cc} -} - -func (c *benchmarkClient) StartMacroBenchmark(ctx context.Context, in *StartMacroBenchmarkRequest, opts ...grpc.CallOption) (Benchmark_StartMacroBenchmarkClient, error) { - stream, err := c.cc.NewStream(ctx, &Benchmark_ServiceDesc.Streams[0], "/benchmark.Benchmark/StartMacroBenchmark", opts...) - if err != nil { - return nil, err - } - x := &benchmarkStartMacroBenchmarkClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Benchmark_StartMacroBenchmarkClient interface { - Recv() (*StartMacroBenchmarkResponse, error) - grpc.ClientStream -} - -type benchmarkStartMacroBenchmarkClient struct { - grpc.ClientStream -} - -func (x *benchmarkStartMacroBenchmarkClient) Recv() (*StartMacroBenchmarkResponse, error) { - m := new(StartMacroBenchmarkResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *benchmarkClient) GetMacroBenchmark(ctx context.Context, in *GetMacroBenchmarkRequest, opts ...grpc.CallOption) (*GetMacroBenchmarkResponse, error) { - out := new(GetMacroBenchmarkResponse) - err := c.cc.Invoke(ctx, "/benchmark.Benchmark/GetMacroBenchmark", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *benchmarkClient) ListMacroBenchmarks(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListMacroBenchmarksResponse, error) { - out := new(ListMacroBenchmarksResponse) - err := c.cc.Invoke(ctx, "/benchmark.Benchmark/ListMacroBenchmarks", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *benchmarkClient) Status(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/benchmark.Benchmark/Status", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// BenchmarkServer is the server API for Benchmark service. 
-// All implementations must embed UnimplementedBenchmarkServer -// for forward compatibility -type BenchmarkServer interface { - StartMacroBenchmark(*StartMacroBenchmarkRequest, Benchmark_StartMacroBenchmarkServer) error - GetMacroBenchmark(context.Context, *GetMacroBenchmarkRequest) (*GetMacroBenchmarkResponse, error) - ListMacroBenchmarks(context.Context, *emptypb.Empty) (*ListMacroBenchmarksResponse, error) - Status(context.Context, *emptypb.Empty) (*StatusResponse, error) - mustEmbedUnimplementedBenchmarkServer() -} - -// UnimplementedBenchmarkServer must be embedded to have forward compatible implementations. -type UnimplementedBenchmarkServer struct { -} - -func (UnimplementedBenchmarkServer) StartMacroBenchmark(*StartMacroBenchmarkRequest, Benchmark_StartMacroBenchmarkServer) error { - return status.Errorf(codes.Unimplemented, "method StartMacroBenchmark not implemented") -} -func (UnimplementedBenchmarkServer) GetMacroBenchmark(context.Context, *GetMacroBenchmarkRequest) (*GetMacroBenchmarkResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMacroBenchmark not implemented") -} -func (UnimplementedBenchmarkServer) ListMacroBenchmarks(context.Context, *emptypb.Empty) (*ListMacroBenchmarksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListMacroBenchmarks not implemented") -} -func (UnimplementedBenchmarkServer) Status(context.Context, *emptypb.Empty) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (UnimplementedBenchmarkServer) mustEmbedUnimplementedBenchmarkServer() {} - -// UnsafeBenchmarkServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to BenchmarkServer will -// result in compilation errors. 
-type UnsafeBenchmarkServer interface { - mustEmbedUnimplementedBenchmarkServer() -} - -func RegisterBenchmarkServer(s grpc.ServiceRegistrar, srv BenchmarkServer) { - s.RegisterService(&Benchmark_ServiceDesc, srv) -} - -func _Benchmark_StartMacroBenchmark_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StartMacroBenchmarkRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(BenchmarkServer).StartMacroBenchmark(m, &benchmarkStartMacroBenchmarkServer{stream}) -} - -type Benchmark_StartMacroBenchmarkServer interface { - Send(*StartMacroBenchmarkResponse) error - grpc.ServerStream -} - -type benchmarkStartMacroBenchmarkServer struct { - grpc.ServerStream -} - -func (x *benchmarkStartMacroBenchmarkServer) Send(m *StartMacroBenchmarkResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Benchmark_GetMacroBenchmark_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetMacroBenchmarkRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BenchmarkServer).GetMacroBenchmark(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/benchmark.Benchmark/GetMacroBenchmark", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BenchmarkServer).GetMacroBenchmark(ctx, req.(*GetMacroBenchmarkRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Benchmark_ListMacroBenchmarks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BenchmarkServer).ListMacroBenchmarks(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/benchmark.Benchmark/ListMacroBenchmarks", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BenchmarkServer).ListMacroBenchmarks(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Benchmark_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BenchmarkServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/benchmark.Benchmark/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BenchmarkServer).Status(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// Benchmark_ServiceDesc is the grpc.ServiceDesc for Benchmark service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Benchmark_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "benchmark.Benchmark", - HandlerType: (*BenchmarkServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetMacroBenchmark", - Handler: _Benchmark_GetMacroBenchmark_Handler, - }, - { - MethodName: "ListMacroBenchmarks", - Handler: _Benchmark_ListMacroBenchmarks_Handler, - }, - { - MethodName: "Status", - Handler: _Benchmark_Status_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StartMacroBenchmark", - Handler: _Benchmark_StartMacroBenchmark_Handler, - ServerStreams: true, - }, - }, - Metadata: "macro_benchmark.proto", -} diff --git a/integration/benchmark/server/bench.sh b/integration/benchmark/server/bench.sh index 778cac6279d..161549aba0f 100755 --- a/integration/benchmark/server/bench.sh +++ b/integration/benchmark/server/bench.sh @@ -7,7 +7,7 @@ set -o pipefail # this will keep the TPS automation code separate from the code that's being tested so we won't run into issues # of having old versions of automation code just because we happen to be testing an older version flow-go git clone https://github.com/onflow/flow-go.git -cd flow-go/integration/localnet +cd flow-go/integration/localnet || exit git fetch git fetch --tags @@ -37,7 +37,7 @@ while read -r input; do # sleep is workaround for slow initialization of some node types, so that benchmark does not quit immediately with "connection refused" sleep 30; - go run ../benchmark/cmd/ci -log-level debug -git-repo-path ../../ -tps-initial 800 -tps-min 1 -tps-max 1200 -duration 30m -load-type "$load" + go run ../benchmark/cmd/ci -log-level info -git-repo-path ../../ -tps-initial 800 -tps-min 1 -tps-max 1200 -duration 30m -load-type "$load" -load-config "../benchmark/server/load-config.yml" # instead of running "make stop" which uses docker-compose for a lot of older versions, # we explicitly run the command here with "docker compose" diff --git a/integration/benchmark/server/branches.recent b/integration/benchmark/server/branches.recent deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/integration/benchmark/server/commits.recent b/integration/benchmark/server/commits.recent deleted file mode 100644 index 538b5965dcc..00000000000 --- a/integration/benchmark/server/commits.recent +++ /dev/null @@ -1 +0,0 @@ -janez/tps-benchmark-evm-load:894151a2390b11e3d9a399b41746d1c112f745fa:evm diff --git a/integration/benchmark/server/flow-go b/integration/benchmark/server/flow-go deleted file mode 160000 index 894151a2390..00000000000 --- a/integration/benchmark/server/flow-go +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 894151a2390b11e3d9a399b41746d1c112f745fa diff --git a/integration/benchmark/server/load-config.yml b/integration/benchmark/server/load-config.yml new file mode 100644 index 00000000000..f7c62d31729 --- /dev/null +++ b/integration/benchmark/server/load-config.yml @@ -0,0 +1,20 @@ +token-transfer: + load_type: token-transfer + tps_initial: 800 + tps_min: 1 + tps_max: 1200 +create-account: + load_type: create-account + tps_initial: 600 + tps_min: 1 + tps_max: 1200 +ledger-heavy: + load_type: ledger-heavy + tps_initial: 3 + tps_min: 1 + tps_max: 1200 +evm-transfer: + load_type: evm-transfer + tps_initial: 500 + tps_min: 1 + tps_max: 1200 diff --git a/integration/benchmark/worker_stats_tracker.go b/integration/benchmark/worker_stats_tracker.go index d2a0f60f92e..cd582a2c2bf 100644 --- 
a/integration/benchmark/worker_stats_tracker.go +++ b/integration/benchmark/worker_stats_tracker.go @@ -133,7 +133,7 @@ func NewPeriodicStatsLogger( w := NewWorker( ctx, 0, - 1*time.Second, + 3*time.Second, func(workerID int) { stats := st.GetStats() log.Info(). diff --git a/integration/go.mod b/integration/go.mod index d7bfe88b5f5..5e1a1997f5e 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -20,15 +20,15 @@ require ( github.com/ipfs/go-ds-badger2 v0.1.3 github.com/ipfs/go-ipfs-blockstore v1.3.0 github.com/libp2p/go-libp2p v0.32.2 - github.com/onflow/cadence v0.42.9 + github.com/onflow/cadence v0.42.10 github.com/onflow/crypto v0.25.1 github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1 - github.com/onflow/flow-emulator v0.61.2-0.20240404201132-f53137a8e4cb - github.com/onflow/flow-go v0.33.2-0.20240404171354-0b0592cc5bba - github.com/onflow/flow-go-sdk v0.46.0 + github.com/onflow/flow-emulator v0.62.2-0.20240418140508-d969ff66d9cd + github.com/onflow/flow-go v0.33.2-0.20240412174857-015156b297b5 + github.com/onflow/flow-go-sdk v0.46.2 github.com/onflow/flow-go/insecure v0.0.0-00010101000000-000000000000 - github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e + github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 github.com/onflow/go-ethereum v1.13.4 github.com/plus3it/gorecurcopy v0.0.1 github.com/prometheus/client_golang v1.18.0 @@ -42,6 +42,7 @@ require ( golang.org/x/sync v0.6.0 google.golang.org/grpc v1.60.1 google.golang.org/protobuf v1.32.0 + gopkg.in/yaml.v3 v3.0.1 ) require ( @@ -349,7 +350,6 @@ require ( google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect modernc.org/libc v1.22.3 // indirect modernc.org/mathutil v1.5.0 // indirect diff --git a/integration/go.sum b/integration/go.sum index f5bac96f1bd..f2f704a4c86 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -1406,27 +1406,27 @@ github.com/onflow/atree v0.1.0-beta1.0.20211027184039-559ee654ece9/go.mod h1:+6x github.com/onflow/atree v0.6.1-0.20230711151834-86040b30171f h1:Z8/PgTqOgOg02MTRpTBYO2k16FE6z4wEOtaC2WBR9Xo= github.com/onflow/atree v0.6.1-0.20230711151834-86040b30171f/go.mod h1:xvP61FoOs95K7IYdIYRnNcYQGf4nbF/uuJ0tHf4DRuM= github.com/onflow/cadence v0.20.1/go.mod h1:7mzUvPZUIJztIbr9eTvs+fQjWWHTF8veC+yk4ihcNIA= -github.com/onflow/cadence v0.42.9 h1:EX+eak/Jjy9PyKcAEmOViGOHMyP/nCOwJO+deodZlJE= -github.com/onflow/cadence v0.42.9/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= +github.com/onflow/cadence v0.42.10 h1:3oC5ceeXhdCrhHcf9H0yYXQKW3Tw/vkSXLe+PUZa4i0= +github.com/onflow/cadence v0.42.10/go.mod h1:1wFd+LiNiN6qoZXof3MBdpM6d8BsxbVIxOA77LbIYmE= github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A= github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1 h1:xF5wHug6H8vKfz7p1LYy9jck6eD9K1HLjTdi6o4kg1k= github.com/onflow/flow-core-contracts/lib/go/contracts v0.15.1/go.mod h1:WHp24VkUQfcfZi0XjI1uRVRt5alM5SHVkwOil1U2Tpc= github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1 h1:EjWjbyVEA+bMxXbM44dE6MsYeqOu5a9q/EwSWa4ma2M= github.com/onflow/flow-core-contracts/lib/go/templates v0.15.1/go.mod h1:c09d6sNyF/j5/pAynK7sNPb1XKqJqk1rxZPEqEL+dUo= -github.com/onflow/flow-emulator 
v0.61.2-0.20240404201132-f53137a8e4cb h1:A2R42Vvw+HdAi3DnH2U/AFK4ziOk/wNkVB1lrhEzai8= -github.com/onflow/flow-emulator v0.61.2-0.20240404201132-f53137a8e4cb/go.mod h1:DicO8yliaj+0AFldfwa5No2FfZRQja1R7/abxSHqqDE= +github.com/onflow/flow-emulator v0.62.2-0.20240418140508-d969ff66d9cd h1:bR5IxvTK4HApiJt+OP+mLNKkVkr75piaLu8wDT6uKDA= +github.com/onflow/flow-emulator v0.62.2-0.20240418140508-d969ff66d9cd/go.mod h1:ONxdb0U5kE7XK8B1ZAAo6JAzYRAtC6oh9I8WAfi9I+E= github.com/onflow/flow-ft/lib/go/contracts v0.7.1-0.20230711213910-baad011d2b13 h1:B4ll7e3j+MqTJv2122Enq3RtDNzmIGRu9xjV7fo7un0= github.com/onflow/flow-ft/lib/go/contracts v0.7.1-0.20230711213910-baad011d2b13/go.mod h1:kTMFIySzEJJeupk+7EmXs0EJ6CBWY/MV9fv9iYQk+RU= github.com/onflow/flow-go-sdk v0.24.0/go.mod h1:IoptMLPyFXWvyd9yYA6/4EmSeeozl6nJoIv4FaEMg74= -github.com/onflow/flow-go-sdk v0.46.0 h1:mrIQziCDe6Oi5HH/aPFvYluh1XUwO6lYpoXLWrBZc2s= -github.com/onflow/flow-go-sdk v0.46.0/go.mod h1:azVWF0yHI8wT1erF0vuYGqQZybl6Frbc+0Zu3rIPeHc= +github.com/onflow/flow-go-sdk v0.46.2 h1:ypVGBeH9m5XpBOTU/CYVC0y/+z42e8mhUlq5aLiD24A= +github.com/onflow/flow-go-sdk v0.46.2/go.mod h1:tfLjB9FZmwqtT5gaacjvpIhz7KCd67YPm6v+iqYAjEA= github.com/onflow/flow-go/crypto v0.21.3/go.mod h1:vI6V4CY3R6c4JKBxdcRiR/AnjBfL8OSD97bJc60cLuQ= github.com/onflow/flow-nft/lib/go/contracts v1.1.0 h1:rhUDeD27jhLwOqQKI/23008CYfnqXErrJvc4EFRP2a0= github.com/onflow/flow-nft/lib/go/contracts v1.1.0/go.mod h1:YsvzYng4htDgRB9sa9jxdwoTuuhjK8WYWXTyLkIigZY= github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e h1:r4+gVDDMOOc04Y1qjCZULAdgoaxSMsqSdE1EyviG76U= -github.com/onflow/flow/protobuf/go/flow v0.3.7-0.20240404170900-c321c1475f1e/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030 h1:I+aosSiJny88O4p3nPbCiUcp/UqN6AepvO6uj82bjH0= +github.com/onflow/flow/protobuf/go/flow v0.4.1-0.20240412170550-911321113030/go.mod h1:NA2pX2nw8zuaxfKphhKsk00kWLwfd+tv8mS23YXO4Sk= github.com/onflow/go-ethereum v1.13.4 h1:iNO86fm8RbBbhZ87ZulblInqCdHnAQVY8okBrNsTevc= github.com/onflow/go-ethereum v1.13.4/go.mod h1:cE/gEUkAffhwbVmMJYz+t1dAfVNHNwZCgc3BWtZxBGY= github.com/onflow/nft-storefront/lib/go/contracts v0.0.0-20221222181731-14b90207cead h1:2j1Unqs76Z1b95Gu4C3Y28hzNUHBix7wL490e61SMSw= diff --git a/integration/testnet/network.go b/integration/testnet/network.go index 5a4484c39be..ab2b942055a 100644 --- a/integration/testnet/network.go +++ b/integration/testnet/network.go @@ -183,10 +183,10 @@ func (net *FlowNetwork) Identities() flow.IdentityList { } // ContainersByRole returns all the containers in the network with the specified role -func (net *FlowNetwork) ContainersByRole(role flow.Role) []*Container { +func (net *FlowNetwork) ContainersByRole(role flow.Role, ghost bool) []*Container { cl := make([]*Container, 0, len(net.Containers)) for _, c := range net.Containers { - if c.Config.Role == role { + if c.Config.Role == role && c.Config.Ghost == ghost { cl = append(cl, c) } } diff --git a/integration/tests/access/cohort1/access_api_test.go b/integration/tests/access/cohort1/access_api_test.go index 1cbf5b191c4..e3ad3369c43 100644 --- a/integration/tests/access/cohort1/access_api_test.go +++ b/integration/tests/access/cohort1/access_api_test.go @@ -278,12 +278,13 @@ func (s *AccessAPISuite) TestSendAndSubscribeTransactionStatuses() { // Send and subscribe to the transaction status using the access API 
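	// The stream should emit one response per transaction status transition, each
	// tagged with a strictly increasing message index, so the loop below can assert
	// both the index ordering and the pending -> finalized -> executed -> sealed
	// progression.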
subClient, err := accessClient.SendAndSubscribeTransactionStatuses(s.ctx, &accessproto.SendAndSubscribeTransactionStatusesRequest{ - Transaction: transactionMsg, + Transaction: transactionMsg, + EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, }) s.Require().NoError(err) expectedCounter := uint64(0) - var finalTxStatus entities.TransactionStatus + lastReportedTxStatus := entities.TransactionStatus_UNKNOWN var txID sdk.Identifier for { @@ -297,17 +298,22 @@ } if txID == sdk.EmptyID { - txID = sdk.Identifier(resp.GetId()) + txID = sdk.Identifier(resp.TransactionResults.TransactionId) } s.Assert().Equal(expectedCounter, resp.GetMessageIndex()) - s.Assert().Equal(txID, sdk.Identifier(resp.GetId())) + s.Assert().Equal(txID, sdk.Identifier(resp.TransactionResults.TransactionId)) + // Check that all statuses are received one by one. The subscription should send a response for each status, + // and the messages should be sent in the order of transaction statuses. + // Expected order: pending(1) -> finalized(2) -> executed(3) -> sealed(4) + s.Assert().Equal(lastReportedTxStatus, resp.TransactionResults.Status-1) expectedCounter++ - finalTxStatus = resp.Status + lastReportedTxStatus = resp.TransactionResults.Status } - s.Assert().Equal(entities.TransactionStatus_SEALED, finalTxStatus) + // Check that the final transaction status is sealed. + s.Assert().Equal(entities.TransactionStatus_SEALED, lastReportedTxStatus) } func (s *AccessAPISuite) testGetAccount(client *client.Client) { diff --git a/integration/tests/access/cohort2/observer_indexer_enabled_test.go b/integration/tests/access/cohort2/observer_indexer_enabled_test.go index e8d68c20e90..29b7c7df3ae 100644 --- a/integration/tests/access/cohort2/observer_indexer_enabled_test.go +++ b/integration/tests/access/cohort2/observer_indexer_enabled_test.go @@ -18,6 +18,7 @@ import ( sdkcrypto "github.com/onflow/flow-go-sdk/crypto" "github.com/onflow/flow-go-sdk/templates" "github.com/onflow/flow-go/engine/access/rpc/backend" + "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/integration/testnet" "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/flow" @@ -41,8 +42,8 @@ type ObserverIndexerEnabledSuite struct { ObserverSuite } -// SetupTest sets up the test suite by starting the network and preparing the observer client. -// By overriding this function, we can ensure that the observer is started with correct parameters and select +// SetupTest sets up the test suite by starting the network and preparing the observer clients. +// By overriding this function, we can ensure that the observers are started with correct parameters and select // the RPCs and REST endpoints that are tested.
func (s *ObserverIndexerEnabledSuite) SetupTest() { s.localRpc = map[string]struct{}{ @@ -109,17 +110,23 @@ testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), } - observers := []testnet.ObserverConfig{{ - LogLevel: zerolog.InfoLevel, - AdditionalFlags: []string{ - fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), - fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), - "--execution-data-sync-enabled=true", - "--execution-data-indexing-enabled=true", - "--local-service-api-enabled=true", - "--event-query-mode=execution-nodes-only", + observers := []testnet.ObserverConfig{ + { + LogLevel: zerolog.InfoLevel, + AdditionalFlags: []string{ + fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + "--execution-data-sync-enabled=true", + "--execution-data-indexing-enabled=true", + "--local-service-api-enabled=true", + "--event-query-mode=execution-nodes-only", + }, }, - }} + { + ContainerName: "observer_2", + LogLevel: zerolog.InfoLevel, + }, + } // prepare the network conf := testnet.NewNetworkConfig("observer_indexing_enabled_test", nodeConfigs, testnet.WithObservers(observers...)) @@ -133,9 +140,6 @@ } // TestObserverIndexedRPCsHappyPath tests RPCs that are handled by the observer by using a dedicated indexer for the events. -// For now the observer only supports the following RPCs: -// - GetEventsForHeightRange -// - GetEventsForBlockIDs // To ensure that the observer is handling these RPCs, we stop the upstream access node and verify that the observer client // returns success for valid requests and errors for invalid ones. func (s *ObserverIndexerEnabledSuite) TestObserverIndexedRPCsHappyPath() { @@ -260,7 +264,277 @@ } } require.True(t, found) +} + +// TestAllObserverIndexedRPCsHappyPath tests an observer with the indexer enabled, +// an observer configured to proxy requests to an access node, and the access node itself. All responses are +// compared to ensure all of the endpoints are working as expected. +// For now the observer only supports the following RPCs: +// - GetAccountAtBlockHeight +// - GetEventsForHeightRange +// - GetEventsForBlockIDs +// - GetSystemTransaction +// - GetTransactionsByBlockID +// - GetTransactionResultsByBlockID +// - ExecuteScriptAtBlockID +// - ExecuteScriptAtBlockHeight +// - GetExecutionResultByID +// - GetCollectionByID +// - GetTransaction +// - GetTransactionResult +// - GetTransactionResultByIndex +func (s *ObserverIndexerEnabledSuite) TestAllObserverIndexedRPCsHappyPath() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + t := s.T() + + // prepare environment to create a new account + serviceAccountClient, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() + require.NoError(t, err) + + latestBlockID, err := serviceAccountClient.GetLatestBlockID(ctx) + require.NoError(t, err) + + // create new account to deploy Counter to + accountPrivateKey := lib.RandomPrivateKey() + + accountKey := sdk.NewAccountKey(). + FromPrivateKey(accountPrivateKey). + SetHashAlgo(sdkcrypto.SHA3_256).
+ SetWeight(sdk.AccountKeyWeightThreshold) + + serviceAddress := sdk.Address(serviceAccountClient.Chain.ServiceAddress()) + + // Generate the account creation transaction + createAccountTx, err := templates.CreateAccount( + []*sdk.AccountKey{accountKey}, + []templates.Contract{ + { + Name: lib.CounterContract.Name, + Source: lib.CounterContract.ToCadence(), + }, + }, serviceAddress) + require.NoError(t, err) + + createAccountTx. + SetReferenceBlockID(sdk.Identifier(latestBlockID)). + SetProposalKey(serviceAddress, 0, serviceAccountClient.GetSeqNumber()). + SetPayer(serviceAddress). + SetComputeLimit(9999) + + // send the create account tx + childCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + err = serviceAccountClient.SignAndSendTransaction(childCtx, createAccountTx) + require.NoError(t, err) + + cancel() + + // wait for account to be created + var accountCreationTxRes *sdk.TransactionResult + unittest.RequireReturnsBefore(t, func() { + accountCreationTxRes, err = serviceAccountClient.WaitForSealed(context.Background(), createAccountTx.ID()) + require.NoError(t, err) + }, 20*time.Second, "has to seal before timeout") + + // obtain the account address + var accountCreatedPayload []byte + var newAccountAddress sdk.Address + for _, event := range accountCreationTxRes.Events { + if event.Type == sdk.EventAccountCreated { + accountCreatedEvent := sdk.AccountCreatedEvent(event) + accountCreatedPayload = accountCreatedEvent.Payload + newAccountAddress = accountCreatedEvent.Address() + break + } + } + require.NotEqual(t, sdk.EmptyAddress, newAccountAddress) + + // now we can use observerLocal to query data that has to be locally indexed + + // get an access node client + accessNode, err := s.getClient(s.net.ContainerByName(testnet.PrimaryAN).Addr(testnet.GRPCPort)) + require.NoError(t, err) + + // get a client for the observer with the indexer enabled + observerLocal, err := s.getObserverClient() + require.NoError(t, err) + + // get an upstream observer client + observerUpstream, err := s.getClient(s.net.ContainerByName("observer_2").Addr(testnet.GRPCPort)) + require.NoError(t, err) + + // wait for data to be synced by observerLocal + require.Eventually(t, func() bool { + _, err := observerLocal.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: newAccountAddress.Bytes(), + BlockHeight: accountCreationTxRes.BlockHeight, + }) + statusErr, ok := status.FromError(err) + if !ok || err == nil { + return true + } + return statusErr.Code() != codes.OutOfRange + }, 30*time.Second, 1*time.Second) + + blockWithAccount, err := observerLocal.GetBlockByID(ctx, &accessproto.GetBlockByIDRequest{ + Id: accountCreationTxRes.BlockID[:], + FullBlockResponse: true, + }) + require.NoError(t, err) + + checkRPC := func(rpcCall func(client accessproto.AccessAPIClient) (any, error)) { + observerRes, err := rpcCall(observerLocal) + require.NoError(s.T(), err) + observerUpstreamRes, err := rpcCall(observerUpstream) + require.NoError(s.T(), err) + accessRes, err := rpcCall(accessNode) + require.NoError(s.T(), err) + + require.Equal(s.T(), observerRes, observerUpstreamRes) + require.Equal(s.T(), observerRes, accessRes) + } + + // GetEventsForBlockIDs + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetEventsForBlockIDs(ctx, &accessproto.GetEventsForBlockIDsRequest{ + Type: sdk.EventAccountCreated, + BlockIds: [][]byte{blockWithAccount.Block.Id}, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + return res.Results, err +
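// Editor's note (illustrative sketch, not part of this change-set): checkRPC
// above fans a single request out to the indexing observer, the proxying
// observer, and the access node, and requires identical responses. The same
// pattern in isolation, generalized to any client set (the helper name is an
// assumption):
//
//	func requireSameResponse(t *testing.T, call func(accessproto.AccessAPIClient) (any, error), clients ...accessproto.AccessAPIClient) {
//		var control any
//		for i, c := range clients {
//			res, err := call(c)
//			require.NoError(t, err)
//			if i == 0 {
//				control = res // the first client serves as the control
//				continue
//			}
//			require.Equal(t, control, res)
//		}
//	}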
}) + + var txIndex uint32 + found := false + + // GetEventsForHeightRange + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetEventsForHeightRange(ctx, &accessproto.GetEventsForHeightRangeRequest{ + Type: sdk.EventAccountCreated, + StartHeight: blockWithAccount.Block.Height, + EndHeight: blockWithAccount.Block.Height, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + + // Iterating through response Results to get txIndex of event + for _, eventsInBlock := range res.Results { + for _, event := range eventsInBlock.Events { + if event.Type == sdk.EventAccountCreated { + if bytes.Equal(event.Payload, accountCreatedPayload) { + found = true + txIndex = event.TransactionIndex + } + } + } + } + require.True(t, found) + return res.Results, err + }) + // GetSystemTransaction + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetSystemTransaction(ctx, &accessproto.GetSystemTransactionRequest{ + BlockId: blockWithAccount.Block.Id, + }) + return res.Transaction, err + }) + + // GetExecutionResultByID + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + converted, err := convert.MessageToBlock(blockWithAccount.Block) + require.NoError(t, err) + + resultId := converted.Payload.Results[0].ID() + res, err := client.GetExecutionResultByID(ctx, &accessproto.GetExecutionResultByIDRequest{ + Id: convert.IdentifierToMessage(resultId), + }) + return res.ExecutionResult, err + }) + + // GetTransaction + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransaction(ctx, &accessproto.GetTransactionRequest{ + Id: accountCreationTxRes.TransactionID.Bytes(), + BlockId: blockWithAccount.Block.Id, + CollectionId: nil, + }) + return res.Transaction, err + }) + + // GetTransactionResult + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionResult(ctx, &accessproto.GetTransactionRequest{ + Id: accountCreationTxRes.TransactionID.Bytes(), + BlockId: blockWithAccount.Block.Id, + CollectionId: accountCreationTxRes.CollectionID.Bytes(), + }) + return res.Events, err + }) + + // GetTransactionResultByIndex + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionResultByIndex(ctx, &accessproto.GetTransactionByIndexRequest{ + BlockId: blockWithAccount.Block.Id, + Index: txIndex, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + return res.Events, err + }) + + // GetTransactionResultsByBlockID + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionResultsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockWithAccount.Block.Id, + EventEncodingVersion: entities.EventEncodingVersion_JSON_CDC_V0, + }) + return res.TransactionResults, err + }) + + // GetTransactionsByBlockID + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetTransactionsByBlockID(ctx, &accessproto.GetTransactionsByBlockIDRequest{ + BlockId: blockWithAccount.Block.Id, + }) + return res.Transactions, err + }) + + // GetCollectionByID + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetCollectionByID(ctx, &accessproto.GetCollectionByIDRequest{ + Id: accountCreationTxRes.CollectionID.Bytes(), + }) + return res.Collection, err + }) + + // ExecuteScriptAtBlockHeight + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := 
client.ExecuteScriptAtBlockHeight(ctx, &accessproto.ExecuteScriptAtBlockHeightRequest{ + BlockHeight: blockWithAccount.Block.Height, + Script: []byte(simpleScript), + Arguments: make([][]byte, 0), + }) + return res.Value, err + }) + + // ExecuteScriptAtBlockID + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.ExecuteScriptAtBlockID(ctx, &accessproto.ExecuteScriptAtBlockIDRequest{ + BlockId: blockWithAccount.Block.Id, + Script: []byte(simpleScript), + Arguments: make([][]byte, 0), + }) + return res.Value, err + }) + + // GetAccountAtBlockHeight + checkRPC(func(client accessproto.AccessAPIClient) (any, error) { + res, err := client.GetAccountAtBlockHeight(ctx, &accessproto.GetAccountAtBlockHeightRequest{ + Address: newAccountAddress.Bytes(), + BlockHeight: accountCreationTxRes.BlockHeight, + }) + return res.Account, err + }) } func (s *ObserverIndexerEnabledSuite) getRPCs() []RPCTest { diff --git a/integration/tests/access/cohort3/grpc_state_stream_test.go b/integration/tests/access/cohort3/grpc_state_stream_test.go index 1691aa0ef6c..be6f0840b99 100644 --- a/integration/tests/access/cohort3/grpc_state_stream_test.go +++ b/integration/tests/access/cohort3/grpc_state_stream_test.go @@ -8,9 +8,9 @@ import ( "log" "sync" "testing" - "time" "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "google.golang.org/grpc" @@ -21,7 +21,9 @@ import ( "github.com/onflow/flow-go-sdk/test" "github.com/onflow/flow-go/engine/access/state_stream/backend" "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/engine/ghost/client" "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/utils/unittest" @@ -48,6 +50,7 @@ func TestGrpcStateStream(t *testing.T) { type GrpcStateStreamSuite struct { suite.Suite + lib.TestnetStateTracker log zerolog.Logger @@ -58,7 +61,9 @@ type GrpcStateStreamSuite struct { net *testnet.FlowNetwork // RPC methods to test - testedRPCs func() []RPCTest + testedRPCs func() []subscribeEventsRPCTest + + ghostID flow.Identifier } func (s *GrpcStateStreamSuite) TearDownTest() { @@ -99,6 +104,14 @@ func (s *GrpcStateStreamSuite) SetupTest() { testnet.WithAdditionalFlag("--event-query-mode=execution-nodes-only"), ) + // add the ghost (access) node config + s.ghostID = unittest.IdentifierFixture() + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithID(s.ghostID), + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.AsGhost()) + consensusConfigs := []func(config *testnet.NodeConfig){ testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=400ms"), testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), @@ -117,6 +130,7 @@ func (s *GrpcStateStreamSuite) SetupTest() { testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), testANConfig, // access_1 controlANConfig, // access_2 + ghostNode, // access ghost } // add the observer node config @@ -142,6 +156,13 @@ func (s *GrpcStateStreamSuite) SetupTest() { s.testedRPCs = s.getRPCs s.net.Start(s.ctx) + s.Track(s.T(), s.ctx, s.Ghost()) +} + +func (s *GrpcStateStreamSuite) Ghost() *client.GhostClient { + client, err := s.net.ContainerByID(s.ghostID).GhostClient() + require.NoError(s.T(), err, "could not get ghost client") + return client } // 
TestHappyPath tests gRPC event streaming @@ -158,12 +179,17 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { sdkClientTestON, err := getClient(testONURL) s.Require().NoError(err) + // get the first block height + currentFinalized := s.BlockState.HighestFinalizedHeight() + blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized) + + // Let the network run for this many blocks + blockCount := uint64(5) + // wait for the requested number of sealed blocks + s.BlockState.WaitForSealed(s.T(), blockA.Header.Height+blockCount) + txGenerator, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() s.Require().NoError(err) - header, err := txGenerator.GetLatestSealedBlockHeader(s.ctx) - s.Require().NoError(err) - - time.Sleep(20 * time.Second) var startValue interface{} txCount := 10 @@ -171,24 +197,21 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { for _, rpc := range s.testedRPCs() { s.T().Run(rpc.name, func(t *testing.T) { if rpc.name == "SubscribeEventsFromStartBlockID" { - startValue = header.ID.Bytes() + startValue = convert.IdentifierToMessage(blockA.ID()) } else { - startValue = header.Height + startValue = blockA.Header.Height } - testANStream, err := rpc.call(s.ctx, sdkClientTestAN, startValue, &executiondata.EventFilter{}) - s.Require().NoError(err) - testANEvents, testANErrs, err := SubscribeEventsHandler(s.ctx, testANStream) + testANRecv := rpc.call(s.ctx, sdkClientTestAN, startValue, &executiondata.EventFilter{}) + testANEvents, testANErrs, err := SubscribeHandler(s.ctx, testANRecv, eventsResponseHandler) s.Require().NoError(err) - controlANStream, err := rpc.call(s.ctx, sdkClientControlAN, startValue, &executiondata.EventFilter{}) - s.Require().NoError(err) - controlANEvents, controlANErrs, err := SubscribeEventsHandler(s.ctx, controlANStream) + controlANRecv := rpc.call(s.ctx, sdkClientControlAN, startValue, &executiondata.EventFilter{}) + controlANEvents, controlANErrs, err := SubscribeHandler(s.ctx, controlANRecv, eventsResponseHandler) s.Require().NoError(err) - testONStream, err := rpc.call(s.ctx, sdkClientTestON, startValue, &executiondata.EventFilter{}) - s.Require().NoError(err) - testONEvents, testONErrs, err := SubscribeEventsHandler(s.ctx, testONStream) + testONRecv := rpc.call(s.ctx, sdkClientTestON, startValue, &executiondata.EventFilter{}) + testONEvents, testONErrs, err := SubscribeHandler(s.ctx, testONRecv, eventsResponseHandler) s.Require().NoError(err) if rpc.generateEvents { @@ -213,7 +236,7 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { foundONTxCount := 0 messageIndex := counters.NewMonotonousCounter(0) - r := newResponseTracker() + r := NewResponseTracker(compareEventsResponse, 3) for { select { @@ -226,7 +249,7 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { case event := <-testANEvents: if has(event.Events, targetEvent) { s.T().Logf("adding access test events: %d %d %v", event.Height, len(event.Events), event.Events) - r.Add(s.T(), event.Height, "access_test", &event) + r.Add(s.T(), event.Height, "access_test", event) foundANTxCount++ } case event := <-controlANEvents: if has(event.Events, targetEvent) { @@ -236,12 +259,12 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { } s.T().Logf("adding control events: %d %d %v", event.Height, len(event.Events), event.Events) - r.Add(s.T(), event.Height, "access_control", &event) + r.Add(s.T(), event.Height, "access_control", event) } case event := <-testONEvents: if has(event.Events, targetEvent) { s.T().Logf("adding observer test events: %d %d %v", event.Height, len(event.Events), event.Events) - 
r.Add(s.T(), event.Height, "observer_test", &event) + r.Add(s.T(), event.Height, "observer_test", event) foundONTxCount++ } } @@ -250,6 +273,8 @@ func (s *GrpcStateStreamSuite) TestHappyPath() { break } } + + r.AssertAllResponsesHandled(t, txCount) }) } } @@ -270,104 +295,161 @@ func (s *GrpcStateStreamSuite) generateEvents(client *testnet.Client, txCount in } } -type RPCTest struct { +type subscribeEventsRPCTest struct { name string - call func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) (executiondata.ExecutionDataAPI_SubscribeEventsClient, error) + call func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) generateEvents bool // whether the test should generate new events or reuse old ones to decrease the test running time } -func (s *GrpcStateStreamSuite) getRPCs() []RPCTest { - return []RPCTest{ +func (s *GrpcStateStreamSuite) getRPCs() []subscribeEventsRPCTest { + return []subscribeEventsRPCTest{ { name: "SubscribeEventsFromLatest", - call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, _ interface{}, filter *executiondata.EventFilter) (executiondata.ExecutionDataAPI_SubscribeEventsClient, error) { - return client.SubscribeEventsFromLatest(ctx, &executiondata.SubscribeEventsFromLatestRequest{ + call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, _ interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) { + stream, err := client.SubscribeEventsFromLatest(ctx, &executiondata.SubscribeEventsFromLatestRequest{ EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, Filter: filter, HeartbeatInterval: 1, }) + s.Require().NoError(err) + return stream.Recv }, generateEvents: true, }, { name: "SubscribeEvents", - call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, _ interface{}, filter *executiondata.EventFilter) (executiondata.ExecutionDataAPI_SubscribeEventsClient, error) { + call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, _ interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) { //nolint: staticcheck - return client.SubscribeEvents(ctx, &executiondata.SubscribeEventsRequest{ + stream, err := client.SubscribeEvents(ctx, &executiondata.SubscribeEventsRequest{ StartBlockId: convert.IdentifierToMessage(flow.ZeroID), StartBlockHeight: 0, EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, Filter: filter, HeartbeatInterval: 1, }) + s.Require().NoError(err) + return stream.Recv }, generateEvents: true, }, { name: "SubscribeEventsFromStartBlockID", - call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) (executiondata.ExecutionDataAPI_SubscribeEventsClient, error) { - return client.SubscribeEventsFromStartBlockID(ctx, &executiondata.SubscribeEventsFromStartBlockIDRequest{ + call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) { + stream, err := client.SubscribeEventsFromStartBlockID(ctx, &executiondata.SubscribeEventsFromStartBlockIDRequest{ StartBlockId: startValue.([]byte), EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, Filter: filter, 
HeartbeatInterval: 1, }) + s.Require().NoError(err) + return stream.Recv }, generateEvents: false, // use previous events }, { name: "SubscribeEventsFromStartHeight", - call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) (executiondata.ExecutionDataAPI_SubscribeEventsClient, error) { - return client.SubscribeEventsFromStartHeight(ctx, &executiondata.SubscribeEventsFromStartHeightRequest{ + call: func(ctx context.Context, client executiondata.ExecutionDataAPIClient, startValue interface{}, filter *executiondata.EventFilter) func() (*executiondata.SubscribeEventsResponse, error) { + stream, err := client.SubscribeEventsFromStartHeight(ctx, &executiondata.SubscribeEventsFromStartHeightRequest{ StartBlockHeight: startValue.(uint64), EventEncodingVersion: entities.EventEncodingVersion_CCF_V0, Filter: filter, HeartbeatInterval: 1, }) + s.Require().NoError(err) + return stream.Recv }, generateEvents: false, // use previous events }, } } -type ResponseTracker struct { - r map[uint64]map[string]SubscribeEventsResponse - mu sync.RWMutex +// ResponseTracker is a generic tracker for responses. +type ResponseTracker[T any] struct { + r map[uint64]map[string]T + mu sync.RWMutex + compare func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error + checkCount int // number of response sets that have actually been compared + responsesCountToCompare int // number of responses to collect per block height before comparing them } -func newResponseTracker() *ResponseTracker { - return &ResponseTracker{ - r: make(map[uint64]map[string]SubscribeEventsResponse), +// NewResponseTracker creates a new ResponseTracker. +func NewResponseTracker[T any]( + compare func(t *testing.T, responses map[uint64]map[string]T, blockHeight uint64) error, + responsesCountToCompare int, +) *ResponseTracker[T] { + return &ResponseTracker[T]{ + r: make(map[uint64]map[string]T), + compare: compare, + responsesCountToCompare: responsesCountToCompare, + } +} + +func (r *ResponseTracker[T]) AssertAllResponsesHandled(t *testing.T, expectedCheckCount int) { + assert.Equal(t, expectedCheckCount, r.checkCount) + + // check whether the tracker still holds responses that should have been compared but were not + hasNotComparedResponses := false + for _, valueMap := range r.r { + if len(valueMap) == r.responsesCountToCompare { + hasNotComparedResponses = true + break + } } + assert.False(t, hasNotComparedResponses) } -func (r *ResponseTracker) Add(t *testing.T, blockHeight uint64, name string, events *SubscribeEventsResponse) { +func (r *ResponseTracker[T]) Add(t *testing.T, blockHeight uint64, name string, response T) { r.mu.Lock() defer r.mu.Unlock() if _, ok := r.r[blockHeight]; !ok { - r.r[blockHeight] = make(map[string]SubscribeEventsResponse) + r.r[blockHeight] = make(map[string]T) } - r.r[blockHeight][name] = *events + r.r[blockHeight][name] = response - if len(r.r[blockHeight]) != 3 { + if len(r.r[blockHeight]) != r.responsesCountToCompare { return } - err := r.compare(t, r.r[blockHeight]["access_control"], r.r[blockHeight]["access_test"]) + r.checkCount += 1 + err := r.compare(t, r.r, blockHeight) if err != nil { - log.Fatalf("failure comparing access and access data %d: %v", blockHeight, err) - } - - err = r.compare(t, r.r[blockHeight]["access_control"], r.r[blockHeight]["observer_test"]) - if err != nil { - log.Fatalf("failure comparing access and observer data %d: %v", blockHeight, err) + log.Fatalf("comparison error at block height %d: %v",
blockHeight, err) } delete(r.r, blockHeight) } -func (r *ResponseTracker) compare(t *testing.T, controlData SubscribeEventsResponse, testData SubscribeEventsResponse) error { +func eventsResponseHandler(msg *executiondata.SubscribeEventsResponse) (*SubscribeEventsResponse, error) { + events := convert.MessagesToEvents(msg.GetEvents()) + + return &SubscribeEventsResponse{ + EventsResponse: backend.EventsResponse{ + Height: msg.GetBlockHeight(), + BlockID: convert.MessageToIdentifier(msg.GetBlockId()), + Events: events, + BlockTimestamp: msg.GetBlockTimestamp().AsTime(), + }, + MessageIndex: msg.MessageIndex, + }, nil +} + +func compareEventsResponse(t *testing.T, responses map[uint64]map[string]*SubscribeEventsResponse, blockHeight uint64) error { + + accessControlData := responses[blockHeight]["access_control"] + accessTestData := responses[blockHeight]["access_test"] + observerTestData := responses[blockHeight]["observer_test"] + + // Compare access_control with access_test + compareEvents(t, accessControlData, accessTestData) + + // Compare access_control with observer_test + compareEvents(t, accessControlData, observerTestData) + + return nil +} + +func compareEvents(t *testing.T, controlData, testData *SubscribeEventsResponse) { require.Equal(t, controlData.BlockID, testData.BlockID) require.Equal(t, controlData.Height, testData.Height) require.Equal(t, controlData.BlockTimestamp, testData.BlockTimestamp) @@ -381,8 +463,6 @@ func (r *ResponseTracker) compare(t *testing.T, controlData SubscribeEventsRespo require.Equal(t, controlData.Events[i].EventIndex, testData.Events[i].EventIndex) require.True(t, bytes.Equal(controlData.Events[i].Payload, testData.Events[i].Payload)) } - - return nil } // TODO: switch to SDK versions once crypto library is fixed to support the latest SDK version @@ -396,11 +476,12 @@ func getClient(address string) (executiondata.ExecutionDataAPIClient, error) { return executiondata.NewExecutionDataAPIClient(conn), nil } -func SubscribeEventsHandler( +func SubscribeHandler[T any, V any]( ctx context.Context, - stream executiondata.ExecutionDataAPI_SubscribeEventsClient, -) (<-chan SubscribeEventsResponse, <-chan error, error) { - sub := make(chan SubscribeEventsResponse) + recv func() (T, error), + responseHandler func(T) (V, error), +) (<-chan V, <-chan error, error) { + sub := make(chan V) errChan := make(chan error) sendErr := func(err error) { @@ -415,26 +496,20 @@ func SubscribeEventsHandler( defer close(errChan) for { - resp, err := stream.Recv() + resp, err := recv() if err != nil { if err == io.EOF { return } - sendErr(fmt.Errorf("error receiving event: %w", err)) + sendErr(fmt.Errorf("error receiving response: %w", err)) return } - events := convert.MessagesToEvents(resp.GetEvents()) - - response := SubscribeEventsResponse{ - EventsResponse: backend.EventsResponse{ - Height: resp.GetBlockHeight(), - BlockID: convert.MessageToIdentifier(resp.GetBlockId()), - Events: events, - BlockTimestamp: resp.GetBlockTimestamp().AsTime(), - }, - MessageIndex: resp.MessageIndex, + response, err := responseHandler(resp) + if err != nil { + sendErr(fmt.Errorf("error converting response: %w", err)) + return } select { diff --git a/integration/tests/access/cohort3/grpc_streaming_blocks_test.go b/integration/tests/access/cohort3/grpc_streaming_blocks_test.go new file mode 100644 index 00000000000..82e1c23cf28 --- /dev/null +++ b/integration/tests/access/cohort3/grpc_streaming_blocks_test.go @@ -0,0 +1,278 @@ +package cohort3 + +import ( + "context" + "fmt" + "testing" + + 
"google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/common/rpc/convert" + "github.com/onflow/flow-go/engine/ghost/client" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" + + accessproto "github.com/onflow/flow/protobuf/go/flow/access" + "github.com/onflow/flow/protobuf/go/flow/entities" +) + +func TestGrpcBlocksStream(t *testing.T) { + suite.Run(t, new(GrpcBlocksStreamSuite)) +} + +type GrpcBlocksStreamSuite struct { + suite.Suite + lib.TestnetStateTracker + + log zerolog.Logger + + // root context for the current test + ctx context.Context + cancel context.CancelFunc + + net *testnet.FlowNetwork + + // RPC methods to test + testedRPCs func() []subscribeBlocksRPCTest + + ghostID flow.Identifier +} + +func (s *GrpcBlocksStreamSuite) TearDownTest() { + s.log.Info().Msg("================> Start TearDownTest") + s.net.Remove() + s.cancel() + s.log.Info().Msg("================> Finish TearDownTest") +} + +func (s *GrpcBlocksStreamSuite) SetupTest() { + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + // access node + accessConfig := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.InfoLevel), + testnet.WithAdditionalFlag("--execution-data-sync-enabled=true"), + testnet.WithAdditionalFlagf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + testnet.WithAdditionalFlag("--execution-data-retry-delay=1s"), + testnet.WithAdditionalFlag("--execution-data-indexing-enabled=true"), + testnet.WithAdditionalFlagf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + testnet.WithAdditionalFlag("--event-query-mode=local-only"), + testnet.WithAdditionalFlag("--supports-observer=true"), + testnet.WithAdditionalFlagf("--public-network-execution-data-sync-enabled=true"), + ) + + consensusConfigs := []func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag("--cruise-ctl-fallback-proposal-duration=400ms"), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", 1)), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", 1)), + testnet.WithLogLevel(zerolog.FatalLevel), + } + + // add the ghost (access) node config + s.ghostID = unittest.IdentifierFixture() + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithID(s.ghostID), + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.AsGhost()) + + nodeConfigs := []testnet.NodeConfig{ + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleCollection, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.FatalLevel)), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.FatalLevel)), + accessConfig, + ghostNode, // access ghost + } + + // add the observer node 
config + observers := []testnet.ObserverConfig{{ + ContainerName: testnet.PrimaryON, + LogLevel: zerolog.DebugLevel, + AdditionalFlags: []string{ + fmt.Sprintf("--execution-data-dir=%s", testnet.DefaultExecutionDataServiceDir), + fmt.Sprintf("--execution-state-dir=%s", testnet.DefaultExecutionStateDir), + "--execution-data-sync-enabled=true", + "--event-query-mode=execution-nodes-only", + "--execution-data-indexing-enabled=true", + }, + }} + + conf := testnet.NewNetworkConfig("access_blocks_streaming_test", nodeConfigs, testnet.WithObservers(observers...)) + s.net = testnet.PrepareFlowNetwork(s.T(), conf, flow.Localnet) + + // start the network + s.T().Logf("starting flow network with docker containers") + s.ctx, s.cancel = context.WithCancel(context.Background()) + + s.testedRPCs = s.getRPCs + + s.net.Start(s.ctx) + s.Track(s.T(), s.ctx, s.Ghost()) +} + +func (s *GrpcBlocksStreamSuite) Ghost() *client.GhostClient { + client, err := s.net.ContainerByID(s.ghostID).GhostClient() + require.NoError(s.T(), err, "could not get ghost client") + return client +} + +// TestHappyPath tests gRPC block streaming +func (s *GrpcBlocksStreamSuite) TestHappyPath() { + accessUrl := fmt.Sprintf("localhost:%s", s.net.ContainerByName(testnet.PrimaryAN).Port(testnet.GRPCPort)) + accessClient, err := getAccessAPIClient(accessUrl) + s.Require().NoError(err) + + observerURL := fmt.Sprintf("localhost:%s", s.net.ContainerByName(testnet.PrimaryON).Port(testnet.GRPCPort)) + observerClient, err := getAccessAPIClient(observerURL) + s.Require().NoError(err) + + // get the first block height + currentFinalized := s.BlockState.HighestFinalizedHeight() + blockA := s.BlockState.WaitForHighestFinalizedProgress(s.T(), currentFinalized) + + // Let the network run for this many blocks + blockCount := uint64(5) + // wait for the requested number of sealed blocks + s.BlockState.WaitForSealed(s.T(), blockA.Header.Height+blockCount) + + var startValue interface{} + txCount := 10 + + for _, rpc := range s.testedRPCs() { + s.T().Run(rpc.name, func(t *testing.T) { + if rpc.name == "SubscribeBlocksFromStartBlockID" { + startValue = convert.IdentifierToMessage(blockA.ID()) + } else { + startValue = blockA.Header.Height + } + + accessRecv := rpc.call(s.ctx, accessClient, startValue) + accessBlocks, accessBlockErrs, err := SubscribeHandler(s.ctx, accessRecv, blockResponseHandler) + s.Require().NoError(err) + + observerRecv := rpc.call(s.ctx, observerClient, startValue) + observerBlocks, observerBlockErrs, err := SubscribeHandler(s.ctx, observerRecv, blockResponseHandler) + s.Require().NoError(err) + + foundANTxCount := 0 + foundONTxCount := 0 + + r := NewResponseTracker(compareBlocksResponse, 2) + + for { + select { + case err := <-accessBlockErrs: + s.Require().NoErrorf(err, "unexpected AN error") + case err := <-observerBlockErrs: + s.Require().NoErrorf(err, "unexpected ON error") + case block := <-accessBlocks: + s.T().Logf("AN block received: height: %d", block.Header.Height) + r.Add(s.T(), block.Header.Height, "access", block) + foundANTxCount++ + case block := <-observerBlocks: + s.T().Logf("ON block received: height: %d", block.Header.Height) + r.Add(s.T(), block.Header.Height, "observer", block) + foundONTxCount++ + } + + if foundANTxCount >= txCount && foundONTxCount >= txCount { + break + } + } + + r.AssertAllResponsesHandled(t, txCount) + }) + } +} + +func blockResponseHandler(msg *accessproto.SubscribeBlocksResponse) (*flow.Block, error) { + return convert.MessageToBlock(msg.GetBlock()) +} + +func 
compareBlocksResponse(t *testing.T, responses map[uint64]map[string]*flow.Block, blockHeight uint64) error { + accessData := responses[blockHeight]["access"] + observerData := responses[blockHeight]["observer"] + + // Compare access with observer + compareBlocks(t, accessData, observerData) + + return nil +} + +func compareBlocks(t *testing.T, accessBlock *flow.Block, observerBlock *flow.Block) { + require.Equal(t, accessBlock.ID(), observerBlock.ID()) + require.Equal(t, accessBlock.Header.Height, observerBlock.Header.Height) + require.Equal(t, accessBlock.Header.Timestamp, observerBlock.Header.Timestamp) + require.Equal(t, accessBlock.Payload.Hash(), observerBlock.Payload.Hash()) +} + +type subscribeBlocksRPCTest struct { + name string + call func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) func() (*accessproto.SubscribeBlocksResponse, error) +} + +func (s *GrpcBlocksStreamSuite) getRPCs() []subscribeBlocksRPCTest { + return []subscribeBlocksRPCTest{ + { + name: "SubscribeBlocksFromLatest", + call: func(ctx context.Context, client accessproto.AccessAPIClient, _ interface{}) func() (*accessproto.SubscribeBlocksResponse, error) { + stream, err := client.SubscribeBlocksFromLatest(ctx, &accessproto.SubscribeBlocksFromLatestRequest{ + BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, + FullBlockResponse: true, + }) + s.Require().NoError(err) + return stream.Recv + }, + }, + { + name: "SubscribeBlocksFromStartBlockID", + call: func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) func() (*accessproto.SubscribeBlocksResponse, error) { + stream, err := client.SubscribeBlocksFromStartBlockID(ctx, &accessproto.SubscribeBlocksFromStartBlockIDRequest{ + StartBlockId: startValue.([]byte), + BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, + FullBlockResponse: true, + }) + s.Require().NoError(err) + return stream.Recv + }, + }, + { + name: "SubscribeBlocksFromStartHeight", + call: func(ctx context.Context, client accessproto.AccessAPIClient, startValue interface{}) func() (*accessproto.SubscribeBlocksResponse, error) { + stream, err := client.SubscribeBlocksFromStartHeight(ctx, &accessproto.SubscribeBlocksFromStartHeightRequest{ + StartBlockHeight: startValue.(uint64), + BlockStatus: entities.BlockStatus_BLOCK_FINALIZED, + FullBlockResponse: true, + }) + s.Require().NoError(err) + return stream.Recv + }, + }, + } +} + +func getAccessAPIClient(address string) (accessproto.AccessAPIClient, error) { + conn, err := grpc.Dial(address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err + } + + return accessproto.NewAccessAPIClient(conn), nil +} diff --git a/integration/tests/epochs/base_suite.go b/integration/tests/epochs/base_suite.go new file mode 100644 index 00000000000..2fb8200cc0a --- /dev/null +++ b/integration/tests/epochs/base_suite.go @@ -0,0 +1,158 @@ +// Package epochs contains common functionality for the epoch integration test suite. +// Individual tests exist in sub-directories of this: cohort1, cohort2... +// Each cohort is run as a separate, sequential CI job. Since the epoch tests are long +// and resource-heavy, we split them into several cohorts, which can be run in parallel. +// +// If a new cohort is added in the future, it must be added to: +// - ci.yml, flaky-test-monitor.yml, bors.toml (ensure new cohort of tests is run) +// - Makefile (include new cohort in integration-test directive, etc.) 
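// Editor's note (illustrative sketch, not part of this change-set): a cohort
// test is expected to embed BaseSuite, set the epoch configuration, and then
// delegate to BaseSuite.SetupTest, mirroring the cohort suites elsewhere in
// this diff; the suite name below is hypothetical:
//
//	type MyCohortSuite struct {
//		epochs.BaseSuite
//	}
//
//	func (s *MyCohortSuite) SetupTest() {
//		s.StakingAuctionLen = 100
//		s.DKGPhaseLen = 100
//		s.EpochLen = 450
//		s.EpochCommitSafetyThreshold = 20
//		s.BaseSuite.SetupTest()
//	}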
+package epochs + +import ( + "context" + "fmt" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/ghost/client" + "github.com/onflow/flow-go/integration/testnet" + "github.com/onflow/flow-go/integration/tests/lib" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +// BaseSuite encapsulates common functionality for epoch integration tests. +type BaseSuite struct { + suite.Suite + lib.TestnetStateTracker + cancel context.CancelFunc + log zerolog.Logger + net *testnet.FlowNetwork + ghostID flow.Identifier + + Client *testnet.Client + Ctx context.Context + + // Epoch config (lengths in views) + StakingAuctionLen uint64 + DKGPhaseLen uint64 + EpochLen uint64 + EpochCommitSafetyThreshold uint64 + // Whether approvals are required for sealing (we only enable for VN tests because + // requiring approvals requires a longer DKG period to avoid flakiness) + RequiredSealApprovals uint // defaults to 0 (no approvals required) + // Consensus Node proposal duration + ConsensusProposalDuration time.Duration +} + +// SetupTest is run automatically by the testing framework before each test case. +func (s *BaseSuite) SetupTest() { + // If unset, use default value 100ms + if s.ConsensusProposalDuration == 0 { + s.ConsensusProposalDuration = time.Millisecond * 100 + } + + minEpochLength := s.StakingAuctionLen + s.DKGPhaseLen*3 + 20 + // ensure epoch lengths are set correctly + require.Greater(s.T(), s.EpochLen, minEpochLength+s.EpochCommitSafetyThreshold, "epoch too short") + + s.Ctx, s.cancel = context.WithCancel(context.Background()) + s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) + s.log.Info().Msg("================> SetupTest") + defer func() { + s.log.Info().Msg("================> Finish SetupTest") + }() + + collectionConfigs := []func(*testnet.NodeConfig){ + testnet.WithAdditionalFlag("--hotstuff-proposal-duration=100ms"), + testnet.WithLogLevel(zerolog.WarnLevel)} + + consensusConfigs := []func(config *testnet.NodeConfig){ + testnet.WithAdditionalFlag(fmt.Sprintf("--cruise-ctl-fallback-proposal-duration=%s", s.ConsensusProposalDuration)), + testnet.WithAdditionalFlag("--cruise-ctl-enabled=false"), // disable cruise control for integration tests + testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", s.RequiredSealApprovals)), + testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", s.RequiredSealApprovals)), + testnet.WithLogLevel(zerolog.DebugLevel)} + + // a ghost node masquerading as an access node + s.ghostID = unittest.IdentifierFixture() + ghostNode := testnet.NewNodeConfig( + flow.RoleAccess, + testnet.WithLogLevel(zerolog.FatalLevel), + testnet.WithID(s.ghostID), + testnet.AsGhost()) + + confs := []testnet.NodeConfig{ + testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), + testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), + testnet.NewNodeConfig(flow.RoleCollection, collectionConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel), testnet.WithAdditionalFlag("--extensive-logging=true")), + testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel)), + testnet.NewNodeConfig(flow.RoleVerification, 
testnet.WithLogLevel(zerolog.WarnLevel)), + ghostNode, + } + + netConf := testnet.NewNetworkConfigWithEpochConfig("epochs-tests", confs, s.StakingAuctionLen, s.DKGPhaseLen, s.EpochLen, s.EpochCommitSafetyThreshold) + + // initialize the network + s.net = testnet.PrepareFlowNetwork(s.T(), netConf, flow.Localnet) + + // start the network + s.net.Start(s.Ctx) + + // start tracking blocks + s.Track(s.T(), s.Ctx, s.Ghost()) + + // use AN1 for test-related queries - the AN join/leave test will replace AN2 + client, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() + require.NoError(s.T(), err) + + s.Client = client + + // log network info periodically to aid in debugging future flaky tests + go lib.LogStatusPeriodically(s.T(), s.Ctx, s.log, s.Client, 5*time.Second) +} + +func (s *BaseSuite) Ghost() *client.GhostClient { + client, err := s.net.ContainerByID(s.ghostID).GhostClient() + require.NoError(s.T(), err, "could not get ghost Client") + return client +} + +// TimedLogf logs the message using t.Log and the suite logger, but prefixes the current time. +// This enables viewing logs inline with Docker logs as well as other test logs. +func (s *BaseSuite) TimedLogf(msg string, args ...interface{}) { + s.log.Info().Msgf(msg, args...) + args = append([]interface{}{time.Now().String()}, args...) + s.T().Logf("%s - "+msg, args...) +} + +// AwaitEpochPhase waits for the given phase, in the given epoch. +func (s *BaseSuite) AwaitEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase, waitFor, tick time.Duration) { + var actualEpoch uint64 + var actualPhase flow.EpochPhase + condition := func() bool { + snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) + require.NoError(s.T(), err) + + actualEpoch, err = snapshot.Epochs().Current().Counter() + require.NoError(s.T(), err) + actualPhase, err = snapshot.Phase() + require.NoError(s.T(), err) + + return actualEpoch == expectedEpoch && actualPhase == expectedPhase + } + require.Eventuallyf(s.T(), condition, waitFor, tick, "did not reach expectedEpoch %d phase %s within %s. Last saw epoch=%d and phase=%s", expectedEpoch, expectedPhase, waitFor, actualEpoch, actualPhase) +} + +// GetContainersByRole returns all containers from the network for the specified role, making sure the containers are not ghost nodes. +func (s *BaseSuite) GetContainersByRole(role flow.Role) []*testnet.Container { + nodes := s.net.ContainersByRole(role, false) + require.True(s.T(), len(nodes) > 0) + return nodes +} diff --git a/integration/tests/epochs/cohort1/epoch_static_transition_test.go b/integration/tests/epochs/cohort1/epoch_static_transition_test.go index ae1708f514e..6c8ab6d6d3c 100644 --- a/integration/tests/epochs/cohort1/epoch_static_transition_test.go +++ b/integration/tests/epochs/cohort1/epoch_static_transition_test.go @@ -18,7 +18,7 @@ func TestEpochStaticTransition(t *testing.T) { // StaticEpochTransitionSuite is the suite used for epoch transition tests // with a static identity table. 
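// Editor's note (illustrative sketch, not part of this change-set): the
// BaseSuite helpers above are meant to be polled from cohort tests; the
// durations below are arbitrary assumptions:
//
//	// wait up to 2 minutes for epoch 0 to reach the setup phase, polling twice per second
//	s.AwaitEpochPhase(s.Ctx, 0, flow.EpochPhaseSetup, 2*time.Minute, 500*time.Millisecond)
//	// fetch the (non-ghost) consensus containers for direct inspection
//	consensusNodes := s.GetContainersByRole(flow.RoleConsensus)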
type StaticEpochTransitionSuite struct { - epochs.Suite + epochs.DynamicEpochTransitionSuite } func (s *StaticEpochTransitionSuite) SetupTest() { @@ -30,7 +30,7 @@ func (s *StaticEpochTransitionSuite) SetupTest() { s.EpochCommitSafetyThreshold = 50 // run the generic setup, which starts up the network - s.Suite.SetupTest() + s.BaseSuite.SetupTest() } // TestStaticEpochTransition asserts epoch state transitions over full epoch diff --git a/integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go b/integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go index ed8f7ef1ae1..f94066eb14e 100644 --- a/integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go +++ b/integration/tests/epochs/cohort2/epoch_join_and_leave_vn_test.go @@ -32,7 +32,7 @@ func (s *EpochJoinAndLeaveVNSuite) SetupTest() { s.DKGPhaseLen = 100 s.EpochLen = 450 s.EpochCommitSafetyThreshold = 20 - s.Suite.SetupTest() + s.BaseSuite.SetupTest() } // TestEpochJoinAndLeaveVN should update verification nodes and assert healthy network conditions diff --git a/integration/tests/epochs/suite.go b/integration/tests/epochs/dynamic_epoch_transition_suite.go similarity index 71% rename from integration/tests/epochs/suite.go rename to integration/tests/epochs/dynamic_epoch_transition_suite.go index e0efecdf80a..192d931339f 100644 --- a/integration/tests/epochs/suite.go +++ b/integration/tests/epochs/dynamic_epoch_transition_suite.go @@ -14,21 +14,17 @@ import ( "strings" "time" - "github.com/rs/zerolog" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "github.com/onflow/cadence" "github.com/onflow/crypto" "github.com/onflow/flow-core-contracts/lib/go/templates" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" sdk "github.com/onflow/flow-go-sdk" sdkcrypto "github.com/onflow/flow-go-sdk/crypto" - "github.com/onflow/flow-go/engine/ghost/client" "github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/integration/testnet" - "github.com/onflow/flow-go/integration/tests/lib" "github.com/onflow/flow-go/integration/utils" "github.com/onflow/flow-go/model/bootstrap" "github.com/onflow/flow-go/model/encodable" @@ -44,119 +40,24 @@ import ( // NOTE: The snapshot must reference a block within the second epoch. type nodeUpdateValidation func(ctx context.Context, env templates.Environment, snapshot *inmem.Snapshot, info *StakedNodeOperationInfo) -// Suite encapsulates common functionality for epoch integration tests. -type Suite struct { - suite.Suite - lib.TestnetStateTracker - cancel context.CancelFunc - log zerolog.Logger - net *testnet.FlowNetwork - ghostID flow.Identifier - - Client *testnet.Client - Ctx context.Context - - // Epoch config (lengths in views) - StakingAuctionLen uint64 - DKGPhaseLen uint64 - EpochLen uint64 - EpochCommitSafetyThreshold uint64 - // Whether approvals are required for sealing (we only enable for VN tests because - // requiring approvals requires a longer DKG period to avoid flakiness) - RequiredSealApprovals uint // defaults to 0 (no approvals required) - // Consensus Node proposal duration - ConsensusProposalDuration time.Duration -} - -// SetupTest is run automatically by the testing framework before each test case. 
-func (s *Suite) SetupTest() { - // If unset, use default value 100ms - if s.ConsensusProposalDuration == 0 { - s.ConsensusProposalDuration = time.Millisecond * 100 - } - - minEpochLength := s.StakingAuctionLen + s.DKGPhaseLen*3 + 20 - // ensure epoch lengths are set correctly - require.Greater(s.T(), s.EpochLen, minEpochLength+s.EpochCommitSafetyThreshold, "epoch too short") - - s.Ctx, s.cancel = context.WithCancel(context.Background()) - s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel) - s.log.Info().Msg("================> SetupTest") - defer func() { - s.log.Info().Msg("================> Finish SetupTest") - }() - - collectionConfigs := []func(*testnet.NodeConfig){ - testnet.WithAdditionalFlag("--hotstuff-proposal-duration=100ms"), - testnet.WithLogLevel(zerolog.WarnLevel)} - - consensusConfigs := []func(config *testnet.NodeConfig){ - testnet.WithAdditionalFlag(fmt.Sprintf("--cruise-ctl-fallback-proposal-duration=%s", s.ConsensusProposalDuration)), - testnet.WithAdditionalFlag("--cruise-ctl-enabled=false"), // disable cruise control for integration tests - testnet.WithAdditionalFlag(fmt.Sprintf("--required-verification-seal-approvals=%d", s.RequiredSealApprovals)), - testnet.WithAdditionalFlag(fmt.Sprintf("--required-construction-seal-approvals=%d", s.RequiredSealApprovals)), - testnet.WithLogLevel(zerolog.WarnLevel)} - - // a ghost node masquerading as an access node - s.ghostID = unittest.IdentifierFixture() - ghostNode := testnet.NewNodeConfig( - flow.RoleAccess, - testnet.WithLogLevel(zerolog.FatalLevel), - testnet.WithID(s.ghostID), - testnet.AsGhost()) - - confs := []testnet.NodeConfig{ - testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), - testnet.NewNodeConfig(flow.RoleAccess, testnet.WithLogLevel(zerolog.WarnLevel)), - testnet.NewNodeConfig(flow.RoleCollection, collectionConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleConsensus, consensusConfigs...), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel), testnet.WithAdditionalFlag("--extensive-logging=true")), - testnet.NewNodeConfig(flow.RoleExecution, testnet.WithLogLevel(zerolog.WarnLevel)), - testnet.NewNodeConfig(flow.RoleVerification, testnet.WithLogLevel(zerolog.WarnLevel)), - ghostNode, - } - - netConf := testnet.NewNetworkConfigWithEpochConfig("epochs-tests", confs, s.StakingAuctionLen, s.DKGPhaseLen, s.EpochLen, s.EpochCommitSafetyThreshold) - - // initialize the network - s.net = testnet.PrepareFlowNetwork(s.T(), netConf, flow.Localnet) - - // start the network - s.net.Start(s.Ctx) - - // start tracking blocks - s.Track(s.T(), s.Ctx, s.Ghost()) - - // use AN1 for test-related queries - the AN join/leave test will replace AN2 - client, err := s.net.ContainerByName(testnet.PrimaryAN).TestnetClient() - require.NoError(s.T(), err) - - s.Client = client - - // log network info periodically to aid in debugging future flaky tests - go lib.LogStatusPeriodically(s.T(), s.Ctx, s.log, s.Client, 5*time.Second) -} - -func (s *Suite) Ghost() *client.GhostClient { - client, err := s.net.ContainerByID(s.ghostID).GhostClient() - require.NoError(s.T(), err, "could not get ghost Client") - return client +// DynamicEpochTransitionSuite is the suite used for epoch transitions tests +// with a dynamic identity table. +type DynamicEpochTransitionSuite struct { + BaseSuite } -// TimedLogf logs the message using t.Log and the suite logger, but prefixes the current time. 
-// This enables viewing logs inline with Docker logs as well as other test logs. -func (s *Suite) TimedLogf(msg string, args ...interface{}) { - s.log.Info().Msgf(msg, args...) - args = append([]interface{}{time.Now().String()}, args...) - s.T().Logf("%s - "+msg, args...) -} +func (s *DynamicEpochTransitionSuite) SetupTest() { + // use a longer staking auction length to accommodate staking operations for joining/leaving nodes + // NOTE: this value is set fairly aggressively to ensure shorter test times. + // If flakiness due to failure to complete staking operations in time is observed, + // try increasing (by 10-20 views). + s.StakingAuctionLen = 50 + s.DKGPhaseLen = 50 + s.EpochLen = 250 + s.EpochCommitSafetyThreshold = 20 -func (s *Suite) TearDownTest() { - s.log.Info().Msg("================> Start TearDownTest") - s.net.Remove() - s.cancel() - s.log.Info().Msg("================> Finish TearDownTest") + // run the generic setup, which starts up the network + s.BaseSuite.SetupTest() } // StakedNodeOperationInfo struct contains all the node information needed to @@ -189,7 +90,7 @@ type StakedNodeOperationInfo struct { // NOTE 2: This function performs steps 1-6 in one custom transaction, to reduce // the time taken by each test case. Individual transactions for each step can be // found in Git history, for example: 9867056a8b7246655047bc457f9000398f6687c0. -func (s *Suite) StakeNode(ctx context.Context, env templates.Environment, role flow.Role) *StakedNodeOperationInfo { +func (s *DynamicEpochTransitionSuite) StakeNode(ctx context.Context, env templates.Environment, role flow.Role) *StakedNodeOperationInfo { stakingAccountKey, networkingKey, stakingKey, machineAccountKey, machineAccountPubKey := s.generateAccountKeys(role) nodeID := flow.MakeID(stakingKey.PublicKey().Encode()) @@ -257,7 +158,7 @@ func (s *Suite) StakeNode(ctx context.Context, env templates.Environment, role f } // generates initial keys needed to bootstrap account -func (s *Suite) generateAccountKeys(role flow.Role) ( +func (s *DynamicEpochTransitionSuite) generateAccountKeys(role flow.Role) ( operatorAccountKey, networkingKey, stakingKey, @@ -285,7 +186,7 @@ func (s *Suite) generateAccountKeys(role flow.Role) ( // removeNodeFromProtocol removes the given node from the protocol. 
// NOTE: assumes staking occurs in first epoch (counter 0) -func (s *Suite) removeNodeFromProtocol(ctx context.Context, env templates.Environment, nodeID flow.Identifier) { +func (s *DynamicEpochTransitionSuite) removeNodeFromProtocol(ctx context.Context, env templates.Environment, nodeID flow.Identifier) { result, err := s.submitAdminRemoveNodeTx(ctx, env, nodeID) require.NoError(s.T(), err) require.NoError(s.T(), result.Error) @@ -295,7 +196,7 @@ func (s *Suite) removeNodeFromProtocol(ctx context.Context, env templates.Enviro } // submitAdminRemoveNodeTx will submit the admin remove node transaction -func (s *Suite) submitAdminRemoveNodeTx(ctx context.Context, +func (s *DynamicEpochTransitionSuite) submitAdminRemoveNodeTx(ctx context.Context, env templates.Environment, nodeID flow.Identifier, ) (*sdk.TransactionResult, error) { @@ -320,14 +221,14 @@ func (s *Suite) submitAdminRemoveNodeTx(ctx context.Context, return result, nil } -func (s *Suite) ExecuteGetProposedTableScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { +func (s *DynamicEpochTransitionSuite) ExecuteGetProposedTableScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { v, err := s.Client.ExecuteScriptBytes(ctx, templates.GenerateReturnProposedTableScript(env), []cadence.Value{}) require.NoError(s.T(), err) return v } // ExecuteGetNodeInfoScript executes a script to get staking info about the given node. -func (s *Suite) ExecuteGetNodeInfoScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { +func (s *DynamicEpochTransitionSuite) ExecuteGetNodeInfoScript(ctx context.Context, env templates.Environment, nodeID flow.Identifier) cadence.Value { cdcNodeID, err := cadence.NewString(nodeID.String()) require.NoError(s.T(), err) v, err := s.Client.ExecuteScriptBytes(ctx, templates.GenerateGetNodeInfoScript(env), []cadence.Value{cdcNodeID}) @@ -336,7 +237,7 @@ func (s *Suite) ExecuteGetNodeInfoScript(ctx context.Context, env templates.Envi } // SubmitSetApprovedListTx adds a node to the approved node list, this must be done when a node joins the protocol during the epoch staking phase -func (s *Suite) SubmitSetApprovedListTx(ctx context.Context, env templates.Environment, identities ...flow.Identifier) *sdk.TransactionResult { +func (s *DynamicEpochTransitionSuite) SubmitSetApprovedListTx(ctx context.Context, env templates.Environment, identities ...flow.Identifier) *sdk.TransactionResult { latestBlockID, err := s.Client.GetLatestBlockID(ctx) require.NoError(s.T(), err) @@ -362,7 +263,7 @@ func (s *Suite) SubmitSetApprovedListTx(ctx context.Context, env templates.Envir } // ExecuteReadApprovedNodesScript executes the return proposal table script and returns a list of approved nodes -func (s *Suite) ExecuteReadApprovedNodesScript(ctx context.Context, env templates.Environment) cadence.Value { +func (s *DynamicEpochTransitionSuite) ExecuteReadApprovedNodesScript(ctx context.Context, env templates.Environment) cadence.Value { v, err := s.Client.ExecuteScriptBytes(ctx, templates.GenerateGetApprovedNodesScript(env), []cadence.Value{}) require.NoError(s.T(), err) @@ -370,14 +271,14 @@ func (s *Suite) ExecuteReadApprovedNodesScript(ctx context.Context, env template } // getTestContainerName returns a name for a test container in the form of ${role}_${nodeID}_test -func (s *Suite) getTestContainerName(role flow.Role) string { - i := len(s.net.ContainersByRole(role)) + 1 +func (s *DynamicEpochTransitionSuite) 
getTestContainerName(role flow.Role) string { + i := len(s.net.ContainersByRole(role, false)) + 1 return fmt.Sprintf("%s_test_%d", role, i) } // assertNodeApprovedAndProposed executes the read-approved-nodes-list and get-proposed-table scripts // and checks that info.NodeID is in both lists -func (s *Suite) assertNodeApprovedAndProposed(ctx context.Context, env templates.Environment, info *StakedNodeOperationInfo) { +func (s *DynamicEpochTransitionSuite) assertNodeApprovedAndProposed(ctx context.Context, env templates.Environment, info *StakedNodeOperationInfo) { // ensure node ID in approved list //approvedNodes := s.ExecuteReadApprovedNodesScript(Ctx, env) //require.Containsf(s.T(), approvedNodes.(cadence.Array).Values, cadence.String(info.NodeID.String()), "expected new node to be in approved nodes list: %x", info.NodeID) @@ -395,7 +296,7 @@ func (s *Suite) assertNodeApprovedAndProposed(ctx context.Context, env templates } // newTestContainerOnNetwork configures a new container on the suite's network -func (s *Suite) newTestContainerOnNetwork(role flow.Role, info *StakedNodeOperationInfo) *testnet.Container { +func (s *DynamicEpochTransitionSuite) newTestContainerOnNetwork(role flow.Role, info *StakedNodeOperationInfo) *testnet.Container { containerConfigs := []func(config *testnet.NodeConfig){ testnet.WithLogLevel(zerolog.WarnLevel), testnet.WithID(info.NodeID), @@ -417,10 +318,8 @@ func (s *Suite) newTestContainerOnNetwork(role flow.Role, info *StakedNodeOperat nodeContainer.AddFlag("insecure-access-api", "false") accessNodeIDS := make([]string, 0) - for _, c := range s.net.ContainersByRole(flow.RoleAccess) { - if c.Config.Role == flow.RoleAccess && !c.Config.Ghost { - accessNodeIDS = append(accessNodeIDS, c.Config.NodeID.String()) - } + for _, c := range s.net.ContainersByRole(flow.RoleAccess, false) { + accessNodeIDS = append(accessNodeIDS, c.Config.NodeID.String()) } nodeContainer.AddFlag("access-node-ids", strings.Join(accessNodeIDS, ",")) } @@ -429,7 +328,7 @@ func (s *Suite) newTestContainerOnNetwork(role flow.Role, info *StakedNodeOperat } // StakeNewNode will stake a new node, and create the corresponding docker container for that node -func (s *Suite) StakeNewNode(ctx context.Context, env templates.Environment, role flow.Role) (*StakedNodeOperationInfo, *testnet.Container) { +func (s *DynamicEpochTransitionSuite) StakeNewNode(ctx context.Context, env templates.Environment, role flow.Role) (*StakedNodeOperationInfo, *testnet.Container) { // stake our new node info := s.StakeNode(ctx, env, role) @@ -442,40 +341,27 @@ func (s *Suite) StakeNewNode(ctx context.Context, env templates.Environment, rol return info, testContainer } -// getContainerToReplace return a container from the network, make sure the container is not a ghost -func (s *Suite) getContainerToReplace(role flow.Role) *testnet.Container { - nodes := s.net.ContainersByRole(role) - require.True(s.T(), len(nodes) > 0) - - for _, c := range nodes { - if !c.Config.Ghost { - return c - } - } - - return nil +// AwaitFinalizedView polls until it observes that the latest finalized block has a view +// greater than or equal to the input view. This is used to wait until an epoch +// transition must have happened. 
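// Editor's note (illustrative sketch, not part of this change-set):
// AwaitFinalizedView below is typically driven by the epoch's final view;
// epoch1FinalView is a hypothetical value read from the protocol snapshot
// beforehand, and the durations are assumptions:
//
//	// block until a view past the first epoch's final view is finalized,
//	// i.e. until the epoch transition must have happened
//	s.AwaitFinalizedView(s.Ctx, epoch1FinalView+1, 4*time.Minute, 500*time.Millisecond)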
+func (s *DynamicEpochTransitionSuite) AwaitFinalizedView(ctx context.Context, view uint64, waitFor, tick time.Duration) { + require.Eventually(s.T(), func() bool { + finalized := s.getLatestFinalizedHeader(ctx) + return finalized.View >= view + }, waitFor, tick) } -// AwaitEpochPhase waits for the given phase, in the given epoch. -func (s *Suite) AwaitEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase, waitFor, tick time.Duration) { - var actualEpoch uint64 - var actualPhase flow.EpochPhase - condition := func() bool { - snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - - actualEpoch, err = snapshot.Epochs().Current().Counter() - require.NoError(s.T(), err) - actualPhase, err = snapshot.Phase() - require.NoError(s.T(), err) - - return actualEpoch == expectedEpoch && actualPhase == expectedPhase - } - require.Eventuallyf(s.T(), condition, waitFor, tick, "did not reach expectedEpoch %d phase %s within %s. Last saw epoch=%d and phase=%s", expectedEpoch, expectedPhase, waitFor, actualEpoch, actualPhase) +// getLatestFinalizedHeader retrieves the latest finalized block, as reported in LatestSnapshot. +func (s *DynamicEpochTransitionSuite) getLatestFinalizedHeader(ctx context.Context) *flow.Header { + snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) + require.NoError(s.T(), err) + finalized, err := snapshot.Head() + require.NoError(s.T(), err) + return finalized } // AssertInEpochPhase checks if we are in the given phase of the given epoch. -func (s *Suite) AssertInEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase) { +func (s *DynamicEpochTransitionSuite) AssertInEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase) { snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) require.NoError(s.T(), err) actualEpoch, err := snapshot.Epochs().Current().Counter() @@ -491,7 +377,7 @@ func (s *Suite) AssertInEpochPhase(ctx context.Context, expectedEpoch uint64, ex } // AssertInEpoch requires that the actual epoch counter equals the provided counter. -func (s *Suite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) { +func (s *DynamicEpochTransitionSuite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) { snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) require.NoError(s.T(), err) actualEpoch, err := snapshot.Epochs().Current().Counter() @@ -501,7 +387,7 @@ func (s *Suite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) { // AssertNodeNotParticipantInEpoch asserts that the given node ID does not exist // in the epoch's identity table. -func (s *Suite) AssertNodeNotParticipantInEpoch(epoch protocol.Epoch, nodeID flow.Identifier) { +func (s *DynamicEpochTransitionSuite) AssertNodeNotParticipantInEpoch(epoch protocol.Epoch, nodeID flow.Identifier) { identities, err := epoch.InitialIdentities() require.NoError(s.T(), err) require.NotContains(s.T(), identities.NodeIDs(), nodeID) @@ -510,7 +396,7 @@ func (s *Suite) AssertNodeNotParticipantInEpoch(epoch protocol.Epoch, nodeID flo // AwaitSealedBlockHeightExceedsSnapshot polls until it observes that the latest // sealed block height has exceeded the snapshot height by at least `threshold` blocks. 
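// For example (hypothetical values), AwaitSealedBlockHeightExceedsSnapshot(ctx, snap, 10, 30*time.Second, 100*time.Millisecond) // blocks until the sealed height exceeds the head height of snap by at least 10.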
-func (s *Suite) AwaitSealedBlockHeightExceedsSnapshot(ctx context.Context, snapshot *inmem.Snapshot, threshold uint64, waitFor, tick time.Duration) { +func (s *DynamicEpochTransitionSuite) AwaitSealedBlockHeightExceedsSnapshot(ctx context.Context, snapshot *inmem.Snapshot, threshold uint64, waitFor, tick time.Duration) { header, err := snapshot.Head() require.NoError(s.T(), err) snapshotHeight := header.Height @@ -522,18 +408,8 @@ func (s *Suite) AwaitSealedBlockHeightExceedsSnapshot(ctx context.Context, snaps }, waitFor, tick) } -// AwaitFinalizedView polls until it observes that the latest finalized block has a view -// greater than or equal to the input view. This is used to wait until when an epoch -// transition must have happened. -func (s *Suite) AwaitFinalizedView(ctx context.Context, view uint64, waitFor, tick time.Duration) { - require.Eventually(s.T(), func() bool { - sealed := s.getLatestFinalizedHeader(ctx) - return sealed.View >= view - }, waitFor, tick) -} - // getLatestSealedHeader retrieves the latest sealed block, as reported in LatestSnapshot. -func (s *Suite) getLatestSealedHeader(ctx context.Context) *flow.Header { +func (s *DynamicEpochTransitionSuite) getLatestSealedHeader(ctx context.Context) *flow.Header { snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) require.NoError(s.T(), err) segment, err := snapshot.SealingSegment() @@ -542,18 +418,9 @@ func (s *Suite) getLatestSealedHeader(ctx context.Context) *flow.Header { return sealed.Header } -// getLatestFinalizedHeader retrieves the latest finalized block, as reported in LatestSnapshot. -func (s *Suite) getLatestFinalizedHeader(ctx context.Context) *flow.Header { - snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - finalized, err := snapshot.Head() - require.NoError(s.T(), err) - return finalized -} - // SubmitSmokeTestTransaction will submit a create account transaction to smoke-test the network. // This ensures a single transaction can be sealed by the network. -func (s *Suite) SubmitSmokeTestTransaction(ctx context.Context) { +func (s *DynamicEpochTransitionSuite) SubmitSmokeTestTransaction(ctx context.Context) { _, err := utils.CreateFlowAccount(ctx, s.Client) require.NoError(s.T(), err) } @@ -565,7 +432,7 @@ func (s *Suite) SubmitSmokeTestTransaction(ctx context.Context) { // 3. Check that we can execute a script on the AN // // TODO test sending and observing result of a transaction via the new AN (blocked by https://github.com/onflow/flow-go/issues/3642) -func (s *Suite) AssertNetworkHealthyAfterANChange(ctx context.Context, env templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, info *StakedNodeOperationInfo) { +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterANChange(ctx context.Context, env templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, info *StakedNodeOperationInfo) { // get snapshot directly from new AN and compare head with head from the // snapshot that was used to bootstrap the node @@ -586,14 +453,14 @@ func (s *Suite) AssertNetworkHealthyAfterANChange(ctx context.Context, env templ // AssertNetworkHealthyAfterVNChange performs a basic network health check after replacing a verification node. // 1. 
Ensure sealing continues into the second epoch (post-replacement) by observing // at least 10 blocks of sealing progress within the epoch -func (s *Suite) AssertNetworkHealthyAfterVNChange(ctx context.Context, _ templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, _ *StakedNodeOperationInfo) { +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterVNChange(ctx context.Context, _ templates.Environment, snapshotInSecondEpoch *inmem.Snapshot, _ *StakedNodeOperationInfo) { s.AwaitSealedBlockHeightExceedsSnapshot(ctx, snapshotInSecondEpoch, 10, 30*time.Second, time.Millisecond*100) } // AssertNetworkHealthyAfterLNChange performs a basic network health check after replacing a collection node. // 1. Submit a transaction to the network that will target the newly staked LN by making // sure the reference block ID is after the first epoch. -func (s *Suite) AssertNetworkHealthyAfterLNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterLNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { // At this point we have reached the second epoch and our new LN is the only LN in the network. // To validate that the LN joined the network successfully and is processing transactions, we create // an account, which requires a transaction to be submitted and sealed. @@ -609,7 +476,7 @@ func (s *Suite) AssertNetworkHealthyAfterLNChange(ctx context.Context, _ templat // therefore the newly joined consensus node must be participating in consensus. // // In addition, we submit a transaction here and verify that it is sealed. -func (s *Suite) AssertNetworkHealthyAfterSNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { +func (s *DynamicEpochTransitionSuite) AssertNetworkHealthyAfterSNChange(ctx context.Context, _ templates.Environment, _ *inmem.Snapshot, _ *StakedNodeOperationInfo) { s.SubmitSmokeTestTransaction(ctx) } @@ -621,7 +488,7 @@ func (s *Suite) AssertNetworkHealthyAfterSNChange(ctx context.Context, _ templat // * that nodes can stake and join the network at an epoch boundary // * that nodes can unstake and leave the network at an epoch boundary // * role-specific network health validation after the swap has completed -func (s *Suite) RunTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth nodeUpdateValidation) { +func (s *DynamicEpochTransitionSuite) RunTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth nodeUpdateValidation) { env := utils.LocalnetEnv() @@ -633,7 +500,7 @@ func (s *Suite) RunTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth node require.NotNil(s.T(), containerToReplace) } else { // grab the first container of this node role type; this is the container we will replace - containerToReplace = s.getContainerToReplace(role) + containerToReplace = s.GetContainersByRole(role)[0] require.NotNil(s.T(), containerToReplace) } @@ -694,23 +561,3 @@ func (s *Suite) RunTestEpochJoinAndLeave(role flow.Role, checkNetworkHealth node // make sure the network is healthy after adding new node checkNetworkHealth(s.Ctx, env, secondEpochSnapshot, info) } - -// DynamicEpochTransitionSuite is the suite used for epoch transitions tests -// with a dynamic identity table. 
-type DynamicEpochTransitionSuite struct { - Suite -} - -func (s *DynamicEpochTransitionSuite) SetupTest() { - // use a longer staking auction length to accommodate staking operations for joining/leaving nodes - // NOTE: this value is set fairly aggressively to ensure shorter test times. - // If flakiness due to failure to complete staking operations in time is observed, - // try increasing (by 10-20 views). - s.StakingAuctionLen = 50 - s.DKGPhaseLen = 50 - s.EpochLen = 250 - s.EpochCommitSafetyThreshold = 20 - - // run the generic setup, which starts up the network - s.Suite.SetupTest() -} diff --git a/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go b/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go new file mode 100644 index 00000000000..6de2caaba21 --- /dev/null +++ b/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go @@ -0,0 +1,34 @@ +package recover_epoch + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/model/flow" +) + +func TestRecoverEpoch(t *testing.T) { + suite.Run(t, new(RecoverEpochSuite)) +} + +type RecoverEpochSuite struct { + Suite +} + +// TestRecoverEpoch ensures that the recover_epoch transaction flow works as expected. This test will simulate the network going +// into EFM by taking a consensus node offline before completing the DKG. While in EFM, the test will execute the efm-recover-tx-args +// CLI command to generate transaction arguments for a recover_epoch transaction; after submitting the transaction, the test will +// ensure the network is healthy. +func (s *RecoverEpochSuite) TestRecoverEpoch() { + s.AwaitEpochPhase(context.Background(), 0, flow.EpochPhaseSetup, 20*time.Second, time.Second) + fmt.Println("in epoch phase setup") + + sns := s.GetContainersByRole(flow.RoleConsensus) + _ = sns[0].Pause() + + // @TODO: trigger EFM manually +} diff --git a/integration/tests/epochs/recover_epoch/suite.go b/integration/tests/epochs/recover_epoch/suite.go new file mode 100644 index 00000000000..49e5a3ace58 --- /dev/null +++ b/integration/tests/epochs/recover_epoch/suite.go @@ -0,0 +1,21 @@ +package recover_epoch + +import ( + "github.com/onflow/flow-go/integration/tests/epochs" +) + +// Suite encapsulates common functionality for epoch integration tests. 
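+// It embeds epochs.BaseSuite, so the generic network setup is reused; only the +// epoch timing parameters are overridden in SetupTest below.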
+type Suite struct { + epochs.BaseSuite +} + +func (s *Suite) SetupTest() { + // use a shorter staking auction because we don't have staking operations in this case + s.StakingAuctionLen = 2 + s.DKGPhaseLen = 50 + s.EpochLen = 250 + s.EpochCommitSafetyThreshold = 20 + + // run the generic setup, which starts up the network + s.BaseSuite.SetupTest() +} diff --git a/ledger/complete/wal/checkpoint_v6_reader.go b/ledger/complete/wal/checkpoint_v6_reader.go index 460343c49b4..8408b2a1683 100644 --- a/ledger/complete/wal/checkpoint_v6_reader.go +++ b/ledger/complete/wal/checkpoint_v6_reader.go @@ -20,8 +20,17 @@ import ( // ErrEOFNotReached indicates that the end of file was expected but not reached var ErrEOFNotReached = errors.New("expect to reach EOF, but actually didn't") -// TODO: validate the header file and the sub file that contains the root hashes -var ReadTriesRootHash = readTriesRootHash +func ReadTriesRootHash(logger zerolog.Logger, dir string, fileName string) ( + []ledger.RootHash, + error, +) { + err := validateCheckpointFile(logger, dir, fileName) + if err != nil { + return nil, err + } + return readTriesRootHash(logger, dir, fileName) +} + var CheckpointHasRootHash = checkpointHasRootHash // readCheckpointV6 reads checkpoint file from a main file and 17 file parts. @@ -849,3 +858,58 @@ func ensureReachedEOF(reader io.Reader) error { return fmt.Errorf("fail to check if reached EOF: %w", err) } + +func validateCheckpointFile(logger zerolog.Logger, dir, fileName string) error { + headerPath := filePathCheckpointHeader(dir, fileName) + // validate header file + subtrieChecksums, topTrieChecksum, err := readCheckpointHeader(headerPath, logger) + if err != nil { + return err + } + + // validate subtrie files + for index, expectedSum := range subtrieChecksums { + filepath, _, err := filePathSubTries(dir, fileName, index) + if err != nil { + return err + } + err = withFile(logger, filepath, func(f *os.File) error { + _, checksum, err := readSubTriesFooter(f) + if err != nil { + return fmt.Errorf("cannot read subtrie footer: %w", err) + } + + if checksum != expectedSum { + return fmt.Errorf("mismatch checksum in subtrie file. 
checksum from checkpoint header %v does not "+ + "match with the checksum in subtrie file %v", expectedSum, checksum) + } + return nil + }) + + if err != nil { + return err + } + } + + // validate top trie file + filepath, _ := filePathTopTries(dir, fileName) + err = withFile(logger, filepath, func(file *os.File) error { + // read the top trie footer and validate the checksum + _, _, checkSum, err := readTopTriesFooter(file) + if err != nil { + return err + } + + if topTrieChecksum != checkSum { + return fmt.Errorf("mismatch top trie checksum, header file has %v, top trie file has %v", + topTrieChecksum, checkSum) + } + + return nil + }) + if err != nil { + return err + } + + return nil +} diff --git a/ledger/complete/wal/checkpoint_v6_test.go b/ledger/complete/wal/checkpoint_v6_test.go index 1bf95e17419..ded3acf3e13 100644 --- a/ledger/complete/wal/checkpoint_v6_test.go +++ b/ledger/complete/wal/checkpoint_v6_test.go @@ -608,6 +608,33 @@ func TestReadCheckpointRootHash(t *testing.T) { }) } +func TestReadCheckpointRootHashValidateChecksum(t *testing.T) { + unittest.RunWithTempDir(t, func(dir string) { + tries := createSimpleTrie(t) + fileName := "checkpoint" + logger := unittest.Logger() + require.NoErrorf(t, StoreCheckpointV6Concurrently(tries, dir, fileName, logger), "failed to store checkpoint") + + // write a wrong checksum into the top trie file + topTrieFilePath, _ := filePathTopTries(dir, fileName) + file, err := os.OpenFile(topTrieFilePath, os.O_RDWR, 0644) + require.NoError(t, err) + + fileInfo, err := file.Stat() + require.NoError(t, err) + fileSize := fileInfo.Size() + + invalidSum := encodeCRC32Sum(10) + _, err = file.WriteAt(invalidSum, fileSize-crc32SumSize) + require.NoError(t, err) + require.NoError(t, file.Close()) + + // ReadTriesRootHash will first validate the checksum and detect the error + _, err = ReadTriesRootHash(logger, dir, fileName) + require.Error(t, err) + }) +} + func TestReadCheckpointRootHashMulti(t *testing.T) { unittest.RunWithTempDir(t, func(dir string) { tries := createMultipleRandomTries(t) diff --git a/model/flow/entity.go b/model/flow/entity.go index 963d0b15791..f106e22eebb 100644 --- a/model/flow/entity.go +++ b/model/flow/entity.go @@ -1,5 +1,11 @@ package flow +type IDEntity interface { + // ID returns a unique id for this entity using a hash of the immutable + // fields of the entity. + ID() Identifier +} + // Entity defines how flow entities should be defined // Entities are flat data structures holding multiple data fields. // Entities don't include nested entities, they only include pointers to @@ -7,10 +13,7 @@ package flow // of keeping a slice of entity object itself. This simplifies storage, signature and validation // of entities. type Entity interface { - - // ID returns a unique id for this entity using a hash of the immutable - // fields of the entity. - ID() Identifier + IDEntity // Checksum returns a unique checksum for the entity, including the mutable // data such as signatures. @@ -24,3 +27,26 @@ func EntitiesToIDs[T Entity](entities []T) []Identifier { } return ids } + +// Deduplicate removes duplicate entities from a slice, comparing them by the ID method. +// The original order of the entities is preserved. 
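+// +// A behavioural sketch (c1 and c2 are hypothetical entities): +// +//	flow.Deduplicate([]*flow.Collection{c1, c2, c1}) // yields [c1, c2]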
+func Deduplicate[T IDEntity](entities []T) []T { + if entities == nil { + return nil + } + + seen := make(map[Identifier]struct{}, len(entities)) + result := make([]T, 0, len(entities)) + + for _, entity := range entities { + id := entity.ID() + if _, ok := seen[id]; ok { + continue + } + + seen[id] = struct{}{} + result = append(result, entity) + } + + return result +} diff --git a/model/flow/entity_test.go b/model/flow/entity_test.go new file mode 100644 index 00000000000..bb926159675 --- /dev/null +++ b/model/flow/entity_test.go @@ -0,0 +1,24 @@ +package flow_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestDeduplicate(t *testing.T) { + require.Nil(t, flow.Deduplicate[*flow.Collection](nil)) + + cols := unittest.CollectionListFixture(5) + require.Equal(t, cols, flow.Deduplicate(cols)) + + // create duplicates, and validate + require.Equal(t, cols, flow.Deduplicate[*flow.Collection](append(cols, cols...))) + + // verify that the original order is preserved + require.Equal(t, cols, flow.Deduplicate[*flow.Collection]( + append(cols, cols[3], cols[1], cols[4], cols[2], cols[0]))) +} diff --git a/model/flow/filter/identity.go b/model/flow/filter/identity.go index adbbceee9b0..2afca5e2212 100644 --- a/model/flow/filter/identity.go +++ b/model/flow/filter/identity.go @@ -145,3 +145,20 @@ var IsVotingConsensusCommitteeMember = And[flow.Identity]( // equivalent to the filter for consensus committee members, as these are // the same group for now. var IsValidDKGParticipant = IsConsensusCommitteeMember + +// NotEjectedFilter is an identity filter for peers that are not ejected. +var NotEjectedFilter = Not(HasParticipationStatus(flow.EpochParticipationStatusEjected)) + +// HasWeightGreaterThanZero is an identity filter for nodes whose initial weight is greater than zero. +func HasWeightGreaterThanZero[T flow.GenericIdentity](identity *T) bool { + return (*identity).GetInitialWeight() > 0 +} + +// IsValidProtocolParticipant is an identity filter for all valid protocol participants. +// A protocol participant is considered valid if and only if the following are both true. +// 1. The node is not ejected. +// 2. The node has a weight greater than 0. 
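+// +// A hypothetical usage sketch against a protocol state snapshot: +// +//	validParticipants, err := snapshot.Identities(filter.IsValidProtocolParticipant)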
+var IsValidProtocolParticipant = And[flow.Identity]( + NotEjectedFilter, // enforces 1 + HasWeightGreaterThanZero[flow.Identity], // enforces 2 +) diff --git a/network/p2p/cache/node_blocklist_wrapper_test.go b/network/p2p/cache/node_blocklist_wrapper_test.go index 95ee5bc098b..c3e3d36a37f 100644 --- a/network/p2p/cache/node_blocklist_wrapper_test.go +++ b/network/p2p/cache/node_blocklist_wrapper_test.go @@ -17,7 +17,6 @@ import ( "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p/cache" - "github.com/onflow/flow-go/network/underlay" "github.com/onflow/flow-go/utils/unittest" ) @@ -177,7 +176,7 @@ func (s *NodeDisallowListWrapperTestSuite) TestDisallowListNode() { s.provider.On("Identities", mock.Anything).Return(combinedIdentities) - identities := s.wrapper.Identities(underlay.NotEjectedFilter) + identities := s.wrapper.Identities(filter.NotEjectedFilter) require.Equal(s.T(), len(honestIdentities), len(identities)) // expected only honest nodes to be returned for _, i := range identities { diff --git a/network/p2p/tracer/internal/rpc_sent_cache.go b/network/p2p/tracer/internal/rpc_sent_cache.go index 655ddf2179f..d1f5de9c294 100644 --- a/network/p2p/tracer/internal/rpc_sent_cache.go +++ b/network/p2p/tracer/internal/rpc_sent_cache.go @@ -1,8 +1,6 @@ package internal import ( - "fmt" - "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" @@ -79,5 +77,5 @@ func (r *rpcSentCache) size() uint { // Returns: // - flow.Identifier: the entity ID. func (r *rpcSentCache) rpcSentEntityID(messageId string, controlMsgType p2pmsg.ControlMessageType) flow.Identifier { - return flow.MakeIDFromFingerPrint([]byte(fmt.Sprintf("%s%s", messageId, controlMsgType))) + return flow.MakeIDFromFingerPrint([]byte(messageId + string(controlMsgType))) } diff --git a/network/underlay/network.go b/network/underlay/network.go index 6c238939b8b..9217aa099f4 100644 --- a/network/underlay/network.go +++ b/network/underlay/network.go @@ -74,14 +74,6 @@ var ( ErrUnicastMsgWithoutSub = errors.New("networking layer does not have subscription for the channel ID indicated in the unicast message received") ) -// NotEjectedFilter is an identity filter that, when applied to the identity -// table at a given snapshot, returns all nodes that we should communicate with -// over the networking layer. -// -// NOTE: The protocol state includes nodes from the previous/next epoch that should -// be included in network communication. We omit any nodes that have been ejected. -var NotEjectedFilter = filter.Not(filter.HasParticipationStatus(flow.EpochParticipationStatusEjected)) - // Network serves as the comprehensive networking layer that integrates three interfaces within Flow; Underlay, EngineRegistry, and ConduitAdapter. // It is responsible for creating conduits through which engines can send and receive messages to and from other engines on the network, as well as registering other services // such as BlobService and PingService. It also provides a set of APIs that can be used to send messages to other nodes on the network. 
@@ -545,7 +537,7 @@ func (n *Network) UnRegisterChannel(channel channels.Channel) error { } func (n *Network) Identities() flow.IdentityList { - return n.identityProvider.Identities(NotEjectedFilter) + return n.identityProvider.Identities(filter.NotEjectedFilter) } func (n *Network) Identity(pid peer.ID) (*flow.Identity, bool) { diff --git a/state/fork/traversal.go b/state/fork/traversal.go index 18fdcdcbc36..f66633884ca 100644 --- a/state/fork/traversal.go +++ b/state/fork/traversal.go @@ -107,9 +107,11 @@ func unsafeTraverse(headers storage.Headers, block *flow.Header, visitor onVisit return block, nil } - block, err = headers.ByBlockID(block.ParentID) + parent, err := headers.ByBlockID(block.ParentID) if err != nil { - return nil, fmt.Errorf("failed to revtrieve block header %x: %w", block.ParentID, err) + return nil, fmt.Errorf("failed to retrieve block header (id=%x height=%d): %w", block.ParentID, block.Height-1, err) } + + block = parent } } diff --git a/utils/unittest/service_events_fixtures.go b/utils/unittest/service_events_fixtures.go index 0bd6f77e87c..9248968fc39 100644 --- a/utils/unittest/service_events_fixtures.go +++ b/utils/unittest/service_events_fixtures.go @@ -3,9 +3,15 @@ package unittest import ( "crypto/rand" "encoding/hex" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/onflow/cadence" "github.com/onflow/cadence/encoding/ccf" + jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/onflow/cadence/runtime/common" "github.com/onflow/crypto" @@ -537,7 +543,7 @@ func createEpochNodes() cadence.Array { func createEpochCollectors() cadence.Array { - clusterType := newFlowClusterQCClusterStructType() + clusterType := NewFlowClusterQCClusterStructType() voteType := newFlowClusterQCVoteStructType() @@ -699,76 +705,6 @@ func createVersionBeaconEvent() cadence.Event { }).WithType(NewNodeVersionBeaconVersionBeaconEventType()) } -func newFlowClusterQCVoteStructType() cadence.Type { - - // A.01cf0e2f2f715450.FlowClusterQC.Vote - - address, _ := common.HexToAddress("01cf0e2f2f715450") - location := common.NewAddressLocation(nil, address, "FlowClusterQC") - - return &cadence.StructType{ - Location: location, - QualifiedIdentifier: "FlowClusterQC.Vote", - Fields: []cadence.Field{ - { - Identifier: "nodeID", - Type: cadence.StringType{}, - }, - { - Identifier: "signature", - Type: cadence.NewOptionalType(cadence.StringType{}), - }, - { - Identifier: "message", - Type: cadence.NewOptionalType(cadence.StringType{}), - }, - { - Identifier: "clusterIndex", - Type: cadence.UInt16Type{}, - }, - { - Identifier: "weight", - Type: cadence.UInt64Type{}, - }, - }, - } -} - -func newFlowClusterQCClusterStructType() *cadence.StructType { - - // A.01cf0e2f2f715450.FlowClusterQC.Cluster - - address, _ := common.HexToAddress("01cf0e2f2f715450") - location := common.NewAddressLocation(nil, address, "FlowClusterQC") - - return &cadence.StructType{ - Location: location, - QualifiedIdentifier: "FlowClusterQC.Cluster", - Fields: []cadence.Field{ - { - Identifier: "index", - Type: cadence.UInt16Type{}, - }, - { - Identifier: "nodeWeights", - Type: cadence.NewDictionaryType(cadence.StringType{}, cadence.UInt64Type{}), - }, - { - Identifier: "totalWeight", - Type: cadence.UInt64Type{}, - }, - { - Identifier: "generatedVotes", - Type: cadence.NewDictionaryType(cadence.StringType{}, newFlowClusterQCVoteStructType()), - }, - { - Identifier: "uniqueVoteMessageTotalWeights", - Type: cadence.NewDictionaryType(cadence.StringType{}, 
cadence.UInt64Type{}), - }, - }, - } -} - func newFlowIDTableStakingNodeInfoStructType() *cadence.StructType { // A.01cf0e2f2f715450.FlowIDTableStaking.NodeInfo @@ -869,7 +805,7 @@ func newFlowEpochEpochSetupEventType() *cadence.EventType { }, { Identifier: "collectorClusters", - Type: cadence.NewVariableSizedArrayType(newFlowClusterQCClusterStructType()), + Type: cadence.NewVariableSizedArrayType(NewFlowClusterQCClusterStructType()), }, { Identifier: "randomSource", @@ -1098,3 +1034,89 @@ var VersionBeaconFixtureCCF = func() []byte { } return b }() + +func newFlowClusterQCVoteStructType() *cadence.StructType { + + // A.01cf0e2f2f715450.FlowClusterQC.Vote + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowClusterQC") + + return &cadence.StructType{ + Location: location, + QualifiedIdentifier: "FlowClusterQC.Vote", + Fields: []cadence.Field{ + { + Identifier: "nodeID", + Type: cadence.StringType{}, + }, + { + Identifier: "signature", + Type: cadence.NewOptionalType(cadence.StringType{}), + }, + { + Identifier: "message", + Type: cadence.NewOptionalType(cadence.StringType{}), + }, + { + Identifier: "clusterIndex", + Type: cadence.UInt16Type{}, + }, + { + Identifier: "weight", + Type: cadence.UInt64Type{}, + }, + }, + } +} + +func VerifyCdcArguments(t *testing.T, expected []cadence.Value, actual []interface{}) { + + for index, arg := range actual { + + // marshal to bytes + bz, err := json.Marshal(arg) + require.NoError(t, err) + + // parse cadence value + decoded, err := jsoncdc.Decode(nil, bz) + require.NoError(t, err) + + assert.Equal(t, expected[index], decoded) + } +} + +func NewFlowClusterQCClusterStructType() *cadence.StructType { + + // A.01cf0e2f2f715450.FlowClusterQC.Cluster + + address, _ := common.HexToAddress("01cf0e2f2f715450") + location := common.NewAddressLocation(nil, address, "FlowClusterQC") + + return &cadence.StructType{ + Location: location, + QualifiedIdentifier: "FlowClusterQC.Cluster", + Fields: []cadence.Field{ + { + Identifier: "index", + Type: cadence.UInt16Type{}, + }, + { + Identifier: "nodeWeights", + Type: cadence.NewDictionaryType(cadence.StringType{}, cadence.UInt64Type{}), + }, + { + Identifier: "totalWeight", + Type: cadence.UInt64Type{}, + }, + { + Identifier: "generatedVotes", + Type: cadence.NewDictionaryType(cadence.StringType{}, newFlowClusterQCVoteStructType()), + }, + { + Identifier: "uniqueVoteMessageTotalWeights", + Type: cadence.NewDictionaryType(cadence.StringType{}, cadence.UInt64Type{}), + }, + }, + } +}