diff --git a/cmd/bootstrap/cmd/rootblock.go b/cmd/bootstrap/cmd/rootblock.go
index 194e625000c..4e3e5ed813d 100644
--- a/cmd/bootstrap/cmd/rootblock.go
+++ b/cmd/bootstrap/cmd/rootblock.go
@@ -187,7 +187,7 @@ func rootBlock(cmd *cobra.Command, args []string) {
 	log.Info().Msg("")
 
 	log.Info().Msg("constructing root QCs for collection node clusters")
-	clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks)
+	clusterQCs := run.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks)
 	log.Info().Msg("")
 
 	log.Info().Msg("constructing root header")
diff --git a/cmd/bootstrap/run/epochs.go b/cmd/bootstrap/run/epochs.go
new file mode 100644
index 00000000000..bd742ebd8bf
--- /dev/null
+++ b/cmd/bootstrap/run/epochs.go
@@ -0,0 +1,208 @@
+package run
+
+import (
+	"fmt"
+
+	"github.com/rs/zerolog"
+
+	"github.com/onflow/cadence"
+
+	"github.com/onflow/flow-go/cmd/util/cmd/common"
+	"github.com/onflow/flow-go/fvm/systemcontracts"
+	"github.com/onflow/flow-go/model/bootstrap"
+	model "github.com/onflow/flow-go/model/bootstrap"
+	"github.com/onflow/flow-go/model/cluster"
+	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/model/flow/filter"
+	"github.com/onflow/flow-go/state/protocol/inmem"
+)
+
+// GenerateRecoverEpochTxArgs generates the required transaction arguments for the `recoverEpoch` transaction.
+func GenerateRecoverEpochTxArgs(log zerolog.Logger,
+	internalNodePrivInfoDir string,
+	nodeConfigJson string,
+	collectionClusters int,
+	epochCounter uint64,
+	rootChainID flow.ChainID,
+	numViewsInStakingAuction uint64,
+	numViewsInEpoch uint64,
+	targetDuration uint64,
+	initNewEpoch bool,
+	snapshot *inmem.Snapshot,
+) ([]cadence.Value, error) {
+	epoch := snapshot.Epochs().Current()
+
+	currentEpochIdentities, err := snapshot.Identities(filter.IsValidProtocolParticipant)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get valid protocol participants from snapshot: %w", err)
+	}
+	// We need canonical ordering here; sanity check to enforce this:
+	if !currentEpochIdentities.Sorted(flow.Canonical[flow.Identity]) {
+		return nil, fmt.Errorf("identities from snapshot are not in canonical order")
+	}
+
+	// separate collector nodes by internal and partner nodes
+	collectors := currentEpochIdentities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
+	internalCollectors := make(flow.IdentityList, 0)
+	partnerCollectors := make(flow.IdentityList, 0)
+
+	log.Info().Msg("collecting internal node network and staking keys")
+	internalNodes, err := common.ReadFullInternalNodeInfos(log, internalNodePrivInfoDir, nodeConfigJson)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read full internal node infos: %w", err)
+	}
+
+	internalNodesMap := make(map[flow.Identifier]struct{})
+	for _, node := range internalNodes {
+		if !currentEpochIdentities.Exists(node.Identity()) {
+			return nil, fmt.Errorf("node ID %s found in internal node infos is missing from protocol snapshot identities", node.NodeID)
+		}
+		internalNodesMap[node.NodeID] = struct{}{}
+	}
+	log.Info().Msg("")
+
+	for _, collector := range collectors {
+		if _, ok := internalNodesMap[collector.NodeID]; ok {
+			internalCollectors = append(internalCollectors, collector)
+		} else {
+			partnerCollectors = append(partnerCollectors, collector)
+		}
+	}
+
+	currentEpochDKG, err := epoch.DKG()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get DKG for current epoch: %w", err)
+	}
+
+	log.Info().Msg("computing collection node clusters")
+
+	assignments, clusters, err :=
common.ConstructClusterAssignment(log, partnerCollectors, internalCollectors, collectionClusters) + if err != nil { + log.Fatal().Err(err).Msg("unable to generate cluster assignment") + } + log.Info().Msg("") + + log.Info().Msg("constructing root blocks for collection node clusters") + clusterBlocks := GenerateRootClusterBlocks(epochCounter, clusters) + log.Info().Msg("") + + log.Info().Msg("constructing root QCs for collection node clusters") + clusterQCs := ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) + log.Info().Msg("") + + dkgPubKeys := make([]cadence.Value, 0) + nodeIds := make([]cadence.Value, 0) + + // NOTE: The RecoveryEpoch will re-use the last successful DKG output. This means that the consensus + // committee in the RecoveryEpoch must be identical to the committee which participated in that DKG. + dkgGroupKeyCdc, cdcErr := cadence.NewString(currentEpochDKG.GroupKey().String()) + if cdcErr != nil { + log.Fatal().Err(cdcErr).Msg("failed to get dkg group key cadence string") + } + dkgPubKeys = append(dkgPubKeys, dkgGroupKeyCdc) + for _, id := range currentEpochIdentities { + if id.GetRole() == flow.RoleConsensus { + dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(id.GetNodeID()) + if keyShareErr != nil { + log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", id.GetNodeID())) + } + dkgPubKeyCdc, cdcErr := cadence.NewString(dkgPubKey.String()) + if cdcErr != nil { + log.Fatal().Err(cdcErr).Msg(fmt.Sprintf("failed to get dkg pub key cadence string for node: %s", id.GetNodeID())) + } + dkgPubKeys = append(dkgPubKeys, dkgPubKeyCdc) + } + nodeIdCdc, err := cadence.NewString(id.GetNodeID().String()) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("failed to convert node ID to cadence string: %s", id.GetNodeID())) + } + nodeIds = append(nodeIds, nodeIdCdc) + } + clusterQCAddress := systemcontracts.SystemContractsForChain(rootChainID).ClusterQC.Address.String() + qcVoteData, err := common.ConvertClusterQcsCdc(clusterQCs, clusters, clusterQCAddress) + if err != nil { + log.Fatal().Err(err).Msg("failed to convert cluster qcs to cadence type") + } + + currEpochFinalView, err := epoch.FinalView() + if err != nil { + log.Fatal().Err(err).Msg("failed to get final view of current epoch") + } + + currEpochTargetEndTime, err := epoch.TargetEndTime() + if err != nil { + log.Fatal().Err(err).Msg("failed to get target end time of current epoch") + } + + args := []cadence.Value{ + // epoch start view + cadence.NewUInt64(currEpochFinalView + 1), + // staking phase end view + cadence.NewUInt64(currEpochFinalView + numViewsInStakingAuction), + // epoch end view + cadence.NewUInt64(currEpochFinalView + numViewsInEpoch), + // target duration + cadence.NewUInt64(targetDuration), + // target end time + cadence.NewUInt64(currEpochTargetEndTime), + // clusters, + common.ConvertClusterAssignmentsCdc(assignments), + // qcVoteData + cadence.NewArray(qcVoteData), + // dkg pub keys + cadence.NewArray(dkgPubKeys), + // node ids + cadence.NewArray(nodeIds), + // recover the network by initializing a new recover epoch which will increment the smart contract epoch counter + // or overwrite the epoch metadata for the current epoch + cadence.NewBool(initNewEpoch), + } + + return args, nil +} + +// ConstructRootQCsForClusters constructs a root QC for each cluster in the list. +// Args: +// - log: the logger instance. 
+// - clusterList: list of clusters
+// - nodeInfos: list of NodeInfos (must contain all internal nodes)
+// - clusterBlocks: list of root blocks (one for each cluster)
+// Returns:
+// - []*flow.QuorumCertificate: the list of generated root QCs, one per cluster.
+func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate {
+	if len(clusterBlocks) != len(clusterList) {
+		log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)).
+			Msg("number of clusters needs to equal number of cluster blocks")
+	}
+
+	qcs := make([]*flow.QuorumCertificate, len(clusterBlocks))
+	for i, cluster := range clusterList {
+		signers := filterClusterSigners(cluster, nodeInfos)
+
+		qc, err := GenerateClusterRootQC(signers, cluster, clusterBlocks[i])
+		if err != nil {
+			log.Fatal().Err(err).Int("cluster index", i).Msg("generating collector cluster root QC failed")
+		}
+		qcs[i] = qc
+	}
+
+	return qcs
+}
+
+// Filters a list of nodes to include only nodes that will sign the QC for the
+// given cluster. The resulting list of nodes is only nodes that are in the
+// given cluster AND are not partner nodes (i.e., we have the private keys).
+func filterClusterSigners(cluster flow.IdentitySkeletonList, nodeInfos []model.NodeInfo) []model.NodeInfo {
+	var filtered []model.NodeInfo
+	for _, node := range nodeInfos {
+		_, isInCluster := cluster.ByNodeID(node.NodeID)
+		isPrivateKeyAvailable := node.Type() == model.NodeInfoTypePrivate
+
+		if isInCluster && isPrivateKeyAvailable {
+			filtered = append(filtered, node)
+		}
+	}
+
+	return filtered
+}
diff --git a/cmd/bootstrap/utils/key_generation.go b/cmd/bootstrap/utils/key_generation.go
index 627030a789f..fd4c8c53444 100644
--- a/cmd/bootstrap/utils/key_generation.go
+++ b/cmd/bootstrap/utils/key_generation.go
@@ -7,14 +7,15 @@ import (
 	gohash "hash"
 	"io"
 
-	"github.com/onflow/crypto"
 	"golang.org/x/crypto/hkdf"
 
+	"github.com/onflow/crypto"
+
 	sdk "github.com/onflow/flow-go-sdk"
 	sdkcrypto "github.com/onflow/flow-go-sdk/crypto"
 
-	"github.com/onflow/flow-go/fvm/systemcontracts"
 	"github.com/onflow/flow-go/model/bootstrap"
+	model "github.com/onflow/flow-go/model/bootstrap"
 	"github.com/onflow/flow-go/model/encodable"
 	"github.com/onflow/flow-go/model/flow"
 )
@@ -296,3 +297,21 @@ func WriteStakingNetworkingKeyFiles(nodeInfos []bootstrap.NodeInfo, write WriteJ
 
 	return nil
 }
+
+// WriteNodeInternalPubInfos writes the `node-internal-infos.pub.json` file.
+// In a nutshell, this file contains the role, address, and weight for all authorized nodes.
+func WriteNodeInternalPubInfos(nodeInfos []bootstrap.NodeInfo, write WriteJSONFileFunc) error {
+	configs := make([]model.NodeConfig, len(nodeInfos))
+	for i, nodeInfo := range nodeInfos {
+		configs[i] = model.NodeConfig{
+			Role:    nodeInfo.Role,
+			Address: nodeInfo.Address,
+			Weight:  nodeInfo.Weight,
+		}
+	}
+	err := write(bootstrap.PathNodeInfosPub, configs)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/cmd/util/cmd/common/clusters.go b/cmd/util/cmd/common/clusters.go
index 3d51d06519e..7258dbf9ef8 100644
--- a/cmd/util/cmd/common/clusters.go
+++ b/cmd/util/cmd/common/clusters.go
@@ -7,11 +7,8 @@ import (
 	"github.com/rs/zerolog"
 
 	"github.com/onflow/cadence"
+	cdcCommon "github.com/onflow/cadence/runtime/common"
 
-	"github.com/onflow/flow-go/cmd/bootstrap/run"
-	"github.com/onflow/flow-go/model/bootstrap"
-	model "github.com/onflow/flow-go/model/bootstrap"
-	"github.com/onflow/flow-go/model/cluster"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/model/flow/assignment"
 	"github.com/onflow/flow-go/model/flow/factory"
@@ -114,36 +111,6 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes
 	return assignments, clusters, nil
 }
 
-// ConstructRootQCsForClusters constructs a root QC for each cluster in the list.
-// Args:
-// - log: the logger instance.
-// - clusterList: list of clusters
-// - nodeInfos: list of NodeInfos (must contain all internal nodes)
-// - clusterBlocks: list of root blocks for each cluster
-// Returns:
-// - flow.AssignmentList: the generated assignment list.
-// - flow.ClusterList: the generate collection cluster list.
-func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate {
-
-	if len(clusterBlocks) != len(clusterList) {
-		log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)).
-			Msg("number of clusters needs to equal number of cluster blocks")
-	}
-
-	qcs := make([]*flow.QuorumCertificate, len(clusterBlocks))
-	for i, cluster := range clusterList {
-		signers := filterClusterSigners(cluster, nodeInfos)
-
-		qc, err := run.GenerateClusterRootQC(signers, cluster, clusterBlocks[i])
-		if err != nil {
-			log.Fatal().Err(err).Int("cluster index", i).Msg("generating collector cluster root QC failed")
-		}
-		qcs[i] = qc
-	}
-
-	return qcs
-}
-
 // ConvertClusterAssignmentsCdc converts golang cluster assignments type to Cadence type `[[String]]`.
 func ConvertClusterAssignmentsCdc(assignments flow.AssignmentList) cadence.Array {
 	stringArrayType := cadence.NewVariableSizedArrayType(cadence.StringType)
@@ -163,8 +130,18 @@ func ConvertClusterAssignmentsCdc(assignments flow.AssignmentList) cadence.Array
 }
 
 // ConvertClusterQcsCdc converts cluster QCs from `QuorumCertificate` type to `ClusterQCVoteData` type.
-func ConvertClusterQcsCdc(qcs []*flow.QuorumCertificate, clusterList flow.ClusterList) ([]*flow.ClusterQCVoteData, error) {
-	voteData := make([]*flow.ClusterQCVoteData, len(qcs))
+// Args:
+// - qcs: list of quorum certificates.
+// - clusterList: the list of clusters; each cluster was used to generate one of the quorum certificates in qcs.
+// - flowClusterQCAddress: the FlowClusterQC contract address where the ClusterQCVoteData type is defined.
+//
+// Returns:
+// - []cadence.Value: cadence representation of the list of cluster qcs.
+// - error: error if the cluster qcs and cluster lists don't match in size or
+// signature indices decoding fails.
+func ConvertClusterQcsCdc(qcs []*flow.QuorumCertificate, clusterList flow.ClusterList, flowClusterQCAddress string) ([]cadence.Value, error) { + voteDataType := newClusterQCVoteDataCdcType(flowClusterQCAddress) + qcVoteData := make([]cadence.Value, len(qcs)) for i, qc := range qcs { c, ok := clusterList.ByIndex(uint(i)) if !ok { @@ -174,29 +151,42 @@ func ConvertClusterQcsCdc(qcs []*flow.QuorumCertificate, clusterList flow.Cluste if err != nil { return nil, fmt.Errorf("could not decode signer indices: %w", err) } - voteData[i] = &flow.ClusterQCVoteData{ - SigData: qc.SigData, - VoterIDs: voterIds, + cdcVoterIds := make([]cadence.Value, len(voterIds)) + for i, id := range voterIds { + cdcVoterIds[i] = cadence.String(id.String()) } - } - return voteData, nil -} + qcVoteData[i] = cadence.NewStruct([]cadence.Value{ + // aggregatedSignature + cadence.String(fmt.Sprintf("%#x", qc.SigData)), + // Node IDs of signers + cadence.NewArray(cdcVoterIds).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)), + }).WithType(voteDataType) -// Filters a list of nodes to include only nodes that will sign the QC for the -// given cluster. The resulting list of nodes is only nodes that are in the -// given cluster AND are not partner nodes (ie. we have the private keys). -func filterClusterSigners(cluster flow.IdentitySkeletonList, nodeInfos []model.NodeInfo) []model.NodeInfo { + } - var filtered []model.NodeInfo - for _, node := range nodeInfos { - _, isInCluster := cluster.ByNodeID(node.NodeID) - isNotPartner := node.Type() == model.NodeInfoTypePrivate + return qcVoteData, nil +} - if isInCluster && isNotPartner { - filtered = append(filtered, node) - } +// newClusterQCVoteDataCdcType returns the FlowClusterQC cadence struct type. +func newClusterQCVoteDataCdcType(clusterQcAddress string) *cadence.StructType { + + // FlowClusterQC.ClusterQCVoteData + address, _ := cdcCommon.HexToAddress(clusterQcAddress) + location := cdcCommon.NewAddressLocation(nil, address, "FlowClusterQC") + + return &cadence.StructType{ + Location: location, + QualifiedIdentifier: "FlowClusterQC.ClusterQCVoteData", + Fields: []cadence.Field{ + { + Identifier: "aggregatedSignature", + Type: cadence.StringType, + }, + { + Identifier: "voterIDs", + Type: cadence.NewVariableSizedArrayType(cadence.StringType), + }, + }, } - - return filtered } diff --git a/cmd/util/cmd/epochs/cmd/recover.go b/cmd/util/cmd/epochs/cmd/recover.go index 1e24d3a5460..cb56779d3f4 100644 --- a/cmd/util/cmd/epochs/cmd/recover.go +++ b/cmd/util/cmd/epochs/cmd/recover.go @@ -3,16 +3,14 @@ package cmd import ( "context" "fmt" + "os" "github.com/spf13/cobra" - "github.com/onflow/cadence" - "github.com/onflow/flow-go/cmd/bootstrap/run" "github.com/onflow/flow-go/cmd/util/cmd/common" epochcmdutil "github.com/onflow/flow-go/cmd/util/cmd/epochs/utils" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module/grpcclient" "github.com/onflow/flow-go/state/protocol/inmem" ) @@ -40,14 +38,19 @@ This recovery process has some constraints: Run: generateRecoverEpochTxArgs(getSnapshot), } + flagOut string flagAnAddress string flagAnPubkey string + flagAnInsecure bool flagInternalNodePrivInfoDir string flagNodeConfigJson string flagCollectionClusters int flagNumViewsInEpoch uint64 flagNumViewsInStakingAuction uint64 flagEpochCounter uint64 + flagTargetDuration uint64 + flagInitNewEpoch bool + flagRootChainID string ) func init() { @@ -59,6 +62,11 @@ func init() { } func addGenerateRecoverEpochTxArgsCmdFlags() 
error { + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagOut, "out", "", "file to write tx args output") + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagAnAddress, "access-address", "", "the address of the access node used to retrieve the information") + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagRootChainID, "root-chain-id", "", "the root chain id") + generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagAnPubkey, "access-network-key", "", "the network key of the access node used for client connections in hex string format") + generateRecoverEpochTxArgsCmd.Flags().BoolVar(&flagAnInsecure, "insecure", false, "set to true if the protocol snapshot should be retrieved from the insecure AN endpoint") generateRecoverEpochTxArgsCmd.Flags().IntVar(&flagCollectionClusters, "collection-clusters", 0, "number of collection clusters") // required parameters for network configuration and generation of root node identities @@ -69,8 +77,18 @@ func addGenerateRecoverEpochTxArgsCmdFlags() error { generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInEpoch, "epoch-length", 0, "length of each epoch measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagNumViewsInStakingAuction, "epoch-staking-phase-length", 0, "length of the epoch staking phase measured in views") generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagEpochCounter, "epoch-counter", 0, "the epoch counter used to generate the root cluster block") - - err := generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-length") + generateRecoverEpochTxArgsCmd.Flags().Uint64Var(&flagTargetDuration, "epoch-timing-duration", 0, "the target duration of the epoch, in seconds") + // The following option allows the RecoveryEpoch specified by this command to overwrite an epoch which already exists in the smart contract. 
+ // This is needed only if a previous recoverEpoch transaction was submitted and a race condition occurred such that: + // - the RecoveryEpoch in the admin transaction was accepted by the smart contract + // - the RecoveryEpoch service event (after sealing latency) was rejected by the Protocol State + generateRecoverEpochTxArgsCmd.Flags().BoolVar(&flagInitNewEpoch, "unsafe-overwrite-epoch-data", false, "set to true if the resulting transaction is allowed to overwrite an already specified epoch in the smart contract.") + + err := generateRecoverEpochTxArgsCmd.MarkFlagRequired("access-address") + if err != nil { + return fmt.Errorf("failed to mark access-address flag as required") + } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-length") if err != nil { return fmt.Errorf("failed to mark epoch-length flag as required") } @@ -86,12 +104,21 @@ func addGenerateRecoverEpochTxArgsCmdFlags() error { if err != nil { return fmt.Errorf("failed to mark collection-clusters flag as required") } + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("epoch-timing-duration") + if err != nil { + return fmt.Errorf("failed to mark epoch-timing-duration flag as required") + } + + err = generateRecoverEpochTxArgsCmd.MarkFlagRequired("root-chain-id") + if err != nil { + return fmt.Errorf("failed to mark root-chain-id flag as required") + } return nil } func getSnapshot() *inmem.Snapshot { // get flow client with secure client connection to download protocol snapshot from access node - config, err := grpcclient.NewFlowClientConfig(flagAnAddress, flagAnPubkey, flow.ZeroID, false) + config, err := grpcclient.NewFlowClientConfig(flagAnAddress, flagAnPubkey, flow.ZeroID, flagAnInsecure) if err != nil { log.Fatal().Err(err).Msg("failed to create flow client config") } @@ -112,139 +139,42 @@ func getSnapshot() *inmem.Snapshot { // generateRecoverEpochTxArgs generates recover epoch transaction arguments from a root protocol state snapshot and writes it to a JSON file func generateRecoverEpochTxArgs(getSnapshot func() *inmem.Snapshot) func(cmd *cobra.Command, args []string) { return func(cmd *cobra.Command, args []string) { - stdout := cmd.OutOrStdout() - - // extract arguments from recover epoch tx from snapshot - txArgs := extractRecoverEpochArgs(getSnapshot()) - + // generate transaction arguments + txArgs, err := run.GenerateRecoverEpochTxArgs( + log, + flagInternalNodePrivInfoDir, + flagNodeConfigJson, + flagCollectionClusters, + flagEpochCounter, + flow.ChainID(flagRootChainID), + flagNumViewsInStakingAuction, + flagNumViewsInEpoch, + flagTargetDuration, + flagInitNewEpoch, + getSnapshot(), + ) + if err != nil { + log.Fatal().Err(err).Msg("failed to generate recover epoch transaction arguments") + } // encode to JSON encodedTxArgs, err := epochcmdutil.EncodeArgs(txArgs) if err != nil { log.Fatal().Err(err).Msg("could not encode recover epoch transaction arguments") } - // write JSON args to stdout - _, err = stdout.Write(encodedTxArgs) - if err != nil { - log.Fatal().Err(err).Msg("could not write jsoncdc encoded arguments") - } - } -} - -// extractRecoverEpochArgs extracts the required transaction arguments for the `recoverEpoch` transaction. 
-func extractRecoverEpochArgs(snapshot *inmem.Snapshot) []cadence.Value { - epoch := snapshot.Epochs().Current() - - currentEpochIdentities, err := snapshot.Identities(filter.IsValidProtocolParticipant) - if err != nil { - log.Fatal().Err(err).Msg("failed to get valid protocol participants from snapshot") - } - - // separate collector nodes by internal and partner nodes - collectors := currentEpochIdentities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection)) - internalCollectors := make(flow.IdentityList, 0) - partnerCollectors := make(flow.IdentityList, 0) - - log.Info().Msg("collecting internal node network and staking keys") - internalNodes, err := common.ReadFullInternalNodeInfos(log, flagInternalNodePrivInfoDir, flagNodeConfigJson) - if err != nil { - log.Fatal().Err(err).Msg("failed to read full internal node infos") - } - - internalNodesMap := make(map[flow.Identifier]struct{}) - for _, node := range internalNodes { - if !currentEpochIdentities.Exists(node.Identity()) { - log.Fatal().Msg(fmt.Sprintf("node ID found in internal node infos missing from protocol snapshot identities: %s", node.NodeID)) - } - internalNodesMap[node.NodeID] = struct{}{} - } - log.Info().Msg("") - - for _, collector := range collectors { - if _, ok := internalNodesMap[collector.NodeID]; ok { - internalCollectors = append(internalCollectors, collector) - } else { - partnerCollectors = append(partnerCollectors, collector) - } - } - - currentEpochDKG, err := epoch.DKG() - if err != nil { - log.Fatal().Err(err).Msg("failed to get DKG for current epoch") - } - - log.Info().Msg("computing collection node clusters") - - assignments, clusters, err := common.ConstructClusterAssignment(log, partnerCollectors, internalCollectors, flagCollectionClusters) - if err != nil { - log.Fatal().Err(err).Msg("unable to generate cluster assignment") - } - log.Info().Msg("") - - log.Info().Msg("constructing root blocks for collection node clusters") - clusterBlocks := run.GenerateRootClusterBlocks(flagEpochCounter, clusters) - log.Info().Msg("") - - log.Info().Msg("constructing root QCs for collection node clusters") - clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks) - log.Info().Msg("") - - dkgPubKeys := make([]cadence.Value, 0) - nodeIds := make([]cadence.Value, 0) - - // NOTE: The RecoveryEpoch will re-use the last successful DKG output. This means that the consensus - // committee in the RecoveryEpoch must be identical to the committee which participated in that DKG. 
- dkgGroupKeyCdc, cdcErr := cadence.NewString(currentEpochDKG.GroupKey().String()) - if cdcErr != nil { - log.Fatal().Err(cdcErr).Msg("failed to get dkg group key cadence string") - } - dkgPubKeys = append(dkgPubKeys, dkgGroupKeyCdc) - for _, id := range currentEpochIdentities { - if id.GetRole() == flow.RoleConsensus { - dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(id.GetNodeID()) - if keyShareErr != nil { - log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", id.GetNodeID())) + if flagOut == "" { + // write JSON args to stdout + _, err = cmd.OutOrStdout().Write(encodedTxArgs) + if err != nil { + log.Fatal().Err(err).Msg("could not write jsoncdc encoded arguments") } - dkgPubKeyCdc, cdcErr := cadence.NewString(dkgPubKey.String()) - if cdcErr != nil { - log.Fatal().Err(cdcErr).Msg(fmt.Sprintf("failed to get dkg pub key cadence string for node: %s", id.GetNodeID())) + } else { + // write JSON args to file specified by flag + err := os.WriteFile(flagOut, encodedTxArgs, 0644) + if err != nil { + log.Fatal().Err(err).Msg(fmt.Sprintf("could not write jsoncdc encoded arguments to file %s", flagOut)) } - dkgPubKeys = append(dkgPubKeys, dkgPubKeyCdc) + log.Info().Msgf("wrote transaction args to output file %s", flagOut) } - nodeIdCdc, err := cadence.NewString(id.GetNodeID().String()) - if err != nil { - log.Fatal().Err(err).Msg(fmt.Sprintf("failed to convert node ID to cadence string: %s", id.GetNodeID())) - } - nodeIds = append(nodeIds, nodeIdCdc) - } - - // @TODO: cluster qcs are converted into flow.ClusterQCVoteData types, - // we need a corresponding type in cadence on the FlowClusterQC contract - // to store this struct. - _, err = common.ConvertClusterQcsCdc(clusterQCs, clusters) - if err != nil { - log.Fatal().Err(err).Msg("failed to convert cluster qcs to cadence type") - } - - currEpochFinalView, err := epoch.FinalView() - if err != nil { - log.Fatal().Err(err).Msg("failed to get final view of current epoch") } - - args := []cadence.Value{ - // epoch start view - cadence.NewUInt64(currEpochFinalView + 1), - // staking phase end view - cadence.NewUInt64(currEpochFinalView + flagNumViewsInStakingAuction), - // epoch end view - cadence.NewUInt64(currEpochFinalView + flagNumViewsInEpoch), - // dkg pub keys - cadence.NewArray(dkgPubKeys), - // node ids - cadence.NewArray(nodeIds), - // clusters, - common.ConvertClusterAssignmentsCdc(assignments), - } - - return args } diff --git a/cmd/util/cmd/epochs/cmd/recover_test.go b/cmd/util/cmd/epochs/cmd/recover_test.go index 980a9788a55..0877b1bcd23 100644 --- a/cmd/util/cmd/epochs/cmd/recover_test.go +++ b/cmd/util/cmd/epochs/cmd/recover_test.go @@ -3,14 +3,16 @@ package cmd import ( "bytes" "encoding/json" + "fmt" "testing" - "github.com/onflow/flow-go/cmd/util/cmd/common" - "github.com/onflow/flow-go/model/flow" - "github.com/stretchr/testify/require" + "github.com/onflow/cadence" + "github.com/onflow/flow-go/cmd/bootstrap/utils" + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/state/protocol/inmem" "github.com/onflow/flow-go/utils/unittest" ) @@ -26,18 +28,31 @@ func TestRecoverEpochHappyPath(t *testing.T) { require.NoError(t, err) allNodeIds := make(flow.IdentityList, 0) - for _, node := range internalNodes { - allNodeIds = append(allNodeIds, node.Identity()) - } - for _, node := range partnerNodes { + allNodeIdsCdc := make(map[cadence.String]*flow.Identity) + for _, node := range append(internalNodes, partnerNodes...) 
{ allNodeIds = append(allNodeIds, node.Identity()) + allNodeIdsCdc[cadence.String(node.Identity().NodeID.String())] = node.Identity() } // create a root snapshot rootSnapshot := unittest.RootSnapshotFixture(allNodeIds) - snapshotFn := func() *inmem.Snapshot { return rootSnapshot } + // get expected dkg information + currentEpochDKG, err := rootSnapshot.Epochs().Current().DKG() + require.NoError(t, err) + expectedDKGPubKeys := make(map[cadence.String]struct{}) + expectedDKGGroupKey := cadence.String(currentEpochDKG.GroupKey().String()) + for _, id := range allNodeIds { + if id.GetRole() == flow.RoleConsensus { + dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(id.GetNodeID()) + if keyShareErr != nil { + log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", id.GetNodeID())) + } + expectedDKGPubKeys[cadence.String(dkgPubKey.String())] = struct{}{} + } + } + // run command with overwritten stdout stdout := bytes.NewBuffer(nil) generateRecoverEpochTxArgsCmd.SetOut(stdout) @@ -45,9 +60,10 @@ func TestRecoverEpochHappyPath(t *testing.T) { flagInternalNodePrivInfoDir = internalPrivDir flagNodeConfigJson = configPath flagCollectionClusters = 2 - flagNumViewsInEpoch = 4000 - flagNumViewsInStakingAuction = 100 flagEpochCounter = 2 + flagRootChainID = flow.Localnet.String() + flagNumViewsInStakingAuction = 100 + flagNumViewsInEpoch = 4000 generateRecoverEpochTxArgs(snapshotFn)(generateRecoverEpochTxArgsCmd, nil) @@ -55,9 +71,55 @@ func TestRecoverEpochHappyPath(t *testing.T) { var outputTxArgs []interface{} err = json.NewDecoder(stdout).Decode(&outputTxArgs) require.NoError(t, err) - // compare to expected values - expectedArgs := extractRecoverEpochArgs(rootSnapshot) - unittest.VerifyCdcArguments(t, expectedArgs[:len(expectedArgs)-1], outputTxArgs[:len(expectedArgs)-1]) - // @TODO validate cadence values for generated cluster assignments and clusters + + // verify each argument + decodedValues := unittest.InterfafceToCdcValues(t, outputTxArgs) + currEpoch := rootSnapshot.Epochs().Current() + finalView, err := currEpoch.FinalView() + require.NoError(t, err) + + // epoch start view + require.Equal(t, decodedValues[0], cadence.NewUInt64(finalView+1)) + // staking phase end view + require.Equal(t, decodedValues[1], cadence.NewUInt64(finalView+flagNumViewsInStakingAuction)) + // epoch end view + require.Equal(t, decodedValues[2], cadence.NewUInt64(finalView+flagNumViewsInEpoch)) + // target duration + require.Equal(t, decodedValues[3], cadence.NewUInt64(flagTargetDuration)) + // target end time + expectedTargetEndTime, err := rootSnapshot.Epochs().Current().TargetEndTime() + require.NoError(t, err) + require.Equal(t, decodedValues[4], cadence.NewUInt64(expectedTargetEndTime)) + // clusters: we cannot guarantee order of the cluster when we generate the test fixtures + // so, we ensure each cluster member is part of the full set of node ids + for _, cluster := range decodedValues[5].(cadence.Array).Values { + for _, nodeId := range cluster.(cadence.Array).Values { + _, ok := allNodeIdsCdc[nodeId.(cadence.String)] + require.True(t, ok) + } + } + // qcVoteData: we cannot guarantee order of the cluster when we generate the test fixtures + // so, we ensure each voter id that participated in a qc vote exists and is a collection node + for _, voteData := range decodedValues[6].(cadence.Array).Values { + fields := cadence.FieldsMappedByName(voteData.(cadence.Struct)) + for _, voterId := range fields["voterIDs"].(cadence.Array).Values { + id, ok := 
allNodeIdsCdc[voterId.(cadence.String)] + require.True(t, ok) + require.Equal(t, flow.RoleCollection, id.Role) + } + } + // dkg pub keys + require.Equal(t, expectedDKGGroupKey, decodedValues[7].(cadence.Array).Values[0]) + for _, dkgPubKey := range decodedValues[7].(cadence.Array).Values[1:] { + _, ok := expectedDKGPubKeys[dkgPubKey.(cadence.String)] + require.True(t, ok) + } + // node ids + for _, nodeId := range decodedValues[8].(cadence.Array).Values { + _, ok := allNodeIdsCdc[nodeId.(cadence.String)] + require.True(t, ok) + } + // initNewEpoch + require.Equal(t, decodedValues[9], cadence.NewBool(false)) }) } diff --git a/engine/execution/state/bootstrap/bootstrap_test.go b/engine/execution/state/bootstrap/bootstrap_test.go index cd3edb6f2cb..e716d9f6ec2 100644 --- a/engine/execution/state/bootstrap/bootstrap_test.go +++ b/engine/execution/state/bootstrap/bootstrap_test.go @@ -53,7 +53,7 @@ func TestBootstrapLedger(t *testing.T) { } func TestBootstrapLedger_ZeroTokenSupply(t *testing.T) { - expectedStateCommitmentBytes, _ := hex.DecodeString("7cb3d30faaaab3cb402338023b3a068bc856fb788086b1212aa0f1950f24d854") + expectedStateCommitmentBytes, _ := hex.DecodeString("a11fe9eb514a20f849649c9d903dc32f93ce216226e49d568757707a9995aec6") expectedStateCommitment, err := flow.ToStateCommitment(expectedStateCommitmentBytes) require.NoError(t, err) diff --git a/go.mod b/go.mod index 4478289e694..b2c26cc8ee2 100644 --- a/go.mod +++ b/go.mod @@ -50,8 +50,8 @@ require ( github.com/onflow/cadence v1.0.0-preview.34 github.com/onflow/crypto v0.25.1 github.com/onflow/flow v0.3.4 - github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.0 - github.com/onflow/flow-core-contracts/lib/go/templates v1.3.0 + github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.1-0.20240710025346-d8135ebb9920 + github.com/onflow/flow-core-contracts/lib/go/templates v1.3.1-0.20240710025346-d8135ebb9920 github.com/onflow/flow-go-sdk v1.0.0-preview.36 github.com/onflow/flow/protobuf/go/flow v0.4.4 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 diff --git a/go.sum b/go.sum index 7beb1e4191c..7efdc702141 100644 --- a/go.sum +++ b/go.sum @@ -2178,10 +2178,10 @@ github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A= github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onflow/flow v0.3.4 h1:FXUWVdYB90f/rjNcY0Owo30gL790tiYff9Pb/sycXYE= github.com/onflow/flow v0.3.4/go.mod h1:lzyAYmbu1HfkZ9cfnL5/sjrrsnJiUU8fRL26CqLP7+c= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.0 h1:cq3RfBr9TnTSnsGlUHMjMGZib24Horfb1XJqMpkN5ew= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.0/go.mod h1:u/mkP/B+PbV33tEG3qfkhhBlydSvAKxfLZSfB4lsJHg= -github.com/onflow/flow-core-contracts/lib/go/templates v1.3.0 h1:aMFJdB2CW+Dzm+AJ5QN6J1yWh+a0l2RxHN2/TtLaXUo= -github.com/onflow/flow-core-contracts/lib/go/templates v1.3.0/go.mod h1:NgbMOYnMh0GN48VsNKZuiwK7uyk38Wyo8jN9+C9QE30= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.1-0.20240710025346-d8135ebb9920 h1:dvpU5WG++j9s3XXbFnMv/eVdupEHh1Xb0NyLZYDEz7A= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.1-0.20240710025346-d8135ebb9920/go.mod h1:u/mkP/B+PbV33tEG3qfkhhBlydSvAKxfLZSfB4lsJHg= +github.com/onflow/flow-core-contracts/lib/go/templates v1.3.1-0.20240710025346-d8135ebb9920 h1:SBUd8cq9OThExucjUO3EFMuzA5Sp8NLGUjch2phxKfg= +github.com/onflow/flow-core-contracts/lib/go/templates v1.3.1-0.20240710025346-d8135ebb9920/go.mod h1:NgbMOYnMh0GN48VsNKZuiwK7uyk38Wyo8jN9+C9QE30= 
github.com/onflow/flow-ft/lib/go/contracts v1.0.0 h1:mToacZ5NWqtlWwk/7RgIl/jeKB/Sy/tIXdw90yKHcV0= github.com/onflow/flow-ft/lib/go/contracts v1.0.0/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= github.com/onflow/flow-ft/lib/go/templates v1.0.0 h1:6cMS/lUJJ17HjKBfMO/eh0GGvnpElPgBXx7h5aoWJhs= diff --git a/insecure/go.mod b/insecure/go.mod index 653b5dddc0f..f61c3719c47 100644 --- a/insecure/go.mod +++ b/insecure/go.mod @@ -200,8 +200,8 @@ require ( github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onflow/atree v0.7.0-rc.2 // indirect github.com/onflow/cadence v1.0.0-preview.34 // indirect - github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.0 // indirect - github.com/onflow/flow-core-contracts/lib/go/templates v1.3.0 // indirect + github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.1-0.20240710025346-d8135ebb9920 // indirect + github.com/onflow/flow-core-contracts/lib/go/templates v1.3.1-0.20240710025346-d8135ebb9920 // indirect github.com/onflow/flow-ft/lib/go/contracts v1.0.0 // indirect github.com/onflow/flow-ft/lib/go/templates v1.0.0 // indirect github.com/onflow/flow-go-sdk v1.0.0-preview.36 // indirect diff --git a/insecure/go.sum b/insecure/go.sum index 030ffa3f4cb..5feb6732c31 100644 --- a/insecure/go.sum +++ b/insecure/go.sum @@ -2165,10 +2165,10 @@ github.com/onflow/cadence v1.0.0-preview.34/go.mod h1:jOwvPSSLTr9TvaKMs7KKiBYMmp github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A= github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.0 h1:cq3RfBr9TnTSnsGlUHMjMGZib24Horfb1XJqMpkN5ew= -github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.0/go.mod h1:u/mkP/B+PbV33tEG3qfkhhBlydSvAKxfLZSfB4lsJHg= -github.com/onflow/flow-core-contracts/lib/go/templates v1.3.0 h1:aMFJdB2CW+Dzm+AJ5QN6J1yWh+a0l2RxHN2/TtLaXUo= -github.com/onflow/flow-core-contracts/lib/go/templates v1.3.0/go.mod h1:NgbMOYnMh0GN48VsNKZuiwK7uyk38Wyo8jN9+C9QE30= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.1-0.20240710025346-d8135ebb9920 h1:dvpU5WG++j9s3XXbFnMv/eVdupEHh1Xb0NyLZYDEz7A= +github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.1-0.20240710025346-d8135ebb9920/go.mod h1:u/mkP/B+PbV33tEG3qfkhhBlydSvAKxfLZSfB4lsJHg= +github.com/onflow/flow-core-contracts/lib/go/templates v1.3.1-0.20240710025346-d8135ebb9920 h1:SBUd8cq9OThExucjUO3EFMuzA5Sp8NLGUjch2phxKfg= +github.com/onflow/flow-core-contracts/lib/go/templates v1.3.1-0.20240710025346-d8135ebb9920/go.mod h1:NgbMOYnMh0GN48VsNKZuiwK7uyk38Wyo8jN9+C9QE30= github.com/onflow/flow-ft/lib/go/contracts v1.0.0 h1:mToacZ5NWqtlWwk/7RgIl/jeKB/Sy/tIXdw90yKHcV0= github.com/onflow/flow-ft/lib/go/contracts v1.0.0/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= github.com/onflow/flow-ft/lib/go/templates v1.0.0 h1:6cMS/lUJJ17HjKBfMO/eh0GGvnpElPgBXx7h5aoWJhs= diff --git a/integration/go.mod b/integration/go.mod index ce66485b5a2..37939172b51 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -21,8 +21,8 @@ require ( github.com/libp2p/go-libp2p v0.32.2 github.com/onflow/cadence v1.0.0-preview.34 github.com/onflow/crypto v0.25.1 - github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.0 - github.com/onflow/flow-core-contracts/lib/go/templates v1.3.0 + github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.1-0.20240710025346-d8135ebb9920 + github.com/onflow/flow-core-contracts/lib/go/templates 
v1.3.1-0.20240710025346-d8135ebb9920
 	github.com/onflow/flow-emulator v1.0.0-preview.24
 	github.com/onflow/flow-go v0.35.5-0.20240517202625-55f862b45dfd
 	github.com/onflow/flow-go-sdk v1.0.0-preview.36
diff --git a/integration/go.sum b/integration/go.sum
index 821a5c27fd9..760917ecf47 100644
--- a/integration/go.sum
+++ b/integration/go.sum
@@ -2155,10 +2155,10 @@ github.com/onflow/cadence v1.0.0-preview.34/go.mod h1:jOwvPSSLTr9TvaKMs7KKiBYMmp
 github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI=
 github.com/onflow/crypto v0.25.1 h1:0txy2PKPMM873JbpxQNbJmuOJtD56bfs48RQfm0ts5A=
 github.com/onflow/crypto v0.25.1/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI=
-github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.0 h1:cq3RfBr9TnTSnsGlUHMjMGZib24Horfb1XJqMpkN5ew=
-github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.0/go.mod h1:u/mkP/B+PbV33tEG3qfkhhBlydSvAKxfLZSfB4lsJHg=
-github.com/onflow/flow-core-contracts/lib/go/templates v1.3.0 h1:aMFJdB2CW+Dzm+AJ5QN6J1yWh+a0l2RxHN2/TtLaXUo=
-github.com/onflow/flow-core-contracts/lib/go/templates v1.3.0/go.mod h1:NgbMOYnMh0GN48VsNKZuiwK7uyk38Wyo8jN9+C9QE30=
+github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.1-0.20240710025346-d8135ebb9920 h1:dvpU5WG++j9s3XXbFnMv/eVdupEHh1Xb0NyLZYDEz7A=
+github.com/onflow/flow-core-contracts/lib/go/contracts v1.3.1-0.20240710025346-d8135ebb9920/go.mod h1:u/mkP/B+PbV33tEG3qfkhhBlydSvAKxfLZSfB4lsJHg=
+github.com/onflow/flow-core-contracts/lib/go/templates v1.3.1-0.20240710025346-d8135ebb9920 h1:SBUd8cq9OThExucjUO3EFMuzA5Sp8NLGUjch2phxKfg=
+github.com/onflow/flow-core-contracts/lib/go/templates v1.3.1-0.20240710025346-d8135ebb9920/go.mod h1:NgbMOYnMh0GN48VsNKZuiwK7uyk38Wyo8jN9+C9QE30=
 github.com/onflow/flow-emulator v1.0.0-preview.24 h1:SonXMBeYxVwNn94M+OUmKIYScIMQG22wugh9n/tHY5k=
 github.com/onflow/flow-emulator v1.0.0-preview.24/go.mod h1:QprPouTWO3iv9VF/y4Ksltv2XIbzNMzjjr5zzq51i7Q=
 github.com/onflow/flow-ft/lib/go/contracts v1.0.0 h1:mToacZ5NWqtlWwk/7RgIl/jeKB/Sy/tIXdw90yKHcV0=
diff --git a/integration/testnet/network.go b/integration/testnet/network.go
index 2d76863109a..729d13f7d78 100644
--- a/integration/testnet/network.go
+++ b/integration/testnet/network.go
@@ -568,7 +568,7 @@ func PrepareFlowNetwork(t *testing.T, networkConf NetworkConfig, chainID flow.Ch
 	t.Logf("BootstrapDir: %s \n", bootstrapDir)
 
 	bootstrapData, err := BootstrapNetwork(networkConf, bootstrapDir, chainID)
-	require.Nil(t, err)
+	require.NoError(t, err)
 
 	root := bootstrapData.Root
 	result := bootstrapData.Result
@@ -1112,6 +1112,11 @@ func BootstrapNetwork(networkConf NetworkConfig, bootstrapDir string, chainID fl
 		return nil, fmt.Errorf("failed to write machine account files: %w", err)
 	}
 
+	err = utils.WriteNodeInternalPubInfos(allNodeInfos, writeJSONFile)
+	if err != nil {
+		return nil, fmt.Errorf("failed to write node public info file: %w", err)
+	}
+
 	// define root block parameters
 	parentID := flow.ZeroID
 	height := uint64(0)
diff --git a/integration/tests/epochs/base_suite.go b/integration/tests/epochs/base_suite.go
index 29fdc88d0d5..fc6e3c13710 100644
--- a/integration/tests/epochs/base_suite.go
+++ b/integration/tests/epochs/base_suite.go
@@ -21,6 +21,7 @@ import (
 	"github.com/onflow/flow-go/integration/testnet"
 	"github.com/onflow/flow-go/integration/tests/lib"
 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/state/protocol/inmem"
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
@@ -29,7 +30,7 @@ type BaseSuite struct {
 	suite.Suite
 	lib.TestnetStateTracker
 	cancel  context.CancelFunc
-	log     zerolog.Logger
+	Log     zerolog.Logger
 	Net     *testnet.FlowNetwork
 	ghostID flow.Identifier
 
@@ -41,6 +42,7 @@ type BaseSuite struct {
 	DKGPhaseLen                uint64
 	EpochLen                   uint64
 	EpochCommitSafetyThreshold uint64
+	NumOfCollectionClusters    int
 	// Whether approvals are required for sealing (we only enable for VN tests because
 	// requiring approvals requires a longer DKG period to avoid flakiness)
 	RequiredSealApprovals uint // defaults to 0 (no approvals required)
@@ -59,10 +61,10 @@ func (s *BaseSuite) SetupTest() {
 	require.Greater(s.T(), s.EpochLen, minEpochLength+s.EpochCommitSafetyThreshold, "epoch too short")
 
 	s.Ctx, s.cancel = context.WithCancel(context.Background())
-	s.log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel)
-	s.log.Info().Msg("================> SetupTest")
+	s.Log = unittest.LoggerForTest(s.Suite.T(), zerolog.InfoLevel)
+	s.Log.Info().Msg("================> SetupTest")
 	defer func() {
-		s.log.Info().Msg("================> Finish SetupTest")
+		s.Log.Info().Msg("================> Finish SetupTest")
 	}()
 
 	accessConfig := []func(*testnet.NodeConfig){
@@ -119,7 +121,14 @@
 	s.Client = client
 
 	// log network info periodically to aid in debugging future flaky tests
-	go lib.LogStatusPeriodically(s.T(), s.Ctx, s.log, s.Client, 5*time.Second)
+	go lib.LogStatusPeriodically(s.T(), s.Ctx, s.Log, s.Client, 5*time.Second)
+}
+
+func (s *BaseSuite) TearDownTest() {
+	s.Log.Info().Msg("================> Start TearDownTest")
+	s.Net.Remove()
+	s.cancel()
+	s.Log.Info().Msg("================> Finish TearDownTest")
 }
 
 func (s *BaseSuite) Ghost() *client.GhostClient {
@@ -131,7 +140,7 @@
 // TimedLogf logs the message using t.Log and the suite logger, but prefixes the current time.
 // This enables viewing logs inline with Docker logs as well as other test logs.
 func (s *BaseSuite) TimedLogf(msg string, args ...interface{}) {
-	s.log.Info().Msgf(msg, args...)
+	s.Log.Info().Msgf(msg, args...)
 	args = append([]interface{}{time.Now().String()}, args...)
 	s.T().Logf("%s - "+msg, args...)
 }
@@ -155,8 +164,53 @@
 }
 
 // GetContainersByRole returns all containers from the network for the specified role, making sure the containers are not ghost nodes.
+// Since Go maps have random iteration order, the list of containers returned will be in random order.
 func (s *BaseSuite) GetContainersByRole(role flow.Role) []*testnet.Container {
 	nodes := s.Net.ContainersByRole(role, false)
 	require.True(s.T(), len(nodes) > 0)
 	return nodes
 }
+
+// AwaitFinalizedView polls until it observes that the latest finalized block has a view
+// greater than or equal to the input view. This is used to wait until an epoch
+// transition must have happened.
+func (s *BaseSuite) AwaitFinalizedView(ctx context.Context, view uint64, waitFor, tick time.Duration) {
+	require.Eventually(s.T(), func() bool {
+		finalized := s.GetLatestFinalizedHeader(ctx)
+		return finalized.View >= view
+	}, waitFor, tick)
+}
+
+// GetLatestFinalizedHeader retrieves the latest finalized block, as reported in LatestSnapshot.
+func (s *BaseSuite) GetLatestFinalizedHeader(ctx context.Context) *flow.Header {
+	snapshot := s.GetLatestProtocolSnapshot(ctx)
+	finalized, err := snapshot.Head()
+	require.NoError(s.T(), err)
+	return finalized
+}
+
+// AssertInEpoch requires that the current epoch's counter (as of the latest finalized block) is equal to the counter value provided.
+func (s *BaseSuite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) { + actualEpoch := s.CurrentEpoch(ctx) + require.Equalf(s.T(), expectedEpoch, actualEpoch, "expected to be in epoch %d got %d", expectedEpoch, actualEpoch) +} + +// CurrentEpoch returns the current epoch counter (as of the latest finalized block). +func (s *BaseSuite) CurrentEpoch(ctx context.Context) uint64 { + snapshot := s.GetLatestProtocolSnapshot(ctx) + counter, err := snapshot.Epochs().Current().Counter() + require.NoError(s.T(), err) + return counter +} + +// GetLatestProtocolSnapshot returns the protocol snapshot as of the latest finalized block. +func (s *BaseSuite) GetLatestProtocolSnapshot(ctx context.Context) *inmem.Snapshot { + snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) + require.NoError(s.T(), err) + return snapshot +} + +// GetDKGEndView returns the end view of the dkg. +func (s *BaseSuite) GetDKGEndView() uint64 { + return s.StakingAuctionLen + (s.DKGPhaseLen * 3) +} diff --git a/integration/tests/epochs/dynamic_epoch_transition_suite.go b/integration/tests/epochs/dynamic_epoch_transition_suite.go index 3c714b42a7f..47dc158c631 100644 --- a/integration/tests/epochs/dynamic_epoch_transition_suite.go +++ b/integration/tests/epochs/dynamic_epoch_transition_suite.go @@ -341,25 +341,6 @@ func (s *DynamicEpochTransitionSuite) StakeNewNode(ctx context.Context, env temp return info, testContainer } -// AwaitFinalizedView polls until it observes that the latest finalized block has a view -// greater than or equal to the input view. This is used to wait until when an epoch -// transition must have happened. -func (s *DynamicEpochTransitionSuite) AwaitFinalizedView(ctx context.Context, view uint64, waitFor, tick time.Duration) { - require.Eventually(s.T(), func() bool { - sealed := s.getLatestFinalizedHeader(ctx) - return sealed.View >= view - }, waitFor, tick) -} - -// getLatestFinalizedHeader retrieves the latest finalized block, as reported in LatestSnapshot. -func (s *DynamicEpochTransitionSuite) getLatestFinalizedHeader(ctx context.Context) *flow.Header { - snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - finalized, err := snapshot.Head() - require.NoError(s.T(), err) - return finalized -} - // AssertInEpochPhase checks if we are in the phase of the given epoch. func (s *DynamicEpochTransitionSuite) AssertInEpochPhase(ctx context.Context, expectedEpoch uint64, expectedPhase flow.EpochPhase) { snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) @@ -376,15 +357,6 @@ func (s *DynamicEpochTransitionSuite) AssertInEpochPhase(ctx context.Context, ex s.TimedLogf("asserted in epoch %d, phase %s, finalized height/view: %d/%d", expectedEpoch, expectedPhase, head.Height, head.View) } -// AssertInEpoch requires actual epoch counter is equal to counter provided. -func (s *DynamicEpochTransitionSuite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) { - snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx) - require.NoError(s.T(), err) - actualEpoch, err := snapshot.Epochs().Current().Counter() - require.NoError(s.T(), err) - require.Equalf(s.T(), expectedEpoch, actualEpoch, "expected to be in epoch %d got %d", expectedEpoch, actualEpoch) -} - // AssertNodeNotParticipantInEpoch asserts that the given node ID does not exist // in the epoch's identity table. 
func (s *DynamicEpochTransitionSuite) AssertNodeNotParticipantInEpoch(epoch protocol.Epoch, nodeID flow.Identifier) {
diff --git a/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go b/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go
index 6de2caaba21..853768d6a41 100644
--- a/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go
+++ b/integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go
@@ -1,13 +1,15 @@
 package recover_epoch
 
 import (
-	"context"
-	"fmt"
+	"strings"
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
+	sdk "github.com/onflow/flow-go-sdk"
+
+	"github.com/onflow/flow-go/integration/utils"
 	"github.com/onflow/flow-go/model/flow"
 )
 
@@ -19,16 +21,78 @@
 type RecoverEpochSuite struct {
 	Suite
 }
 
-// TestRecoverEpoch ensures that the recover_epoch transaction flow works as expected. This test will simulate the network going
-// into EFM by taking a consensus node offline before completing the DKG. While in EFM mode the test will execute the efm-recover-tx-args
-// CLI command to generate transaction arguments to submit a recover_epoch transaction, after submitting the transaction the test will
-// ensure the network is healthy.
+// TestRecoverEpoch ensures that the recover epoch governance transaction flow works as expected and a network that
+// enters Epoch Fallback Mode can successfully recover. This test will do the following:
+// 1. Triggers EFM by turning off the sole collection node before the end of the DKG, forcing the DKG to fail.
+// 2. Generates epoch recover transaction args using the efm-recover-tx-args CLI command.
+// 3. Submits the recover epoch transaction.
+// 4. Ensures the expected EpochRecover event is emitted.
+// TODO(EFM, #6164): Currently, this test does not cover the processing of the EpochRecover event.
 func (s *RecoverEpochSuite) TestRecoverEpoch() {
-	s.AwaitEpochPhase(context.Background(), 0, flow.EpochPhaseSetup, 20*time.Second, time.Second)
-	fmt.Println("in epoch phase setup")
+	// 1. Manually trigger EFM
+	// wait until the epoch setup phase to force network into EFM
+	s.AwaitEpochPhase(s.Ctx, 0, flow.EpochPhaseSetup, 10*time.Second, 500*time.Millisecond)
 
-	sns := s.GetContainersByRole(flow.RoleConsensus)
-	_ = sns[0].Pause()
+	// We set the DKG phase view length to 10, which is very short and should cause the network to go into EFM
+	// without pausing the collection node. In practice this is not the case; we still need to pause the collection node.
+	// TODO(EFM, #6164): Investigate why a short DKG phase length of 10 views does not trigger EFM without pausing the container; see https://github.com/onflow/flow-go/issues/6164
+	ln := s.GetContainersByRole(flow.RoleCollection)[0]
+	require.NoError(s.T(), ln.Pause())
+	s.AwaitFinalizedView(s.Ctx, s.GetDKGEndView(), 2*time.Minute, 500*time.Millisecond)
+	// start the paused collection node now that we are in EFM
+	require.NoError(s.T(), ln.Start())
 
-	// @TODO: trigger EFM manually
+	// get final view from the latest snapshot
+	epoch1FinalView, err := s.Net.BootstrapSnapshot.Epochs().Current().FinalView()
+	require.NoError(s.T(), err)
+
+	// wait for at least the first block of the next epoch to be sealed so that we can
+	// ensure that we are still in the same epoch after the final view of that epoch, indicating we are in EFM
+	s.TimedLogf("waiting for epoch transition (finalized view %d)", epoch1FinalView+1)
+	s.AwaitFinalizedView(s.Ctx, epoch1FinalView+1, 2*time.Minute, 500*time.Millisecond)
+	s.TimedLogf("observed finalized view %d", epoch1FinalView+1)
+
+	// assert transition to second epoch did not happen
+	// if counter is still 0, epoch emergency fallback was triggered as expected
+	s.AssertInEpoch(s.Ctx, 0)
+
+	// 2. Generate transaction arguments for the epoch recover transaction.
+	collectionClusters := s.NumOfCollectionClusters
+	numViewsInRecoveryEpoch := s.EpochLen
+	numViewsInStakingAuction := s.StakingAuctionLen
+	epochCounter := uint64(1)
+
+	txArgs := s.executeEFMRecoverTXArgsCMD(
+		collectionClusters,
+		numViewsInRecoveryEpoch,
+		numViewsInStakingAuction,
+		epochCounter,
+		// cruise control is disabled for integration tests
+		// targetDuration and targetEndTime will be ignored
+		3000,
+		4000,
+	)
+
+	// 3. Submit the recover epoch transaction to the network.
+	env := utils.LocalnetEnv()
+	result := s.recoverEpoch(env, txArgs)
+	require.NoError(s.T(), result.Error)
+	require.Equal(s.T(), result.Status, sdk.TransactionStatusSealed)
+
+	// 4. Ensure the EpochRecover event was emitted.
+	eventType := ""
+	for _, evt := range result.Events {
+		if strings.Contains(evt.Type, "FlowEpoch.EpochRecover") {
+			eventType = evt.Type
+			break
+		}
+	}
+	require.NotEmpty(s.T(), eventType, "expected FlowEpoch.EpochRecover event type")
+	events, err := s.Client.GetEventsForBlockIDs(s.Ctx, eventType, []sdk.Identifier{result.BlockID})
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), events[0].Events[0].Type, eventType)
+
+	// 5. TODO(EFM, #6164): ensure the EpochRecover service event is processed by the fallback state machine and the network recovers.
 }
diff --git a/integration/tests/epochs/recover_epoch/suite.go b/integration/tests/epochs/recover_epoch/suite.go
index 49e5a3ace58..9b00b0345e4 100644
--- a/integration/tests/epochs/recover_epoch/suite.go
+++ b/integration/tests/epochs/recover_epoch/suite.go
@@ -1,7 +1,20 @@
 package recover_epoch
 
 import (
+	"fmt"
+
+	"github.com/onflow/cadence"
+	"github.com/onflow/flow-core-contracts/lib/go/templates"
+
+	sdk "github.com/onflow/flow-go-sdk"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/flow-go/cmd/bootstrap/run"
 	"github.com/onflow/flow-go/integration/tests/epochs"
+	"github.com/onflow/flow-go/integration/utils"
+	"github.com/onflow/flow-go/model/bootstrap"
+	"github.com/onflow/flow-go/model/flow"
 )
 
 // Suite encapsulates common functionality for epoch integration tests.
@@ -12,10 +25,78 @@ type Suite struct {
 }
 
 func (s *Suite) SetupTest() {
 	// use a shorter staking auction because we don't have staking operations in this case
 	s.StakingAuctionLen = 2
-	s.DKGPhaseLen = 50
-	s.EpochLen = 250
+	// to manually trigger EFM, we assign a very short DKG phase length, ensuring the DKG will fail
+	s.DKGPhaseLen = 10
+	s.EpochLen = 80
 	s.EpochCommitSafetyThreshold = 20
+	s.NumOfCollectionClusters = 1
 
 	// run the generic setup, which starts up the network
 	s.BaseSuite.SetupTest()
 }
+
+// getNodeInfoDirs returns the internal node private info dir and the node config dir from a container with the specified role.
+func (s *Suite) getNodeInfoDirs(role flow.Role) (string, string) {
+	bootstrapPath := s.GetContainersByRole(role)[0].BootstrapPath()
+	internalNodePrivInfoDir := fmt.Sprintf("%s/%s", bootstrapPath, bootstrap.DirPrivateRoot)
+	nodeConfigJson := fmt.Sprintf("%s/%s", bootstrapPath, bootstrap.PathNodeInfosPub)
+	return internalNodePrivInfoDir, nodeConfigJson
+}
+
+// executeEFMRecoverTXArgsCMD executes the efm-recover-tx-args CLI command to generate EpochRecover transaction arguments.
+// Args:
+//
+//	collectionClusters: the number of collector clusters.
+//	numViewsInEpoch: the number of views in the recovery epoch.
+//	numViewsInStakingAuction: the number of views in the staking auction of the recovery epoch.
+//	epochCounter: the epoch counter.
+//	targetDuration: the target duration for the recover epoch.
+//	targetEndTime: the target end time for the recover epoch.
+//
+// Returns:
+//
+//	[]cadence.Value: the transaction arguments.
+func (s *Suite) executeEFMRecoverTXArgsCMD(collectionClusters int, numViewsInEpoch, numViewsInStakingAuction, epochCounter, targetDuration, targetEndTime uint64) []cadence.Value {
+	// read internal node info from one of the consensus nodes
+	internalNodePrivInfoDir, nodeConfigJson := s.getNodeInfoDirs(flow.RoleConsensus)
+	snapshot := s.GetLatestProtocolSnapshot(s.Ctx)
+	txArgs, err := run.GenerateRecoverEpochTxArgs(
+		s.Log,
+		internalNodePrivInfoDir,
+		nodeConfigJson,
+		collectionClusters,
+		epochCounter,
+		flow.Localnet,
+		numViewsInStakingAuction,
+		numViewsInEpoch,
+		targetDuration,
+		false,
+		snapshot,
+	)
+	require.NoError(s.T(), err)
+	return txArgs
+}
+
+// recoverEpoch submits the recover epoch transaction to the network.
+
+// recoverEpoch submits the recover epoch transaction to the network.
+func (s *Suite) recoverEpoch(env templates.Environment, args []cadence.Value) *sdk.TransactionResult {
+	latestBlockID, err := s.Client.GetLatestBlockID(s.Ctx)
+	require.NoError(s.T(), err)
+
+	tx, err := utils.MakeRecoverEpochTx(
+		env,
+		s.Client.Account(),
+		0, // admin account key index
+		sdk.Identifier(latestBlockID),
+		args,
+	)
+	require.NoError(s.T(), err)
+
+	err = s.Client.SignAndSendTransaction(s.Ctx, tx)
+	require.NoError(s.T(), err)
+	result, err := s.Client.WaitForSealed(s.Ctx, tx.ID())
+	require.NoError(s.T(), err)
+	// the client does not track the sequence number automatically, so advance it manually
+	s.Client.Account().Keys[0].SequenceNumber++
+	require.NoError(s.T(), result.Error)
+
+	return result
+}
diff --git a/integration/utils/arguments.go b/integration/utils/arguments.go
new file mode 100644
index 00000000000..d14d730265f
--- /dev/null
+++ b/integration/utils/arguments.go
@@ -0,0 +1,42 @@
+package utils
+
+import (
+	"encoding/json"
+
+	"github.com/onflow/cadence"
+	jsoncdc "github.com/onflow/cadence/encoding/json"
+)
+
+type cadenceArgument struct {
+	Value cadence.Value
+}
+
+func (v *cadenceArgument) MarshalJSON() ([]byte, error) {
+	return jsoncdc.Encode(v.Value)
+}
+
+func (v *cadenceArgument) UnmarshalJSON(b []byte) (err error) {
+	v.Value, err = jsoncdc.Decode(nil, b)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ParseJSON parses a byte slice representing a JSON array of Cadence arguments.
+//
+// Cadence arguments must be defined in the JSON-Cadence format https://developers.flow.com/cadence/json-cadence-spec
+func ParseJSON(args []byte) ([]cadence.Value, error) {
+	var parsedArgs []cadenceArgument
+	err := json.Unmarshal(args, &parsedArgs)
+	if err != nil {
+		return nil, err
+	}
+
+	cadenceArgs := make([]cadence.Value, len(parsedArgs))
+	for i, arg := range parsedArgs {
+		cadenceArgs[i] = arg.Value
+	}
+	return cadenceArgs, nil
+}
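The new arguments.go helper round-trips values through the JSON-Cadence encoding. A minimal usage sketch of ParseJSON (the JSON literal is illustrative; see the spec linked in the doc comment for the full format):

package main

import (
	"fmt"

	"github.com/onflow/flow-go/integration/utils"
)

func main() {
	// two arguments in JSON-Cadence form: a UInt64 and a Bool
	raw := []byte(`[{"type":"UInt64","value":"42"},{"type":"Bool","value":true}]`)

	args, err := utils.ParseJSON(raw)
	if err != nil {
		panic(err)
	}
	for _, arg := range args {
		fmt.Printf("%s: %s\n", arg.Type().ID(), arg.String())
	}
}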
diff --git a/integration/utils/transactions.go b/integration/utils/transactions.go
index 1eaef320b68..475d5ad3b6d 100644
--- a/integration/utils/transactions.go
+++ b/integration/utils/transactions.go
@@ -29,6 +29,7 @@ var setProtocolStateVersionScript string
 
 func LocalnetEnv() templates.Environment {
 	return templates.Environment{
+		EpochAddress:         "f8d6e0586b0a20c7",
 		IDTableAddress:       "f8d6e0586b0a20c7",
 		FungibleTokenAddress: "ee82856bf20e2aa6",
 		FlowTokenAddress:     "0ae53cb6e3f42a79",
@@ -214,7 +215,7 @@ func MakeSetProtocolStateVersionTx(
 	return tx, nil
 }
 
-// submitSmokeTestTransaction will submit a create account transaction to smoke test network
+// CreateFlowAccount will submit a create account transaction to smoke test the network.
 // This ensures a single transaction can be sealed by the network.
 func CreateFlowAccount(ctx context.Context, client *testnet.Client) (sdk.Address, error) {
 	fullAccountKey := sdk.NewAccountKey().
@@ -235,3 +236,30 @@ func CreateFlowAccount(ctx context.Context, client *testnet.Client) (sdk.Address
 
 	return addr, nil
 }
+
+// MakeRecoverEpochTx makes an admin transaction to recover the network when it is in EFM.
+func MakeRecoverEpochTx(
+	env templates.Environment,
+	adminAccount *sdk.Account,
+	adminAccountKeyID int,
+	latestBlockID sdk.Identifier,
+	args []cadence.Value,
+) (*sdk.Transaction, error) {
+	accountKey := adminAccount.Keys[adminAccountKeyID]
+	tx := sdk.NewTransaction().
+		SetScript([]byte(templates.GenerateRecoverEpochScript(env))).
+		SetComputeLimit(9999).
+		SetReferenceBlockID(latestBlockID).
+		SetProposalKey(adminAccount.Address, adminAccountKeyID, accountKey.SequenceNumber).
+		SetPayer(adminAccount.Address).
+		AddAuthorizer(adminAccount.Address)
+
+	for _, arg := range args {
+		err := tx.AddArgument(arg)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return tx, nil
+}
diff --git a/model/convert/service_event.go b/model/convert/service_event.go
index 5e55b021f9e..e5643957c18 100644
--- a/model/convert/service_event.go
+++ b/model/convert/service_event.go
@@ -733,11 +733,6 @@ func convertClusterQCVoteData(cdcClusterQCVoteData []cadence.Value) ([]flow.Clus
 		)
 	}
 
-	cdcRawVotes, err := getField[cadence.Array](fields, "voteSignatures")
-	if err != nil {
-		return nil, fmt.Errorf("failed to decode clusterQCVoteData struct: %w", err)
-	}
-
 	cdcVoterIDs, err := getField[cadence.Array](fields, "voterIDs")
 	if err != nil {
 		return nil, fmt.Errorf("failed to decode clusterQCVoteData struct: %w", err)
@@ -760,32 +755,14 @@ func convertClusterQCVoteData(cdcClusterQCVoteData []cadence.Value) ([]flow.Clus
 		voterIDs = append(voterIDs, voterID)
 	}
 
-	// gather all the vote signatures
-	signatures := make([]crypto.Signature, 0, len(cdcRawVotes.Values))
-	for _, cdcRawVote := range cdcRawVotes.Values {
-		rawVoteHex, ok := cdcRawVote.(cadence.String)
-		if !ok {
-			return nil, invalidCadenceTypeError(
-				"clusterQC[i].vote",
-				cdcRawVote,
-				cadence.String(""),
-			)
-		}
-		rawVoteBytes, err := hex.DecodeString(string(rawVoteHex))
-		if err != nil {
-			return nil, fmt.Errorf("could not convert raw vote from hex: %w", err)
-		}
-		signatures = append(signatures, rawVoteBytes)
+	cdcAggSignature, err := getField[cadence.String](fields, "aggregatedSignature")
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode clusterQCVoteData struct: %w", err)
 	}
-	// Aggregate BLS signatures
-	aggregatedSignature, err := crypto.AggregateBLSSignatures(signatures)
+
+	aggregatedSignature, err := hex.DecodeString(string(cdcAggSignature))
 	if err != nil {
-		// expected errors of the function are:
-		// - empty list of signatures
-		// - an input signature does not deserialize to a valid point
-		// Both are not expected at this stage because list is guaranteed not to be
-		// empty and individual signatures have been validated.
-		return nil, fmt.Errorf("cluster qc vote aggregation failed: %w", err)
+		return nil, fmt.Errorf("could not decode aggregated signature from hex: %w", err)
 	}
 
 	// check that aggregated signature is not identity, because an identity signature
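With this change, convertClusterQCVoteData expects one pre-aggregated BLS signature per cluster and no longer aggregates individual votes itself. A minimal standalone sketch of the decode-and-sanity-check pattern (illustrative, not the exact code above; the import path is assumed to be the onflow/crypto module, and IsBLSSignatureIdentity is the identity check flow-go uses elsewhere for this purpose):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/onflow/crypto"
)

// decodeAggregatedSignature converts a hex-encoded, pre-aggregated BLS signature into a
// crypto.Signature and rejects the identity signature, which would verify against any message.
func decodeAggregatedSignature(sigHex string) (crypto.Signature, error) {
	sig, err := hex.DecodeString(sigHex)
	if err != nil {
		return nil, fmt.Errorf("could not decode aggregated signature from hex: %w", err)
	}
	if crypto.IsBLSSignatureIdentity(sig) {
		return nil, fmt.Errorf("aggregated signature is the identity signature")
	}
	return sig, nil
}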
diff --git a/utils/unittest/execution_state.go b/utils/unittest/execution_state.go
index 48c86d5b1ea..c65d392e18e 100644
--- a/utils/unittest/execution_state.go
+++ b/utils/unittest/execution_state.go
@@ -23,7 +23,7 @@ const ServiceAccountPrivateKeySignAlgo = crypto.ECDSAP256
 const ServiceAccountPrivateKeyHashAlgo = hash.SHA2_256
 
 // Pre-calculated state commitment with root account with the above private key
-const GenesisStateCommitmentHex = "ede86048a53464cdd3cef060e5e2e1603d91c2d2a0568417501af5ca9455f430"
+const GenesisStateCommitmentHex = "d4bcde7560ab8aff284d6d4bfeafad435fa9ad50b1934733a8aeb316636981b2"
 
 var GenesisStateCommitment flow.StateCommitment
 
@@ -87,10 +87,10 @@ func genesisCommitHexByChainID(chainID flow.ChainID) string {
 		return GenesisStateCommitmentHex
 	}
 	if chainID == flow.Testnet {
-		return "ac9887d488bc90e33ce97478ca3f59c69795ed66061cf68d60d518be2fa0b658"
+		return "5caa5d4461ca577fffa09d2d97aba96672098b8126cbefffb4f2cd92e83d94f5"
 	}
 	if chainID == flow.Sandboxnet {
 		return "e1c08b17f9e5896f03fe28dd37ca396c19b26628161506924fbf785834646ea1"
 	}
-	return "eb6a5749ba1a4bd9ef5b3aa804b123e0d257f68735867e661b1a1418b26f9229"
+	return "da1c27803b64703dbdb3ce0fbbf90368f7961bc9b0e6703a67da59be2b4d0aa0"
}
diff --git a/utils/unittest/service_events_fixtures.go b/utils/unittest/service_events_fixtures.go
index f5936e0b2b2..b5538c4a727 100644
--- a/utils/unittest/service_events_fixtures.go
+++ b/utils/unittest/service_events_fixtures.go
@@ -804,13 +804,9 @@ func createEpochRecoverEvent(randomSourceHex string) cadence.Event {
 	clusterQCVoteDataType := newFlowClusterQCClusterQCVoteDataStructType()
 
 	cluster1 := cadence.NewStruct([]cadence.Value{
-		// voteSignatures
-		cadence.NewArray([]cadence.Value{
-			cadence.String("a39cd1e1bf7e2fb0609b7388ce5215a6a4c01eef2aee86e1a007faa28a6b2a3dc876e11bb97cdb26c3846231d2d01e4d"),
-			cadence.String("91673ad9c717d396c9a0953617733c128049ac1a639653d4002ab245b121df1939430e313bcbfd06948f6a281f6bf853"),
-		}).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)),
-
-		// voterIDs
+		// aggregatedSignature
+		cadence.String("b072ed22ed305acd44818a6c836e09b4e844eebde6a4fdbf5cec983e2872b86c8b0f6c34c0777bf52e385ab7c45dc55d"),
+		// node IDs of the signers
 		cadence.NewArray([]cadence.Value{
 			cadence.String("0000000000000000000000000000000000000000000000000000000000000001"),
 			cadence.String("0000000000000000000000000000000000000000000000000000000000000002"),
 	}).WithType(clusterQCVoteDataType)
 
@@ -818,13 +814,9 @@
 	cluster2 := cadence.NewStruct([]cadence.Value{
-		// voteSignatures
-		cadence.NewArray([]cadence.Value{
-			cadence.String("b2bff159971852ed63e72c37991e62c94822e52d4fdcd7bf29aaf9fb178b1c5b4ce20dd9594e029f3574cb29533b857a"),
-			cadence.String("9931562f0248c9195758da3de4fb92f24fa734cbc20c0cb80280163560e0e0348f843ac89ecbd3732e335940c1e8dccb"),
-		}).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)),
-
-		// voterIDs
+		// aggregatedSignature
+		cadence.String("899e266a543e1b3a564f68b22f7be571f2e944ec30fadc4b39e2d5f526ba044c0f3cb2648f8334fc216fa3360a0418b2"),
+		// node IDs of the signers
 		cadence.NewArray([]cadence.Value{
 			cadence.String("0000000000000000000000000000000000000000000000000000000000000003"),
 			cadence.String("0000000000000000000000000000000000000000000000000000000000000004"),
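On the protocol side, each of these fixture structs is converted into a flow.ClusterQCVoteData by the convertClusterQCVoteData change above. A minimal sketch of constructing the converted value directly, assuming the SigData/VoterIDs field names from model/flow (values copied from the cluster1 fixture for illustration):

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
)

func main() {
	// aggregated signature and voter IDs, as in the cluster1 fixture above
	sig, err := hex.DecodeString("b072ed22ed305acd44818a6c836e09b4e844eebde6a4fdbf5cec983e2872b86c8b0f6c34c0777bf52e385ab7c45dc55d")
	if err != nil {
		panic(err)
	}
	voter1, err := flow.HexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000001")
	if err != nil {
		panic(err)
	}
	voter2, err := flow.HexStringToIdentifier("0000000000000000000000000000000000000000000000000000000000000002")
	if err != nil {
		panic(err)
	}

	voteData := flow.ClusterQCVoteData{
		SigData:  sig,
		VoterIDs: []flow.Identifier{voter1, voter2},
	}
	fmt.Printf("%d voters, %d-byte aggregated signature\n", len(voteData.VoterIDs), len(voteData.SigData))
}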
@@ -878,7 +870,9 @@ func createEpochRecoverEvent(randomSourceHex string) cadence.Event {
 
 		// clusterQCs
 		cadence.NewArray([]cadence.Value{
+			// cluster 1
 			cluster1,
+			// cluster 2
 			cluster2,
 		}).WithType(cadence.NewVariableSizedArrayType(clusterQCVoteDataType)),
 
@@ -1240,8 +1234,8 @@ func newFlowClusterQCClusterQCVoteDataStructType() *cadence.StructType {
 		QualifiedIdentifier: "FlowClusterQC.ClusterQCVoteData",
 		Fields: []cadence.Field{
 			{
-				Identifier: "voteSignatures",
-				Type:       cadence.NewVariableSizedArrayType(cadence.StringType),
+				Identifier: "aggregatedSignature",
+				Type:       cadence.StringType,
 			},
 			{
 				Identifier: "voterIDs",
@@ -1457,6 +1451,25 @@ func VerifyCdcArguments(t *testing.T, expected []cadence.Value, actual []interfa
 	}
 }
 
+// InterfaceToCdcValues decodes JSON-CDC-encoded values from empty interfaces into cadence values.
+func InterfaceToCdcValues(t *testing.T, vals []interface{}) []cadence.Value {
+	decoded := make([]cadence.Value, len(vals))
+	for index, val := range vals {
+		// marshal the value back to JSON bytes
+		bz, err := json.Marshal(val)
+		require.NoError(t, err)
+
+		// decode the JSON-CDC bytes into a cadence value
+		cdcVal, err := jsoncdc.Decode(nil, bz)
+		require.NoError(t, err)
+
+		decoded[index] = cdcVal
+	}
+
+	return decoded
+}
+
 func NewFlowClusterQCClusterStructType() *cadence.StructType {
 	// A.01cf0e2f2f715450.FlowClusterQC.Cluster