EFM Recovery Integration Tests: Part 1 #6156

Merged: 67 commits, Jul 19, 2024

Commits (changes shown from all commits)
d9a99c9
trigger epoch fallback mode manually in test
kc1116 May 2, 2024
ebe557a
add flags
kc1116 May 8, 2024
0d5dcf5
add execute cmd func
kc1116 May 8, 2024
a842760
write node infos pub to dir
kc1116 May 8, 2024
c3a05be
add flag output dir
kc1116 May 8, 2024
0d0378c
generate transaction args and submit recover epoch transaction
kc1116 May 9, 2024
f0670d0
Apply suggestions from code review
kc1116 Jun 14, 2024
ee7fa73
move warning log
kc1116 Jun 17, 2024
09f29e7
update comments
kc1116 Jun 17, 2024
96b9776
Update recover_epoch_efm_test.go
kc1116 Jun 17, 2024
d48d438
Update suite.go
kc1116 Jun 17, 2024
d75354b
add ClusterQCVoteData conversion to cdc arg
kc1116 Jun 21, 2024
b93e8ac
update test
kc1116 Jun 21, 2024
ec964a8
update cluster qc vote data address, fix epoch configuration
kc1116 Jun 25, 2024
d0c40f2
fix service event decoding
kc1116 Jun 26, 2024
d3d7af6
write node pub infos file when test network is bootstrapped
kc1116 May 8, 2024
4be8e65
update test godocs
kc1116 Jun 27, 2024
62028cd
update test components
kc1116 Jun 28, 2024
7a3b6b9
Update clusters.go
kc1116 Jun 28, 2024
4ba9d6a
update go.mod(s)
kc1116 Jun 28, 2024
1a02397
Apply suggestions from code review
kc1116 Jul 9, 2024
7c6d36b
Merge branch 'khalil/efm-recovery-integration-part1' of github.com:on…
kc1116 Jul 9, 2024
d5da087
Update integration/tests/epochs/base_suite.go
kc1116 Jul 10, 2024
d70de6e
use epoch-timing-duration and epoch-timing-end-time
kc1116 Jul 10, 2024
b1c23ed
use --root-chain-id
kc1116 Jul 10, 2024
53b2bbf
Update integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go
kc1116 Jul 10, 2024
04880e7
Update integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go
kc1116 Jul 10, 2024
d052730
Update integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go
kc1116 Jul 10, 2024
9cff7d5
Update integration/tests/epochs/recover_epoch/recover_epoch_efm_test.go
kc1116 Jul 10, 2024
5943125
Merge branch 'khalil/efm-recovery-integration-part1' of github.com:on…
kc1116 Jul 10, 2024
fc2355d
remove dkg end view magic number
kc1116 Jul 10, 2024
c549f36
outdated comment
kc1116 Jul 10, 2024
9e062d1
use aggregatedSignature
kc1116 Jul 10, 2024
582ca24
make sure eventType not empty
kc1116 Jul 10, 2024
777267f
get recover epoch config from test suite
kc1116 Jul 10, 2024
0d41389
add note that cruise ctl disabled in integration tests
kc1116 Jul 10, 2024
b64a5ca
add short dkg len comment
kc1116 Jul 10, 2024
a46ae41
add godoc comment for GetContainersByRole
kc1116 Jul 10, 2024
78fb541
update executeEFMRecoverTXArgsCMD godoc
kc1116 Jul 10, 2024
3b6d707
replace use of rootcmd.execute and move logic to run package
kc1116 Jul 10, 2024
4db4afc
Apply suggestions from code review
kc1116 Jul 15, 2024
d2fb6bc
add godoc to ConvertClusterQcsCdc
kc1116 Jul 15, 2024
0d225b7
rename newFlowClusterQCVoteDataStructType -> newClusterQCVoteDataCdcType
kc1116 Jul 15, 2024
216ca7e
Update epochs.go
kc1116 Jul 16, 2024
a650821
Merge branch 'khalil/efm-recovery-integration-part1' of github.com:on…
kc1116 Jul 16, 2024
e9532ef
remove duplicate code update test
kc1116 Jul 16, 2024
a0df4a2
get current epoch target time from snapshot
kc1116 Jul 16, 2024
c331ab2
remove outdated comments "pausing container"
kc1116 Jul 16, 2024
01bfe0d
add EFM TODO to investigate short DKG phase len config not triggering…
kc1116 Jul 16, 2024
4a2a888
added root go.sum
kc1116 Jul 17, 2024
a982d13
added insecure and integration go.sum
kc1116 Jul 17, 2024
3aaf62f
fix lint
kc1116 Jul 17, 2024
3b5c953
Update key_generation.go
kc1116 Jul 17, 2024
a614c62
Merge branch 'feature/efm-recovery' into khalil/efm-recovery-integrat…
kc1116 Jul 17, 2024
f1db034
fix suite.go imports
kc1116 Jul 17, 2024
00e2f40
fix recover event fixture
kc1116 Jul 17, 2024
df64765
Merge branch 'feature/efm-recovery' of github.com:onflow/flow-go into…
kc1116 Jul 17, 2024
5a4cd70
Update suite.go
kc1116 Jul 17, 2024
1eb81e8
Update suite.go
kc1116 Jul 17, 2024
c60abe3
Update suite.go
kc1116 Jul 17, 2024
b51a73f
Update suite.go
kc1116 Jul 18, 2024
2ae7bd7
Update suite.go
kc1116 Jul 18, 2024
5b92090
Update suite.go
kc1116 Jul 18, 2024
34a49bf
Update suite.go
kc1116 Jul 18, 2024
772133f
Merge branch 'feature/efm-recovery' into khalil/efm-recovery-integrat…
kc1116 Jul 18, 2024
d1fab36
Merge branch 'feature/efm-recovery' into khalil/efm-recovery-integrat…
kc1116 Jul 18, 2024
1d2e034
Update base_suite.go
kc1116 Jul 19, 2024
2 changes: 1 addition & 1 deletion cmd/bootstrap/cmd/rootblock.go
@@ -187,7 +187,7 @@ func rootBlock(cmd *cobra.Command, args []string) {
log.Info().Msg("")

log.Info().Msg("constructing root QCs for collection node clusters")
clusterQCs := common.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks)
clusterQCs := run.ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks)
log.Info().Msg("")

log.Info().Msg("constructing root header")
208 changes: 208 additions & 0 deletions cmd/bootstrap/run/epochs.go
@@ -0,0 +1,208 @@
package run

import (
"fmt"

"github.com/rs/zerolog"

"github.com/onflow/cadence"

"github.com/onflow/flow-go/cmd/util/cmd/common"
"github.com/onflow/flow-go/fvm/systemcontracts"
"github.com/onflow/flow-go/model/bootstrap"
model "github.com/onflow/flow-go/model/bootstrap"
"github.com/onflow/flow-go/model/cluster"
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/model/flow/filter"
"github.com/onflow/flow-go/state/protocol/inmem"
)

// GenerateRecoverEpochTxArgs generates the required transaction arguments for the `recoverEpoch` transaction.
func GenerateRecoverEpochTxArgs(log zerolog.Logger,
internalNodePrivInfoDir string,
nodeConfigJson string,
collectionClusters int,
epochCounter uint64,
rootChainID flow.ChainID,
numViewsInStakingAuction uint64,
numViewsInEpoch uint64,
targetDuration uint64,
initNewEpoch bool,
snapshot *inmem.Snapshot,
) ([]cadence.Value, error) {
epoch := snapshot.Epochs().Current()

currentEpochIdentities, err := snapshot.Identities(filter.IsValidProtocolParticipant)
if err != nil {
return nil, fmt.Errorf("failed to get valid protocol participants from snapshot: %w", err)
}
// We need canonical ordering here; sanity check to enforce this:
if !currentEpochIdentities.Sorted(flow.Canonical[flow.Identity]) {
return nil, fmt.Errorf("identies from snapshot not in canonical order")
}

// separate collector nodes by internal and partner nodes
collectors := currentEpochIdentities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
internalCollectors := make(flow.IdentityList, 0)
partnerCollectors := make(flow.IdentityList, 0)

log.Info().Msg("collecting internal node network and staking keys")
internalNodes, err := common.ReadFullInternalNodeInfos(log, internalNodePrivInfoDir, nodeConfigJson)
if err != nil {
return nil, fmt.Errorf("failed to read full internal node infos: %w", err)
}

internalNodesMap := make(map[flow.Identifier]struct{})
for _, node := range internalNodes {
if !currentEpochIdentities.Exists(node.Identity()) {
return nil, fmt.Errorf("node ID found in internal node infos missing from protocol snapshot identities %s: %w", node.NodeID, err)
}
internalNodesMap[node.NodeID] = struct{}{}
}
log.Info().Msg("")

for _, collector := range collectors {
if _, ok := internalNodesMap[collector.NodeID]; ok {
internalCollectors = append(internalCollectors, collector)
} else {
partnerCollectors = append(partnerCollectors, collector)
}
}

currentEpochDKG, err := epoch.DKG()
if err != nil {
return nil, fmt.Errorf("failed to get DKG for current epoch: %w", err)
}

log.Info().Msg("computing collection node clusters")

assignments, clusters, err := common.ConstructClusterAssignment(log, partnerCollectors, internalCollectors, collectionClusters)
if err != nil {
log.Fatal().Err(err).Msg("unable to generate cluster assignment")
}
log.Info().Msg("")

log.Info().Msg("constructing root blocks for collection node clusters")
clusterBlocks := GenerateRootClusterBlocks(epochCounter, clusters)
log.Info().Msg("")

log.Info().Msg("constructing root QCs for collection node clusters")
clusterQCs := ConstructRootQCsForClusters(log, clusters, internalNodes, clusterBlocks)
log.Info().Msg("")

dkgPubKeys := make([]cadence.Value, 0)
nodeIds := make([]cadence.Value, 0)

// NOTE: The RecoveryEpoch will re-use the last successful DKG output. This means that the consensus
// committee in the RecoveryEpoch must be identical to the committee which participated in that DKG.
dkgGroupKeyCdc, cdcErr := cadence.NewString(currentEpochDKG.GroupKey().String())
if cdcErr != nil {
log.Fatal().Err(cdcErr).Msg("failed to get dkg group key cadence string")
}
dkgPubKeys = append(dkgPubKeys, dkgGroupKeyCdc)
for _, id := range currentEpochIdentities {
if id.GetRole() == flow.RoleConsensus {
dkgPubKey, keyShareErr := currentEpochDKG.KeyShare(id.GetNodeID())
if keyShareErr != nil {
log.Fatal().Err(keyShareErr).Msg(fmt.Sprintf("failed to get dkg pub key share for node: %s", id.GetNodeID()))
}
dkgPubKeyCdc, cdcErr := cadence.NewString(dkgPubKey.String())
if cdcErr != nil {
log.Fatal().Err(cdcErr).Msg(fmt.Sprintf("failed to get dkg pub key cadence string for node: %s", id.GetNodeID()))
}
dkgPubKeys = append(dkgPubKeys, dkgPubKeyCdc)
}
Review comment from a Member on lines +103 to +114:

appreciate the clear documentation:

// NOTE: The RecoveryEpoch will re-use the last successful DKG output. This means that the consensus
// committee in the RecoveryEpoch must be identical to the committee which participated in that DKG.

  • Your implementation here enforces that every consensus node in currentEpochIdentities is listed in currentEpochDKG. That is one part of the requirement 👍
  • However, what about nodes that were part of the current epoch initially, but were ejected? You filter them out here. In other words, there can be a node in currentEpochDKG that is not in currentEpochIdentities. I think your current code would not recognize this and would only list the nodes that are in currentEpochIdentities. Then, the set of consensus nodes for the recovery epoch would be smaller than the set of nodes in currentEpochDKG, so we would have an inconsistent configuration.

Implications:

  1. For recovery epochs, we might need to be able to include consensus nodes that are ejected. I am not entirely sure -- we should talk to Tarak about this edge case.

    If it is indeed a problem, I would be inclined to account for this in the EpochRecover struct, by including an IdentifierList of the nodeIDs of all nodes that are ejected.

  2. I would like to have a test case specifically covering this case for the different node roles. I think implementing this test would be beyond the scope of this PR, so if you could create an issue, that would be great.

  3. In the absence of a proper solution so far, could you:

    1. add a TODO here to clarify the limitations of the current implementation, and
    2. add a counter for how many consensus nodes we have in currentEpochIdentities; after running through this for loop, this counter must equal currentEpochDKG.Size(). Otherwise, the consensus committee is inconsistent with the DKG and we should not proceed.

Reply (Member):

For recovery epochs, we might need to be able to include consensus nodes that are ejected

This is my understanding -- we do need to include consensus nodes that are ejected. The committee and indices need to be exactly the same.

Reply (Member):

That is also my understanding, though I still have the vague hope that this might not be absolutely necessary. Trying to set up a conversation with Tarak...

Reply (Member):

Copying some context here (Slack thread):

https://github.com/onflow/flow-go/blob/master/module/signature/threshold.go#L7
Here we need to pass in the size of the original DKG committee (which equals the length of the []PublicKey shares vector).

For this function (it is the recipient of the DKG information):
https://github.com/onflow/crypto/blob/main/bls_thresholdsign.go#L113
we concluded that ideally sharePublicKeys []PublicKey would be a map from the DKG's threshold participant index to PublicKey.
However, we thought that this change is not absolutely necessary, if we relied on the requirement that DKG indices are consecutive from 0,...,n-1 for n the number of DKG participants.

In summary, I think there are two conventions currently used in the code base:

  1. The convention that the DKG committee is identical to the consensus committee. This is the convention we want to generalize.
     Specifically, we want to allow the consensus committee to be a subset of the DKG committee (this is important when we have ejected nodes that participated in the DKG but, since they are ejected, are not part of the consensus committee for the recovery epoch). Let's refer to the set of nodeIDs that participated in the DKG as D.
     Furthermore, we want to future-proof our implementation and allow the consensus committee to be larger than the DKG committee (because consensus can likely scale to a larger number of participants than DKG can). Let's refer to the set of nodeIDs in the consensus committee as C.
     Therefore, we conclude (Yurii phrased this very nicely): the mapping from nodeID to DKG-index would have the set intersection D ∩ C as its domain; the set of DKG-indices would be the map's codomain.
  2. An independent degree of freedom, in my opinion, is the convention that the set of DKG-indices is always a consecutive sequence 0,...,n-1 for n the number of DKG participants.
     In our meeting I had suggested to also remove this assumption. However, I am not sure anymore that this is such a good idea, because it might require changes specifically on the crypto layer.
     If our implementation becomes super messy with convention 2 in place, I think that would be a solid reason to remove this convention. But I think it could very well be possible that this is not the case.

In my head, the biggest source of potential confusion is that we refer to the "DKG-indices" as indices -- and we also have the notion of indices in the consensus committee (based on canonical ordering).
Maybe it would help to change our terminology and call them "DKG-identifiers", which are non-negative integers. We could retain the convention that the "DKG-identifiers" of n DKG participants always cover the interval 0,...,n-1.
In other words, I think calling them something different than indices might already alleviate lots of possible confusion ... while the convention that they are the numbers 0,...,n-1 could be treated as an implementation detail.

nodeIdCdc, err := cadence.NewString(id.GetNodeID().String())
if err != nil {
log.Fatal().Err(err).Msg(fmt.Sprintf("failed to convert node ID to cadence string: %s", id.GetNodeID()))
}
nodeIds = append(nodeIds, nodeIdCdc)
}
clusterQCAddress := systemcontracts.SystemContractsForChain(rootChainID).ClusterQC.Address.String()
qcVoteData, err := common.ConvertClusterQcsCdc(clusterQCs, clusters, clusterQCAddress)
if err != nil {
log.Fatal().Err(err).Msg("failed to convert cluster qcs to cadence type")
}

currEpochFinalView, err := epoch.FinalView()
if err != nil {
log.Fatal().Err(err).Msg("failed to get final view of current epoch")
}

currEpochTargetEndTime, err := epoch.TargetEndTime()
if err != nil {
log.Fatal().Err(err).Msg("failed to get target end time of current epoch")
}

args := []cadence.Value{
// epoch start view
cadence.NewUInt64(currEpochFinalView + 1),
// staking phase end view
cadence.NewUInt64(currEpochFinalView + numViewsInStakingAuction),
// epoch end view
cadence.NewUInt64(currEpochFinalView + numViewsInEpoch),
// target duration
cadence.NewUInt64(targetDuration),
// target end time
cadence.NewUInt64(currEpochTargetEndTime),
// clusters,
common.ConvertClusterAssignmentsCdc(assignments),
// qcVoteData
cadence.NewArray(qcVoteData),
// dkg pub keys
cadence.NewArray(dkgPubKeys),
// node ids
cadence.NewArray(nodeIds),
// recover the network by initializing a new recover epoch which will increment the smart contract epoch counter
// or overwrite the epoch metadata for the current epoch
cadence.NewBool(initNewEpoch),
}

return args, nil
}

// ConstructRootQCsForClusters constructs a root QC for each cluster in the list.
// Args:
// - log: the logger instance.
// - clusterList: list of clusters
// - nodeInfos: list of NodeInfos (must contain all internal nodes)
// - clusterBlocks: list of root blocks (one for each cluster)
// Returns:
// - flow.AssignmentList: the generated assignment list.
// - flow.ClusterList: the generate collection cluster list.
func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate {
if len(clusterBlocks) != len(clusterList) {
log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)).
Msg("number of clusters needs to equal number of cluster blocks")
}

qcs := make([]*flow.QuorumCertificate, len(clusterBlocks))
for i, cluster := range clusterList {
signers := filterClusterSigners(cluster, nodeInfos)

qc, err := GenerateClusterRootQC(signers, cluster, clusterBlocks[i])
if err != nil {
log.Fatal().Err(err).Int("cluster index", i).Msg("generating collector cluster root QC failed")
}
qcs[i] = qc
}

return qcs
}

// Filters a list of nodes to include only nodes that will sign the QC for the
// given cluster. The resulting list of nodes is only nodes that are in the
// given cluster AND are not partner nodes (ie. we have the private keys).
func filterClusterSigners(cluster flow.IdentitySkeletonList, nodeInfos []model.NodeInfo) []model.NodeInfo {
var filtered []model.NodeInfo
for _, node := range nodeInfos {
_, isInCluster := cluster.ByNodeID(node.NodeID)
isPrivateKeyAvailable := node.Type() == model.NodeInfoTypePrivate

if isInCluster && isPrivateKeyAvailable {
filtered = append(filtered, node)
}
}

return filtered
}
23 changes: 21 additions & 2 deletions cmd/bootstrap/utils/key_generation.go
@@ -7,14 +7,15 @@ import (
gohash "hash"
"io"

"github.com/onflow/crypto"
"golang.org/x/crypto/hkdf"

"github.com/onflow/crypto"

sdk "github.com/onflow/flow-go-sdk"
sdkcrypto "github.com/onflow/flow-go-sdk/crypto"

"github.com/onflow/flow-go/fvm/systemcontracts"
"github.com/onflow/flow-go/model/bootstrap"
model "github.com/onflow/flow-go/model/bootstrap"
"github.com/onflow/flow-go/model/encodable"
"github.com/onflow/flow-go/model/flow"
)
@@ -296,3 +297,21 @@ func WriteStakingNetworkingKeyFiles(nodeInfos []bootstrap.NodeInfo, write WriteJ

return nil
}

// WriteNodeInternalPubInfos writes the `node-internal-infos.pub.json` file.
// In a nutshell, this file contains the Role, address and weight for all authorized nodes.
func WriteNodeInternalPubInfos(nodeInfos []bootstrap.NodeInfo, write WriteJSONFileFunc) error {
configs := make([]model.NodeConfig, len(nodeInfos))
for i, nodeInfo := range nodeInfos {
configs[i] = model.NodeConfig{
Role: nodeInfo.Role,
Address: nodeInfo.Address,
Weight: nodeInfo.Weight,
}
}
err := write(bootstrap.PathNodeInfosPub, configs)
if err != nil {
return err
}
return nil
}
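As a rough illustration of what WriteNodeInternalPubInfos serializes, here is a sketch that is not part of this PR: the addresses and weights are made up, and json.MarshalIndent stands in for the WriteJSONFileFunc callback used by the real bootstrap tooling.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/onflow/flow-go/model/bootstrap"
	"github.com/onflow/flow-go/model/flow"
)

// Prints roughly the payload shape that ends up in node-internal-infos.pub.json:
// one NodeConfig entry (role, address, weight) per authorized node.
func main() {
	configs := []bootstrap.NodeConfig{
		{Role: flow.RoleCollection, Address: "collection-001.example.com:3569", Weight: 100}, // placeholder
		{Role: flow.RoleConsensus, Address: "consensus-001.example.com:3569", Weight: 100},   // placeholder
	}
	out, err := json.MarshalIndent(configs, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}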
100 changes: 45 additions & 55 deletions cmd/util/cmd/common/clusters.go
@@ -7,11 +7,8 @@ import (
"github.com/rs/zerolog"

"github.com/onflow/cadence"
cdcCommon "github.com/onflow/cadence/runtime/common"

"github.com/onflow/flow-go/cmd/bootstrap/run"
"github.com/onflow/flow-go/model/bootstrap"
model "github.com/onflow/flow-go/model/bootstrap"
"github.com/onflow/flow-go/model/cluster"
"github.com/onflow/flow-go/model/flow"
"github.com/onflow/flow-go/model/flow/assignment"
"github.com/onflow/flow-go/model/flow/factory"
@@ -114,36 +111,6 @@ func ConstructClusterAssignment(log zerolog.Logger, partnerNodes, internalNodes
return assignments, clusters, nil
}

// ConstructRootQCsForClusters constructs a root QC for each cluster in the list.
// Args:
// - log: the logger instance.
// - clusterList: list of clusters
// - nodeInfos: list of NodeInfos (must contain all internal nodes)
// - clusterBlocks: list of root blocks for each cluster
// Returns:
// - flow.AssignmentList: the generated assignment list.
// - flow.ClusterList: the generate collection cluster list.
func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate {

if len(clusterBlocks) != len(clusterList) {
log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)).
Msg("number of clusters needs to equal number of cluster blocks")
}

qcs := make([]*flow.QuorumCertificate, len(clusterBlocks))
for i, cluster := range clusterList {
signers := filterClusterSigners(cluster, nodeInfos)

qc, err := run.GenerateClusterRootQC(signers, cluster, clusterBlocks[i])
if err != nil {
log.Fatal().Err(err).Int("cluster index", i).Msg("generating collector cluster root QC failed")
}
qcs[i] = qc
}

return qcs
}

// ConvertClusterAssignmentsCdc converts golang cluster assignments type to Cadence type `[[String]]`.
func ConvertClusterAssignmentsCdc(assignments flow.AssignmentList) cadence.Array {
stringArrayType := cadence.NewVariableSizedArrayType(cadence.StringType)
@@ -163,8 +130,18 @@ func ConvertClusterAssignmentsCdc(assignments flow.AssignmentList) cadence.Array
}

// ConvertClusterQcsCdc converts cluster QCs from `QuorumCertificate` type to `ClusterQCVoteData` type.
func ConvertClusterQcsCdc(qcs []*flow.QuorumCertificate, clusterList flow.ClusterList) ([]*flow.ClusterQCVoteData, error) {
voteData := make([]*flow.ClusterQCVoteData, len(qcs))
// Args:
// - qcs: list of quorum certificates.
// - clusterList: the list of cluster lists each used to generate one of the quorum certificates in qcs.
// - flowClusterQCAddress: the FlowClusterQC contract address where the ClusterQCVoteData type is defined.
//
// Returns:
// - []cadence.Value: cadence representation of the list of cluster qcs.
// - error: error if the cluster qcs and cluster lists don't match in size or
// signature indices decoding fails.
func ConvertClusterQcsCdc(qcs []*flow.QuorumCertificate, clusterList flow.ClusterList, flowClusterQCAddress string) ([]cadence.Value, error) {
voteDataType := newClusterQCVoteDataCdcType(flowClusterQCAddress)
qcVoteData := make([]cadence.Value, len(qcs))
for i, qc := range qcs {
c, ok := clusterList.ByIndex(uint(i))
if !ok {
@@ -174,29 +151,42 @@
if err != nil {
return nil, fmt.Errorf("could not decode signer indices: %w", err)
}
voteData[i] = &flow.ClusterQCVoteData{
SigData: qc.SigData,
VoterIDs: voterIds,
cdcVoterIds := make([]cadence.Value, len(voterIds))
for i, id := range voterIds {
cdcVoterIds[i] = cadence.String(id.String())
}
}

return voteData, nil
}
qcVoteData[i] = cadence.NewStruct([]cadence.Value{
// aggregatedSignature
cadence.String(fmt.Sprintf("%#x", qc.SigData)),
// Node IDs of signers
cadence.NewArray(cdcVoterIds).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)),
}).WithType(voteDataType)

// Filters a list of nodes to include only nodes that will sign the QC for the
// given cluster. The resulting list of nodes is only nodes that are in the
// given cluster AND are not partner nodes (ie. we have the private keys).
func filterClusterSigners(cluster flow.IdentitySkeletonList, nodeInfos []model.NodeInfo) []model.NodeInfo {
}

var filtered []model.NodeInfo
for _, node := range nodeInfos {
_, isInCluster := cluster.ByNodeID(node.NodeID)
isNotPartner := node.Type() == model.NodeInfoTypePrivate
return qcVoteData, nil
}

if isInCluster && isNotPartner {
filtered = append(filtered, node)
}
// newClusterQCVoteDataCdcType returns the FlowClusterQC cadence struct type.
func newClusterQCVoteDataCdcType(clusterQcAddress string) *cadence.StructType {

// FlowClusterQC.ClusterQCVoteData
address, _ := cdcCommon.HexToAddress(clusterQcAddress)
location := cdcCommon.NewAddressLocation(nil, address, "FlowClusterQC")

return &cadence.StructType{
Location: location,
QualifiedIdentifier: "FlowClusterQC.ClusterQCVoteData",
Fields: []cadence.Field{
{
Identifier: "aggregatedSignature",
Type: cadence.StringType,
},
{
Identifier: "voterIDs",
Type: cadence.NewVariableSizedArrayType(cadence.StringType),
},
},
}

return filtered
}
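Finally, a brief sketch (not part of this diff) of how the relocated ConvertClusterQcsCdc combines with the FlowClusterQC contract address, mirroring its call site in GenerateRecoverEpochTxArgs above; the helper name qcVoteDataArg and its inputs are assumptions for illustration.

package sketch

import (
	"fmt"

	"github.com/onflow/cadence"

	"github.com/onflow/flow-go/cmd/util/cmd/common"
	"github.com/onflow/flow-go/fvm/systemcontracts"
	"github.com/onflow/flow-go/model/flow"
)

// qcVoteDataArg converts per-cluster root QCs into the Cadence array expected by the
// recoverEpoch transaction. The qcs and clusters are assumed to come from the bootstrap
// flow shown in epochs.go; chainID selects the FlowClusterQC contract address.
func qcVoteDataArg(qcs []*flow.QuorumCertificate, clusters flow.ClusterList, chainID flow.ChainID) (cadence.Value, error) {
	clusterQCAddress := systemcontracts.SystemContractsForChain(chainID).ClusterQC.Address.String()
	voteData, err := common.ConvertClusterQcsCdc(qcs, clusters, clusterQCAddress)
	if err != nil {
		return nil, fmt.Errorf("failed to convert cluster QCs to Cadence values: %w", err)
	}
	// Each element is a FlowClusterQC.ClusterQCVoteData struct; the array is passed
	// as the qcVoteData argument of the recoverEpoch transaction.
	return cadence.NewArray(voteData), nil
}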