Skip to content

Commit

Permalink
Apply suggestions from code review
Browse files Browse the repository at this point in the history
Co-authored-by: Alexander Hentschel <[email protected]>
Co-authored-by: Jordan Schalm <[email protected]>
  • Loading branch information
3 people authored Jul 15, 2024
1 parent 3b6d707 commit 4db4afc
Show file tree
Hide file tree
Showing 6 changed files with 19 additions and 14 deletions.
12 changes: 8 additions & 4 deletions cmd/bootstrap/run/epochs.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,18 @@ func GenerateRecoverEpochTxArgs(log zerolog.Logger,
targetDuration uint64,
targetEndTime uint64,
initNewEpoch bool,
snapshot *inmem.Snapshot) ([]cadence.Value, error) {
snapshot *inmem.Snapshot,
) ([]cadence.Value, error) {
epoch := snapshot.Epochs().Current()

currentEpochIdentities, err := snapshot.Identities(filter.IsValidProtocolParticipant)
if err != nil {
return nil, fmt.Errorf("failed to get valid protocol participants from snapshot: %w", err)
}
// We need canonical ordering here; sanity check to enforce this:
if !currentEpochIdentities.Sorted(flow.Canonical[flow.Identity]) {
return nil, fmt.Errorf("identities from snapshot not in canonical order")
}

// separate collector nodes by internal and partner nodes
collectors := currentEpochIdentities.Filter(filter.HasRole[flow.Identity](flow.RoleCollection))
Expand Down Expand Up @@ -157,12 +162,11 @@ func GenerateRecoverEpochTxArgs(log zerolog.Logger,
// - log: the logger instance.
// - clusterList: list of clusters
// - nodeInfos: list of NodeInfos (must contain all internal nodes)
// - clusterBlocks: list of root blocks for each cluster
// - clusterBlocks: list of root blocks (one for each cluster)
// Returns:
// - flow.AssignmentList: the generated assignment list.
// - flow.ClusterList: the generated collection cluster list.
func ConstructRootQCsForClusters(log zerolog.Logger, clusterList flow.ClusterList, nodeInfos []bootstrap.NodeInfo, clusterBlocks []*cluster.Block) []*flow.QuorumCertificate {

if len(clusterBlocks) != len(clusterList) {
log.Fatal().Int("len(clusterBlocks)", len(clusterBlocks)).Int("len(clusterList)", len(clusterList)).
Msg("number of clusters needs to equal number of cluster blocks")
Expand All @@ -189,7 +193,7 @@ func filterClusterSigners(cluster flow.IdentitySkeletonList, nodeInfos []model.N
var filtered []model.NodeInfo
for _, node := range nodeInfos {
_, isInCluster := cluster.ByNodeID(node.NodeID)
isNotPartner := node.Type() == model.NodeInfoTypePrivate
isPrivateKeyAvailable := node.Type() == model.NodeInfoTypePrivate

Check failure on line 196 in cmd/bootstrap/run/epochs.go

View workflow job for this annotation

GitHub Actions / Lint (./)

isPrivateKeyAvailable declared and not used

Check failure on line 196 in cmd/bootstrap/run/epochs.go

View workflow job for this annotation

GitHub Actions / Lint (./)

isPrivateKeyAvailable declared and not used

Check failure on line 196 in cmd/bootstrap/run/epochs.go

View workflow job for this annotation

GitHub Actions / Lint (./)

isPrivateKeyAvailable declared and not used

if isInCluster && isPrivateKeyAvailable {

Check failure on line 198 in cmd/bootstrap/run/epochs.go

View workflow job for this annotation

GitHub Actions / Lint (./)

undefined: isNotPartner) (typecheck)

Check failure on line 198 in cmd/bootstrap/run/epochs.go

View workflow job for this annotation

GitHub Actions / Lint (./)

undefined: isNotPartner (typecheck)

Check failure on line 198 in cmd/bootstrap/run/epochs.go

View workflow job for this annotation

GitHub Actions / Lint (./)

undefined: isNotPartner) (typecheck)
filtered = append(filtered, node)
Expand Down
3 changes: 2 additions & 1 deletion cmd/bootstrap/utils/key_generation.go
Original file line number Diff line number Diff line change
Expand Up @@ -297,7 +297,8 @@ func WriteStakingNetworkingKeyFiles(nodeInfos []bootstrap.NodeInfo, write WriteJ
return nil
}

// WriteNodeInternalPubInfos writes the node-internal-infos.pub.json file.
// WriteNodeInternalPubInfos writes the `node-internal-infos.pub.json` file.
// In a nutshell, this file contains the Role, address and weight for all authorized nodes.
func WriteNodeInternalPubInfos(nodeInfos []bootstrap.NodeInfo, write WriteJSONFileFunc) error {
configs := make([]model.NodeConfig, len(nodeInfos))
for i, nodeInfo := range nodeInfos {
Expand Down
2 changes: 1 addition & 1 deletion cmd/util/cmd/common/clusters.go
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ func ConvertClusterQcsCdc(qcs []*flow.QuorumCertificate, clusterList flow.Cluste
qcVoteData[i] = cadence.NewStruct([]cadence.Value{
// aggregatedSignature
cadence.String(fmt.Sprintf("%#x", qc.SigData)),
// voterIDs
// Node IDs of signers
cadence.NewArray(cdcVoterIds).WithType(cadence.NewVariableSizedArrayType(cadence.StringType)),
}).WithType(voteDataType)

Expand Down
4 changes: 2 additions & 2 deletions cmd/util/cmd/epochs/cmd/recover.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ func init() {

func addGenerateRecoverEpochTxArgsCmdFlags() error {
generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagOut, "out", "", "file to write tx args output")
generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagAnAddress, "access-address", "", "the address of the access node used for client connections")
generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagAnAddress, "access-address", "", "the address of the access node used to retrieve the information")
generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagRootChainID, "root-chain-id", "", "the root chain id")
generateRecoverEpochTxArgsCmd.Flags().StringVar(&flagAnPubkey, "access-network-key", "", "the network key of the access node used for client connections in hex string format")
generateRecoverEpochTxArgsCmd.Flags().BoolVar(&flagAnInsecure, "insecure", false, "set to true if the protocol snapshot should be retrieved from the insecure AN endpoint")
Expand All @@ -88,7 +88,7 @@ func addGenerateRecoverEpochTxArgsCmdFlags() error {
// This is needed only if a previous recoverEpoch transaction was submitted and a race condition occurred such that:
// - the RecoveryEpoch in the admin transaction was accepted by the smart contract
// - the RecoveryEpoch service event (after sealing latency) was rejected by the Protocol State
generateRecoverEpochTxArgsCmd.Flags().BoolVar(&flagInitNewEpoch, "unsafe-overwrite-epoch-data", false, "set to true if the resulting transaction is allowed to overwrite an existing epoch data entry in the smart contract. ")
generateRecoverEpochTxArgsCmd.Flags().BoolVar(&flagInitNewEpoch, "unsafe-overwrite-epoch-data", false, "set to true if the resulting transaction is allowed to overwrite an already specified epoch in the smart contract.")

err := generateRecoverEpochTxArgsCmd.MarkFlagRequired("access-address")
if err != nil {
Expand Down
6 changes: 3 additions & 3 deletions integration/tests/epochs/base_suite.go
Original file line number Diff line number Diff line change
Expand Up @@ -184,21 +184,21 @@ func (s *BaseSuite) GetLatestFinalizedHeader(ctx context.Context) *flow.Header {
return finalized
}

// AssertInEpoch requires actual epoch counter is equal to counter provided.
// AssertInEpoch requires that the current epoch's counter (as of the latest finalized block) is equal to the counter value provided.
func (s *BaseSuite) AssertInEpoch(ctx context.Context, expectedEpoch uint64) {
actualEpoch := s.CurrentEpoch(ctx)
require.Equalf(s.T(), expectedEpoch, actualEpoch, "expected to be in epoch %d got %d", expectedEpoch, actualEpoch)
}

// CurrentEpoch returns the current epoch counter.
// CurrentEpoch returns the current epoch counter (as of the latest finalized block).
func (s *BaseSuite) CurrentEpoch(ctx context.Context) uint64 {
snapshot := s.GetLatestProtocolSnapshot(ctx)
counter, err := snapshot.Epochs().Current().Counter()
require.NoError(s.T(), err)
return counter
}

// GetLatestProtocolSnapshot returns the latest protocol snapshot.
// GetLatestProtocolSnapshot returns the protocol snapshot as of the latest finalized block.
func (s *BaseSuite) GetLatestProtocolSnapshot(ctx context.Context) *inmem.Snapshot {
snapshot, err := s.Client.GetLatestProtocolSnapshot(ctx)
require.NoError(s.T(), err)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ type RecoverEpochSuite struct {

// TestRecoverEpoch ensures that the recover epoch governance transaction flow works as expected and a network that
// enters Epoch Fallback Mode can successfully recover. This test will do the following:
// 1. Manually triggers EFM by turning off a collection node before the end of the DKG forcing the DKG to fail.
// 1. Triggers EFM by turning off the sole collection node before the end of the DKG forcing the DKG to fail.
// 2. Generates epoch recover transaction args using the epoch efm-recover-tx-args.
// 3. Submit recover epoch transaction.
// 4. Ensure expected EpochRecover event is emitted.
Expand All @@ -34,7 +34,7 @@ func (s *RecoverEpochSuite) TestRecoverEpoch() {
s.AwaitEpochPhase(s.Ctx, 0, flow.EpochPhaseSetup, 10*time.Second, 500*time.Millisecond)
// pausing collection node will force the network into EFM
ln := s.GetContainersByRole(flow.RoleCollection)[0]
_ = ln.Pause()
require.NoError(s.T(), ln.Pause())
s.AwaitFinalizedView(s.Ctx, s.GetDKGEndView(), 2*time.Minute, 500*time.Millisecond)
// start the paused collection node now that we are in EFM
require.NoError(s.T(), ln.Start())
Expand Down Expand Up @@ -91,5 +91,5 @@ func (s *RecoverEpochSuite) TestRecoverEpoch() {
require.NoError(s.T(), err)
require.Equal(s.T(), events[0].Events[0].Type, eventType)

// 4. @TODO ensure EpochRecover service event is processed by the fallback state machine and the network recovers.
// 4. TODO(EFM, #6164) ensure EpochRecover service event is processed by the fallback state machine and the network recovers.
}

0 comments on commit 4db4afc

Please sign in to comment.