@@ -897,35 +897,43 @@ func TestDKGResharingNewNodesThreshold(t *testing.T) {

}

- // Test resharing to a different set of nodes with one common
+ // Test resharing to a different set of nodes with two common.
func TestDKGResharingNewNodes(t *testing.T) {
oldPubs, oldPrivs, dkgs := generate(defaultN, vss.MinimumT(defaultN))
fullExchange(t, dkgs, true)

shares := make([]*DistKeyShare, len(dkgs))
sshares := make([]*share.PriShare, len(dkgs))
+
for i, dkg := range dkgs {
share, err := dkg.DistKeyShare()
require.NoError(t, err)
shares[i] = share
sshares[i] = shares[i].Share
}
+
// start resharing to a different group
+
oldN := defaultN
oldT := len(shares[0].Commits)
newN := oldN + 1
newT := oldT + 1
newPrivs := make([]kyber.Scalar, newN)
newPubs := make([]kyber.Point, newN)
+
+ // new[0], new[1] = old[-1], old[-2]
newPrivs[0] = oldPrivs[oldN-1]
newPubs[0] = oldPubs[oldN-1]
- for i := 1; i < newN; i++ {
+ newPrivs[1] = oldPrivs[oldN-2]
+ newPubs[1] = oldPubs[oldN-2]
+
+ for i := 2; i < newN; i++ {
newPrivs[i], newPubs[i] = genPair()
}

- // creating the old dkgs and new dkgs
+ // creating the old dkgs
+
oldDkgs := make([]*DistKeyGenerator, oldN)
- newDkgs := make([]*DistKeyGenerator, newN)
var err error
for i := 0; i < oldN; i++ {
c := &Config{
@@ -937,26 +945,37 @@ func TestDKGResharingNewNodes(t *testing.T) {
Threshold: newT,
OldThreshold: oldT,
}
+
oldDkgs[i], err = NewDistKeyHandler(c)
require.NoError(t, err)
- if i == oldN-1 {
+
+ // because the node's public key is already in newPubs
+ if i >= oldN-2 {
require.True(t, oldDkgs[i].canReceive)
require.True(t, oldDkgs[i].canIssue)
require.True(t, oldDkgs[i].isResharing)
require.True(t, oldDkgs[i].newPresent)
require.Equal(t, oldDkgs[i].oidx, i)
- require.Equal(t, 0, oldDkgs[i].nidx)
+ require.Equal(t, oldN-i-1, oldDkgs[i].nidx)
continue
}
+
require.False(t, oldDkgs[i].canReceive)
require.True(t, oldDkgs[i].canIssue)
require.True(t, oldDkgs[i].isResharing)
require.False(t, oldDkgs[i].newPresent)
+ require.Equal(t, 0, oldDkgs[i].nidx) // default for nidx
require.Equal(t, oldDkgs[i].oidx, i)
}
- // the first one is the last old one
- newDkgs[0] = oldDkgs[oldN-1]
- for i := 1; i < newN; i++ {
+
+ // creating the new dkgs
+
+ newDkgs := make([]*DistKeyGenerator, newN)
+
+ newDkgs[0] = oldDkgs[oldN-1] // the first one is the last old one
+ newDkgs[1] = oldDkgs[oldN-2] // the second one is the second-to-last old one
+
+ for i := 2; i < newN; i++ {
c := &Config{
Suite: suite,
Longterm: newPrivs[i],
@@ -966,27 +985,40 @@ func TestDKGResharingNewNodes(t *testing.T) {
Threshold: newT,
OldThreshold: oldT,
}
+
newDkgs[i], err = NewDistKeyHandler(c)
+
require.NoError(t, err)
require.True(t, newDkgs[i].canReceive)
require.False(t, newDkgs[i].canIssue)
require.True(t, newDkgs[i].isResharing)
require.True(t, newDkgs[i].newPresent)
require.Equal(t, newDkgs[i].nidx, i)
+ // each old dkg acts as a verifier
+ require.Len(t, newDkgs[i].Verifiers(), oldN)
}

// full secret sharing exchange
+
// 1. broadcast deals
- deals := make([]map[int]*Deal, 0, newN*newN)
- for _, dkg := range oldDkgs {
+ deals := make([]map[int]*Deal, len(oldDkgs))
+
+ for i, dkg := range oldDkgs {
localDeals, err := dkg.Deals()
- require.Nil(t, err)
- deals = append(deals, localDeals)
+ require.NoError(t, err)
+
+ // each old DKG will send a deal to each other dkg, including
+ // itself.
+ require.Len(t, localDeals, newN)
+
+ deals[i] = localDeals
+
v, exists := dkg.verifiers[uint32(dkg.oidx)]
- if dkg.canReceive && dkg.nidx == 0 {
- // this node should save its own response for its own deal
- lenResponses := len(v.Aggregator.Responses())
- require.Equal(t, 1, lenResponses)
+ if dkg.canReceive && dkg.nidx <= 1 {
+ // staying nodes don't save their responses locally because they
+ // will broadcast them to the old committee.
+ require.Len(t, v.Responses(), 0)
+ require.True(t, exists)
} else {
// no verifiers since these dkgs are not in the new list
require.False(t, exists)
@@ -995,11 +1027,12 @@ func TestDKGResharingNewNodes(t *testing.T) {

// the index key indicates the dealer index the responses are for
resps := make(map[int][]*Response)
+
for i, localDeals := range deals {
- for j, d := range localDeals {
- dkg := newDkgs[j]
+ for dest, d := range localDeals {
+ dkg := newDkgs[dest]
resp, err := dkg.ProcessDeal(d)
- require.Nil(t, err)
+ require.NoError(t, err)
require.Equal(t, vss.StatusApproval, resp.Response.Status)
resps[i] = append(resps[i], resp)
}
@@ -1008,37 +1041,27 @@ func TestDKGResharingNewNodes(t *testing.T) {
// all new dkgs should have the same length of verifiers map
for _, dkg := range newDkgs {
// one deal per old participant
- require.Equal(t, oldN, len(dkg.verifiers), "dkg nidx %d failing", dkg.nidx)
+ require.Len(t, dkg.verifiers, oldN, "dkg nidx %d failing", dkg.nidx)
}

// 2. Broadcast responses
for _, dealResponses := range resps {
for _, resp := range dealResponses {
- for _, dkg := range oldDkgs {
- // Ignore messages from ourselves
- if resp.Response.Index == uint32(dkg.nidx) {
- continue
- }
+ // the last two will be processed while doing this step on the
+ // newDkgs, since they are in the new set.
+ for _, dkg := range oldDkgs[:oldN-2] {
j, err := dkg.ProcessResponse(resp)
- //fmt.Printf("old dkg %d process responses from new dkg %d about deal %d\n", dkg.oidx, dkg.nidx, resp.Index)
- if err != nil {
- fmt.Printf("old dkg at (oidx %d, nidx %d) has received response from idx %d for dealer idx %d\n", dkg.oidx, dkg.nidx, resp.Response.Index, resp.Index)
- }
- require.Nil(t, err)
+ require.NoError(t, err, "old dkg at (oidx %d, nidx %d) has received response from idx %d for dealer idx %d\n", dkg.oidx, dkg.nidx, resp.Response.Index, resp.Index)
require.Nil(t, j)
}

- for _, dkg := range newDkgs[1:] {
+ for _, dkg := range newDkgs {
// Ignore messages from ourselves
if resp.Response.Index == uint32(dkg.nidx) {
continue
}
j, err := dkg.ProcessResponse(resp)
- //fmt.Printf("new dkg %d process responses from new dkg %d about deal %d\n", dkg.nidx, dkg.nidx, resp.Index)
- if err != nil {
- fmt.Printf("new dkg at nidx %d has received response from idx %d for deal %d\n", dkg.nidx, resp.Response.Index, resp.Index)
- }
- require.Nil(t, err)
+ require.NoError(t, err, "new dkg at nidx %d has received response from idx %d for deal %d\n", dkg.nidx, resp.Response.Index, resp.Index)
require.Nil(t, j)
}

@@ -1058,6 +1081,16 @@ func TestDKGResharingNewNodes(t *testing.T) {
}
}

+ // make sure the new dkg members can certify
+ for _, dkg := range newDkgs {
+ require.True(t, dkg.Certified(), "new dkg %d can't certify", dkg.nidx)
+ }
+
+ // make sure the old dkg members can certify
+ for _, dkg := range oldDkgs {
+ require.True(t, dkg.Certified(), "old dkg %d can't certify", dkg.oidx)
+ }
+
newShares := make([]*DistKeyShare, newN)
newSShares := make([]*share.PriShare, newN)
for i := range newDkgs {
@@ -1066,6 +1099,7 @@ func TestDKGResharingNewNodes(t *testing.T) {
newShares[i] = dks
newSShares[i] = newShares[i].Share
}
+
// check shares reconstruct to the same secret
oldSecret, err := share.RecoverSecret(suite, sshares, oldT, oldN)
require.NoError(t, err)
@@ -1138,6 +1172,7 @@ func TestDKGResharingPartialNewNodes(t *testing.T) {
require.False(t, totalDkgs[i].newPresent)
require.Equal(t, totalDkgs[i].oidx, i)
}
+
// the first one is the last old one
for i := oldN; i < total; i++ {
newIdx := i - oldN + newOffset
@@ -1172,10 +1207,11 @@ func TestDKGResharingPartialNewNodes(t *testing.T) {
deals = append(deals, localDeals)
v, exists := dkg.verifiers[uint32(dkg.oidx)]
if dkg.canReceive && dkg.newPresent {
- // this node should save its own response for its own deal
+ // staying nodes don't process their responses locally because they
+ // broadcast them for the old committee to receive.
lenResponses := len(v.Aggregator.Responses())
require.True(t, exists)
- require.Equal(t, 1, lenResponses)
+ require.Equal(t, 0, lenResponses)
} else {
require.False(t, exists)
}