Commit 44f07e3

Merge pull request #471 from dedis/dkg-reshare-certified
Pedersen: Fixes Responses not sent while resharing
2 parents c69494d + 43decfe

File tree: 3 files changed (+90, -47 lines)

share/dkg/pedersen/dkg.go (+14 -8)
@@ -258,9 +258,9 @@ func NewDistKeyGenerator(suite Suite, longterm kyber.Scalar, participants []kybe
 // and is ommitted from the returned map. To know which participant a deal
 // belongs to, loop over the keys as indices in the list of new participants:
 //
-//   for i,dd := range distDeals {
-//       sendTo(participants[i],dd)
-//   }
+//   for i,dd := range distDeals {
+//       sendTo(participants[i],dd)
+//   }
 //
 // If this method cannot process its own Deal, that indicates a
 // severe problem with the configuration or implementation and
@@ -292,7 +292,10 @@ func (d *DistKeyGenerator) Deals() (map[int]*Deal, error) {
             return nil, err
         }
 
-        if i == int(d.nidx) && d.newPresent {
+        // if there is a resharing in progress, nodes that stay must send their
+        // deals to the old nodes, otherwise old nodes won't get responses from
+        // staying nodes and won't be certified.
+        if i == int(d.nidx) && d.newPresent && !d.isResharing {
             if d.processed {
                 continue
             }
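
The caller-visible effect of the new !d.isResharing guard: during a resharing, a node present in both the old and the new group no longer consumes its own deal silently. Deals() now returns that deal like any other entry, and the Response obtained from processing it must be broadcast so that the old committee also collects it and can become certified. Below is a minimal dealer-side sketch, not part of the commit: the Transport interface, distributeDeals, and the import paths are assumptions for illustration; only Deals, ProcessDeal, Deal and Response come from the package.

    import (
        "go.dedis.ch/kyber/v3"
        pedersen "go.dedis.ch/kyber/v3/share/dkg/pedersen"
    )

    // Transport is a hypothetical network abstraction for this sketch.
    type Transport interface {
        Send(to kyber.Point, msg interface{})
        Broadcast(msg interface{})
    }

    // distributeDeals sends each deal to its recipient in the new group.
    // During a resharing the dealer's own deal is present in the map: the
    // dealer processes it itself and broadcasts the resulting response, so
    // that old nodes receive responses from staying nodes too.
    func distributeDeals(d *pedersen.DistKeyGenerator, nidx int, newNodes []kyber.Point, net Transport) error {
        deals, err := d.Deals()
        if err != nil {
            return err
        }
        for i, deal := range deals {
            if i == nidx {
                resp, err := d.ProcessDeal(deal)
                if err != nil {
                    return err
                }
                net.Broadcast(resp)
                continue
            }
            net.Send(newNodes[i], deal)
        }
        return nil
    }
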
@@ -378,11 +381,14 @@ func (d *DistKeyGenerator) ProcessDeal(dd *Deal) (*Response, error) {
         }
     }
 
-    // if the dealer in the old list is also present in the new list, then set
+    // If the dealer in the old list is also present in the new list, then set
     // his response to approval since he won't issue his own response for his
-    // own deal
+    // own deal.
+    // In the case of resharing the dealer will issue his own response in order
+    // for the old comities to get responses and be certified, which is why we
+    // don't add it manually there.
     newIdx, found := findPub(d.c.NewNodes, pub)
-    if found {
+    if found && !d.isResharing {
         d.verifiers[dd.Index].UnsafeSetResponseDKG(uint32(newIdx), vss.StatusApproval)
     }
 
@@ -807,7 +813,7 @@ func (d *DistKeyGenerator) initVerifiers(c *Config) error {
     return nil
 }
 
-//Renew adds the new distributed key share g (with secret 0) to the distributed key share d.
+// Renew adds the new distributed key share g (with secret 0) to the distributed key share d.
 func (d *DistKeyShare) Renew(suite Suite, g *DistKeyShare) (*DistKeyShare, error) {
     // Check G(0) = 0*G.
     if !g.Public().Equal(suite.Point().Base().Mul(suite.Scalar().Zero(), nil)) {
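
Continuing the sketch, the nodes that only belong to the old group are pure verifiers: they process every broadcast Response, including the ones staying dealers now issue for their own deals, until certification is reached. recvResponse below is a hypothetical blocking receive; ProcessResponse and Certified are real methods of the generator.

    // collectResponses drains broadcast responses until this old-group node
    // has certified the resharing.
    func collectResponses(d *pedersen.DistKeyGenerator, recvResponse func() *pedersen.Response) error {
        for !d.Certified() {
            resp := recvResponse()
            // ProcessResponse returns a Justification only when a complaint
            // has to be answered; nil is the normal case.
            if _, err := d.ProcessResponse(resp); err != nil {
                return err
            }
        }
        return nil
    }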

share/dkg/pedersen/dkg_test.go (+75 -39)
@@ -897,35 +897,43 @@ func TestDKGResharingNewNodesThreshold(t *testing.T) {
 
 }
 
-// Test resharing to a different set of nodes with one common
+// Test resharing to a different set of nodes with two common.
 func TestDKGResharingNewNodes(t *testing.T) {
     oldPubs, oldPrivs, dkgs := generate(defaultN, vss.MinimumT(defaultN))
     fullExchange(t, dkgs, true)
 
     shares := make([]*DistKeyShare, len(dkgs))
     sshares := make([]*share.PriShare, len(dkgs))
+
     for i, dkg := range dkgs {
         share, err := dkg.DistKeyShare()
         require.NoError(t, err)
         shares[i] = share
         sshares[i] = shares[i].Share
     }
+
     // start resharing to a different group
+
     oldN := defaultN
     oldT := len(shares[0].Commits)
     newN := oldN + 1
     newT := oldT + 1
     newPrivs := make([]kyber.Scalar, newN)
     newPubs := make([]kyber.Point, newN)
+
+    // new[0], new[1] = old[-1], old[-2]
     newPrivs[0] = oldPrivs[oldN-1]
     newPubs[0] = oldPubs[oldN-1]
-    for i := 1; i < newN; i++ {
+    newPrivs[1] = oldPrivs[oldN-2]
+    newPubs[1] = oldPubs[oldN-2]
+
+    for i := 2; i < newN; i++ {
         newPrivs[i], newPubs[i] = genPair()
     }
 
-    // creating the old dkgs and new dkgs
+    // creating the old dkgs
+
     oldDkgs := make([]*DistKeyGenerator, oldN)
-    newDkgs := make([]*DistKeyGenerator, newN)
     var err error
     for i := 0; i < oldN; i++ {
         c := &Config{
@@ -937,26 +945,37 @@ func TestDKGResharingNewNodes(t *testing.T) {
             Threshold:    newT,
             OldThreshold: oldT,
         }
+
         oldDkgs[i], err = NewDistKeyHandler(c)
         require.NoError(t, err)
-        if i == oldN-1 {
+
+        // because the node's public key is already in newPubs
+        if i >= oldN-2 {
             require.True(t, oldDkgs[i].canReceive)
             require.True(t, oldDkgs[i].canIssue)
             require.True(t, oldDkgs[i].isResharing)
             require.True(t, oldDkgs[i].newPresent)
             require.Equal(t, oldDkgs[i].oidx, i)
-            require.Equal(t, 0, oldDkgs[i].nidx)
+            require.Equal(t, oldN-i-1, oldDkgs[i].nidx)
             continue
         }
+
         require.False(t, oldDkgs[i].canReceive)
         require.True(t, oldDkgs[i].canIssue)
         require.True(t, oldDkgs[i].isResharing)
         require.False(t, oldDkgs[i].newPresent)
+        require.Equal(t, 0, oldDkgs[i].nidx) // default for nidx
         require.Equal(t, oldDkgs[i].oidx, i)
     }
-    // the first one is the last old one
-    newDkgs[0] = oldDkgs[oldN-1]
-    for i := 1; i < newN; i++ {
+
+    // creating the new dkg
+
+    newDkgs := make([]*DistKeyGenerator, newN)
+
+    newDkgs[0] = oldDkgs[oldN-1] // the first one is the last old one
+    newDkgs[1] = oldDkgs[oldN-2] // the second one is the before-last old one
+
+    for i := 2; i < newN; i++ {
         c := &Config{
             Suite:    suite,
             Longterm: newPrivs[i],
@@ -966,27 +985,40 @@ func TestDKGResharingNewNodes(t *testing.T) {
             Threshold:    newT,
             OldThreshold: oldT,
         }
+
         newDkgs[i], err = NewDistKeyHandler(c)
+
         require.NoError(t, err)
         require.True(t, newDkgs[i].canReceive)
         require.False(t, newDkgs[i].canIssue)
         require.True(t, newDkgs[i].isResharing)
         require.True(t, newDkgs[i].newPresent)
         require.Equal(t, newDkgs[i].nidx, i)
+        // each old dkg act as a verifier
+        require.Len(t, newDkgs[i].Verifiers(), oldN)
     }
 
     // full secret sharing exchange
+
     // 1. broadcast deals
-    deals := make([]map[int]*Deal, 0, newN*newN)
-    for _, dkg := range oldDkgs {
+    deals := make([]map[int]*Deal, len(oldDkgs))
+
+    for i, dkg := range oldDkgs {
         localDeals, err := dkg.Deals()
-        require.Nil(t, err)
-        deals = append(deals, localDeals)
+        require.NoError(t, err)
+
+        // each old DKG will sent a deal to each other dkg, including
+        // themselves.
+        require.Len(t, localDeals, newN)
+
+        deals[i] = localDeals
+
         v, exists := dkg.verifiers[uint32(dkg.oidx)]
-        if dkg.canReceive && dkg.nidx == 0 {
-            // this node should save its own response for its own deal
-            lenResponses := len(v.Aggregator.Responses())
-            require.Equal(t, 1, lenResponses)
+        if dkg.canReceive && dkg.nidx <= 1 {
+            // staying nodes don't save their responses locally because they
+            // will broadcast them for the old comities.
+            require.Len(t, v.Responses(), 0)
+            require.True(t, exists)
         } else {
             // no verifiers since these dkg are not in in the new list
             require.False(t, exists)
@@ -995,11 +1027,12 @@ func TestDKGResharingNewNodes(t *testing.T) {
 
     // the index key indicates the dealer index for which the responses are for
     resps := make(map[int][]*Response)
+
     for i, localDeals := range deals {
-        for j, d := range localDeals {
-            dkg := newDkgs[j]
+        for dest, d := range localDeals {
+            dkg := newDkgs[dest]
             resp, err := dkg.ProcessDeal(d)
-            require.Nil(t, err)
+            require.NoError(t, err)
             require.Equal(t, vss.StatusApproval, resp.Response.Status)
             resps[i] = append(resps[i], resp)
         }
@@ -1008,37 +1041,27 @@ func TestDKGResharingNewNodes(t *testing.T) {
     // all new dkgs should have the same length of verifiers map
     for _, dkg := range newDkgs {
         // one deal per old participants
-        require.Equal(t, oldN, len(dkg.verifiers), "dkg nidx %d failing", dkg.nidx)
+        require.Len(t, dkg.verifiers, oldN, "dkg nidx %d failing", dkg.nidx)
     }
 
     // 2. Broadcast responses
     for _, dealResponses := range resps {
         for _, resp := range dealResponses {
-            for _, dkg := range oldDkgs {
-                // Ignore messages from ourselves
-                if resp.Response.Index == uint32(dkg.nidx) {
-                    continue
-                }
+            // the two last ones will be processed while doing this step on the
+            // newDkgs, since they are in the new set.
+            for _, dkg := range oldDkgs[:oldN-2] {
                 j, err := dkg.ProcessResponse(resp)
-                //fmt.Printf("old dkg %d process responses from new dkg %d about deal %d\n", dkg.oidx, dkg.nidx, resp.Index)
-                if err != nil {
-                    fmt.Printf("old dkg at (oidx %d, nidx %d) has received response from idx %d for dealer idx %d\n", dkg.oidx, dkg.nidx, resp.Response.Index, resp.Index)
-                }
-                require.Nil(t, err)
+                require.NoError(t, err, "old dkg at (oidx %d, nidx %d) has received response from idx %d for dealer idx %d\n", dkg.oidx, dkg.nidx, resp.Response.Index, resp.Index)
                 require.Nil(t, j)
             }
 
-            for _, dkg := range newDkgs[1:] {
+            for _, dkg := range newDkgs {
                 // Ignore messages from ourselves
                 if resp.Response.Index == uint32(dkg.nidx) {
                     continue
                 }
                 j, err := dkg.ProcessResponse(resp)
-                //fmt.Printf("new dkg %d process responses from new dkg %d about deal %d\n", dkg.nidx, dkg.nidx, resp.Index)
-                if err != nil {
-                    fmt.Printf("new dkg at nidx %d has received response from idx %d for deal %d\n", dkg.nidx, resp.Response.Index, resp.Index)
-                }
-                require.Nil(t, err)
+                require.NoError(t, err, "new dkg at nidx %d has received response from idx %d for deal %d\n", dkg.nidx, resp.Response.Index, resp.Index)
                 require.Nil(t, j)
             }
 
@@ -1058,6 +1081,16 @@ func TestDKGResharingNewNodes(t *testing.T) {
         }
     }
 
+    // make sure the new dkg members can certify
+    for _, dkg := range newDkgs {
+        require.True(t, dkg.Certified(), "new dkg %d can't certify", dkg.nidx)
+    }
+
+    // make sure the old dkg members can certify
+    for _, dkg := range oldDkgs {
+        require.True(t, dkg.Certified(), "old dkg %d can't certify", dkg.oidx)
+    }
+
     newShares := make([]*DistKeyShare, newN)
     newSShares := make([]*share.PriShare, newN)
     for i := range newDkgs {
@@ -1066,6 +1099,7 @@ func TestDKGResharingNewNodes(t *testing.T) {
         newShares[i] = dks
         newSShares[i] = newShares[i].Share
     }
+
     // check shares reconstruct to the same secret
     oldSecret, err := share.RecoverSecret(suite, sshares, oldT, oldN)
     require.NoError(t, err)
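
The invariant this test ultimately checks is that resharing renews the shares without moving the secret: a qualified set of old shares and a qualified set of new shares must interpolate to the same value. Condensed from the test's tail (same identifiers as in the test; the final equality assertion is paraphrased):

    // Recover the secret from the old and the new shares and compare.
    oldSecret, err := share.RecoverSecret(suite, sshares, oldT, oldN)
    require.NoError(t, err)
    newSecret, err := share.RecoverSecret(suite, newSShares, newT, newN)
    require.NoError(t, err)
    require.True(t, oldSecret.Equal(newSecret), "resharing must preserve the secret")
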
@@ -1138,6 +1172,7 @@ func TestDKGResharingPartialNewNodes(t *testing.T) {
         require.False(t, totalDkgs[i].newPresent)
         require.Equal(t, totalDkgs[i].oidx, i)
     }
+
     // the first one is the last old one
     for i := oldN; i < total; i++ {
         newIdx := i - oldN + newOffset
@@ -1172,10 +1207,11 @@ func TestDKGResharingPartialNewNodes(t *testing.T) {
         deals = append(deals, localDeals)
         v, exists := dkg.verifiers[uint32(dkg.oidx)]
         if dkg.canReceive && dkg.newPresent {
-            // this node should save its own response for its own deal
+            // staying nodes don't process their responses locally because they
+            // broadcast them for the old comities to receive the responses.
             lenResponses := len(v.Aggregator.Responses())
             require.True(t, exists)
-            require.Equal(t, 1, lenResponses)
+            require.Equal(t, 0, lenResponses)
         } else {
             require.False(t, exists)
         }

share/vss/pedersen/vss.go (+1 -0)
@@ -311,6 +311,7 @@ type Verifier struct {
 // - its longterm secret key
 // - the longterm dealer public key
 // - the list of public key of verifiers. The list MUST include the public key of this Verifier also.
+//
 // The security parameter t of the secret sharing scheme is automatically set to
 // a default safe value. If a different t value is required, it is possible to set
 // it with `verifier.SetT()`.
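
The doc comment being fixed above lists everything vss.NewVerifier needs. A minimal construction sketch, where the key-material variables are assumptions:

    // The verifier's own public key MUST appear in verifierPublicKeys.
    verifier, err := vss.NewVerifier(suite, myLongtermSecret, dealerPublicKey, verifierPublicKeys)
    if err != nil {
        return err
    }
    // t defaults to a safe value; override it if required:
    // verifier.SetT(customT)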
