diff --git a/.gitignore b/.gitignore index 07d68e0..f59e850 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,6 @@ docs/ config/ .DS_Store /bin +contracts/script/output/avs_deploy_localhost.json +contracts/script/output/tee_deploy_output_localhost.json +scripts/deploy.sh diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 8e4fd23..7dd0339 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -33,6 +33,7 @@ import ( type Config struct { ListenAddr string TimeToExpirySecs int + MinWaitSecs int EcdsaPrivateKey string EthHttpEndpoint string @@ -49,6 +50,9 @@ type Config struct { Threshold uint64 Sampling uint64 + GenTaskSampling uint64 + ExecTaskSampling uint64 + OpenTelemetry *xmetric.OpenTelemetryConfig TaskFetcher []*xtask.TaskManagerConfig @@ -56,6 +60,19 @@ type Config struct { Simulation bool } +func (cfg *Config) Init() error { + if cfg.Sampling == 0 { + cfg.Sampling = 2000 + } + if cfg.ExecTaskSampling == 0 { + cfg.ExecTaskSampling = cfg.Sampling + } + if cfg.GenTaskSampling == 0 { + cfg.GenTaskSampling = cfg.Sampling + } + return nil +} + type Aggregator struct { cfg *Config @@ -89,9 +106,10 @@ type Task struct { } func NewAggregator(ctx context.Context, cfg *Config) (*Aggregator, error) { - if cfg.Sampling == 0 { - cfg.Sampling = 2000 + if err := cfg.Init(); err != nil { + return nil, logex.Trace(err) } + logex.Info("Multi Prover Aggregator Initializing...") ecdsaPrivateKey, err := crypto.HexToECDSA(cfg.EcdsaPrivateKey) if err != nil { @@ -148,7 +166,7 @@ func NewAggregator(ctx context.Context, cfg *Config) (*Aggregator, error) { collector := xmetric.NewAggregatorCollector("avs") - taskManager, err := xtask.NewTaskManager(collector, int64(cfg.Sampling), eigenClients.EthHttpClient, cfg.TaskFetcher) + taskManager, err := xtask.NewTaskManager(collector, int64(cfg.GenTaskSampling), eigenClients.EthHttpClient, cfg.TaskFetcher) if err != nil { return nil, logex.Trace(err) } @@ -321,7 +339,7 @@ func (agg *Aggregator) submitStateHeader(ctx context.Context, req *TaskRequest) return logex.Trace(err) } if md.BatchId > 0 { - if md.BatchId%agg.cfg.Sampling != 0 { + if md.BatchId%agg.cfg.ExecTaskSampling != 0 { logex.Infof("[scroll] skip task: %#v", md) return nil } @@ -340,6 +358,7 @@ func (agg *Aggregator) submitStateHeader(ctx context.Context, req *TaskRequest) } req.Task.QuorumThresholdPercentages = types.QuorumThresholdPercentages(quorumThresholdPercentages).UnderlyingType() timeToExpiry := time.Duration(agg.cfg.TimeToExpirySecs) * time.Second + minWait := time.Duration(agg.cfg.MinWaitSecs) * time.Second agg.taskMutex.Lock() task, ok := agg.taskIndexMap[digest] @@ -351,7 +370,7 @@ func (agg *Aggregator) submitStateHeader(ctx context.Context, req *TaskRequest) agg.taskIndexMap[digest] = task agg.taskIndexSeq += 1 - err = agg.blsAggregationService.InitializeNewTask(task.index, req.Task.ReferenceBlockNumber, quorumNumbers, quorumThresholdPercentages, timeToExpiry) + err = agg.blsAggregationService.InitializeNewTask(ctx, task.index, req.Task.ReferenceBlockNumber, quorumNumbers, quorumThresholdPercentages, minWait, timeToExpiry) } agg.taskMutex.Unlock() @@ -367,7 +386,7 @@ func (agg *Aggregator) submitStateHeader(ctx context.Context, req *TaskRequest) return nil } -func (agg *Aggregator) sendAggregatedResponseToContract(ctx context.Context, task *Task, blsAggServiceResp BlsAggregationServiceResponse) error { +func (agg *Aggregator) sendAggregatedResponseToContract(ctx context.Context, task *Task, blsAggServiceResp *BlsAggregationServiceResponse) error { if 
blsAggServiceResp.Err != nil { return logex.Trace(blsAggServiceResp.Err) } diff --git a/aggregator/aggregator_api.go b/aggregator/aggregator_api.go index 960b559..309c014 100644 --- a/aggregator/aggregator_api.go +++ b/aggregator/aggregator_api.go @@ -83,6 +83,7 @@ func (a *AggregatorApi) fetchTask(ctx context.Context, req *FetchTaskReq) (*Fetc func (a *AggregatorApi) SubmitTask(ctx context.Context, req *TaskRequest) error { defer func() { if err := recover(); err != nil { + logex.Pretty(req) logex.Error(err, string(debug.Stack())) panic(err) } @@ -108,6 +109,7 @@ func (a *AggregatorApi) submitTask(ctx context.Context, req *TaskRequest) error if err != nil { return logex.Trace(err, taskCtx) } + taskCtx = append(taskCtx, fmt.Sprintf("digest=%x", digest)) operatorAddr, err := bindings.GetOperatorAddrByOperatorID(a.agg.client, a.agg.registryCoordinator, req.OperatorId) if err != nil { diff --git a/aggregator/blsagg.go b/aggregator/blsagg.go index 93fe63d..e81671d 100644 --- a/aggregator/blsagg.go +++ b/aggregator/blsagg.go @@ -14,6 +14,7 @@ import ( "github.com/Layr-Labs/eigensdk-go/services/avsregistry" "github.com/Layr-Labs/eigensdk-go/types" "github.com/automata-network/multi-prover-avs/utils" + "github.com/chzyer/logex" "github.com/ethereum/go-ethereum/accounts/abi/bind" ) @@ -27,11 +28,14 @@ var ( TaskNotFoundErrorFn = func(taskIndex types.TaskIndex) error { return fmt.Errorf("task %d not initialized or already completed", taskIndex) } - OperatorNotPartOfTaskQuorumErrorFn = func(operatorId types.OperatorId, taskIndex types.TaskIndex) error { - return fmt.Errorf("operator %x not part of task %d's quorum", operatorId, taskIndex) + OperatorNotPartOfTaskQuorumErrorFn = func(operatorId types.OperatorId) error { + return fmt.Errorf("operator %x not part of task's quorum", operatorId) } - SignatureVerificationError = func(err error) error { - return fmt.Errorf("Failed to verify signature: %w", err) + SignatureVerificationError = func(operatorId types.OperatorId, err error) error { + return fmt.Errorf("operator %x Failed to verify signature: %w", operatorId, err) + } + OperatorG2KeyNotFound = func(operatorId types.OperatorId) error { + return fmt.Errorf("operator %x g2 key not found", operatorId) } IncorrectSignatureError = errors.New("Signature verification failed. Incorrect Signature.") ) @@ -55,54 +59,6 @@ type BlsAggregationServiceResponse struct { NonSignerStakeIndices [][]uint32 } -// aggregatedOperators is meant to be used as a value in a map -// map[taskResponseDigest]aggregatedOperators -type aggregatedOperators struct { - // aggregate g2 pubkey of all operatos who signed on this taskResponseDigest - signersApkG2 *bls.G2Point - // aggregate signature of all operators who signed on this taskResponseDigest - signersAggSigG1 *bls.Signature - // aggregate stake of all operators who signed on this header for each quorum - signersTotalStakePerQuorum map[types.QuorumNum]*big.Int - // set of OperatorId of operators who signed on this header - signersOperatorIdsSet map[types.OperatorId]bool -} - -// BlsAggregationService is the interface provided to avs aggregator code for doing bls aggregation -// Currently its only implementation is the BlsAggregatorService, so see the comment there for more details -type BlsAggregationService interface { - // InitializeNewTask should be called whenever a new task is created. ProcessNewSignature will return an error - // if the task it is trying to process has not been initialized yet. 
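Stepping back to the Config changes in aggregator/aggregator.go at the top of this diff: Sampling remains the legacy knob (defaulting to 2000), and the new GenTaskSampling / ExecTaskSampling fields fall back to it when unset, so existing configs keep their behavior while the two task paths can now sample at different rates. A minimal sketch of that cascade — the Config below is a trimmed stand-in for the real aggregator.Config, not the full type:

```go
package main

import "fmt"

// Config reproduces only the sampling-related fields touched by Init().
type Config struct {
	Sampling         uint64 // legacy knob, defaults to 2000
	GenTaskSampling  uint64 // falls back to Sampling when unset
	ExecTaskSampling uint64 // falls back to Sampling when unset
	MinWaitSecs      int    // 0 means "send as soon as thresholds are met"
}

// Init mirrors the defaulting logic added in this diff.
func (cfg *Config) Init() error {
	if cfg.Sampling == 0 {
		cfg.Sampling = 2000
	}
	if cfg.ExecTaskSampling == 0 {
		cfg.ExecTaskSampling = cfg.Sampling
	}
	if cfg.GenTaskSampling == 0 {
		cfg.GenTaskSampling = cfg.Sampling
	}
	return nil
}

func main() {
	cfg := Config{Sampling: 500}
	_ = cfg.Init()
	// Both task-specific rates inherit the legacy Sampling value.
	fmt.Println(cfg.GenTaskSampling, cfg.ExecTaskSampling) // 500 500
}
```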
- // quorumNumbers and quorumThresholdPercentages set the requirements for this task to be considered complete, which happens - // when a particular TaskResponseDigest (received via the a.taskChans[taskIndex]) has been signed by signers whose stake - // in each of the listed quorums adds up to at least quorumThresholdPercentages[i] of the total stake in that quorum - InitializeNewTask( - taskIndex types.TaskIndex, - taskCreatedBlock uint32, - quorumNumbers types.QuorumNums, - quorumThresholdPercentages types.QuorumThresholdPercentages, - timeToExpiry time.Duration, - ) error - - // ProcessNewSignature processes a new signature over a taskResponseDigest for a particular taskIndex by a particular operator - // It verifies that the signature is correct and returns an error if it is not, and then aggregates the signature and stake of - // the operator with all other signatures for the same taskIndex and taskResponseDigest pair. - // Note: This function currently only verifies signatures over the taskResponseDigest directly, so avs code needs to verify that the digest - // passed to ProcessNewSignature is indeed the digest of a valid taskResponse (that is, BlsAggregationService does not verify semantic integrity of the taskResponses) - ProcessNewSignature( - ctx context.Context, - taskIndex types.TaskIndex, - taskResponseDigest types.TaskResponseDigest, - blsSignature *bls.Signature, - operatorId types.OperatorId, - ) error - - // GetResponseChannel returns the single channel that meant to be used as the response channel - // Any task that is completed (see the completion criterion in the comment above InitializeNewTask) - // will be sent on this channel along with all the necessary information to call BLSSignatureChecker onchain - GetResponseChannel() <-chan BlsAggregationServiceResponse -} - // BlsAggregatorService is a service that performs BLS signature aggregation for an AVS' tasks // Assumptions: // 1. 
BlsAggregatorService only verifies digest signatures, so avs code needs to verify that the digest @@ -118,7 +74,7 @@ type BlsAggregationService interface { type BlsAggregatorService struct { // aggregatedResponsesC is the channel which all goroutines share to send their responses back to the // main thread after they are done aggregating (either they reached the threshold, or timeout expired) - aggregatedResponsesC chan BlsAggregationServiceResponse + aggregatedResponsesC chan *BlsAggregationServiceResponse // signedTaskRespsCs are the channels to send the signed task responses to the goroutines processing them // each new task is assigned a new goroutine and a new channel signedTaskRespsCs map[types.TaskIndex]chan types.SignedTaskResponseDigest @@ -131,11 +87,9 @@ type BlsAggregatorService struct { logger logging.Logger } -var _ BlsAggregationService = (*BlsAggregatorService)(nil) - func NewBlsAggregatorService(avsRegistryService avsregistry.AvsRegistryService, logger logging.Logger) *BlsAggregatorService { return &BlsAggregatorService{ - aggregatedResponsesC: make(chan BlsAggregationServiceResponse, 4), + aggregatedResponsesC: make(chan *BlsAggregationServiceResponse, 4), signedTaskRespsCs: make(map[types.TaskIndex]chan types.SignedTaskResponseDigest), taskChansMutex: sync.RWMutex{}, avsRegistryService: avsRegistryService, @@ -143,7 +97,7 @@ func NewBlsAggregatorService(avsRegistryService avsregistry.AvsRegistryService, } } -func (a *BlsAggregatorService) GetResponseChannel() <-chan BlsAggregationServiceResponse { +func (a *BlsAggregatorService) GetResponseChannel() <-chan *BlsAggregationServiceResponse { return a.aggregatedResponsesC } @@ -153,13 +107,15 @@ func (a *BlsAggregatorService) GetResponseChannel() <-chan BlsAggregationService // when a particular TaskResponseDigest (received via the a.taskChans[taskIndex]) has been signed by signers whose stake // in each of the listed quorums adds up to at least quorumThresholdPercentages[i] of the total stake in that quorum func (a *BlsAggregatorService) InitializeNewTask( + ctx context.Context, taskIndex types.TaskIndex, taskCreatedBlock uint32, quorumNumbers types.QuorumNums, - quorumThresholdPercentages types.QuorumThresholdPercentages, + minQuorumThresholdPercentages types.QuorumThresholdPercentages, + minWait time.Duration, timeToExpiry time.Duration, ) error { - a.logger.Info("AggregatorService initializing new task", "taskIndex", taskIndex, "taskCreatedBlock", taskCreatedBlock, "quorumNumbers", quorumNumbers, "quorumThresholdPercentages", quorumThresholdPercentages, "timeToExpiry", timeToExpiry) + a.logger.Info("AggregatorService initializing new task", "taskIndex", taskIndex, "taskCreatedBlock", taskCreatedBlock, "quorumNumbers", quorumNumbers, "minQuorumThresholdPercentages", minQuorumThresholdPercentages, "minWait", minWait, "timeToExpiry", timeToExpiry) a.taskChansMutex.Lock() signedTaskRespsC, taskExists := a.signedTaskRespsCs[taskIndex] @@ -172,7 +128,12 @@ func (a *BlsAggregatorService) InitializeNewTask( return TaskAlreadyInitializedErrorFn(taskIndex) } - go a.singleTaskAggregatorGoroutineFunc(taskIndex, taskCreatedBlock, quorumNumbers, quorumThresholdPercentages, timeToExpiry, signedTaskRespsC) + operatorStates, err := NewOperatorStates(ctx, a.avsRegistryService, quorumNumbers, minQuorumThresholdPercentages, taskIndex, taskCreatedBlock) + if err != nil { + return logex.Trace(err) + } + + go a.singleTaskAggregatorGoroutineFunc(operatorStates, minWait, timeToExpiry, signedTaskRespsC) return nil } @@ -215,134 +176,55 @@ func (a 
*BlsAggregatorService) ProcessNewSignature( } func (a *BlsAggregatorService) singleTaskAggregatorGoroutineFunc( - taskIndex types.TaskIndex, - taskCreatedBlock uint32, - quorumNumbers types.QuorumNums, - quorumThresholdPercentages []types.QuorumThresholdPercentage, + operatorStates *OperatorStates, + minWait time.Duration, timeToExpiry time.Duration, signedTaskRespsC <-chan types.SignedTaskResponseDigest, ) { - defer a.closeTaskGoroutine(taskIndex) + defer a.closeTaskGoroutine(operatorStates.taskIndex) - quorumThresholdPercentagesMap := make(map[types.QuorumNum]types.QuorumThresholdPercentage) - for i, quorumNumber := range quorumNumbers { - quorumThresholdPercentagesMap[quorumNumber] = quorumThresholdPercentages[i] - } - operatorsAvsStateDict, err := utils.Retry(5, time.Second, func() (map[types.OperatorId]types.OperatorAvsState, error) { - return a.avsRegistryService.GetOperatorsAvsStateAtBlock(context.Background(), quorumNumbers, taskCreatedBlock) - }, "taskIndex", taskIndex, "taskCreatedBlock", taskCreatedBlock) - if err != nil { - // TODO: how should we handle such an error? - a.logger.Fatal("AggregatorService failed to get operators state from avs registry", "err", err, "blockNumber", taskCreatedBlock) - } - quorumsAvsStakeDict, err := utils.Retry(5, time.Second, func() (map[types.QuorumNum]types.QuorumAvsState, error) { - return a.avsRegistryService.GetQuorumsAvsStateAtBlock(context.Background(), quorumNumbers, taskCreatedBlock) - }, "taskIndex", taskIndex, "taskCreatedBlock", taskCreatedBlock) - if err != nil { - a.logger.Fatal("Aggregator failed to get quorums state from avs registry", "err", err) - } - totalStakePerQuorum := make(map[types.QuorumNum]*big.Int) - for quorumNum, quorumAvsState := range quorumsAvsStakeDict { - totalStakePerQuorum[quorumNum] = quorumAvsState.TotalStake - } - quorumApksG1 := []*bls.G1Point{} - for _, quorumNumber := range quorumNumbers { - quorumApksG1 = append(quorumApksG1, quorumsAvsStakeDict[quorumNumber].AggPubkeyG1) + fastSend := false + if minWait == time.Duration(0) { + minWait = timeToExpiry + fastSend = true } - // TODO(samlaf): instead of taking a TTE, we should take a block as input - // and monitor the chain and only close the task goroutine when that block is reached taskExpiredTimer := time.NewTimer(timeToExpiry) + defer taskExpiredTimer.Stop() + fireTimer := time.NewTimer(minWait) + defer fireTimer.Stop() + + task := newBlsAggTask(operatorStates) - aggregatedOperatorsDict := map[types.TaskResponseDigest]aggregatedOperators{} - var thresholdInfo map[types.QuorumNum]ThresholdInfo for { select { - case signedTaskResponseDigest := <-signedTaskRespsC: - a.logger.Info("Task goroutine received new signed task response digest", "taskIndex", taskIndex, "signedTaskResponseDigest", signedTaskResponseDigest) - err := a.verifySignature(taskIndex, signedTaskResponseDigest, operatorsAvsStateDict) - signedTaskResponseDigest.SignatureVerificationErrorC <- err - if err != nil { + case signer := <-signedTaskRespsC: + a.logger.Info("Task goroutine received new signed task response digest", "taskIndex", operatorStates.taskIndex, "signedTaskResponseDigest", signer) + if !task.AddNewSigner(signer) { + a.logger.Info("Signature Verification Failed", "taskIndex", operatorStates.taskIndex, "signedTaskResponseDigest", signer) continue } - // after verifying signature we aggregate its sig and pubkey, and update the signed stake amount - digestAggregatedOperators, ok := aggregatedOperatorsDict[signedTaskResponseDigest.TaskResponseDigest] - if !ok { - // first 
operator to sign on this digest - digestAggregatedOperators = aggregatedOperators{ - // we've already verified that the operator is part of the task's quorum, so we don't need checks here - signersApkG2: bls.NewZeroG2Point().Add(operatorsAvsStateDict[signedTaskResponseDigest.OperatorId].Pubkeys.G2Pubkey), - signersAggSigG1: signedTaskResponseDigest.BlsSignature, - signersOperatorIdsSet: map[types.OperatorId]bool{signedTaskResponseDigest.OperatorId: true}, - signersTotalStakePerQuorum: cloneStakePerQuorumMap(operatorsAvsStateDict[signedTaskResponseDigest.OperatorId].StakePerQuorum), - } - } else { - digestAggregatedOperators.signersAggSigG1.Add(signedTaskResponseDigest.BlsSignature) - digestAggregatedOperators.signersApkG2.Add(operatorsAvsStateDict[signedTaskResponseDigest.OperatorId].Pubkeys.G2Pubkey) - digestAggregatedOperators.signersOperatorIdsSet[signedTaskResponseDigest.OperatorId] = true - for quorumNum, stake := range operatorsAvsStateDict[signedTaskResponseDigest.OperatorId].StakePerQuorum { - if _, ok := digestAggregatedOperators.signersTotalStakePerQuorum[quorumNum]; !ok { - // if we haven't seen this quorum before, initialize its signed stake to 0 - // possible if previous operators who sent us signatures were not part of this quorum - digestAggregatedOperators.signersTotalStakePerQuorum[quorumNum] = big.NewInt(0) - } - digestAggregatedOperators.signersTotalStakePerQuorum[quorumNum].Add(digestAggregatedOperators.signersTotalStakePerQuorum[quorumNum], stake) - } - } - // update the aggregatedOperatorsDict. Note that we need to assign the whole struct value at once, - // because of https://github.com/golang/go/issues/3117 - aggregatedOperatorsDict[signedTaskResponseDigest.TaskResponseDigest] = digestAggregatedOperators - - thresholdInfo = formatThreshold(digestAggregatedOperators.signersTotalStakePerQuorum, totalStakePerQuorum, quorumThresholdPercentagesMap) - a.logger.Infof("shares update for taskIndex=%v: %+v", taskIndex, thresholdInfo) - if checkIfStakeThresholdsMet(a.logger, digestAggregatedOperators.signersTotalStakePerQuorum, totalStakePerQuorum, quorumThresholdPercentagesMap) { - nonSignersOperatorIds := []types.OperatorId{} - for operatorId := range operatorsAvsStateDict { - if _, operatorSigned := digestAggregatedOperators.signersOperatorIdsSet[operatorId]; !operatorSigned { - nonSignersOperatorIds = append(nonSignersOperatorIds, operatorId) - } - } - // the contract requires a sorted nonSignersOperatorIds - sort.SliceStable(nonSignersOperatorIds, func(i, j int) bool { - iOprInt := new(big.Int).SetBytes(nonSignersOperatorIds[i][:]) - jOprInt := new(big.Int).SetBytes(nonSignersOperatorIds[j][:]) - return iOprInt.Cmp(jOprInt) == -1 - }) - - nonSignersG1Pubkeys := []*bls.G1Point{} - for _, operatorId := range nonSignersOperatorIds { - operator := operatorsAvsStateDict[operatorId] - nonSignersG1Pubkeys = append(nonSignersG1Pubkeys, operator.Pubkeys.G1Pubkey) - } + a.logger.Infof("shares update for taskIndex=%v: digest=%x, %+v", operatorStates.taskIndex, task.GetHighestDigest(), task.getThreshold(signer.TaskResponseDigest)) - indices, err := a.avsRegistryService.GetCheckSignaturesIndices(&bind.CallOpts{}, taskCreatedBlock, quorumNumbers, nonSignersOperatorIds) - if err != nil { - a.aggregatedResponsesC <- BlsAggregationServiceResponse{ - Err: types.WrapError(errors.New("failed to get check signatures indices"), err), - } + if fastSend { + result := task.Result(a.avsRegistryService) + if result != nil { + a.aggregatedResponsesC <- result return } - blsAggregationServiceResponse 
:= BlsAggregationServiceResponse{ - Err: nil, - TaskIndex: taskIndex, - TaskResponseDigest: signedTaskResponseDigest.TaskResponseDigest, - NonSignersPubkeysG1: nonSignersG1Pubkeys, - QuorumApksG1: quorumApksG1, - SignersApkG2: digestAggregatedOperators.signersApkG2, - SignersAggSigG1: digestAggregatedOperators.signersAggSigG1, - NonSignerQuorumBitmapIndices: indices.NonSignerQuorumBitmapIndices, - QuorumApkIndices: indices.QuorumApkIndices, - TotalStakeIndices: indices.TotalStakeIndices, - NonSignerStakeIndices: indices.NonSignerStakeIndices, - } - a.aggregatedResponsesC <- blsAggregationServiceResponse - return } - case <-taskExpiredTimer.C: - a.aggregatedResponsesC <- BlsAggregationServiceResponse{ - Err: TaskExpiredError(taskIndex, thresholdInfo), + case <-fireTimer.C: + result := task.Result(a.avsRegistryService) + if result == nil { + // the stake thresholds were not met within minWait; fall back to fastSend and forward the result as soon as they are met + fastSend = true + continue } + a.aggregatedResponsesC <- result + return + case <-taskExpiredTimer.C: + a.aggregatedResponsesC <- task.Err(fmt.Errorf("task expired")) return } } @@ -367,33 +249,31 @@ func (a *BlsAggregatorService) closeTaskGoroutine(taskIndex types.TaskIndex) { // this forces the avs code to verify that the digest is indeed the digest of a valid taskResponse // we could take taskResponse as an interface{} and have avs code pass us a taskResponseHashFunction // that we could use to hash and verify the taskResponse itself -func (a *BlsAggregatorService) verifySignature( - taskIndex types.TaskIndex, +func (o *OperatorStates) verifySignature( signedTaskResponseDigest types.SignedTaskResponseDigest, - operatorsAvsStateDict map[types.OperatorId]types.OperatorAvsState, ) error { - _, ok := operatorsAvsStateDict[signedTaskResponseDigest.OperatorId] + operatorId := signedTaskResponseDigest.OperatorId + avsState, ok := o.operatorsAvsStateDict[operatorId] if !ok { - a.logger.Warnf("Operator %#v not found. Skipping message.", signedTaskResponseDigest.OperatorId) - return OperatorNotPartOfTaskQuorumErrorFn(signedTaskResponseDigest.OperatorId, taskIndex) + return logex.Trace(OperatorNotPartOfTaskQuorumErrorFn(operatorId)) } // 0.
verify that the msg actually came from the correct operator - operatorG2Pubkey := operatorsAvsStateDict[signedTaskResponseDigest.OperatorId].Pubkeys.G2Pubkey + operatorG2Pubkey := avsState.Pubkeys.G2Pubkey if operatorG2Pubkey == nil { - a.logger.Fatal("Operator G2 pubkey not found") + return logex.Trace(OperatorG2KeyNotFound(operatorId)) } - a.logger.Debug("Verifying signed task response digest signature", + logex.Debug("Verifying signed task response digest signature", "operatorG2Pubkey", operatorG2Pubkey, "taskResponseDigest", signedTaskResponseDigest.TaskResponseDigest, "blsSignature", signedTaskResponseDigest.BlsSignature, ) signatureVerified, err := signedTaskResponseDigest.BlsSignature.Verify(operatorG2Pubkey, signedTaskResponseDigest.TaskResponseDigest) if err != nil { - return SignatureVerificationError(err) + return logex.Trace(err, operatorId) } if !signatureVerified { - return IncorrectSignatureError + return logex.Trace(IncorrectSignatureError, operatorId) } return nil } @@ -405,16 +285,232 @@ type ThresholdInfo struct { Percent *big.Float } -func formatThreshold(signedStakePerQuorum map[types.QuorumNum]*big.Int, - totalStakePerQuorum map[types.QuorumNum]*big.Int, - quorumThresholdPercentagesMap map[types.QuorumNum]types.QuorumThresholdPercentage) map[types.QuorumNum]ThresholdInfo { +// aggregatedOperators is meant to be used as a value in a map +// map[taskResponseDigest]aggregatedOperators +type aggregatedOperators struct { + // aggregate g2 pubkey of all operators who signed on this taskResponseDigest + signersApkG2 *bls.G2Point + // aggregate signature of all operators who signed on this taskResponseDigest + signersAggSigG1 *bls.Signature + // aggregate stake of all operators who signed on this digest for each quorum + signersTotalStakePerQuorum map[types.QuorumNum]*big.Int + // set of OperatorId of operators who signed on this digest + signersOperatorIdsSet map[types.OperatorId]bool +} + +func newAggregatedOperators() *aggregatedOperators { + return &aggregatedOperators{ + // we've already verified that the operator is part of the task's quorum, so we don't need checks here + signersApkG2: bls.NewZeroG2Point(), + signersAggSigG1: bls.NewZeroSignature(), + signersOperatorIdsSet: make(map[types.OperatorId]bool), + signersTotalStakePerQuorum: make(map[types.QuorumNum]*big.Int), + } +} + +func (a *aggregatedOperators) Add(sig types.SignedTaskResponseDigest, states types.OperatorAvsState) { + a.signersAggSigG1.Add(sig.BlsSignature) + a.signersApkG2.Add(states.Pubkeys.G2Pubkey) + a.signersOperatorIdsSet[sig.OperatorId] = true + for quorumNum, stake := range states.StakePerQuorum { + oldStake, ok := a.signersTotalStakePerQuorum[quorumNum] + if !ok { + oldStake = big.NewInt(0) + a.signersTotalStakePerQuorum[quorumNum] = oldStake + } + oldStake.Add(oldStake, stake) + } +} + +type blsAggTask struct { + states *OperatorStates + aggregatedOperatorsDict map[types.TaskResponseDigest]*aggregatedOperators +} + +func newBlsAggTask(operatorStates *OperatorStates) *blsAggTask { + return &blsAggTask{ + states: operatorStates, + aggregatedOperatorsDict: make(map[types.TaskResponseDigest]*aggregatedOperators), + } +} + +func (t *blsAggTask) getThreshold(digest types.TaskResponseDigest) map[types.QuorumNum]ThresholdInfo { + return t.states.formatThreshold(t.getDigestOperators(digest)) +} + +func (t *blsAggTask) getDigestOperators(digest types.TaskResponseDigest) *aggregatedOperators { + digestAggregatedOperators, ok := t.aggregatedOperatorsDict[digest] + if !ok { + digestAggregatedOperators =
newAggregatedOperators() + t.aggregatedOperatorsDict[digest] = digestAggregatedOperators + } + return digestAggregatedOperators +} + +func (t *blsAggTask) GetHighestDigest() types.TaskResponseDigest { + var highestDigest *types.TaskResponseDigest + highestPct := big.NewFloat(0) + for digest, oprs := range t.aggregatedOperatorsDict { + thresholds := t.states.formatThreshold(oprs) + minPct := big.NewFloat(1) + for _, n := range thresholds { + if n.Percent.Cmp(minPct) < 0 { + minPct = n.Percent + } + } + if minPct.Cmp(highestPct) > 0 { + highestPct = minPct + // copy the map key before taking its address: &digest would alias the range variable, which is reused across iterations + digest := digest + highestDigest = &digest + } + } + return *highestDigest +} + +func (t *blsAggTask) Err(err error) *BlsAggregationServiceResponse { + digest := t.GetHighestDigest() + oprs := t.aggregatedOperatorsDict[digest] + threshold := t.states.formatThreshold(oprs) + + return &BlsAggregationServiceResponse{ + Err: logex.Trace(err, fmt.Sprintf("threshold:%v", threshold), fmt.Sprintf("taskIndex:%v", t.states.taskIndex)), + TaskIndex: t.states.taskIndex, + TaskResponseDigest: digest, + } +} + +func (t *blsAggTask) Result(avsRegistryService avsregistry.AvsRegistryService) *BlsAggregationServiceResponse { + digest := t.GetHighestDigest() + oprs := t.aggregatedOperatorsDict[digest] + + thresholds := t.states.formatThreshold(oprs) + if !t.states.checkIfStakeThresholdsMet(thresholds) { + return nil + } + + nonSignersOperatorIds := t.states.NonSignerOperatorIds(oprs) + quorumApksG1 := t.states.QuorumApksG1() + + nonSignersG1Pubkeys := make([]*bls.G1Point, 0, len(nonSignersOperatorIds)) + for _, operatorId := range nonSignersOperatorIds { + operator := t.states.operatorsAvsStateDict[operatorId] + nonSignersG1Pubkeys = append(nonSignersG1Pubkeys, operator.Pubkeys.G1Pubkey) + } + + response := &BlsAggregationServiceResponse{ + Err: nil, + TaskIndex: t.states.taskIndex, + TaskResponseDigest: digest, + NonSignersPubkeysG1: nonSignersG1Pubkeys, + QuorumApksG1: quorumApksG1, + SignersApkG2: oprs.signersApkG2, + SignersAggSigG1: oprs.signersAggSigG1, + } + + indices, err := avsRegistryService.GetCheckSignaturesIndices(&bind.CallOpts{}, t.states.taskCreatedBlock, t.states.quorumNumbers, nonSignersOperatorIds) + if err != nil { + response.Err = types.WrapError(errors.New("failed to get check signatures indices"), err) + return response + } + response.NonSignerQuorumBitmapIndices = indices.NonSignerQuorumBitmapIndices + response.QuorumApkIndices = indices.QuorumApkIndices + response.TotalStakeIndices = indices.TotalStakeIndices + response.NonSignerStakeIndices = indices.NonSignerStakeIndices + return response +} + +func (t *blsAggTask) AddNewSigner(signedTaskResponseDigest types.SignedTaskResponseDigest) bool { + operatorId := signedTaskResponseDigest.OperatorId + if err := t.states.verifySignature(signedTaskResponseDigest); err != nil { + signedTaskResponseDigest.SignatureVerificationErrorC <- logex.Trace(err, t.states.taskIndex) + return false + } + signedTaskResponseDigest.SignatureVerificationErrorC <- nil + + avsState := t.states.operatorsAvsStateDict[operatorId] + + oprs := t.getDigestOperators(signedTaskResponseDigest.TaskResponseDigest) + oprs.Add(signedTaskResponseDigest, avsState) + return true +} + +type OperatorStates struct { + taskIndex types.TaskIndex + quorumNumbers types.QuorumNums + taskCreatedBlock uint32 + minQuorumThresholdPercentages []types.QuorumThresholdPercentage + operatorsAvsStateDict map[types.OperatorId]types.OperatorAvsState + quorumsAvsStakeDict map[types.QuorumNum]types.QuorumAvsState +} + +func NewOperatorStates( + ctx context.Context,
+ avsRegistryService avsregistry.AvsRegistryService, + quorumNumbers types.QuorumNums, + minQuorumThresholdPercentages []types.QuorumThresholdPercentage, + taskIndex uint32, + taskCreatedBlock uint32, +) (*OperatorStates, error) { + operatorsAvsStateDict, err := utils.Retry(5, time.Second, func() (map[types.OperatorId]types.OperatorAvsState, error) { + return avsRegistryService.GetOperatorsAvsStateAtBlock(ctx, quorumNumbers, taskCreatedBlock) + }, "taskIndex", taskIndex, "taskCreatedBlock", taskCreatedBlock) + if err != nil { + return nil, logex.Trace(err, + fmt.Sprintf("taskIndex:%v", taskIndex), + fmt.Sprintf("taskCreatedBlock:%v", taskCreatedBlock), + ) + } + quorumsAvsStakeDict, err := utils.Retry(5, time.Second, func() (map[types.QuorumNum]types.QuorumAvsState, error) { + return avsRegistryService.GetQuorumsAvsStateAtBlock(ctx, quorumNumbers, taskCreatedBlock) + }, "taskIndex", taskIndex, "taskCreatedBlock", taskCreatedBlock) + if err != nil { + return nil, logex.Trace(err, + fmt.Sprintf("taskIndex:%v", taskIndex), + fmt.Sprintf("taskCreatedBlock:%v", taskCreatedBlock), + ) + } + + return &OperatorStates{ + taskIndex, + quorumNumbers, + taskCreatedBlock, + minQuorumThresholdPercentages, + operatorsAvsStateDict, + quorumsAvsStakeDict, + }, nil +} + +func (o *OperatorStates) QuorumApksG1() []*bls.G1Point { + quorumApksG1 := make([]*bls.G1Point, 0, len(o.quorumNumbers)) + for _, quorumNumber := range o.quorumNumbers { + quorumApksG1 = append(quorumApksG1, o.quorumsAvsStakeDict[quorumNumber].AggPubkeyG1) + } + return quorumApksG1 +} + +func (o *OperatorStates) NonSignerOperatorIds(oprs *aggregatedOperators) []types.OperatorId { + nonSignersOperatorIds := make([]types.OperatorId, 0, len(o.operatorsAvsStateDict)) + for operatorId := range o.operatorsAvsStateDict { + if _, ok := oprs.signersOperatorIdsSet[operatorId]; !ok { + nonSignersOperatorIds = append(nonSignersOperatorIds, operatorId) + } + } + + sort.SliceStable(nonSignersOperatorIds, func(i, j int) bool { + iOprInt := new(big.Int).SetBytes(nonSignersOperatorIds[i][:]) + jOprInt := new(big.Int).SetBytes(nonSignersOperatorIds[j][:]) + return iOprInt.Cmp(jOprInt) == -1 + }) + return nonSignersOperatorIds +} + +func (o *OperatorStates) formatThreshold(oprs *aggregatedOperators) map[types.QuorumNum]ThresholdInfo { out := make(map[types.QuorumNum]ThresholdInfo) - for quorumNum, quorumThresholdPercentage := range quorumThresholdPercentagesMap { + for idx, quorumNum := range o.quorumNumbers { info := ThresholdInfo{ - Signed: signedStakePerQuorum[quorumNum], - Total: totalStakePerQuorum[quorumNum], - Threshold: quorumThresholdPercentage, + Signed: oprs.signersTotalStakePerQuorum[quorumNum], + Total: o.quorumsAvsStakeDict[quorumNum].TotalStake, + Threshold: o.minQuorumThresholdPercentages[idx], } signed := new(big.Float).SetInt(info.Signed) total := new(big.Float).SetInt(info.Total) @@ -424,48 +520,13 @@ func formatThreshold(signedStakePerQuorum map[types.QuorumNum]*big.Int, return out } -// checkIfStakeThresholdsMet checks at least quorumThresholdPercentage of stake -// has signed for each quorum. 
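Both the removed checkIfStakeThresholdsMet free function and its OperatorStates replacement below perform the same integer comparison, signedStake * 100 >= totalStake * thresholdPct, staying in big-integer arithmetic so the result matches the on-chain check exactly instead of rounding through division. A standalone sketch of that comparison (thresholdMet is an illustrative name, not part of the codebase):

```go
package main

import (
	"fmt"
	"math/big"
)

// thresholdMet reports whether signed stake reaches thresholdPct percent of
// total stake, using the cross-multiplied form the contracts use:
// signed * 100 >= total * thresholdPct.
func thresholdMet(signed, total *big.Int, thresholdPct uint8) bool {
	lhs := new(big.Int).Mul(signed, big.NewInt(100))
	rhs := new(big.Int).Mul(total, big.NewInt(int64(thresholdPct)))
	return lhs.Cmp(rhs) >= 0
}

func main() {
	total := big.NewInt(1_000_000)
	// 666,666 signed of 1,000,000 at a 67% threshold: 66666600 < 67000000.
	fmt.Println(thresholdMet(big.NewInt(666_666), total, 67)) // false
	fmt.Println(thresholdMet(big.NewInt(670_000), total, 67)) // true
}
```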
-func checkIfStakeThresholdsMet( - logger logging.Logger, - signedStakePerQuorum map[types.QuorumNum]*big.Int, - totalStakePerQuorum map[types.QuorumNum]*big.Int, - quorumThresholdPercentagesMap map[types.QuorumNum]types.QuorumThresholdPercentage, -) bool { - for quorumNum, quorumThresholdPercentage := range quorumThresholdPercentagesMap { - signedStakeByQuorum, ok := signedStakePerQuorum[quorumNum] - if !ok { - // signedStakePerQuorum not contain the quorum, - // this case means signedStakePerQuorum has not signed for each quorum. - // even the total stake for this quorum is zero. - return false - } - - totalStakeByQuorum, ok := totalStakePerQuorum[quorumNum] - if !ok { - // Note this case should not happend. - // The `totalStakePerQuorum` is got from the contract, so if we not found the - // totalStakeByQuorum, that means the code have a bug. - logger.Errorf("TotalStake not found for quorum %d.", quorumNum) - return false - } - - // we check that signedStake >= totalStake * quorumThresholdPercentage / 100 - // to be exact (and do like the contracts), we actually check that - // signedStake * 100 >= totalStake * quorumThresholdPercentage - signedStake := big.NewInt(0).Mul(signedStakeByQuorum, big.NewInt(100)) - thresholdStake := big.NewInt(0).Mul(totalStakeByQuorum, big.NewInt(int64(quorumThresholdPercentage))) +func (o *OperatorStates) checkIfStakeThresholdsMet(info map[types.QuorumNum]ThresholdInfo) bool { + for _, info := range info { + signedStake := new(big.Int).Mul(info.Signed, big.NewInt(100)) + thresholdStake := new(big.Int).Mul(info.Total, big.NewInt(int64(info.Threshold))) if signedStake.Cmp(thresholdStake) < 0 { return false } } return true } - -func cloneStakePerQuorumMap(stakes map[types.QuorumNum]types.StakeAmount) map[types.QuorumNum]types.StakeAmount { - out := make(map[types.QuorumNum]types.StakeAmount, len(stakes)) - for k, v := range stakes { - out[k] = new(big.Int).Set(v) - } - return out -} diff --git a/contracts/dcap-v3-attestation b/contracts/dcap-v3-attestation index 51d41d6..9aa7502 160000 --- a/contracts/dcap-v3-attestation +++ b/contracts/dcap-v3-attestation @@ -1 +1 @@ -Subproject commit 51d41d609dcc004db4ccb44c9016f7a18900d06b +Subproject commit 9aa7502fc33542407245b87af240fb1eec13c9aa diff --git a/contracts/script/DeployTEELivenessService.s.sol b/contracts/script/DeployTEELivenessService.s.sol index 4acda19..4303e97 100644 --- a/contracts/script/DeployTEELivenessService.s.sol +++ b/contracts/script/DeployTEELivenessService.s.sol @@ -1,6 +1,7 @@ pragma solidity ^0.8.12; import "forge-std/Script.sol"; +import {VmSafe} from "forge-std/Vm.sol"; import {TransparentUpgradeableProxy, ITransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; import {ProxyAdmin} from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol"; @@ -16,8 +17,10 @@ import "./utils/CRLParser.s.sol"; contract DeployTEELivenessVerifier is Script, DcapTestUtils, CRLParser { string internal constant defaultTcbInfoPath = "dcap-v3-attestation/contracts/assets/0923/tcbInfo.json"; + string internal constant defaultTcbInfoDirPath = + "dcap-v3-attestation/contracts/assets/latest/tcb_info/"; string internal constant defaultQeIdPath = - "dcap-v3-attestation/contracts/assets/0923/identity.json"; + "dcap-v3-attestation/contracts/assets/latest/identity.json"; function setUp() public {} @@ -87,24 +90,25 @@ contract DeployTEELivenessVerifier is Script, DcapTestUtils, CRLParser { saveJson(output); } - function deployAttestation() public { + function 
updateAttestationConfig() public { string memory output = readJson(); - vm.startBroadcast(); - AutomataDcapV3Attestation attestation = new AutomataDcapV3Attestation( - vm.parseJsonAddress(output, ".SigVerifyLib"), - vm.parseJsonAddress(output, ".PEMCertChainLib") + AutomataDcapV3Attestation attestation = AutomataDcapV3Attestation( + vm.parseJsonAddress(output, ".AutomataDcapV3Attestation") ); + vm.startBroadcast(); { - string memory tcbInfoJson = vm.readFile(defaultTcbInfoPath); - - ( - bool tcbParsedSuccess, - TCBInfoStruct.TCBInfo memory parsedTcbInfo - ) = parseTcbInfoJson(tcbInfoJson); - require(tcbParsedSuccess, "failed to parse tcb"); - string memory fmspc = parsedTcbInfo.fmspc; - attestation.configureTcbInfoJson(fmspc, parsedTcbInfo); + VmSafe.DirEntry[] memory files = vm.readDir(defaultTcbInfoDirPath); + for (uint i = 0; i < files.length; i++) { + string memory tcbInfoJson = vm.readFile(files[i].path); + ( + bool tcbParsedSuccess, + TCBInfoStruct.TCBInfo memory parsedTcbInfo + ) = parseTcbInfoJson(tcbInfoJson); + require(tcbParsedSuccess, "failed to parse tcb"); + string memory fmspc = parsedTcbInfo.fmspc; + attestation.configureTcbInfoJson(fmspc, parsedTcbInfo); + } } { @@ -118,26 +122,44 @@ attestation.configureQeIdentityJson(parsedEnclaveId); } + vm.stopBroadcast(); + } + function deployAttestation() public { + string memory output = readJson(); + vm.startBroadcast(); + AutomataDcapV3Attestation attestation = new AutomataDcapV3Attestation( + vm.parseJsonAddress(output, ".SigVerifyLib"), + vm.parseJsonAddress(output, ".PEMCertChainLib") + ); { // CRLs are provided directly in the CRLParser.s.sol script in its DER-encoded form bytes[] memory crl = decodeCrl(samplePckCrl); attestation.addRevokedCertSerialNum(0, crl); } - vm.stopBroadcast(); - bytes memory data =
hex"030002000000000009000e00939a7233f79c4ca9940a0db3957f0607f28dda234595e56eaeb7ce9b681a62cd000000000e0e100fffff0100000000000000000000000000000000000000000000000000000000000000000000000000000000000500000000000000e700000000000000ce040fe9ad608f90e417897d0839886cfef2cf238e37c099eb3d8745ab5296f90000000000000000000000000000000000000000000000000000000000000000de79b29d706d9f00ddbbdf03aa7142df1d7ef1562ec5d4e9dbf95192ccbb650a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a73c92f9c85fe649134cbd089aa6e5dda332a1c334640b160ef93e5a9971d262ae73a3503ad2184f91e6dc82740d75851a18494bf0ffc21420d8d75dafd10e3ca1000007a91529a8b4a235d330142f7faa68dea398c060e6b55dfe279b42feb12cf2a5046211dd4754a1d677d8bb631e6d66904648bd526c87a7b17217140cffc8431f2b1fb4d19c4b4071656cbdb8eaa942c89a359e6e84f51827247a3ac35b08d03abb52e537eae321e112bf351e1f5b9d7eeb3c3ea01e278e65cec3af7f8bb6fdec40e0e100fffff0100000000000000000000000000000000000000000000000000000000000000000000000000000000001500000000000000e700000000000000192aa50ce1c0cef03ccf89e7b5b16b0d7978f5c2b1edcf774d87702e8154d8bf00000000000000000000000000000000000000000000000000000000000000008c4f5775d796503e96137f77c68a829a0056ac8ded70140b081b094490c57bff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000066d3aaf3395111d7e0f2298bf4b31be75deaa4e205829fc512a4468b4177e67e0000000000000000000000000000000000000000000000000000000000000000a91eb85c23f448565f1f8edc2c0a64849c05d9ebe3af7f1d503c5374ce33a8329546cd1f8c7fa25859aa6fa21b46bfb8cb2cf49d35ddc71ba7016030b7e98d822000000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f0500620e00002d2d2d2d2d424547494e2043455254494649434154452d2d2d2d2d0a4d494945386a4343424a6d674177494241674956414e556f5a4d75787a767164353268495755667233414a6e6d6253574d416f4743437147534d343942414d430a4d484178496a416742674e5642414d4d47556c756447567349464e4857434251513073675547786864475a76636d306751304578476a415942674e5642416f4d0a45556c756447567349454e76636e4276636d4630615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b47413155450a4341774351304578437a414a42674e5642415954416c56544d423458445449304d444d774e7a45314d4445784d6c6f5844544d784d444d774e7a45314d4445780a4d6c6f77634445694d434147413155454177775a535735305a5777675530645949464244537942445a584a3061575a70593246305a5445614d426747413155450a43677752535735305a577767513239796347397959585270623234784644415342674e564241634d43314e68626e526849454e7359584a684d517377435159440a5651514944414a445154454c4d416b474131554542684d4356564d775754415442676371686b6a4f5051494242676771686b6a4f50514d4242774e43414153440a30594d43645a65616e49706b52704c72516e78456a34305241585258353563437a6f4c512b4336786c45734a466346465955546b3851616c477a777756676e4e0a4c7469373461464248794c68354e55616666574f6f344944446a434341776f77487759445652306a42426777466f41556c5739647a62306234656c4153636e550a3944504f4156634c336c5177617759445652306642475177596a42676f46366758495a616148523063484d364c79396863476b7564484a316333526c5a484e6c0a636e5a705932567a4c6d6c75644756734c6d4e766253397a5a3367765932567964476c6d61574e6864476c76626
939324d7939775932746a636d772f593245390a6347786864475a76636d306d5a57356a62325270626d63395a4756794d42304741315564446751574242534863356e4b574a694e3278684f39523875657543500a434e4b596254414f42674e56485138424166384542414d434273417744415944565230544151482f4241497741444343416a734743537147534962345451454e0a41515343416977776767496f4d42344743697147534962345451454e4151454545426870554c6259304254596e77775554523251363630776767466c42676f710a686b69472b453042445145434d4949425654415142677371686b69472b4530424451454341514942446a415142677371686b69472b45304244514543416749420a446a415142677371686b69472b4530424451454341774942417a415142677371686b69472b4530424451454342414942417a415242677371686b69472b4530420a4451454342514943415038774551594c4b6f5a496876684e41513042416759434167442f4d42414743797147534962345451454e41514948416745424d4241470a43797147534962345451454e41514949416745414d42414743797147534962345451454e4151494a416745414d42414743797147534962345451454e4151494b0a416745414d42414743797147534962345451454e4151494c416745414d42414743797147534962345451454e4151494d416745414d42414743797147534962340a5451454e4151494e416745414d42414743797147534962345451454e4151494f416745414d42414743797147534962345451454e41514950416745414d4241470a43797147534962345451454e41514951416745414d42414743797147534962345451454e415149524167454e4d42384743797147534962345451454e415149530a4242414f44674d442f2f38424141414141414141414141414d42414743697147534962345451454e41514d45416741414d42514743697147534962345451454e0a4151514542674267616741414144415042676f71686b69472b45304244514546436745424d42344743697147534962345451454e4151594545482b5767692b640a5a43486c4264547956765a63557a67775241594b4b6f5a496876684e41513042427a41324d42414743797147534962345451454e415163424151482f4d4241470a43797147534962345451454e41516343415145414d42414743797147534962345451454e41516344415145414d416f4743437147534d343942414d43413063410a4d4551434943732f764b4849486742427a3833746866314a7a42686c547631595339546779544f724a5449627a496d3541694243705242537754772f784a49710a38337a617553376a3847367448424b3342722b71597a55704154616a4a513d3d0a2d2d2d2d2d454e442043455254494649434154452d2d2d2d2d0a2d2d2d2d2d424547494e2043455254494649434154452d2d2d2d2d0a4d4949436c6a4343416a32674177494241674956414a567658633239472b487051456e4a3150517a7a674658433935554d416f4743437147534d343942414d430a4d476778476a415942674e5642414d4d45556c756447567349464e48574342536232393049454e424d526f77474159445651514b4442464a626e526c624342440a62334a7762334a6864476c76626a45554d424947413155454277774c553246756447456751327868636d4578437a414a42674e564241674d416b4e424d5173770a435159445651514745774a56557a4165467730784f4441314d6a45784d4455774d5442614677307a4d7a41314d6a45784d4455774d5442614d484178496a41670a42674e5642414d4d47556c756447567349464e4857434251513073675547786864475a76636d306751304578476a415942674e5642416f4d45556c75644756730a49454e76636e4276636d4630615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b474131554543417743513045780a437a414a42674e5642415954416c56544d466b77457759484b6f5a497a6a3043415159494b6f5a497a6a304441516344516741454e53422f377432316c58534f0a3243757a7078773734654a423732457944476757357258437478327456544c7136684b6b367a2b5569525a436e71523770734f766771466553786c6d546c4a6c0a65546d693257597a33714f42757a43427544416642674e5648534d4547444157674251695a517a575770303069664f44744a5653763141624f536347724442530a42674e5648523845537a424a4d45656752614244686b466f64485277637a6f764c324e6c636e52705a6d6c6a5958526c63793530636e567a6447566b633256790a646d6c6a5a584d75615735305a577775593239744c306c756447567355306459556d397664454e424c6d526c636a
416442674e5648513445466751556c5739640a7a62306234656c4153636e553944504f4156634c336c517744675944565230504151482f42415144416745474d42494741315564457745422f7751494d4159420a4166384341514177436759494b6f5a497a6a30454177494452774177524149675873566b6930772b6936565947573355462f32327561586530594a446a3155650a6e412b546a44316169356343494359623153416d4435786b66545670766f34556f79695359787244574c6d5552344349394e4b7966504e2b0a2d2d2d2d2d454e442043455254494649434154452d2d2d2d2d0a2d2d2d2d2d424547494e2043455254494649434154452d2d2d2d2d0a4d4949436a7a4343416a53674177494241674955496d554d316c71644e496e7a6737535655723951477a6b6e42717777436759494b6f5a497a6a3045417749770a614445614d4267474131554541777752535735305a5777675530645949464a766233516751304578476a415942674e5642416f4d45556c756447567349454e760a636e4276636d4630615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b47413155454341774351304578437a414a0a42674e5642415954416c56544d423458445445344d4455794d5445774e4455784d466f58445451354d54497a4d54497a4e546b314f566f77614445614d4267470a4131554541777752535735305a5777675530645949464a766233516751304578476a415942674e5642416f4d45556c756447567349454e76636e4276636d46300a615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b47413155454341774351304578437a414a42674e56424159540a416c56544d466b77457759484b6f5a497a6a3043415159494b6f5a497a6a3044415163445167414543366e45774d4449595a4f6a2f69505773437a61454b69370a314f694f534c52466857476a626e42564a66566e6b59347533496a6b4459594c304d784f346d717379596a6c42616c54565978465032734a424b357a6c4b4f420a757a43427544416642674e5648534d4547444157674251695a517a575770303069664f44744a5653763141624f5363477244425342674e5648523845537a424a0a4d45656752614244686b466f64485277637a6f764c324e6c636e52705a6d6c6a5958526c63793530636e567a6447566b63325679646d6c6a5a584d75615735300a5a577775593239744c306c756447567355306459556d397664454e424c6d526c636a416442674e564851344546675155496d554d316c71644e496e7a673753560a55723951477a6b6e4271777744675944565230504151482f42415144416745474d42494741315564457745422f7751494d4159424166384341514577436759490a4b6f5a497a6a3045417749445351417752674968414f572f35516b522b533943695344634e6f6f774c7550524c735747662f59693747535839344267775477670a41694541344a306c72486f4d732b586f356f2f7358364f39515778485241765a55474f6452513763767152586171493d0a2d2d2d2d2d454e442043455254494649434154452d2d2d2d2d0a00"; - (bool succ, ) = attestation.verifyAttestation(data); - require(succ); - vm.serializeAddress( output, "AutomataDcapV3Attestation", address(attestation) ); saveJson(output); + + updateAttestationConfig(); + verifyQuote(); + } + + function verifyQuote() public { + string memory output = readJson(); + AutomataDcapV3Attestation attestation = AutomataDcapV3Attestation( + vm.parseJsonAddress(output, ".AutomataDcapV3Attestation") + ); + + bytes + memory data = 
hex"030002000000000009000e00939a7233f79c4ca9940a0db3957f0607f28dda234595e56eaeb7ce9b681a62cd000000000e0e100fffff0100000000000000000000000000000000000000000000000000000000000000000000000000000000000500000000000000e700000000000000ce040fe9ad608f90e417897d0839886cfef2cf238e37c099eb3d8745ab5296f90000000000000000000000000000000000000000000000000000000000000000de79b29d706d9f00ddbbdf03aa7142df1d7ef1562ec5d4e9dbf95192ccbb650a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a73c92f9c85fe649134cbd089aa6e5dda332a1c334640b160ef93e5a9971d262ae73a3503ad2184f91e6dc82740d75851a18494bf0ffc21420d8d75dafd10e3ca1000007a91529a8b4a235d330142f7faa68dea398c060e6b55dfe279b42feb12cf2a5046211dd4754a1d677d8bb631e6d66904648bd526c87a7b17217140cffc8431f2b1fb4d19c4b4071656cbdb8eaa942c89a359e6e84f51827247a3ac35b08d03abb52e537eae321e112bf351e1f5b9d7eeb3c3ea01e278e65cec3af7f8bb6fdec40e0e100fffff0100000000000000000000000000000000000000000000000000000000000000000000000000000000001500000000000000e700000000000000192aa50ce1c0cef03ccf89e7b5b16b0d7978f5c2b1edcf774d87702e8154d8bf00000000000000000000000000000000000000000000000000000000000000008c4f5775d796503e96137f77c68a829a0056ac8ded70140b081b094490c57bff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000066d3aaf3395111d7e0f2298bf4b31be75deaa4e205829fc512a4468b4177e67e0000000000000000000000000000000000000000000000000000000000000000a91eb85c23f448565f1f8edc2c0a64849c05d9ebe3af7f1d503c5374ce33a8329546cd1f8c7fa25859aa6fa21b46bfb8cb2cf49d35ddc71ba7016030b7e98d822000000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f0500620e00002d2d2d2d2d424547494e2043455254494649434154452d2d2d2d2d0a4d494945386a4343424a6d674177494241674956414e556f5a4d75787a767164353268495755667233414a6e6d6253574d416f4743437147534d343942414d430a4d484178496a416742674e5642414d4d47556c756447567349464e4857434251513073675547786864475a76636d306751304578476a415942674e5642416f4d0a45556c756447567349454e76636e4276636d4630615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b47413155450a4341774351304578437a414a42674e5642415954416c56544d423458445449304d444d774e7a45314d4445784d6c6f5844544d784d444d774e7a45314d4445780a4d6c6f77634445694d434147413155454177775a535735305a5777675530645949464244537942445a584a3061575a70593246305a5445614d426747413155450a43677752535735305a577767513239796347397959585270623234784644415342674e564241634d43314e68626e526849454e7359584a684d517377435159440a5651514944414a445154454c4d416b474131554542684d4356564d775754415442676371686b6a4f5051494242676771686b6a4f50514d4242774e43414153440a30594d43645a65616e49706b52704c72516e78456a34305241585258353563437a6f4c512b4336786c45734a466346465955546b3851616c477a777756676e4e0a4c7469373461464248794c68354e55616666574f6f344944446a434341776f77487759445652306a42426777466f41556c5739647a62306234656c4153636e550a3944504f4156634c336c5177617759445652306642475177596a42676f46366758495a616148523063484d364c79396863476b7564484a316333526c5a484e6c0a636e5a705932567a4c6d6c75644756734c6d4e766253397a5a3367765932567964476c6d61574e6864476c76626
939324d7939775932746a636d772f593245390a6347786864475a76636d306d5a57356a62325270626d63395a4756794d42304741315564446751574242534863356e4b574a694e3278684f39523875657543500a434e4b596254414f42674e56485138424166384542414d434273417744415944565230544151482f4241497741444343416a734743537147534962345451454e0a41515343416977776767496f4d42344743697147534962345451454e4151454545426870554c6259304254596e77775554523251363630776767466c42676f710a686b69472b453042445145434d4949425654415142677371686b69472b4530424451454341514942446a415142677371686b69472b45304244514543416749420a446a415142677371686b69472b4530424451454341774942417a415142677371686b69472b4530424451454342414942417a415242677371686b69472b4530420a4451454342514943415038774551594c4b6f5a496876684e41513042416759434167442f4d42414743797147534962345451454e41514948416745424d4241470a43797147534962345451454e41514949416745414d42414743797147534962345451454e4151494a416745414d42414743797147534962345451454e4151494b0a416745414d42414743797147534962345451454e4151494c416745414d42414743797147534962345451454e4151494d416745414d42414743797147534962340a5451454e4151494e416745414d42414743797147534962345451454e4151494f416745414d42414743797147534962345451454e41514950416745414d4241470a43797147534962345451454e41514951416745414d42414743797147534962345451454e415149524167454e4d42384743797147534962345451454e415149530a4242414f44674d442f2f38424141414141414141414141414d42414743697147534962345451454e41514d45416741414d42514743697147534962345451454e0a4151514542674267616741414144415042676f71686b69472b45304244514546436745424d42344743697147534962345451454e4151594545482b5767692b640a5a43486c4264547956765a63557a67775241594b4b6f5a496876684e41513042427a41324d42414743797147534962345451454e415163424151482f4d4241470a43797147534962345451454e41516343415145414d42414743797147534962345451454e41516344415145414d416f4743437147534d343942414d43413063410a4d4551434943732f764b4849486742427a3833746866314a7a42686c547631595339546779544f724a5449627a496d3541694243705242537754772f784a49710a38337a617553376a3847367448424b3342722b71597a55704154616a4a513d3d0a2d2d2d2d2d454e442043455254494649434154452d2d2d2d2d0a2d2d2d2d2d424547494e2043455254494649434154452d2d2d2d2d0a4d4949436c6a4343416a32674177494241674956414a567658633239472b487051456e4a3150517a7a674658433935554d416f4743437147534d343942414d430a4d476778476a415942674e5642414d4d45556c756447567349464e48574342536232393049454e424d526f77474159445651514b4442464a626e526c624342440a62334a7762334a6864476c76626a45554d424947413155454277774c553246756447456751327868636d4578437a414a42674e564241674d416b4e424d5173770a435159445651514745774a56557a4165467730784f4441314d6a45784d4455774d5442614677307a4d7a41314d6a45784d4455774d5442614d484178496a41670a42674e5642414d4d47556c756447567349464e4857434251513073675547786864475a76636d306751304578476a415942674e5642416f4d45556c75644756730a49454e76636e4276636d4630615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b474131554543417743513045780a437a414a42674e5642415954416c56544d466b77457759484b6f5a497a6a3043415159494b6f5a497a6a304441516344516741454e53422f377432316c58534f0a3243757a7078773734654a423732457944476757357258437478327456544c7136684b6b367a2b5569525a436e71523770734f766771466553786c6d546c4a6c0a65546d693257597a33714f42757a43427544416642674e5648534d4547444157674251695a517a575770303069664f44744a5653763141624f536347724442530a42674e5648523845537a424a4d45656752614244686b466f64485277637a6f764c324e6c636e52705a6d6c6a5958526c63793530636e567a6447566b633256790a646d6c6a5a584d75615735305a577775593239744c306c756447567355306459556d397664454e424c6d526c636a
416442674e5648513445466751556c5739640a7a62306234656c4153636e553944504f4156634c336c517744675944565230504151482f42415144416745474d42494741315564457745422f7751494d4159420a4166384341514177436759494b6f5a497a6a30454177494452774177524149675873566b6930772b6936565947573355462f32327561586530594a446a3155650a6e412b546a44316169356343494359623153416d4435786b66545670766f34556f79695359787244574c6d5552344349394e4b7966504e2b0a2d2d2d2d2d454e442043455254494649434154452d2d2d2d2d0a2d2d2d2d2d424547494e2043455254494649434154452d2d2d2d2d0a4d4949436a7a4343416a53674177494241674955496d554d316c71644e496e7a6737535655723951477a6b6e42717777436759494b6f5a497a6a3045417749770a614445614d4267474131554541777752535735305a5777675530645949464a766233516751304578476a415942674e5642416f4d45556c756447567349454e760a636e4276636d4630615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b47413155454341774351304578437a414a0a42674e5642415954416c56544d423458445445344d4455794d5445774e4455784d466f58445451354d54497a4d54497a4e546b314f566f77614445614d4267470a4131554541777752535735305a5777675530645949464a766233516751304578476a415942674e5642416f4d45556c756447567349454e76636e4276636d46300a615739754d5251774567594456515148444174545957353059534244624746795954454c4d416b47413155454341774351304578437a414a42674e56424159540a416c56544d466b77457759484b6f5a497a6a3043415159494b6f5a497a6a3044415163445167414543366e45774d4449595a4f6a2f69505773437a61454b69370a314f694f534c52466857476a626e42564a66566e6b59347533496a6b4459594c304d784f346d717379596a6c42616c54565978465032734a424b357a6c4b4f420a757a43427544416642674e5648534d4547444157674251695a517a575770303069664f44744a5653763141624f5363477244425342674e5648523845537a424a0a4d45656752614244686b466f64485277637a6f764c324e6c636e52705a6d6c6a5958526c63793530636e567a6447566b63325679646d6c6a5a584d75615735300a5a577775593239744c306c756447567355306459556d397664454e424c6d526c636a416442674e564851344546675155496d554d316c71644e496e7a673753560a55723951477a6b6e4271777744675944565230504151482f42415144416745474d42494741315564457745422f7751494d4159424166384341514577436759490a4b6f5a497a6a3045417749445351417752674968414f572f35516b522b533943695344634e6f6f774c7550524c735747662f59693747535839344267775477670a41694541344a306c72486f4d732b586f356f2f7358364f39515778485241765a55474f6452513763767152586171493d0a2d2d2d2d2d454e442043455254494649434154452d2d2d2d2d0a00"; + (bool succ, ) = attestation.verifyAttestation(data); + require(succ); } function deployProxyAdmin() public { diff --git a/contracts/script/TEECommitteeManagement.s.sol b/contracts/script/TEECommitteeManagement.s.sol index f6fae9c..1e83227 100644 --- a/contracts/script/TEECommitteeManagement.s.sol +++ b/contracts/script/TEECommitteeManagement.s.sol @@ -1,12 +1,19 @@ pragma solidity ^0.8.12; import {IMultiProverServiceManager} from "../src/interfaces/IMultiProverServiceManager.sol"; +import {IStakeRegistry} from "eigenlayer-middleware/interfaces/IStakeRegistry.sol"; +import {StakeRegistry, IStrategy} from "eigenlayer-middleware/StakeRegistry.sol"; +import {IRegistryCoordinator} from "eigenlayer-middleware/interfaces/IRegistryCoordinator.sol"; +import {RegistryCoordinator} from "eigenlayer-middleware/RegistryCoordinator.sol"; import "forge-std/Script.sol"; contract TEECommitteeManagement is Script { - IMultiProverServiceManager serviceManager = IMultiProverServiceManager(vm.envAddress("MULTI_PROVER_SERVICE_MANAGER")); - + IMultiProverServiceManager serviceManager = + IMultiProverServiceManager( + vm.envAddress("MULTI_PROVER_SERVICE_MANAGER") + ); + function run() public { 
uint256 id = 1; string memory description = "Scroll Prover Committee"; @@ -16,26 +23,57 @@ contract TEECommitteeManagement is Script { addCommittee(id, description, metadata, teeQuorumNumbers); } - function addCommittee(uint256 id, string memory description, bytes memory metadata, bytes memory teeQuorumNumbers) public { + function addLineaCommittee() public { + uint256 id = 2; + string memory description = "Linea Prover Committee"; + bytes memory metadata = abi.encodePacked('{"chainId":59144}'); + bytes memory teeQuorumNumbers = new bytes(1); + teeQuorumNumbers[0] = bytes1(uint8(1)); + + vm.startBroadcast(); - IMultiProverServiceManager.Committee memory committee = IMultiProverServiceManager.Committee({ - id: id, - description: description, - metadata: metadata, - teeQuorumNumbers: teeQuorumNumbers + IMultiProverServiceManager.TEEQuorum memory teeQuorum = IMultiProverServiceManager.TEEQuorum({ + teeType: IMultiProverServiceManager.TEE.INTEL_SGX, + quorumNumber: 1 }); + serviceManager.addTEEQuorum(teeQuorum); + vm.stopBroadcast(); + + addCommittee(id, description, metadata, teeQuorumNumbers); + } + + function addCommittee( + uint256 id, + string memory description, + bytes memory metadata, + bytes memory teeQuorumNumbers + ) public { + vm.startBroadcast(); + IMultiProverServiceManager.Committee + memory committee = IMultiProverServiceManager.Committee({ + id: id, + description: description, + metadata: metadata, + teeQuorumNumbers: teeQuorumNumbers + }); serviceManager.addCommittee(committee); vm.stopBroadcast(); } - function updateCommittee(uint256 id, string memory description, bytes memory metadata, bytes memory teeQuorumNumbers) public { + function updateCommittee( + uint256 id, + string memory description, + bytes memory metadata, + bytes memory teeQuorumNumbers + ) public { vm.startBroadcast(); - IMultiProverServiceManager.Committee memory committee = IMultiProverServiceManager.Committee({ - id: id, - description: description, - metadata: metadata, - teeQuorumNumbers: teeQuorumNumbers - }); + IMultiProverServiceManager.Committee + memory committee = IMultiProverServiceManager.Committee({ + id: id, + description: description, + metadata: metadata, + teeQuorumNumbers: teeQuorumNumbers + }); serviceManager.updateCommittee(committee); vm.stopBroadcast(); } @@ -45,4 +83,82 @@ contract TEECommitteeManagement is Script { serviceManager.removeCommittee(committeeId); vm.stopBroadcast(); } -} \ No newline at end of file + + function addLineaQuorum() public { + vm.startBroadcast(); + // holesky + address[] memory strategies = new address[](11); + { + strategies[0] = 0xbeaC0eeEeeeeEEeEeEEEEeeEEeEeeeEeeEEBEaC0; // Virtual strategy for beacon chain ETH + strategies[1] = 0x7D704507b76571a51d9caE8AdDAbBFd0ba0e63d3; // stETH strategy + strategies[2] = 0x3A8fBdf9e77DFc25d09741f51d3E181b25d0c4E0; // rETH strategy + strategies[3] = 0x05037A81BD7B4C9E0F7B430f1F2A22c31a2FD943; // lsETH strategy + strategies[4] = 0x9281ff96637710Cd9A5CAcce9c6FAD8C9F54631c; // sfrxETH strategy + strategies[5] = 0x31B6F59e1627cEfC9fA174aD03859fC337666af7; // ETHx strategy + strategies[6] = 0x46281E3B7fDcACdBa44CADf069a94a588Fd4C6Ef; // osETH strategy + strategies[7] = 0x70EB4D3c164a6B4A5f908D4FBb5a9cAfFb66bAB6; // cbETH strategy + strategies[8] = 0xaccc5A86732BE85b5012e8614AF237801636F8e5; // mETH strategy + strategies[9] = 0x7673a47463F80c6a3553Db9E54c8cDcd5313d0ac; // ankrETH strategy + strategies[10] = 0x80528D6e9A2BAbFc766965E0E26d5aB08D9CFaF9; // WETH strategy + } + IStakeRegistry.StrategyParams[] + memory strategyParams 
= new IStakeRegistry.StrategyParams[](11); + { + for (uint i = 0; i < strategies.length; i++) { + strategyParams[i] = IStakeRegistry.StrategyParams({ + strategy: IStrategy(strategies[i]), + multiplier: 1 ether + }); + } + } + + uint96 minimumStakeForQuorum = 10000000000000000; + IRegistryCoordinator.OperatorSetParam + memory operatorSetParams = IRegistryCoordinator.OperatorSetParam( + uint32(100), + uint16(11000), + uint16(100) + ); + + string memory output = readJson(); + RegistryCoordinator registryCoordinator = RegistryCoordinator( + vm.parseJsonAddress(output, ".registryCoordinator") + ); + + registryCoordinator.createQuorum( + operatorSetParams, + minimumStakeForQuorum, + strategyParams + ); + vm.stopBroadcast(); + } + + function getOutputFilePath() private view returns (string memory) { + string memory env = vm.envString("ENV"); + return + string.concat( + vm.projectRoot(), + "/script/output/avs_deploy_", + env, + ".json" + ); + } + + function readJson() private returns (string memory) { + bytes32 remark = keccak256(abi.encodePacked("remark")); + string memory output = vm.readFile(getOutputFilePath()); + string[] memory keys = vm.parseJsonKeys(output, "."); + for (uint i = 0; i < keys.length; i++) { + if (keccak256(abi.encodePacked(keys[i])) == remark) { + continue; + } + string memory keyPath = string(abi.encodePacked(".", keys[i])); + vm.serializeAddress( + output, + keys[i], + vm.parseJsonAddress(output, keyPath) + ); + } + return output; + } +} diff --git a/contracts/script/TEELivenessManager.s.sol b/contracts/script/TEELivenessManager.s.sol index 9f2f178..dd2461e 100644 --- a/contracts/script/TEELivenessManager.s.sol +++ b/contracts/script/TEELivenessManager.s.sol @@ -14,6 +14,11 @@ contract TEELivenessManager is Script { liveness.changeAttestValiditySeconds(secs); } + function changeMaxBlockNumberDiff(uint256 blocks) public { + vm.startBroadcast(); + liveness.changeMaxBlockNumberDiff(blocks); + } + function sendAttestation(TEELivenessVerifier.ReportDataV2 calldata reportData, bytes calldata quote) public { // string memory quote = vm.readFile(path); vm.startBroadcast(); diff --git a/contracts/script/output/avs_deploy_holesky.json b/contracts/script/output/avs_deploy_holesky.json index 035ada5..3c2e3c8 100644 --- a/contracts/script/output/avs_deploy_holesky.json +++ b/contracts/script/output/avs_deploy_holesky.json @@ -2,7 +2,7 @@ "blsApkRegistry": "0x2b6C2584760eDbcEC42391862f97dBB872b5e2Eb", "indexRegistry": "0x158583f023ca440e79F199f037aa8b53b198F500", "multiProverServiceManager": "0x4665Af665df5703445645D243f0FD63eD3b9D132", - "object": "multi-prover avs contracts deployment output", + "remark": "multi-prover avs contracts deployment output", "operatorStateRetriever": "0xbfd43ac0a19c843e44491c3207ea13914818E214", "registryCoordinator": "0x62c715575cE3Ad7C5a43aA325b881c70564f2215", "stakeRegistry": "0x5C7BbAfA3d5A3Fa0b592cDCF4b7B52261FaA99A8", diff --git a/contracts/src/core/TEELivenessVerifier.sol b/contracts/src/core/TEELivenessVerifier.sol index 05e328a..54cf05d 100644 --- a/contracts/src/core/TEELivenessVerifier.sol +++ b/contracts/src/core/TEELivenessVerifier.sol @@ -32,26 +32,38 @@ contract TEELivenessVerifier is OwnableUpgradeable { // added at v2 uint256 public maxBlockNumberDiff; - constructor() { _disableInitializers(); } - function initialize(address _initialOwner, address _attestationAddr, uint256 _maxBlockNumberDiff, uint256 _attestValiditySeconds) public initializer { + function initialize( + address _initialOwner, + address _attestationAddr, + uint256
_maxBlockNumberDiff, + uint256 _attestValiditySeconds + ) public initializer { dcapAttestation = IAttestation(_attestationAddr); maxBlockNumberDiff = _maxBlockNumberDiff; attestValiditySeconds = _attestValiditySeconds; _transferOwnership(_initialOwner); } - function reinitialize(uint8 i, address _initialOwner, address _attestationAddr, uint256 _maxBlockNumberDiff, uint256 _attestValiditySeconds) public reinitializer(i) { + function reinitialize( + uint8 i, + address _initialOwner, + address _attestationAddr, + uint256 _maxBlockNumberDiff, + uint256 _attestValiditySeconds + ) public reinitializer(i) { dcapAttestation = IAttestation(_attestationAddr); maxBlockNumberDiff = _maxBlockNumberDiff; attestValiditySeconds = _attestValiditySeconds; _transferOwnership(_initialOwner); } - function changeMaxBlockNumberDiff(uint256 _maxBlockNumberDiff) public onlyOwner { + function changeMaxBlockNumberDiff( + uint256 _maxBlockNumberDiff + ) public onlyOwner { maxBlockNumberDiff = _maxBlockNumberDiff; } @@ -89,7 +101,9 @@ contract TEELivenessVerifier is OwnableUpgradeable { require(dataHash == reportDataHash, "report data hash mismatch"); Prover memory prover = Prover(_data.pubkey, block.timestamp); - attestedProvers[keccak256(abi.encode(_data.pubkey.x, _data.pubkey.y))] = prover; + attestedProvers[ + keccak256(abi.encode(_data.pubkey.x, _data.pubkey.y)) + ] = prover; attestedReports[reportHash] = true; } @@ -136,11 +150,15 @@ contract TEELivenessVerifier is OwnableUpgradeable { return (x, y); } - // this function will make sure the attestation report - function checkBlockNumber(uint256 blockNumber, bytes32 blockHash) private view { + // ensure the attestation report was generated within the last ${maxBlockNumberDiff} blocks + function checkBlockNumber( + uint256 blockNumber, + bytes32 blockHash + ) private view { + require(blockNumber < block.number, "invalid block number"); require( - blockNumber < block.number && block.number - blockNumber < maxBlockNumberDiff, - "invalid block number" + block.number - blockNumber < maxBlockNumberDiff, + "block number out-of-date" ); require(blockhash(blockNumber) == blockHash, "block number mismatch"); diff --git a/operator/config.go b/operator/config.go index f91c6a9..85cf36d 100644 --- a/operator/config.go +++ b/operator/config.go @@ -88,7 +88,7 @@ func ParseConfigContext(cfgPath string, ecdsaKey *ecdsa.PrivateKey) (*ConfigCont EthHttpUrl: cfg.EthRpcUrl, EthWsUrl: cfg.EthRpcUrl, RegistryCoordinatorAddr: cfg.RegistryCoordinatorAddress.String(), - OperatorStateRetrieverAddr: common.Address{}.String(), + OperatorStateRetrieverAddr: cfg.OperatorStateRetrieverAddress.String(), AvsName: avsName, PromMetricsIpPortAddress: cfg.EigenMetricsIpPortAddress, } @@ -132,7 +132,9 @@ type Config struct { AttestationLayerEcdsaKey string AttestationLayerRpcURL string - RegistryCoordinatorAddress common.Address + RegistryCoordinatorAddress common.Address + OperatorStateRetrieverAddress common.Address + TEELivenessVerifierAddress common.Address EigenMetricsIpPortAddress string NodeApiIpPortAddress string @@ -142,6 +144,14 @@ func (c *Config) InitFromEnv() { if c.NodeApiIpPortAddress == "" { c.NodeApiIpPortAddress = ":15692" } + + preset := utils.PresetConfigByRegistryCoordinatorAddress(c.RegistryCoordinatorAddress) + + if preset != nil { + if c.OperatorStateRetrieverAddress == utils.ZeroAddress { + c.OperatorStateRetrieverAddress = preset.OperatorStateRetrieverAddress + } + } } func (c *Config) check(env *string) { diff --git a/operator/operator.go b/operator/operator.go
index 83f589c..78b8d2b 100644 --- a/operator/operator.go +++ b/operator/operator.go @@ -9,10 +9,10 @@ import ( "math/big" "os" "strings" + "sync" "time" "github.com/Layr-Labs/eigensdk-go/nodeapi" - sdkTypes "github.com/Layr-Labs/eigensdk-go/types" "github.com/automata-network/multi-prover-avs/aggregator" "github.com/automata-network/multi-prover-avs/contracts/bindings" @@ -81,13 +81,8 @@ func NewOperator(path string, semVer string) (*Operator, error) { return nil, logex.NewErrorf("operator is not registered") } - quorumNames := map[sdkTypes.QuorumNum]string{ - 0: "Scroll SGX Quorum", - } - quorumNumbers := []byte{0} - operatorMetric := xmetric.NewOperatorCollector("avs", cfg.EigenClients.PrometheusRegistry) - metrics := NewMetrics(cfg.AvsName, cfg.EigenClients, logger, operatorAddress, cfg.Config.EigenMetricsIpPortAddress, quorumNames) + metrics := NewMetrics(cfg.AvsName, cfg.EigenClients, logger, operatorAddress, cfg.Config.EigenMetricsIpPortAddress, xtask.GetQuorumNames()) nodeApi := nodeapi.NewNodeApi(cfg.AvsName, semVer, cfg.Config.NodeApiIpPortAddress, logger) @@ -97,7 +92,6 @@ func NewOperator(path string, semVer string) (*Operator, error) { semVer: semVer, proverClient: proverClient, logger: logger, - quorumNumbers: quorumNumbers, aggregator: aggClient, operatorAddress: operatorAddress, metrics: metrics, @@ -138,15 +132,27 @@ func (o *Operator) Start(ctx context.Context) error { o.operatorAddress, o.cfg.BlsKey.GetPubKeyG1(), o.cfg.BlsKey.GetPubKeyG2(), - md.Version, md.WithContext, + md.Version, md.TaskWithContext, ) go o.metricExporterLoop(ctx) go o.metadataExport(ctx) - if err := o.subscribeTask(ctx, md.WithContext); err != nil { - return logex.Trace(err) + var wg sync.WaitGroup + for _, quorum := range o.quorumNumbers { + ty := xtask.NewTaskType(quorum) + if !ty.IsValid() { + logex.Errorf("unknown quorum: %v", quorum) + continue + } + wg.Add(1) + go func() { + defer wg.Done() + o.subscribeTask(ctx, ty, md.GetWithContext(ty)) + }() + } + wg.Wait() return nil } @@ -175,6 +181,8 @@ func (o *Operator) metadataExport(ctx context.Context) { proverUrlHash.String(), proverVersion, fmt.Sprint(proverWithContext), + fmt.Sprint(md.GetWithContext(xtask.ScrollTask)), + fmt.Sprint(md.GetWithContext(xtask.LineaTask)), ).Add(1) } @@ -185,15 +193,15 @@ func (o *Operator) metadataExport(ctx context.Context) { } } -func (o *Operator) subscribeTask(ctx context.Context, withContext bool) error { +func (o *Operator) subscribeTask(ctx context.Context, ty xtask.TaskType, withContext bool) { req := &aggregator.FetchTaskReq{ PrevTaskID: 0, - TaskType: xtask.ScrollTask, + TaskType: ty, MaxWaitSecs: 100, WithContext: withContext, } for { - logex.Infof("fetch task: %#v", req) + logex.Infof("fetch task[%v]: %#v", ty.Value(), req) resp, err := o.aggregator.FetchTask(ctx, req) if err != nil { time.Sleep(time.Second) @@ -207,8 +215,8 @@ func (o *Operator) subscribeTask(ctx context.Context, withContext bool) error { logex.Infof("accept new task: [%v] %v", resp.TaskType.Value(), resp.TaskID) req.PrevTaskID = resp.TaskID - o.operatorMetric.FetchTask.WithLabelValues(o.cfg.AvsName, xtask.ScrollTask.Value(), fmt.Sprint(req.WithContext)).Add(1) - o.operatorMetric.LatestTask.WithLabelValues(o.cfg.AvsName, xtask.ScrollTask.Value()).Set(float64(resp.TaskID)) + o.operatorMetric.FetchTask.WithLabelValues(o.cfg.AvsName, ty.Value(), fmt.Sprint(req.WithContext)).Add(1) + o.operatorMetric.LatestTask.WithLabelValues(o.cfg.AvsName, ty.Value()).Set(float64(resp.TaskID)) startProcessTask := time.Now() switch resp.TaskType { @@ 
-216,8 +224,12 @@ func (o *Operator) subscribeTask(ctx context.Context, withContext bool) error { if err := o.processScrollTask(ctx, resp); err != nil { logex.Error(err) } + case xtask.LineaTask: + if err := o.processLineaTask(ctx, resp); err != nil { + logex.Error(err) + } } - o.operatorMetric.ProcessTaskMs.WithLabelValues(o.cfg.AvsName, xtask.ScrollTask.Value()).Set(float64(time.Since(startProcessTask).Milliseconds())) + o.operatorMetric.ProcessTaskMs.WithLabelValues(o.cfg.AvsName, ty.Value()).Set(float64(time.Since(startProcessTask).Milliseconds())) } } @@ -248,7 +260,83 @@ func (o *Operator) metricExporterLoop(ctx context.Context) { } } +func (o *Operator) processLineaTask(ctx context.Context, resp *aggregator.FetchTaskResp) (err error) { + ty := xtask.LineaTask + var ext xtask.LineaTaskExt + if err := json.Unmarshal(resp.Ext, &ext); err != nil { + return logex.Trace(err) + } + var taskCtx *xtask.ScrollContext + if len(resp.Context) == 0 { + logex.Info("[linea] generating task context for:", ext.StartBlock.ToInt(), ext.EndBlock.ToInt()) + var skip bool + taskCtx, skip, err = o.proverClient.GenerateLineaContext(ctx, ext.StartBlock.ToInt().Int64(), ext.EndBlock.ToInt().Int64(), xtask.LineaTask) + if err != nil { + return logex.Trace(err) + } + if skip { + return nil + } + } else { + if err := json.Unmarshal(resp.Context, &taskCtx); err != nil { + return logex.Trace(err) + } + } + + genPoeStart := time.Now() + poe, skip, err := o.proverClient.ProveLinea(ctx, o.operatorAddress, &ext, taskCtx) + if err != nil { + return logex.Trace(err) + } + o.operatorMetric.GenPoeMs.WithLabelValues(o.cfg.AvsName, ty.Value()).Set(float64(time.Since(genPoeStart).Milliseconds())) + + logex.Pretty(poe) + if skip { + return nil + } + + md := &aggregator.Metadata{ + BatchId: poe.BatchId, + StartBlock: poe.StartBlock, + EndBlock: poe.EndBlock, + } + mdBytes, err := json.Marshal(md) + if err != nil { + return logex.Trace(err) + } + + stateHeader := &aggregator.StateHeader{ + Identifier: (*hexutil.Big)(big.NewInt(int64(ty))), + Metadata: mdBytes, + State: poe.Poe.Pack(), + QuorumNumbers: []byte{ty.GetQuorum()}, + QuorumThresholdPercentages: []byte{0}, + ReferenceBlockNumber: uint32(ext.ReferenceBlockNumber), + } + + digest, err := stateHeader.Digest() + if err != nil { + return logex.Trace(err) + } + sig := o.cfg.BlsKey.SignMessage(digest) + + o.operatorMetric.SubmitTask.WithLabelValues(o.cfg.AvsName, ty.Value()).Add(1) + submitTaskTime := time.Now() + // submit to aggregator + if err := o.aggregator.SubmitTask(ctx, &aggregator.TaskRequest{ + Task: stateHeader, + Signature: sig, + OperatorId: o.operatorId, + }); err != nil { + return logex.Trace(err) + } + o.operatorMetric.SubmitTaskMs.WithLabelValues(o.cfg.AvsName, ty.Value()).Set(float64(time.Since(submitTaskTime).Milliseconds())) + // logex.Info(poe) + return nil +} + func (o *Operator) processScrollTask(ctx context.Context, resp *aggregator.FetchTaskResp) (err error) { + ty := xtask.ScrollTask var ext xtask.ScrollTaskExt if err := json.Unmarshal(resp.Ext, &ext); err != nil { return logex.Trace(err) @@ -257,7 +345,7 @@ func (o *Operator) processScrollTask(ctx context.Context, resp *aggregator.Fetch if len(resp.Context) == 0 { logex.Info("[scroll] generating task context for:", ext.StartBlock.ToInt(), ext.EndBlock.ToInt()) var skip bool - taskCtx, skip, err = o.proverClient.GenerateContext(ctx, ext.StartBlock.ToInt().Int64(), ext.EndBlock.ToInt().Int64(), xtask.ScrollTask) + taskCtx, skip, err = o.proverClient.GenerateScrollContext(ctx, 
ext.StartBlock.ToInt().Int64(), ext.EndBlock.ToInt().Int64(), ty) if err != nil { return logex.Trace(err) } @@ -275,7 +363,7 @@ func (o *Operator) processScrollTask(ctx context.Context, resp *aggregator.Fetch if err != nil { return logex.Trace(err) } - o.operatorMetric.GenPoeMs.WithLabelValues(o.cfg.AvsName, xtask.ScrollTask.Value()).Set(float64(time.Since(genPoeStart).Milliseconds())) + o.operatorMetric.GenPoeMs.WithLabelValues(o.cfg.AvsName, ty.Value()).Set(float64(time.Since(genPoeStart).Milliseconds())) logex.Pretty(poe) if skip { @@ -293,10 +381,10 @@ func (o *Operator) processScrollTask(ctx context.Context, resp *aggregator.Fetch } stateHeader := &aggregator.StateHeader{ - Identifier: (*hexutil.Big)(big.NewInt(o.cfg.Config.Identifier)), + Identifier: (*hexutil.Big)(big.NewInt(int64(ty))), Metadata: mdBytes, State: poe.Poe.Pack(), - QuorumNumbers: o.quorumNumbers, + QuorumNumbers: []byte{ty.GetQuorum()}, QuorumThresholdPercentages: []byte{0}, ReferenceBlockNumber: uint32(ext.ReferenceBlockNumber), } @@ -307,7 +395,7 @@ func (o *Operator) processScrollTask(ctx context.Context, resp *aggregator.Fetch } sig := o.cfg.BlsKey.SignMessage(digest) - o.operatorMetric.SubmitTask.WithLabelValues(o.cfg.AvsName, xtask.ScrollTask.Value()).Add(1) + o.operatorMetric.SubmitTask.WithLabelValues(o.cfg.AvsName, ty.Value()).Add(1) submitTaskTime := time.Now() // submit to aggregator if err := o.aggregator.SubmitTask(ctx, &aggregator.TaskRequest{ @@ -317,7 +405,7 @@ func (o *Operator) processScrollTask(ctx context.Context, resp *aggregator.Fetch }); err != nil { return logex.Trace(err) } - o.operatorMetric.SubmitTaskMs.WithLabelValues(o.cfg.AvsName, xtask.ScrollTask.Value()).Set(float64(time.Since(submitTaskTime).Milliseconds())) + o.operatorMetric.SubmitTaskMs.WithLabelValues(o.cfg.AvsName, ty.Value()).Set(float64(time.Since(submitTaskTime).Milliseconds())) // logex.Info(poe) return nil } @@ -334,6 +422,14 @@ func (o *Operator) checkIsRegistered() error { if err != nil { return logex.Trace(err) } + quorumNumbers, _, err := o.cfg.EigenClients.AvsRegistryChainReader.GetOperatorsStakeInQuorumsOfOperatorAtCurrentBlock(&bind.CallOpts{}, o.operatorId) + if err != nil { + return logex.Trace(err) + } + o.quorumNumbers = make([]byte, len(quorumNumbers)) + for i, qn := range quorumNumbers { + o.quorumNumbers[i] = byte(qn) + } return nil } diff --git a/scripts/avs.sh b/scripts/avs.sh index bd9988f..724a3b7 100755 --- a/scripts/avs.sh +++ b/scripts/avs.sh @@ -8,4 +8,22 @@ function add_whitelist() { _script script/Whitelist.s.sol --sig 'add(address)' $1 } +function changeMaxBlockNumberDiff() { + TEE_LIVENESS=$(_get_key $TEE_DEPLOY .TEELivenessVerifierProxy) \ + DEPLOY_KEY_SUFFIX=DEPLOY_KEY \ + _script script/TEELivenessManager.s.sol --sig 'changeMaxBlockNumberDiff(uint256)' $1 +} + +function addLineaQuorum() { + MULTI_PROVER_SERVICE_MANAGER=$(_get_key $AVS_DEPLOY .multiProverServiceManager) \ + DEPLOY_KEY_SUFFIX=AVS_DEPLOY_KEY \ + _script script/TEECommitteeManagement.s.sol --sig 'addLineaQuorum()' +} + +function addLineaCommittee() { + MULTI_PROVER_SERVICE_MANAGER=$(_get_key $AVS_DEPLOY .multiProverServiceManager) \ + DEPLOY_KEY_SUFFIX=AVS_DEPLOY_KEY \ + _script script/TEECommitteeManagement.s.sol --sig 'addLineaCommittee()' +} + "$@" \ No newline at end of file diff --git a/scripts/env.sh b/scripts/env.sh index 966a8f5..489f1a0 100755 --- a/scripts/env.sh +++ b/scripts/env.sh @@ -11,7 +11,7 @@ function _script() { fi cd $WORKDIR # ETH_GAS_PRICE=0.1gwei - forge script "$@" -v $NOSEND --rpc-url $(_get_env RPC_URL)
--private-key $(_get_env $DEPLOY_KEY_SUFFIX) + ENV=$ENV forge script "$@" -v $NOSEND --rpc-url $(_get_env RPC_URL) --private-key $(_get_env $DEPLOY_KEY_SUFFIX) --revert-strings debug cd - } diff --git a/utils/log_trace.go b/utils/log_trace.go index f2a3fe2..7d3eb9e 100644 --- a/utils/log_trace.go +++ b/utils/log_trace.go @@ -29,6 +29,20 @@ type LogTracer struct { scanIntervalSecs int64 } +type KeyLogTracer struct{} + +func (c KeyLogTracer) Get(ctx context.Context) *LogTracer { + val := ctx.Value(c) + if val == nil { + return nil + } + return val.(*LogTracer) +} + +func (c KeyLogTracer) Save(ctx context.Context, client *LogTracer) context.Context { + return context.WithValue(ctx, c, client) +} + type KeyLogTracerSourceClient struct{} func (c KeyLogTracerSourceClient) Get(ctx context.Context) *ethclient.Client { @@ -87,6 +101,37 @@ func (l *LogTracer) saveOffset(off uint64) error { return nil } +func (l *LogTracer) LookBack(ctx context.Context, end int64) (*types.Log, error) { + for end > 0 { + select { + case <-ctx.Done(): + return nil, logex.Trace(ctx.Err()) + default: + filter := l.filter + start := end - int64(l.max) + if start < 0 { + start = 0 + } + filter.ToBlock = big.NewInt(int64(end - 1)) + filter.FromBlock = big.NewInt(int64(start)) + + logs, err := l.source.FilterLogs(ctx, filter) + if err != nil { + logex.Errorf("[lookback][%v][%v-%v] fetch logs fail: %v => %v, retry in 4secs..", l.id, start, end, filter, err) + l.sleepSecs(ctx, 4) + continue + } + + logex.Infof("[lookback][%v] finished scan blocks [%v, %v], logs: %v", l.id, start, end, len(logs)) + if len(logs) > 0 { + return &logs[len(logs)-1], nil + } + end = start + } + } + return nil, nil +} + func (l *LogTracer) Run(ctx context.Context) error { logex.Info("starting log-tracer:", l.id) start, err := l.handler.GetBlock() @@ -94,6 +139,7 @@ func (l *LogTracer) Run(ctx context.Context) error { return logex.Trace(err) } ctx = KeyLogTracerSourceClient{}.Save(ctx, l.source) + ctx = KeyLogTracer{}.Save(ctx, l) head, err := l.source.BlockNumber(ctx) if err != nil { diff --git a/utils/params.go b/utils/params.go new file mode 100644 index 0000000..c626a41 --- /dev/null +++ b/utils/params.go @@ -0,0 +1,38 @@ +package utils + +import "github.com/ethereum/go-ethereum/common" + +type PresetConfig struct { + RegistryCoordinatorAddress common.Address + OperatorStateRetrieverAddress common.Address + TEELivenessVerifierAddress common.Address + LineaProverURL string + ScrollProverURL string +} + +var PresetConfigs = []*PresetConfig{HoleskyTestnetPreset, MainnetPreset} + +var HoleskyTestnetPreset = &PresetConfig{ + RegistryCoordinatorAddress: common.HexToAddress("0x62c715575cE3Ad7C5a43aA325b881c70564f2215"), + OperatorStateRetrieverAddress: common.HexToAddress("0xbfd43ac0a19c843e44491c3207ea13914818E214"), + TEELivenessVerifierAddress: common.HexToAddress("0x2E8628F6000Ef85dea615af6Da4Fd6dF4fD149e6"), + LineaProverURL: "https://avs-prover-staging.ata.network", + ScrollProverURL: "https://avs-prover-staging.ata.network", +} + +var MainnetPreset = &PresetConfig{ + RegistryCoordinatorAddress: common.HexToAddress("0x414696E4F7f06273973E89bfD3499e8666D63Bd4"), + OperatorStateRetrieverAddress: common.HexToAddress("0x91246253d3Bff9Ae19065A90dC3AB6e09EefD2B6"), + TEELivenessVerifierAddress: common.HexToAddress("0x99886d5C39c0DF3B0EAB67FcBb4CA230EF373510"), + LineaProverURL: "https://avs-prover-mainnet1.ata.network:18232", + ScrollProverURL: "https://avs-prover-mainnet1.ata.network:18232", +} + +func
PresetConfigByRegistryCoordinatorAddress(registryCoordinatorAddress common.Address) *PresetConfig { + for _, preset := range PresetConfigs { + if preset.RegistryCoordinatorAddress == registryCoordinatorAddress { + return preset + } + } + return nil +} diff --git a/xmetric/operator_collector.go b/xmetric/operator_collector.go index 4de6d4a..c9f8f01 100644 --- a/xmetric/operator_collector.go +++ b/xmetric/operator_collector.go @@ -32,7 +32,17 @@ func NewOperatorCollector(app string, registry *prometheus.Registry) *OperatorCo Name: "metadata_counter", Help: "metadata", }, - []string{"avs_name", "operator_addr", "version", "attestation_addr", "prover_url_hash", "prover_version", "task_fetch_with_context"}, + []string{ + "avs_name", + "operator_addr", + "version", + "attestation_addr", + "prover_url_hash", + "prover_version", + "task_fetch_with_context", + "scroll_with_context", + "linea_with_context", + }, )), FetchTask: collect(&metrics, prometheus.NewCounterVec( prometheus.CounterOpts{ diff --git a/xtask/prover_client.go b/xtask/prover_client.go index 923f2d7..3c0f4a4 100644 --- a/xtask/prover_client.go +++ b/xtask/prover_client.go @@ -104,7 +104,7 @@ func (p *ProverClient) DaPutPob(ctx context.Context, taskCtx *ScrollContext) err return nil } -func (p *ProverClient) GenerateContext(ctx context.Context, startBlock, endBlock int64, ty TaskType) (*ScrollContext, bool, error) { +func (p *ProverClient) GenerateScrollContext(ctx context.Context, startBlock, endBlock int64, ty TaskType) (*ScrollContext, bool, error) { var result ScrollContext retryTime := 3 var err error @@ -118,6 +118,30 @@ retry: if retryTime == 0 { goto retry } if strings.Contains(err.Error(), "skip") { + logex.Errorf("skip error for generate context (%v, %v, %v): %v", startBlock, endBlock, ty.Value(), err) + return nil, true, nil + } + return nil, false, logex.Trace(err, "prover_genContext") + } + return &result, false, nil +} + +func (p *ProverClient) GenerateLineaContext(ctx context.Context, startBlock, endBlock int64, ty TaskType) (*ScrollContext, bool, error) { + var result ScrollContext + retryTime := 3 + var err error +retry: + if retryTime == 0 { + return nil, false, logex.Trace(err) + } + err = p.client.CallContext(ctx, &result, "prover_genContext", startBlock, endBlock, ty) + if err != nil { + if strings.Contains(err.Error(), "unexpected EOF") { + retryTime -= 1 + goto retry + } + if strings.Contains(err.Error(), "skip") { + logex.Errorf("skip error for generate context (%v, %v): %v", startBlock, endBlock, err) + return nil, true, nil + } + return nil, false, logex.Trace(err, "prover_genContext") @@ -126,8 +149,22 @@ retry: } type ProverMetadata struct { - WithContext bool `json:"with_context"` - Version string `json:"version"` + WithContext bool `json:"with_context"` + TaskWithContext map[uint64]bool `json:"task_with_context"` + Version string `json:"version"` +} + +func (md *ProverMetadata) GetWithContext(ty TaskType) bool { + withContext := true + if ty == ScrollTask { + withContext = md.WithContext + } + if md.TaskWithContext != nil { + if wc, ok := md.TaskWithContext[uint64(ty)]; ok { + withContext = wc + } + } + return withContext } func (p *ProverClient) Metadata(ctx context.Context) (*ProverMetadata, error) { @@ -139,9 +176,54 @@ } type ProveTaskParams struct { - Batch hexutil.Bytes `json:"batch"` - PobHash common.Hash `json:"pob_hash"` - From common.Address `json:"from"` + Batch hexutil.Bytes `json:"batch,omitempty"` + PobHash common.Hash `json:"pob_hash"` + From
common.Address `json:"from"` + TaskType uint64 `json:"task_type,omitempty"` + + Start uint64 `json:"start,omitempty"` + End uint64 `json:"end,omitempty"` + StartingStateRoot common.Hash `json:"starting_state_root,omitempty"` + FinalStateRoot common.Hash `json:"final_state_root,omitempty"` +} + +func (p *ProverClient) ProveLinea(ctx context.Context, from common.Address, ext *LineaTaskExt, taskCtx *ScrollContext) (*PoeResponse, bool, error) { + lockResult, err := p.DaTryLock(ctx, taskCtx.Hash) + if err != nil { + return nil, false, logex.Trace(err) + } + + if lockResult == "Locked" { + logex.Info("uploading pob") + if err := p.DaPutPob(ctx, taskCtx); err != nil { + return nil, false, logex.Trace(err) + } + } + + params := ProveTaskParams{ + Batch: nil, + PobHash: taskCtx.Hash, + From: from, + TaskType: uint64(LineaTask), + Start: ext.StartBlock.ToInt().Uint64(), + End: ext.EndBlock.ToInt().Uint64(), + StartingStateRoot: ext.PrevBatchFinalStateRoot, + FinalStateRoot: ext.FinalStateRoot, + } + for { + var result *PoeResponse + if err := p.client.CallContext(ctx, &result, "prover_proveTask", params); err != nil { + if strings.Contains(err.Error(), "skip") { + return nil, true, nil + } + return nil, false, logex.Trace(err, "getPoe") + } + if result.NotReady { + time.Sleep(10 * time.Second) + continue + } + return result, false, nil + } } func (p *ProverClient) GetPoeByPob(ctx context.Context, from common.Address, batchData []byte, taskCtx *ScrollContext) (*PoeResponse, bool, error) { diff --git a/xtask/scroll_task.go b/xtask/scroll_task.go index dc8e562..9108c7f 100644 --- a/xtask/scroll_task.go +++ b/xtask/scroll_task.go @@ -31,3 +31,19 @@ type ScrollTaskExt struct { CommitTx common.Hash `json:"commit_tx"` ReferenceBlockNumber uint64 `json:"reference_block_number"` } + +type LineaTaskExt struct { + StartBlock *hexutil.Big `json:"start_block"` + EndBlock *hexutil.Big `json:"end_block"` + CommitTx common.Hash `json:"commit_tx"` + PrevCommitTx common.Hash `json:"prev_commit_tx"` + PrevBatchFinalStateRoot common.Hash `json:"prev_batch_final_state_root"` + FinalStateRoot common.Hash `json:"final_state_root"` + ReferenceBlockNumber uint64 `json:"reference_block_number"` +} + +type LineaContext struct { + Hash common.Hash `json:"hash"` + Interning json.RawMessage `json:"interning"` + Pob json.RawMessage `json:"pob"` +} diff --git a/xtask/task_manager.go b/xtask/task_manager.go index 0b97f42..76d128f 100644 --- a/xtask/task_manager.go +++ b/xtask/task_manager.go @@ -21,6 +21,8 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/rpc" + + sdkTypes "github.com/Layr-Labs/eigensdk-go/types" ) type TaskType int @@ -28,18 +30,54 @@ const ( MinTaskType TaskType = 0 ScrollTask TaskType = 1 - MaxTaskType TaskType = 2 + LineaTask TaskType = 2 + MaxTaskType TaskType = 3 ) +func GetQuorumNames() map[sdkTypes.QuorumNum]string { + return map[sdkTypes.QuorumNum]string{ + 0: "Scroll SGX Quorum", + 1: "Linea SGX Quorum", + } +} + +func NewTaskType(quorum byte) TaskType { + switch quorum { + case 0: + return ScrollTask + case 1: + return LineaTask + default: + return MaxTaskType + } +} + +func (t TaskType) GetQuorum() byte { + switch t { + case ScrollTask: + return 0 + case LineaTask: + return 1 + default: + panic(fmt.Sprintf("unknown task type: %v", t)) + } +} + func (t TaskType) Value() string { switch t { case ScrollTask: return "scroll" + case LineaTask: + return "linea" default: return fmt.Sprint(int(t)) } } +func (t
TaskType) IsValid() bool { + return t > MinTaskType && t < MaxTaskType +} + var ( ErrInvalidTaskManager = logex.Define("invalid task type: %v") ) @@ -54,6 +92,7 @@ type TaskManagerConfig struct { Topics [][]common.Hash Addresses []common.Address // OffsetFile string + PresetStartBlock uint64 ScanIntervalSecs int64 } @@ -71,9 +110,10 @@ type TaskManager struct { collector *xmetric.AggregatorCollector referenceClient eth.Client - tasksMutex sync.Mutex - tasks map[TaskType]*TaskTuple - presetStartBlock uint64 + tasksMutex sync.Mutex + tasks map[TaskType]*TaskTuple + + lineaPrevLog *types.Log } type TaskTuple struct { @@ -93,14 +133,13 @@ func NewTaskManager(collector *xmetric.AggregatorCollector, sampling int64, refe tracers := make(map[TaskType]*utils.LogTracer) contexts := make(map[TaskType]*TaskContext) tm := &TaskManager{ - sampling: sampling, - sources: sources, - tracers: tracers, - contexts: contexts, - collector: collector, - referenceClient: referenceClient, - presetStartBlock: 0, //19976077, - tasks: make(map[TaskType]*TaskTuple, MaxTaskType), + sampling: sampling, + sources: sources, + tracers: tracers, + contexts: contexts, + collector: collector, + referenceClient: referenceClient, + tasks: make(map[TaskType]*TaskTuple, MaxTaskType), } for _, cfg := range tasks { @@ -138,23 +177,22 @@ func NewTaskManager(collector *xmetric.AggregatorCollector, sampling int64, refe } tracers[cfg.Identifier] = utils.NewLogTracer(source, &utils.LogTracerConfig{ - Id: fmt.Sprintf("aggregator-task-fetcher-%v", cfg.Identifier), + Id: fmt.Sprintf("aggregator-task-fetcher-%v", cfg.Identifier.Value()), Wait: 5, Max: 100, Topics: cfg.Topics, Addresses: cfg.Addresses, ScanIntervalSecs: cfg.ScanIntervalSecs, SkipOnError: true, - Handler: tm, + Handler: handlerWrapper(cfg.PresetStartBlock, tm), }) } return tm, nil } -func (t *TaskManager) OnNewLog(ctx context.Context, log *types.Log) error { +func (t *TaskManager) OnNewLog(ctx context.Context, id TaskType, log *types.Log) error { source := utils.KeyLogTracerSourceClient{}.Get(ctx) - id := ctx.Value(TaskManagerId{}).(TaskType) t.collector.NewTask.WithLabelValues(id.Value()).Add(1) switch id { @@ -162,6 +200,10 @@ if err := t.onScrollTask(ctx, source, log); err != nil { return logex.Trace(err) } + case LineaTask: + if err := t.onLineaTask(ctx, source, log); err != nil { + return logex.Trace(err) + } default: return nil } @@ -228,11 +270,78 @@ chs := taskTuple.Channels taskTuple.Channels = nil t.tasksMutex.Unlock() + logex.Infof("[%v] notify %v clients", taskInfo.Type.Value(), len(chs)) for _, ch := range chs { close(ch) } } +func (t *TaskManager) onLineaTask(ctx context.Context, _ *ethclient.Client, log *types.Log) error { + prover := ctx.Value(TaskManagerProverClient{}).(*ProverClient) + referenceBlockNumber, err := t.referenceClient.BlockNumber(ctx) + if err != nil { + return logex.Trace(err) + } + prevLog := t.lineaPrevLog + if prevLog == nil || prevLog.Topics[3] != log.Topics[2] { + tracer := utils.KeyLogTracer{}.Get(ctx) + prevLog, err = tracer.LookBack(ctx, int64(log.BlockNumber)) + if err != nil { + return logex.Trace(err) + } + if prevLog == nil { + return logex.NewErrorf("[linea] no prior batch log found before block %v", log.BlockNumber) + } + if prevLog.Topics[3] != log.Topics[2] { + return logex.NewErrorf("Batches are not sequential: prev[%v], current[%v]", prevLog.TxHash, log.TxHash) + } + } + + startBlock := new(big.Int).SetBytes(prevLog.Topics[1][:]).Int64() + 1 + endBlock := new(big.Int).SetBytes(log.Topics[1][:]).Int64() + batchId =
endBlock // we can't determine the batch id from the event, so use the end block number + + logex.Infof("generating task[linea] for #%v, refblk: %v", batchId, referenceBlockNumber) + + taskInfo := &TaskInfo{ + Type: LineaTask, + TaskID: batchId, + } + taskInfo.Ext, err = json.Marshal(LineaTaskExt{ + StartBlock: (*hexutil.Big)(big.NewInt(startBlock)), + EndBlock: (*hexutil.Big)(big.NewInt(endBlock)), + CommitTx: log.TxHash, + PrevCommitTx: prevLog.TxHash, + ReferenceBlockNumber: referenceBlockNumber - 1, + PrevBatchFinalStateRoot: log.Topics[2], + FinalStateRoot: log.Topics[3], + }) + if err != nil { + return logex.Trace(err) + } + + startGenerateContext := time.Now() + taskCtx, ignore, err := prover.GenerateLineaContext(ctx, startBlock, endBlock, taskInfo.Type) + if ignore { + return nil + } + if err != nil { + return logex.Trace(err, fmt.Sprintf("fetching context for linea batchId#%v", batchId)) + } + generateContextCost := time.Since(startGenerateContext).Truncate(time.Millisecond) + + taskInfo.Context, err = json.Marshal(taskCtx) + if err != nil { + return logex.Trace(err) + } + t.updateTask(*taskInfo) + logex.Infof("update task: [%v] %v, (generateContext:%v)", taskInfo.Type.Value(), taskInfo.TaskID, generateContextCost) + time.Sleep(10 * time.Second) + t.lineaPrevLog = log + + return nil +} + func (t *TaskManager) onScrollTask(ctx context.Context, source *ethclient.Client, log *types.Log) error { prover := ctx.Value(TaskManagerProverClient{}).(*ProverClient) referenceBlockNumber, err := t.referenceClient.BlockNumber(ctx) @@ -248,6 +354,8 @@ } } + logex.Infof("generating task[scroll] for #%v, refblk: %v", batchId, referenceBlockNumber) + taskInfo := &TaskInfo{ Type: ScrollTask, TaskID: batchId.Int64(), @@ -290,7 +398,7 @@ t.updateTask(*taskInfo) startGenerateContext := time.Now() - taskCtx, ignore, err := prover.GenerateContext(startBlock, endBlock, taskInfo.Type) + taskCtx, ignore, err := prover.GenerateScrollContext(startBlock, endBlock, taskInfo.Type) if ignore { return nil } @@ -312,12 +420,26 @@ return nil } -func (t *TaskManager) SaveBlock(uint64) error { +type LogHandlerWrapper struct { + presetBlock uint64 + t *TaskManager +} + +func handlerWrapper(presetBlock uint64, t *TaskManager) *LogHandlerWrapper { + return &LogHandlerWrapper{presetBlock, t} +} + +func (w *LogHandlerWrapper) SaveBlock(uint64) error { return nil } -func (t *TaskManager) GetBlock() (uint64, error) { - return t.presetStartBlock, nil +func (w *LogHandlerWrapper) GetBlock() (uint64, error) { + return w.presetBlock, nil +} + +func (w *LogHandlerWrapper) OnNewLog(ctx context.Context, log *types.Log) error { + id := ctx.Value(TaskManagerId{}).(TaskType) + return w.t.OnNewLog(ctx, id, log) } func (t *TaskManager) Run(ctx context.Context) error {
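// Illustrative sketch (an assumption, not part of this patch): with the quorum/task
// mapping above (quorum 0 -> ScrollTask "scroll", quorum 1 -> LineaTask "linea"), a
// Linea fetcher entry for the aggregator could be wired as:
//
//	&TaskManagerConfig{
//		Identifier:       LineaTask,
//		Topics:           topics,   // hypothetical placeholder: batch-commit event topics
//		Addresses:        addrs,    // hypothetical placeholder: rollup contract address
//		PresetStartBlock: startBlk, // hypothetical placeholder: handed to handlerWrapper, returned by GetBlock()
//		ScanIntervalSecs: 5,
//	}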