diff --git a/backend/api/api.go b/backend/api/api.go index a25185a17..3419c1a98 100644 --- a/backend/api/api.go +++ b/backend/api/api.go @@ -11,6 +11,7 @@ import ( "github.com/cloudwego/hertz/pkg/app/server" "github.com/cloudwego/hertz/pkg/app/server/binding" "github.com/cloudwego/hertz/pkg/app/server/render" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/processor" "github.com/coze-dev/coze-loop/backend/api/handler/coze/loop/apis" "github.com/coze-dev/coze-loop/backend/infra/ck" @@ -29,6 +30,7 @@ import ( "github.com/coze-dev/coze-loop/backend/loop_gen/coze/loop/data/lotag" "github.com/coze-dev/coze-loop/backend/loop_gen/coze/loop/evaluation/loeval_set" "github.com/coze-dev/coze-loop/backend/loop_gen/coze/loop/evaluation/loevaluator" + "github.com/coze-dev/coze-loop/backend/loop_gen/coze/loop/evaluation/loexpt" "github.com/coze-dev/coze-loop/backend/loop_gen/coze/loop/foundation/loauth" "github.com/coze-dev/coze-loop/backend/loop_gen/coze/loop/foundation/lofile" "github.com/coze-dev/coze-loop/backend/loop_gen/coze/loop/foundation/louser" @@ -115,6 +117,10 @@ func Init( lotag.NewLocalTagService(dataHandler.TagService), limiterFactory, lodataset.NewLocalDatasetService(dataHandler.IDatasetApplication), + cmdable, + loexpt.NewLocalExperimentService(evaluationHandler.IExperimentApplication), + processor.TaskProcessor{}, + 0, ) if err != nil { return nil, err diff --git a/backend/api/handler/coze/loop/apis/handler.go b/backend/api/handler/coze/loop/apis/handler.go index 8e493d9bd..3c5a828e4 100644 --- a/backend/api/handler/coze/loop/apis/handler.go +++ b/backend/api/handler/coze/loop/apis/handler.go @@ -13,10 +13,13 @@ import ( "github.com/cloudwego/kitex/client/callopt" "github.com/cloudwego/kitex/pkg/endpoint" "github.com/cloudwego/kitex/pkg/kerrors" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" + "github.com/coze-dev/coze-loop/backend/loop_gen/coze/loop/observability/lotask" "github.com/coze-dev/coze-loop/backend/infra/i18n" cachemw "github.com/coze-dev/coze-loop/backend/infra/middleware/ctxcache" logmw "github.com/coze-dev/coze-loop/backend/infra/middleware/logs" + "github.com/coze-dev/coze-loop/backend/infra/middleware/session" "github.com/coze-dev/coze-loop/backend/infra/middleware/validator" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/data/dataset" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/data/tag" @@ -194,20 +197,24 @@ type ObservabilityHandler struct { obapp.ITraceApplication obapp.ITraceIngestionApplication obapp.IObservabilityOpenAPIApplication + obapp.ITaskApplication } func NewObservabilityHandler( traceApp obapp.ITraceApplication, ingestApp obapp.ITraceIngestionApplication, openAPIApp obapp.IObservabilityOpenAPIApplication, + taskApp obapp.ITaskApplication, ) *ObservabilityHandler { h := &ObservabilityHandler{ ITraceApplication: traceApp, ITraceIngestionApplication: ingestApp, IObservabilityOpenAPIApplication: openAPIApp, + ITaskApplication: taskApp, } bindLocalCallClient(trace.TraceService(h), &observabilityClient, lotrace.NewLocalTraceService) bindLocalCallClient(traceopenapi.OpenAPIService(h), &observabilityOpenAPIClient, looptraceopenapi.NewLocalOpenAPIService) + bindLocalCallClient(task.TaskService(h), &observabilityTaskClient, lotask.NewLocalTaskService) return h } @@ -224,6 +231,7 @@ func defaultKiteXMiddlewares() []endpoint.Middleware { return []endpoint.Middleware{ logmw.LogTrafficMW, validator.KiteXValidatorMW, + session.NewRequestSessionMW(), 
 		cachemw.CtxCacheMW,
 	}
 }
diff --git a/backend/api/handler/coze/loop/apis/observability_task_service.go b/backend/api/handler/coze/loop/apis/observability_task_service.go
new file mode 100644
index 000000000..f275d014a
--- /dev/null
+++ b/backend/api/handler/coze/loop/apis/observability_task_service.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2025 coze-dev Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by hertz generator.
+
+package apis
+
+import (
+	"context"
+
+	"github.com/cloudwego/hertz/pkg/app"
+	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/apis/observabilitytaskservice"
+)
+
+var observabilityTaskClient observabilitytaskservice.Client
+
+// CheckTaskName .
+// @router /api/observability/v1/tasks/check_name [GET]
+func CheckTaskName(ctx context.Context, c *app.RequestContext) {
+	invokeAndRender(ctx, c, observabilityTaskClient.CheckTaskName)
+}
+
+// CreateTask .
+// @router /api/observability/v1/tasks [POST]
+func CreateTask(ctx context.Context, c *app.RequestContext) {
+	invokeAndRender(ctx, c, observabilityTaskClient.CreateTask)
+}
+
+// UpdateTask .
+// @router /api/observability/v1/tasks/:task_id [PUT]
+func UpdateTask(ctx context.Context, c *app.RequestContext) {
+	invokeAndRender(ctx, c, observabilityTaskClient.UpdateTask)
+}
+
+// ListTasks .
+// @router /api/observability/v1/tasks/list [POST]
+func ListTasks(ctx context.Context, c *app.RequestContext) {
+	invokeAndRender(ctx, c, observabilityTaskClient.ListTasks)
+}
+
+// GetTask .
+// @router /api/observability/v1/tasks/:task_id [GET]
+func GetTask(ctx context.Context, c *app.RequestContext) {
+	invokeAndRender(ctx, c, observabilityTaskClient.GetTask)
+
+}
diff --git a/backend/api/handler/coze/loop/apis/observability_trace_service.go b/backend/api/handler/coze/loop/apis/observability_trace_service.go
index 903bea52e..15ba67560 100644
--- a/backend/api/handler/coze/loop/apis/observability_trace_service.go
+++ b/backend/api/handler/coze/loop/apis/observability_trace_service.go
@@ -120,3 +120,21 @@ func PreviewExportTracesToDataset(ctx context.Context, c *app.RequestContext) {
 
 	c.JSON(consts.StatusOK, resp)
 }
+
+// ChangeEvaluatorScore .
+// @router /api/observability/v1/annotations/change_evaluator_score [POST]
+func ChangeEvaluatorScore(ctx context.Context, c *app.RequestContext) {
+	invokeAndRender(ctx, c, observabilityClient.ChangeEvaluatorScore)
+}
+
+// ListAnnotationEvaluators .
+// @router /api/observability/v1/annotations/list_annotation_evaluators [POST]
+func ListAnnotationEvaluators(ctx context.Context, c *app.RequestContext) {
+	invokeAndRender(ctx, c, observabilityClient.ListAnnotationEvaluators)
+}
+
+// ExtractSpanInfo .
+// @router /api/observability/v1/traces/extract_span_info [POST] +func ExtractSpanInfo(ctx context.Context, c *app.RequestContext) { + invokeAndRender(ctx, c, observabilityClient.ExtractSpanInfo) +} diff --git a/backend/api/handler/coze/loop/apis/wire.go b/backend/api/handler/coze/loop/apis/wire.go index bca7afb64..7872ab501 100644 --- a/backend/api/handler/coze/loop/apis/wire.go +++ b/backend/api/handler/coze/loop/apis/wire.go @@ -10,6 +10,8 @@ import ( "context" "github.com/cloudwego/kitex/pkg/endpoint" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/experimentservice" + task_processor "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/processor" "github.com/google/wire" "github.com/coze-dev/coze-loop/backend/infra/ck" @@ -92,6 +94,7 @@ var ( obapp.InitTraceApplication, obapp.InitTraceIngestionApplication, obapp.InitOpenAPIApplication, + obapp.InitTaskApplication, ) ) @@ -207,6 +210,10 @@ func InitObservabilityHandler( tagClient tagservice.Client, limiterFactory limiter.IRateLimiterFactory, datasetClient datasetservice.Client, + redis redis.Cmdable, + experimentClient experimentservice.Client, + taskProcessor task_processor.TaskProcessor, + aid int32, ) (*ObservabilityHandler, error) { wire.Build( observabilitySet, diff --git a/backend/api/handler/coze/loop/apis/wire_gen.go b/backend/api/handler/coze/loop/apis/wire_gen.go index 28bfd2879..8c08b6ecf 100644 --- a/backend/api/handler/coze/loop/apis/wire_gen.go +++ b/backend/api/handler/coze/loop/apis/wire_gen.go @@ -8,7 +8,6 @@ package apis import ( "context" - "github.com/cloudwego/kitex/pkg/endpoint" "github.com/coze-dev/coze-loop/backend/infra/ck" "github.com/coze-dev/coze-loop/backend/infra/db" @@ -25,6 +24,7 @@ import ( "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/data/tag/tagservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/evaluationsetservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/evaluatorservice" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/experimentservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/auth/authservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/file/fileservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/user/userservice" @@ -40,6 +40,7 @@ import ( "github.com/coze-dev/coze-loop/backend/modules/foundation/application" application3 "github.com/coze-dev/coze-loop/backend/modules/llm/application" application6 "github.com/coze-dev/coze-loop/backend/modules/observability/application" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/processor" application2 "github.com/coze-dev/coze-loop/backend/modules/prompt/application" "github.com/coze-dev/coze-loop/backend/pkg/conf" "github.com/google/wire" @@ -149,8 +150,8 @@ func InitDataHandler(ctx context.Context, idgen2 idgen.IIDGenerator, db2 db.Prov return dataHandler, nil } -func InitObservabilityHandler(ctx context.Context, db2 db.Provider, ckDb ck.Provider, meter metrics.Meter, mqFactory mq.IFactory, configFactory conf.IConfigLoaderFactory, idgen2 idgen.IIDGenerator, benefit2 benefit.IBenefitService, fileClient fileservice.Client, authCli authservice.Client, userClient userservice.Client, evalClient evaluatorservice.Client, evalSetClient evaluationsetservice.Client, tagClient tagservice.Client, limiterFactory limiter.IRateLimiterFactory, datasetClient datasetservice.Client) 
(*ObservabilityHandler, error) { - iTraceApplication, err := application6.InitTraceApplication(db2, ckDb, meter, mqFactory, configFactory, idgen2, fileClient, benefit2, authCli, userClient, evalClient, evalSetClient, tagClient, datasetClient) +func InitObservabilityHandler(ctx context.Context, db2 db.Provider, ckDb ck.Provider, meter metrics.Meter, mqFactory mq.IFactory, configFactory conf.IConfigLoaderFactory, idgen2 idgen.IIDGenerator, benefit2 benefit.IBenefitService, fileClient fileservice.Client, authCli authservice.Client, userClient userservice.Client, evalClient evaluatorservice.Client, evalSetClient evaluationsetservice.Client, tagClient tagservice.Client, limiterFactory limiter.IRateLimiterFactory, datasetClient datasetservice.Client, redis2 redis.Cmdable, experimentClient experimentservice.Client, taskProcessor processor.TaskProcessor, aid int32) (*ObservabilityHandler, error) { + iTraceApplication, err := application6.InitTraceApplication(db2, ckDb, redis2, meter, mqFactory, configFactory, idgen2, fileClient, benefit2, authCli, userClient, evalClient, evalSetClient, tagClient, datasetClient) if err != nil { return nil, err } @@ -158,11 +159,15 @@ func InitObservabilityHandler(ctx context.Context, db2 db.Provider, ckDb ck.Prov if err != nil { return nil, err } - iObservabilityOpenAPIApplication, err := application6.InitOpenAPIApplication(mqFactory, configFactory, fileClient, ckDb, benefit2, limiterFactory, authCli, meter) + iObservabilityOpenAPIApplication, err := application6.InitOpenAPIApplication(mqFactory, configFactory, fileClient, ckDb, benefit2, limiterFactory, authCli, meter, db2, redis2, idgen2, evalClient) + if err != nil { + return nil, err + } + iTaskApplication, err := application6.InitTaskApplication(db2, idgen2, configFactory, benefit2, ckDb, redis2, mqFactory, userClient, authCli, evalClient, evalSetClient, experimentClient, datasetClient, fileClient, taskProcessor, aid) if err != nil { return nil, err } - observabilityHandler := NewObservabilityHandler(iTraceApplication, iTraceIngestionApplication, iObservabilityOpenAPIApplication) + observabilityHandler := NewObservabilityHandler(iTraceApplication, iTraceIngestionApplication, iObservabilityOpenAPIApplication, iTaskApplication) return observabilityHandler, nil } @@ -185,6 +190,6 @@ var ( NewDataHandler, application5.InitDatasetApplication, application5.InitTagApplication, foundation.NewAuthRPCProvider, conf2.NewConfigerFactory, ) observabilitySet = wire.NewSet( - NewObservabilityHandler, application6.InitTraceApplication, application6.InitTraceIngestionApplication, application6.InitOpenAPIApplication, + NewObservabilityHandler, application6.InitTraceApplication, application6.InitTraceIngestionApplication, application6.InitOpenAPIApplication, application6.InitTaskApplication, ) ) diff --git a/backend/api/router/coze/loop/apis/coze.loop.apis.go b/backend/api/router/coze/loop/apis/coze.loop.apis.go index 96984dbb3..260200703 100644 --- a/backend/api/router/coze/loop/apis/coze.loop.apis.go +++ b/backend/api/router/coze/loop/apis/coze.loop.apis.go @@ -284,18 +284,36 @@ func Register(r *server.Hertz, handler *apis.APIHandler) { _annotations.DELETE("/:annotation_id", append(_deletemanualannotationMw(handler), apis.DeleteManualAnnotation)...) _annotations.PUT("/:annotation_id", append(_updatemanualannotationMw(handler), apis.UpdateManualAnnotation)...) _annotations.POST("/list", append(_listannotationsMw(handler), apis.ListAnnotations)...) + _v14.POST("/tasks", append(_tasksMw(handler), apis.CreateTask)...) 
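	// NOTE (editorial sketch, not part of this change): the _tasksMw/_checktasknameMw/... hooks used in
	// these registrations are generated as empty stubs in middleware.go ("// your code..." returning nil).
	// If per-route middleware is wanted, a filled-in hook might look roughly like the following, assuming
	// only the hertz app package and the project's logs helper; the body is illustrative, not prescribed
	// by this diff:
	//
	//	func _tasksMw(handler *apis.APIHandler) []app.HandlerFunc {
	//		return []app.HandlerFunc{
	//			func(ctx context.Context, c *app.RequestContext) {
	//				// runs before apis.CreateTask and the other handlers on this group
	//				logs.CtxDebug(ctx, "tasks route: %s %s", c.Method(), c.Path())
	//				c.Next(ctx)
	//			},
	//		}
	//	}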
+ _tasks := _v14.Group("/tasks", _tasksMw(handler)...) + _tasks.POST("/list", append(_listtasksMw(handler), apis.ListTasks)...) + _tasks.GET("/:task_id", append(_gettaskMw(handler), apis.GetTask)...) + _tasks.PUT("/:task_id", append(_updatetaskMw(handler), apis.UpdateTask)...) _v14.POST("/views", append(_viewsMw(handler), apis.CreateView)...) _views := _v14.Group("/views", _viewsMw(handler)...) _views.POST("/list", append(_listviewsMw(handler), apis.ListViews)...) _views.DELETE("/:view_id", append(_deleteviewMw(handler), apis.DeleteView)...) _views.PUT("/:view_id", append(_updateviewMw(handler), apis.UpdateView)...) + { + _annotation := _v14.Group("/annotation", _annotationMw(handler)...) + _annotation.GET("/list_evaluators", append(_listannotationevaluatorsMw(handler), apis.ListAnnotationEvaluators)...) + } { _spans := _v14.Group("/spans", _spansMw(handler)...) _spans.POST("/list", append(_listspansMw(handler), apis.ListSpans)...) } + { + _tasks0 := _v14.Group("/tasks", _tasks0Mw(handler)...) + _tasks0.POST("/check_name", append(_checktasknameMw(handler), apis.CheckTaskName)...) + } + { + _trace := _v14.Group("/trace", _traceMw(handler)...) + _trace.POST("/extract_span_info", append(_extractspaninfoMw(handler), apis.ExtractSpanInfo)...) + } { _traces := _v14.Group("/traces", _tracesMw(handler)...) _traces.POST("/batch_get_advance_info", append(_batchgettracesadvanceinfoMw(handler), apis.BatchGetTracesAdvanceInfo)...) + _traces.POST("/change_eval_score", append(_changeevaluatorscoreMw(handler), apis.ChangeEvaluatorScore)...) _traces.POST("/export_to_dataset", append(_exporttracestodatasetMw(handler), apis.ExportTracesToDataset)...) _traces.GET("/meta_info", append(_gettracesmetainfoMw(handler), apis.GetTracesMetaInfo)...) _traces.POST("/preview_export_to_dataset", append(_previewexporttracestodatasetMw(handler), apis.PreviewExportTracesToDataset)...) diff --git a/backend/api/router/coze/loop/apis/middleware.go b/backend/api/router/coze/loop/apis/middleware.go index 0b444cc5b..736a0d668 100644 --- a/backend/api/router/coze/loop/apis/middleware.go +++ b/backend/api/router/coze/loop/apis/middleware.go @@ -1271,17 +1271,62 @@ func _listtracesoapiMw(handler *apis.APIHandler) []app.HandlerFunc { return nil } -func _validateevaluatorMw(handler *apis.APIHandler) []app.HandlerFunc { +func _tasksMw(handler *apis.APIHandler) []app.HandlerFunc { // your code... return nil } -func _mockevaltargetoutputMw(handler *apis.APIHandler) []app.HandlerFunc { +func _createtaskMw(handler *apis.APIHandler) []app.HandlerFunc { // your code... return nil } -func _batchdebugevaluatorMw(handler *apis.APIHandler) []app.HandlerFunc { +func _listtasksMw(handler *apis.APIHandler) []app.HandlerFunc { + // your code... + return nil +} + +func _gettaskMw(handler *apis.APIHandler) []app.HandlerFunc { + // your code... + return nil +} + +func _updatetaskMw(handler *apis.APIHandler) []app.HandlerFunc { + // your code... + return nil +} + +func _annotationMw(handler *apis.APIHandler) []app.HandlerFunc { + // your code... + return nil +} + +func _listannotationevaluatorsMw(handler *apis.APIHandler) []app.HandlerFunc { + // your code... + return nil +} + +func _tasks0Mw(handler *apis.APIHandler) []app.HandlerFunc { + // your code... + return nil +} + +func _checktasknameMw(handler *apis.APIHandler) []app.HandlerFunc { + // your code... + return nil +} + +func _traceMw(handler *apis.APIHandler) []app.HandlerFunc { + // your code... 
+ return nil +} + +func _extractspaninfoMw(handler *apis.APIHandler) []app.HandlerFunc { + // your code... + return nil +} + +func _changeevaluatorscoreMw(handler *apis.APIHandler) []app.HandlerFunc { // your code... return nil } @@ -1330,8 +1375,3 @@ func _listexptinsightanalysisrecordMw(handler *apis.APIHandler) []app.HandlerFun // your code... return nil } - -func _annotations0Mw(handler *apis.APIHandler) []app.HandlerFunc { - // your code... - return nil -} diff --git a/backend/cmd/consumer.go b/backend/cmd/consumer.go index 46c19325b..2d2683caa 100644 --- a/backend/cmd/consumer.go +++ b/backend/cmd/consumer.go @@ -19,6 +19,7 @@ func MustInitConsumerWorkers( experimentApplication exptapp.IExperimentApplication, datasetApplication dataapp.IJobRunMsgHandler, obApplication obapp.IObservabilityOpenAPIApplication, + taskApplication obapp.ITaskApplication, ) []mq.IConsumerWorker { var res []mq.IConsumerWorker @@ -38,7 +39,7 @@ func MustInitConsumerWorkers( if err != nil { panic(err) } - workers, err = obconsumer.NewConsumerWorkers(loader, obApplication) + workers, err = obconsumer.NewConsumerWorkers(loader, obApplication, taskApplication) if err != nil { panic(err) } diff --git a/backend/cmd/main.go b/backend/cmd/main.go index f116799c5..4de5c066b 100644 --- a/backend/cmd/main.go +++ b/backend/cmd/main.go @@ -60,10 +60,8 @@ func main() { if err := initTracer(handler); err != nil { panic(err) } - - consumerWorkers := MustInitConsumerWorkers(c.cfgFactory, handler, handler, handler) - consumerRegistry := registry.NewConsumerRegistry(c.mqFactory).Register(consumerWorkers) - if err := consumerRegistry.StartAll(ctx); err != nil { + consumerWorkers := MustInitConsumerWorkers(c.cfgFactory, handler, handler, handler, handler) + if err := registry.NewConsumerRegistry(c.mqFactory).Register(consumerWorkers).StartAll(ctx); err != nil { panic(err) } diff --git a/backend/go.mod b/backend/go.mod index a8ccc2e13..027ccffba 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -14,6 +14,7 @@ require ( github.com/alitto/pond/v2 v2.3.4 github.com/apache/rocketmq-client-go/v2 v2.1.2 github.com/apache/thrift v0.19.0 + github.com/apaxa-go/helper v0.0.0-20180607175117-61d31b1c31c3 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 github.com/aws/aws-sdk-go v1.55.7 github.com/baidubce/bce-qianfan-sdk/go/qianfan v0.0.15 diff --git a/backend/go.sum b/backend/go.sum index 8c9d8f2c3..38b18db66 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -97,6 +97,8 @@ github.com/apache/rocketmq-client-go/v2 v2.1.2 h1:yt73olKe5N6894Dbm+ojRf/JPiP0cx github.com/apache/rocketmq-client-go/v2 v2.1.2/go.mod h1:6I6vgxHR3hzrvn+6n/4mrhS+UTulzK/X9LB2Vk1U5gE= github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apaxa-go/helper v0.0.0-20180607175117-61d31b1c31c3 h1:badF2fxl2BsWu2f01OYRU9cNnbrOSoOlayijH7r9ip4= +github.com/apaxa-go/helper v0.0.0-20180607175117-61d31b1c31c3/go.mod h1:42ENZ1Wd+1+1pgQWSQ/naAWaaP/uKw1zmnrMzBBNyTQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= diff --git a/backend/infra/middleware/session/request_session_mw.go b/backend/infra/middleware/session/request_session_mw.go new file mode 100755 index 
000000000..09cbac5cc
--- /dev/null
+++ b/backend/infra/middleware/session/request_session_mw.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2025 coze-dev Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package session
+
+import (
+	"context"
+	"reflect"
+	"strconv"
+
+	"github.com/cloudwego/kitex/pkg/endpoint"
+	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/common"
+	"github.com/coze-dev/coze-loop/backend/pkg/logs"
+)
+
+// NewRequestSessionMW creates a middleware that handles the session carried in the request.
+// The middleware uses reflection to check whether the request contains a Session field (of type *common.Session).
+// If present, it extracts the user info and injects it into the context via WithCtxUser.
+func NewRequestSessionMW() endpoint.Middleware {
+	return func(next endpoint.Endpoint) endpoint.Endpoint {
+		return func(ctx context.Context, req, resp any) error {
+			// Try to extract session info from the request
+			if session := extractSessionFromRequest(req); session != nil {
+				// Build a User object and inject it into the context
+				user := &User{
+					ID:    strconv.FormatInt(session.GetUserID(), 10), // i64 to string
+					AppID: session.GetAppID(),                         // i32
+					// Name and Email are left empty for now; fill them from other sources if needed
+				}
+				ctx = WithCtxUser(ctx, user)
+				logs.CtxDebug(ctx, "RequestSessionMW: injected user to context, userID=%s, appID=%d", user.ID, user.AppID)
+			}
+
+			return next(ctx, req, resp)
+		}
+	}
+}
+
+// extractSessionFromRequest uses reflection to extract the session field from the request.
+// Only session fields of type *common.Session are supported.
+func extractSessionFromRequest(req any) *common.Session {
+	if req == nil {
+		return nil
+	}
+
+	val := reflect.ValueOf(req)
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+
+	if val.Kind() != reflect.Struct {
+		return nil
+	}
+
+	// Look up the field named "Session"
+	sessionField := val.FieldByName("Session")
+	if !sessionField.IsValid() {
+		return nil
+	}
+
+	// Check whether the field is nil
+	if sessionField.IsNil() {
+		return nil
+	}
+
+	// Try a type assertion to *common.Session
+	if session, ok := sessionField.Interface().(*common.Session); ok {
+		return session
+	}
+
+	return nil
+}
\ No newline at end of file
diff --git a/backend/infra/mq/factory.go b/backend/infra/mq/factory.go
index c6866455f..9da66b0b7 100644
--- a/backend/infra/mq/factory.go
+++ b/backend/infra/mq/factory.go
@@ -41,6 +41,7 @@ type ConsumerConfig struct {
 	ConsumeGoroutineNums int
 	// Timeout for consumer one message
 	ConsumeTimeout time.Duration
+	EnablePPE *bool
 }
 
 type CompressionCodec int
diff --git a/backend/infra/redis/commands.go b/backend/infra/redis/commands.go
index 2bbfec53d..e37913301 100644
--- a/backend/infra/redis/commands.go
+++ b/backend/infra/redis/commands.go
@@ -26,6 +26,7 @@ type SimpleCmdable interface {
 	Eval(ctx context.Context, script string, keys []string, args ...any) *redis.Cmd
 	Expire(ctx context.Context, key string, expiration time.Duration) *redis.BoolCmd
 	Exists(ctx context.Context, keys ...string) *redis.IntCmd
+	Scan(ctx context.Context, cursor uint64, match string, count int64) *redis.ScanCmd
 }
 
 // StringCmdable copy methods we need in [redis.StringCmdable]
@@ -70,4 +71,4 @@ type Pipeliner interface {
 	Len() int
 	Exec(ctx context.Context) ([]redis.Cmder, error)
 	Discard()
-}
+}
\ No newline at end of file
diff --git a/backend/infra/redis/redis.go b/backend/infra/redis/redis.go
index 9dbf8ee41..1781779c7 100644
--- a/backend/infra/redis/redis.go
+++ b/backend/infra/redis/redis.go
@@ -146,7 +146,11 @@ func (p *provider) Expire(ctx context.Context, key string, expiration time.Durat
 	return p.cli.Expire(ctx, key, expiration)
 }
 
+func (p *provider) Scan(ctx context.Context, cursor uint64, match string, count int64) *redis.ScanCmd {
+	return p.cli.Scan(ctx, cursor, match, count)
+}
+
 func (p *provider) Pipeline() Pipeliner {
pipe := p.cli.Pipeline() return pipe -} +} \ No newline at end of file diff --git a/backend/kitex_gen/coze/loop/apis/coze.loop.apis.go b/backend/kitex_gen/coze/loop/apis/coze.loop.apis.go index 2f08a1133..f9329c2a8 100644 --- a/backend/kitex_gen/coze/loop/apis/coze.loop.apis.go +++ b/backend/kitex_gen/coze/loop/apis/coze.loop.apis.go @@ -19,6 +19,7 @@ import ( manage0 "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/llm/manage" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/llm/runtime" openapi1 "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/openapi" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/trace" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/prompt/debug" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/prompt/execute" @@ -390,6 +391,32 @@ func NewObservabilityOpenAPIServiceClient(c thrift.TClient) *ObservabilityOpenAP } } +type ObservabilityTaskService interface { + task.TaskService +} + +type ObservabilityTaskServiceClient struct { + *task.TaskServiceClient +} + +func NewObservabilityTaskServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ObservabilityTaskServiceClient { + return &ObservabilityTaskServiceClient{ + TaskServiceClient: task.NewTaskServiceClientFactory(t, f), + } +} + +func NewObservabilityTaskServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ObservabilityTaskServiceClient { + return &ObservabilityTaskServiceClient{ + TaskServiceClient: task.NewTaskServiceClientProtocol(t, iprot, oprot), + } +} + +func NewObservabilityTaskServiceClient(c thrift.TClient) *ObservabilityTaskServiceClient { + return &ObservabilityTaskServiceClient{ + TaskServiceClient: task.NewTaskServiceClient(c), + } +} + type FoundationAuthService interface { auth.AuthService } @@ -672,6 +699,15 @@ func NewObservabilityOpenAPIServiceProcessor(handler ObservabilityOpenAPIService return self } +type ObservabilityTaskServiceProcessor struct { + *task.TaskServiceProcessor +} + +func NewObservabilityTaskServiceProcessor(handler ObservabilityTaskService) *ObservabilityTaskServiceProcessor { + self := &ObservabilityTaskServiceProcessor{task.NewTaskServiceProcessor(handler)} + return self +} + type FoundationAuthServiceProcessor struct { *auth.AuthServiceProcessor } diff --git a/backend/kitex_gen/coze/loop/apis/k-coze.loop.apis.go b/backend/kitex_gen/coze/loop/apis/k-coze.loop.apis.go index 8718d250d..9144d3e55 100644 --- a/backend/kitex_gen/coze/loop/apis/k-coze.loop.apis.go +++ b/backend/kitex_gen/coze/loop/apis/k-coze.loop.apis.go @@ -25,6 +25,7 @@ import ( manage0 "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/llm/manage" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/llm/runtime" openapi1 "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/openapi" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/trace" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/prompt/debug" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/prompt/execute" @@ -48,6 +49,7 @@ var ( _ = manage0.KitexUnusedProtection _ = runtime.KitexUnusedProtection _ = openapi1.KitexUnusedProtection + _ = task.KitexUnusedProtection _ = trace.KitexUnusedProtection _ = debug.KitexUnusedProtection _ = execute.KitexUnusedProtection diff --git 
a/backend/kitex_gen/coze/loop/apis/observabilitytaskservice/client.go b/backend/kitex_gen/coze/loop/apis/observabilitytaskservice/client.go new file mode 100644 index 000000000..a8e4d3beb --- /dev/null +++ b/backend/kitex_gen/coze/loop/apis/observabilitytaskservice/client.go @@ -0,0 +1,73 @@ +// Code generated by Kitex v0.13.1. DO NOT EDIT. + +package observabilitytaskservice + +import ( + "context" + client "github.com/cloudwego/kitex/client" + callopt "github.com/cloudwego/kitex/client/callopt" + task "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" +) + +// Client is designed to provide IDL-compatible methods with call-option parameter for kitex framework. +type Client interface { + CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest, callOptions ...callopt.Option) (r *task.CheckTaskNameResponse, err error) + CreateTask(ctx context.Context, req *task.CreateTaskRequest, callOptions ...callopt.Option) (r *task.CreateTaskResponse, err error) + UpdateTask(ctx context.Context, req *task.UpdateTaskRequest, callOptions ...callopt.Option) (r *task.UpdateTaskResponse, err error) + ListTasks(ctx context.Context, req *task.ListTasksRequest, callOptions ...callopt.Option) (r *task.ListTasksResponse, err error) + GetTask(ctx context.Context, req *task.GetTaskRequest, callOptions ...callopt.Option) (r *task.GetTaskResponse, err error) +} + +// NewClient creates a client for the service defined in IDL. +func NewClient(destService string, opts ...client.Option) (Client, error) { + var options []client.Option + options = append(options, client.WithDestService(destService)) + + options = append(options, opts...) + + kc, err := client.NewClient(serviceInfo(), options...) + if err != nil { + return nil, err + } + return &kObservabilityTaskServiceClient{ + kClient: newServiceClient(kc), + }, nil +} + +// MustNewClient creates a client for the service defined in IDL. It panics if any error occurs. +func MustNewClient(destService string, opts ...client.Option) Client { + kc, err := NewClient(destService, opts...) 
+ if err != nil { + panic(err) + } + return kc +} + +type kObservabilityTaskServiceClient struct { + *kClient +} + +func (p *kObservabilityTaskServiceClient) CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest, callOptions ...callopt.Option) (r *task.CheckTaskNameResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.CheckTaskName(ctx, req) +} + +func (p *kObservabilityTaskServiceClient) CreateTask(ctx context.Context, req *task.CreateTaskRequest, callOptions ...callopt.Option) (r *task.CreateTaskResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.CreateTask(ctx, req) +} + +func (p *kObservabilityTaskServiceClient) UpdateTask(ctx context.Context, req *task.UpdateTaskRequest, callOptions ...callopt.Option) (r *task.UpdateTaskResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.UpdateTask(ctx, req) +} + +func (p *kObservabilityTaskServiceClient) ListTasks(ctx context.Context, req *task.ListTasksRequest, callOptions ...callopt.Option) (r *task.ListTasksResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ListTasks(ctx, req) +} + +func (p *kObservabilityTaskServiceClient) GetTask(ctx context.Context, req *task.GetTaskRequest, callOptions ...callopt.Option) (r *task.GetTaskResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.GetTask(ctx, req) +} diff --git a/backend/kitex_gen/coze/loop/apis/observabilitytaskservice/observabilitytaskservice.go b/backend/kitex_gen/coze/loop/apis/observabilitytaskservice/observabilitytaskservice.go new file mode 100644 index 000000000..9a7049d55 --- /dev/null +++ b/backend/kitex_gen/coze/loop/apis/observabilitytaskservice/observabilitytaskservice.go @@ -0,0 +1,240 @@ +// Code generated by Kitex v0.13.1. DO NOT EDIT. 
+ +package observabilitytaskservice + +import ( + "context" + "errors" + client "github.com/cloudwego/kitex/client" + kitex "github.com/cloudwego/kitex/pkg/serviceinfo" + apis "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/apis" + task "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" +) + +var errInvalidMessageType = errors.New("invalid message type for service method handler") + +var serviceMethods = map[string]kitex.MethodInfo{ + "CheckTaskName": kitex.NewMethodInfo( + checkTaskNameHandler, + newTaskServiceCheckTaskNameArgs, + newTaskServiceCheckTaskNameResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "CreateTask": kitex.NewMethodInfo( + createTaskHandler, + newTaskServiceCreateTaskArgs, + newTaskServiceCreateTaskResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "UpdateTask": kitex.NewMethodInfo( + updateTaskHandler, + newTaskServiceUpdateTaskArgs, + newTaskServiceUpdateTaskResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "ListTasks": kitex.NewMethodInfo( + listTasksHandler, + newTaskServiceListTasksArgs, + newTaskServiceListTasksResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "GetTask": kitex.NewMethodInfo( + getTaskHandler, + newTaskServiceGetTaskArgs, + newTaskServiceGetTaskResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), +} + +var ( + observabilityTaskServiceServiceInfo = NewServiceInfo() +) + +// for server +func serviceInfo() *kitex.ServiceInfo { + return observabilityTaskServiceServiceInfo +} + +// NewServiceInfo creates a new ServiceInfo +func NewServiceInfo() *kitex.ServiceInfo { + return newServiceInfo() +} + +func newServiceInfo() *kitex.ServiceInfo { + serviceName := "ObservabilityTaskService" + handlerType := (*apis.ObservabilityTaskService)(nil) + extra := map[string]interface{}{ + "PackageName": "apis", + } + svcInfo := &kitex.ServiceInfo{ + ServiceName: serviceName, + HandlerType: handlerType, + Methods: serviceMethods, + PayloadCodec: kitex.Thrift, + KiteXGenVersion: "v0.13.1", + Extra: extra, + } + return svcInfo +} + +func checkTaskNameHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceCheckTaskNameArgs) + realResult := result.(*task.TaskServiceCheckTaskNameResult) + success, err := handler.(task.TaskService).CheckTaskName(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceCheckTaskNameArgs() interface{} { + return task.NewTaskServiceCheckTaskNameArgs() +} + +func newTaskServiceCheckTaskNameResult() interface{} { + return task.NewTaskServiceCheckTaskNameResult() +} + +func createTaskHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceCreateTaskArgs) + realResult := result.(*task.TaskServiceCreateTaskResult) + success, err := handler.(task.TaskService).CreateTask(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceCreateTaskArgs() interface{} { + return task.NewTaskServiceCreateTaskArgs() +} + +func newTaskServiceCreateTaskResult() interface{} { + return task.NewTaskServiceCreateTaskResult() +} + +func updateTaskHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceUpdateTaskArgs) + realResult := result.(*task.TaskServiceUpdateTaskResult) + success, err := 
handler.(task.TaskService).UpdateTask(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceUpdateTaskArgs() interface{} { + return task.NewTaskServiceUpdateTaskArgs() +} + +func newTaskServiceUpdateTaskResult() interface{} { + return task.NewTaskServiceUpdateTaskResult() +} + +func listTasksHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceListTasksArgs) + realResult := result.(*task.TaskServiceListTasksResult) + success, err := handler.(task.TaskService).ListTasks(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceListTasksArgs() interface{} { + return task.NewTaskServiceListTasksArgs() +} + +func newTaskServiceListTasksResult() interface{} { + return task.NewTaskServiceListTasksResult() +} + +func getTaskHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceGetTaskArgs) + realResult := result.(*task.TaskServiceGetTaskResult) + success, err := handler.(task.TaskService).GetTask(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceGetTaskArgs() interface{} { + return task.NewTaskServiceGetTaskArgs() +} + +func newTaskServiceGetTaskResult() interface{} { + return task.NewTaskServiceGetTaskResult() +} + +type kClient struct { + c client.Client + sc client.Streaming +} + +func newServiceClient(c client.Client) *kClient { + return &kClient{ + c: c, + sc: c.(client.Streaming), + } +} + +func (p *kClient) CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest) (r *task.CheckTaskNameResponse, err error) { + var _args task.TaskServiceCheckTaskNameArgs + _args.Req = req + var _result task.TaskServiceCheckTaskNameResult + if err = p.c.Call(ctx, "CheckTaskName", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) CreateTask(ctx context.Context, req *task.CreateTaskRequest) (r *task.CreateTaskResponse, err error) { + var _args task.TaskServiceCreateTaskArgs + _args.Req = req + var _result task.TaskServiceCreateTaskResult + if err = p.c.Call(ctx, "CreateTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) UpdateTask(ctx context.Context, req *task.UpdateTaskRequest) (r *task.UpdateTaskResponse, err error) { + var _args task.TaskServiceUpdateTaskArgs + _args.Req = req + var _result task.TaskServiceUpdateTaskResult + if err = p.c.Call(ctx, "UpdateTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ListTasks(ctx context.Context, req *task.ListTasksRequest) (r *task.ListTasksResponse, err error) { + var _args task.TaskServiceListTasksArgs + _args.Req = req + var _result task.TaskServiceListTasksResult + if err = p.c.Call(ctx, "ListTasks", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) GetTask(ctx context.Context, req *task.GetTaskRequest) (r *task.GetTaskResponse, err error) { + var _args task.TaskServiceGetTaskArgs + _args.Req = req + var _result task.TaskServiceGetTaskResult + if err = p.c.Call(ctx, "GetTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} diff --git a/backend/kitex_gen/coze/loop/apis/observabilitytaskservice/server.go 
b/backend/kitex_gen/coze/loop/apis/observabilitytaskservice/server.go new file mode 100644 index 000000000..b2c0984f5 --- /dev/null +++ b/backend/kitex_gen/coze/loop/apis/observabilitytaskservice/server.go @@ -0,0 +1,25 @@ +// Code generated by Kitex v0.13.1. DO NOT EDIT. +package observabilitytaskservice + +import ( + server "github.com/cloudwego/kitex/server" + apis "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/apis" +) + +// NewServer creates a server.Server with the given handler and options. +func NewServer(handler apis.ObservabilityTaskService, opts ...server.Option) server.Server { + var options []server.Option + + options = append(options, opts...) + options = append(options, server.WithCompatibleMiddlewareForUnary()) + + svr := server.NewServer(options...) + if err := svr.RegisterService(serviceInfo(), handler); err != nil { + panic(err) + } + return svr +} + +func RegisterService(svr server.Server, handler apis.ObservabilityTaskService, opts ...server.RegisterOption) error { + return svr.RegisterService(serviceInfo(), handler, opts...) +} diff --git a/backend/kitex_gen/coze/loop/apis/observabilitytraceservice/client.go b/backend/kitex_gen/coze/loop/apis/observabilitytraceservice/client.go index f6cf531a1..9079d147d 100644 --- a/backend/kitex_gen/coze/loop/apis/observabilitytraceservice/client.go +++ b/backend/kitex_gen/coze/loop/apis/observabilitytraceservice/client.go @@ -26,6 +26,9 @@ type Client interface { ListAnnotations(ctx context.Context, req *trace.ListAnnotationsRequest, callOptions ...callopt.Option) (r *trace.ListAnnotationsResponse, err error) ExportTracesToDataset(ctx context.Context, req *trace.ExportTracesToDatasetRequest, callOptions ...callopt.Option) (r *trace.ExportTracesToDatasetResponse, err error) PreviewExportTracesToDataset(ctx context.Context, req *trace.PreviewExportTracesToDatasetRequest, callOptions ...callopt.Option) (r *trace.PreviewExportTracesToDatasetResponse, err error) + ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest, callOptions ...callopt.Option) (r *trace.ChangeEvaluatorScoreResponse, err error) + ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest, callOptions ...callopt.Option) (r *trace.ListAnnotationEvaluatorsResponse, err error) + ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest, callOptions ...callopt.Option) (r *trace.ExtractSpanInfoResponse, err error) } // NewClient creates a client for the service defined in IDL. 
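Editorial aside (not part of the patch): the hunk above adds ChangeEvaluatorScore, ListAnnotationEvaluators, and ExtractSpanInfo to the generated trace-service Client interface, and the hunk below adds the matching wrapper implementations. A minimal, non-authoritative sketch of calling one of the new methods through this generated client follows; it assumes the trace client exposes the same generated NewClient constructor shown for the task service, the destination name and address are placeholders, and the request is left empty because its fields are defined in the trace IDL rather than in this diff.

package main

import (
	"context"
	"log"

	"github.com/cloudwego/kitex/client"
	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/apis/observabilitytraceservice"
	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/trace"
)

func main() {
	// Placeholder destination and address; inside coze-loop the client is normally
	// bound in-process via bindLocalCallClient rather than dialed over the network.
	cli, err := observabilitytraceservice.NewClient(
		"coze.loop.observability.trace",
		client.WithHostPorts("127.0.0.1:8888"),
	)
	if err != nil {
		log.Fatal(err)
	}

	// ExtractSpanInfoRequest fields are not shown in this diff, so the request is left empty.
	resp, err := cli.ExtractSpanInfo(context.Background(), &trace.ExtractSpanInfoRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("extract span info resp: %+v", resp)
}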
@@ -131,3 +134,18 @@ func (p *kObservabilityTraceServiceClient) PreviewExportTracesToDataset(ctx cont ctx = client.NewCtxWithCallOptions(ctx, callOptions) return p.kClient.PreviewExportTracesToDataset(ctx, req) } + +func (p *kObservabilityTraceServiceClient) ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest, callOptions ...callopt.Option) (r *trace.ChangeEvaluatorScoreResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ChangeEvaluatorScore(ctx, req) +} + +func (p *kObservabilityTraceServiceClient) ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest, callOptions ...callopt.Option) (r *trace.ListAnnotationEvaluatorsResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ListAnnotationEvaluators(ctx, req) +} + +func (p *kObservabilityTraceServiceClient) ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest, callOptions ...callopt.Option) (r *trace.ExtractSpanInfoResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ExtractSpanInfo(ctx, req) +} diff --git a/backend/kitex_gen/coze/loop/apis/observabilitytraceservice/observabilitytraceservice.go b/backend/kitex_gen/coze/loop/apis/observabilitytraceservice/observabilitytraceservice.go index da42120e3..b2b236cd3 100644 --- a/backend/kitex_gen/coze/loop/apis/observabilitytraceservice/observabilitytraceservice.go +++ b/backend/kitex_gen/coze/loop/apis/observabilitytraceservice/observabilitytraceservice.go @@ -119,6 +119,27 @@ var serviceMethods = map[string]kitex.MethodInfo{ false, kitex.WithStreamingMode(kitex.StreamingNone), ), + "ChangeEvaluatorScore": kitex.NewMethodInfo( + changeEvaluatorScoreHandler, + newTraceServiceChangeEvaluatorScoreArgs, + newTraceServiceChangeEvaluatorScoreResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "ListAnnotationEvaluators": kitex.NewMethodInfo( + listAnnotationEvaluatorsHandler, + newTraceServiceListAnnotationEvaluatorsArgs, + newTraceServiceListAnnotationEvaluatorsResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "ExtractSpanInfo": kitex.NewMethodInfo( + extractSpanInfoHandler, + newTraceServiceExtractSpanInfoArgs, + newTraceServiceExtractSpanInfoResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), } var ( @@ -437,6 +458,63 @@ func newTraceServicePreviewExportTracesToDatasetResult() interface{} { return trace.NewTraceServicePreviewExportTracesToDatasetResult() } +func changeEvaluatorScoreHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*trace.TraceServiceChangeEvaluatorScoreArgs) + realResult := result.(*trace.TraceServiceChangeEvaluatorScoreResult) + success, err := handler.(trace.TraceService).ChangeEvaluatorScore(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTraceServiceChangeEvaluatorScoreArgs() interface{} { + return trace.NewTraceServiceChangeEvaluatorScoreArgs() +} + +func newTraceServiceChangeEvaluatorScoreResult() interface{} { + return trace.NewTraceServiceChangeEvaluatorScoreResult() +} + +func listAnnotationEvaluatorsHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*trace.TraceServiceListAnnotationEvaluatorsArgs) + realResult := result.(*trace.TraceServiceListAnnotationEvaluatorsResult) + success, err := handler.(trace.TraceService).ListAnnotationEvaluators(ctx, 
realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTraceServiceListAnnotationEvaluatorsArgs() interface{} { + return trace.NewTraceServiceListAnnotationEvaluatorsArgs() +} + +func newTraceServiceListAnnotationEvaluatorsResult() interface{} { + return trace.NewTraceServiceListAnnotationEvaluatorsResult() +} + +func extractSpanInfoHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*trace.TraceServiceExtractSpanInfoArgs) + realResult := result.(*trace.TraceServiceExtractSpanInfoResult) + success, err := handler.(trace.TraceService).ExtractSpanInfo(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTraceServiceExtractSpanInfoArgs() interface{} { + return trace.NewTraceServiceExtractSpanInfoArgs() +} + +func newTraceServiceExtractSpanInfoResult() interface{} { + return trace.NewTraceServiceExtractSpanInfoResult() +} + type kClient struct { c client.Client sc client.Streaming @@ -598,3 +676,33 @@ func (p *kClient) PreviewExportTracesToDataset(ctx context.Context, req *trace.P } return _result.GetSuccess(), nil } + +func (p *kClient) ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest) (r *trace.ChangeEvaluatorScoreResponse, err error) { + var _args trace.TraceServiceChangeEvaluatorScoreArgs + _args.Req = req + var _result trace.TraceServiceChangeEvaluatorScoreResult + if err = p.c.Call(ctx, "ChangeEvaluatorScore", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest) (r *trace.ListAnnotationEvaluatorsResponse, err error) { + var _args trace.TraceServiceListAnnotationEvaluatorsArgs + _args.Req = req + var _result trace.TraceServiceListAnnotationEvaluatorsResult + if err = p.c.Call(ctx, "ListAnnotationEvaluators", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest) (r *trace.ExtractSpanInfoResponse, err error) { + var _args trace.TraceServiceExtractSpanInfoArgs + _args.Req = req + var _result trace.TraceServiceExtractSpanInfoResult + if err = p.c.Call(ctx, "ExtractSpanInfo", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} diff --git a/backend/kitex_gen/coze/loop/data/domain/dataset/dataset.go b/backend/kitex_gen/coze/loop/data/domain/dataset/dataset.go index 46c1a8a88..233ebf02e 100644 --- a/backend/kitex_gen/coze/loop/data/domain/dataset/dataset.go +++ b/backend/kitex_gen/coze/loop/data/domain/dataset/dataset.go @@ -11498,3 +11498,417 @@ func (p *ItemErrorGroup) Field4DeepEqual(src []*ItemErrorDetail) bool { } return true } + +type CreateDatasetItemOutput struct { + // item 在 BatchCreateDatasetItemsReq.items 中的索引 + ItemIndex *int32 `thrift:"item_index,1,optional" frugal:"1,optional,i32" form:"item_index" json:"item_index,omitempty" query:"item_index"` + ItemKey *string `thrift:"item_key,2,optional" frugal:"2,optional,string" form:"item_key" json:"item_key,omitempty" query:"item_key"` + ItemID *int64 `thrift:"item_id,3,optional" frugal:"3,optional,i64" form:"item_id" json:"item_id,omitempty" query:"item_id"` + // 是否是新的 Item。提供 itemKey 时,如果 itemKey 在数据集中已存在数据,则不算做「新 Item」,该字段为 false。 + IsNewItem *bool `thrift:"is_new_item,4,optional" frugal:"4,optional,bool" form:"is_new_item" json:"is_new_item,omitempty" 
query:"is_new_item"` +} + +func NewCreateDatasetItemOutput() *CreateDatasetItemOutput { + return &CreateDatasetItemOutput{} +} + +func (p *CreateDatasetItemOutput) InitDefault() { +} + +var CreateDatasetItemOutput_ItemIndex_DEFAULT int32 + +func (p *CreateDatasetItemOutput) GetItemIndex() (v int32) { + if p == nil { + return + } + if !p.IsSetItemIndex() { + return CreateDatasetItemOutput_ItemIndex_DEFAULT + } + return *p.ItemIndex +} + +var CreateDatasetItemOutput_ItemKey_DEFAULT string + +func (p *CreateDatasetItemOutput) GetItemKey() (v string) { + if p == nil { + return + } + if !p.IsSetItemKey() { + return CreateDatasetItemOutput_ItemKey_DEFAULT + } + return *p.ItemKey +} + +var CreateDatasetItemOutput_ItemID_DEFAULT int64 + +func (p *CreateDatasetItemOutput) GetItemID() (v int64) { + if p == nil { + return + } + if !p.IsSetItemID() { + return CreateDatasetItemOutput_ItemID_DEFAULT + } + return *p.ItemID +} + +var CreateDatasetItemOutput_IsNewItem_DEFAULT bool + +func (p *CreateDatasetItemOutput) GetIsNewItem() (v bool) { + if p == nil { + return + } + if !p.IsSetIsNewItem() { + return CreateDatasetItemOutput_IsNewItem_DEFAULT + } + return *p.IsNewItem +} +func (p *CreateDatasetItemOutput) SetItemIndex(val *int32) { + p.ItemIndex = val +} +func (p *CreateDatasetItemOutput) SetItemKey(val *string) { + p.ItemKey = val +} +func (p *CreateDatasetItemOutput) SetItemID(val *int64) { + p.ItemID = val +} +func (p *CreateDatasetItemOutput) SetIsNewItem(val *bool) { + p.IsNewItem = val +} + +var fieldIDToName_CreateDatasetItemOutput = map[int16]string{ + 1: "item_index", + 2: "item_key", + 3: "item_id", + 4: "is_new_item", +} + +func (p *CreateDatasetItemOutput) IsSetItemIndex() bool { + return p.ItemIndex != nil +} + +func (p *CreateDatasetItemOutput) IsSetItemKey() bool { + return p.ItemKey != nil +} + +func (p *CreateDatasetItemOutput) IsSetItemID() bool { + return p.ItemID != nil +} + +func (p *CreateDatasetItemOutput) IsSetIsNewItem() bool { + return p.IsNewItem != nil +} + +func (p *CreateDatasetItemOutput) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), 
err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_CreateDatasetItemOutput[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *CreateDatasetItemOutput) ReadField1(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.ItemIndex = _field + return nil +} +func (p *CreateDatasetItemOutput) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.ItemKey = _field + return nil +} +func (p *CreateDatasetItemOutput) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.ItemID = _field + return nil +} +func (p *CreateDatasetItemOutput) ReadField4(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsNewItem = _field + return nil +} + +func (p *CreateDatasetItemOutput) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("CreateDatasetItemOutput"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *CreateDatasetItemOutput) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetItemIndex() { + if err = oprot.WriteFieldBegin("item_index", thrift.I32, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.ItemIndex); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *CreateDatasetItemOutput) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetItemKey() { + if err = oprot.WriteFieldBegin("item_key", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.ItemKey); err != nil { 
+ return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *CreateDatasetItemOutput) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetItemID() { + if err = oprot.WriteFieldBegin("item_id", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ItemID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *CreateDatasetItemOutput) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetIsNewItem() { + if err = oprot.WriteFieldBegin("is_new_item", thrift.BOOL, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsNewItem); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *CreateDatasetItemOutput) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CreateDatasetItemOutput(%+v)", *p) + +} + +func (p *CreateDatasetItemOutput) DeepEqual(ano *CreateDatasetItemOutput) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ItemIndex) { + return false + } + if !p.Field2DeepEqual(ano.ItemKey) { + return false + } + if !p.Field3DeepEqual(ano.ItemID) { + return false + } + if !p.Field4DeepEqual(ano.IsNewItem) { + return false + } + return true +} + +func (p *CreateDatasetItemOutput) Field1DeepEqual(src *int32) bool { + + if p.ItemIndex == src { + return true + } else if p.ItemIndex == nil || src == nil { + return false + } + if *p.ItemIndex != *src { + return false + } + return true +} +func (p *CreateDatasetItemOutput) Field2DeepEqual(src *string) bool { + + if p.ItemKey == src { + return true + } else if p.ItemKey == nil || src == nil { + return false + } + if strings.Compare(*p.ItemKey, *src) != 0 { + return false + } + return true +} +func (p *CreateDatasetItemOutput) Field3DeepEqual(src *int64) bool { + + if p.ItemID == src { + return true + } else if p.ItemID == nil || src == nil { + return false + } + if *p.ItemID != *src { + return false + } + return true +} +func (p *CreateDatasetItemOutput) Field4DeepEqual(src *bool) bool { + + if p.IsNewItem == src { + return true + } else if p.IsNewItem == nil || src == nil { + return false + } + if *p.IsNewItem != *src { + return false + } + return true +} diff --git a/backend/kitex_gen/coze/loop/data/domain/dataset/dataset_validator.go b/backend/kitex_gen/coze/loop/data/domain/dataset/dataset_validator.go index 7e7350794..9536da08e 100644 --- a/backend/kitex_gen/coze/loop/data/domain/dataset/dataset_validator.go +++ b/backend/kitex_gen/coze/loop/data/domain/dataset/dataset_validator.go @@ -151,3 +151,6 @@ func (p *ItemErrorDetail) IsValid() error { func (p *ItemErrorGroup) IsValid() error { return nil } +func (p *CreateDatasetItemOutput) 
IsValid() error { + return nil +} diff --git a/backend/kitex_gen/coze/loop/data/domain/dataset/k-dataset.go b/backend/kitex_gen/coze/loop/data/domain/dataset/k-dataset.go index c3e9733cc..4a6e73b6d 100644 --- a/backend/kitex_gen/coze/loop/data/domain/dataset/k-dataset.go +++ b/backend/kitex_gen/coze/loop/data/domain/dataset/k-dataset.go @@ -7617,3 +7617,282 @@ func (p *ItemErrorGroup) DeepCopy(s interface{}) error { return nil } + +func (p *CreateDatasetItemOutput) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_CreateDatasetItemOutput[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *CreateDatasetItemOutput) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *int32 + if v, l, err := thrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.ItemIndex = _field + return offset, nil +} + +func (p *CreateDatasetItemOutput) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field *string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.ItemKey = _field + return offset, nil +} + +func (p *CreateDatasetItemOutput) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.ItemID = _field + return offset, nil +} + +func (p *CreateDatasetItemOutput) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field *bool + if v, l, err := thrift.Binary.ReadBool(buf[offset:]); err != nil 
{ + return offset, err + } else { + offset += l + _field = &v + } + p.IsNewItem = _field + return offset, nil +} + +func (p *CreateDatasetItemOutput) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *CreateDatasetItemOutput) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *CreateDatasetItemOutput) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *CreateDatasetItemOutput) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetItemIndex() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I32, 1) + offset += thrift.Binary.WriteI32(buf[offset:], *p.ItemIndex) + } + return offset +} + +func (p *CreateDatasetItemOutput) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetItemKey() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.ItemKey) + } + return offset +} + +func (p *CreateDatasetItemOutput) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetItemID() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 3) + offset += thrift.Binary.WriteI64(buf[offset:], *p.ItemID) + } + return offset +} + +func (p *CreateDatasetItemOutput) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetIsNewItem() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.BOOL, 4) + offset += thrift.Binary.WriteBool(buf[offset:], *p.IsNewItem) + } + return offset +} + +func (p *CreateDatasetItemOutput) field1Length() int { + l := 0 + if p.IsSetItemIndex() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I32Length() + } + return l +} + +func (p *CreateDatasetItemOutput) field2Length() int { + l := 0 + if p.IsSetItemKey() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.ItemKey) + } + return l +} + +func (p *CreateDatasetItemOutput) field3Length() int { + l := 0 + if p.IsSetItemID() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *CreateDatasetItemOutput) field4Length() int { + l := 0 + if p.IsSetIsNewItem() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.BoolLength() + } + return l +} + +func (p *CreateDatasetItemOutput) DeepCopy(s interface{}) error { + src, ok := s.(*CreateDatasetItemOutput) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.ItemIndex != nil { + tmp := *src.ItemIndex + p.ItemIndex = &tmp + } + + if src.ItemKey != nil { + var tmp string + if *src.ItemKey != "" { + tmp = kutils.StringDeepCopy(*src.ItemKey) + } + p.ItemKey = &tmp + } + + if src.ItemID != nil { + tmp := *src.ItemID + p.ItemID = &tmp + } + + if src.IsNewItem != nil { + tmp := *src.IsNewItem + p.IsNewItem = &tmp + } + + return nil +} diff --git a/backend/kitex_gen/coze/loop/evaluation/eval_set/coze.loop.evaluation.eval_set.go b/backend/kitex_gen/coze/loop/evaluation/eval_set/coze.loop.evaluation.eval_set.go index f2da940ad..52fbb6bfc 100644 --- 
a/backend/kitex_gen/coze/loop/evaluation/eval_set/coze.loop.evaluation.eval_set.go +++ b/backend/kitex_gen/coze/loop/evaluation/eval_set/coze.loop.evaluation.eval_set.go @@ -20,7 +20,7 @@ type CreateEvaluationSetRequest struct { EvaluationSetSchema *eval_set.EvaluationSetSchema `thrift:"evaluation_set_schema,4,optional" frugal:"4,optional,eval_set.EvaluationSetSchema" form:"evaluation_set_schema" json:"evaluation_set_schema,omitempty" query:"evaluation_set_schema"` // 业务分类 BizCategory *eval_set.BizCategory `thrift:"biz_category,5,optional" frugal:"5,optional,string" form:"biz_category" json:"biz_category,omitempty" query:"biz_category"` - Session *common.Session `thrift:"session,200,optional" frugal:"200,optional,common.Session" form:"session" json:"session,omitempty" query:"session"` + Session *common.Session `thrift:"session,200,optional" frugal:"200,optional,common.Session" form:"-" json:"-" query:"-"` Base *base.Base `thrift:"Base,255,optional" frugal:"255,optional,base.Base" form:"Base" json:"Base,omitempty" query:"Base"` } @@ -8488,9 +8488,10 @@ func (p *BatchCreateEvaluationSetItemsRequest) Field255DeepEqual(src *base.Base) type BatchCreateEvaluationSetItemsResponse struct { // key: item 在 items 中的索引 - AddedItems map[int64]int64 `thrift:"added_items,1,optional" frugal:"1,optional,map" json:"added_items" form:"added_items" query:"added_items"` - Errors []*dataset.ItemErrorGroup `thrift:"errors,2,optional" frugal:"2,optional,list" form:"errors" json:"errors,omitempty" query:"errors"` - BaseResp *base.BaseResp `thrift:"BaseResp,255" frugal:"255,default,base.BaseResp" form:"BaseResp" json:"BaseResp" query:"BaseResp"` + AddedItems map[int64]int64 `thrift:"added_items,1,optional" frugal:"1,optional,map" json:"added_items" form:"added_items" query:"added_items"` + Errors []*dataset.ItemErrorGroup `thrift:"errors,2,optional" frugal:"2,optional,list" form:"errors" json:"errors,omitempty" query:"errors"` + ItemOutputs []*dataset.CreateDatasetItemOutput `thrift:"item_outputs,3,optional" frugal:"3,optional,list" form:"item_outputs" json:"item_outputs,omitempty" query:"item_outputs"` + BaseResp *base.BaseResp `thrift:"BaseResp,255" frugal:"255,default,base.BaseResp" form:"BaseResp" json:"BaseResp" query:"BaseResp"` } func NewBatchCreateEvaluationSetItemsResponse() *BatchCreateEvaluationSetItemsResponse { @@ -8524,6 +8525,18 @@ func (p *BatchCreateEvaluationSetItemsResponse) GetErrors() (v []*dataset.ItemEr return p.Errors } +var BatchCreateEvaluationSetItemsResponse_ItemOutputs_DEFAULT []*dataset.CreateDatasetItemOutput + +func (p *BatchCreateEvaluationSetItemsResponse) GetItemOutputs() (v []*dataset.CreateDatasetItemOutput) { + if p == nil { + return + } + if !p.IsSetItemOutputs() { + return BatchCreateEvaluationSetItemsResponse_ItemOutputs_DEFAULT + } + return p.ItemOutputs +} + var BatchCreateEvaluationSetItemsResponse_BaseResp_DEFAULT *base.BaseResp func (p *BatchCreateEvaluationSetItemsResponse) GetBaseResp() (v *base.BaseResp) { @@ -8541,6 +8554,9 @@ func (p *BatchCreateEvaluationSetItemsResponse) SetAddedItems(val map[int64]int6 func (p *BatchCreateEvaluationSetItemsResponse) SetErrors(val []*dataset.ItemErrorGroup) { p.Errors = val } +func (p *BatchCreateEvaluationSetItemsResponse) SetItemOutputs(val []*dataset.CreateDatasetItemOutput) { + p.ItemOutputs = val +} func (p *BatchCreateEvaluationSetItemsResponse) SetBaseResp(val *base.BaseResp) { p.BaseResp = val } @@ -8548,6 +8564,7 @@ func (p *BatchCreateEvaluationSetItemsResponse) SetBaseResp(val *base.BaseResp) var 
fieldIDToName_BatchCreateEvaluationSetItemsResponse = map[int16]string{ 1: "added_items", 2: "errors", + 3: "item_outputs", 255: "BaseResp", } @@ -8559,6 +8576,10 @@ func (p *BatchCreateEvaluationSetItemsResponse) IsSetErrors() bool { return p.Errors != nil } +func (p *BatchCreateEvaluationSetItemsResponse) IsSetItemOutputs() bool { + return p.ItemOutputs != nil +} + func (p *BatchCreateEvaluationSetItemsResponse) IsSetBaseResp() bool { return p.BaseResp != nil } @@ -8597,6 +8618,14 @@ func (p *BatchCreateEvaluationSetItemsResponse) Read(iprot thrift.TProtocol) (er } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } case 255: if fieldTypeId == thrift.STRUCT { if err = p.ReadField255(iprot); err != nil { @@ -8686,6 +8715,29 @@ func (p *BatchCreateEvaluationSetItemsResponse) ReadField2(iprot thrift.TProtoco p.Errors = _field return nil } +func (p *BatchCreateEvaluationSetItemsResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*dataset.CreateDatasetItemOutput, 0, size) + values := make([]dataset.CreateDatasetItemOutput, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ItemOutputs = _field + return nil +} func (p *BatchCreateEvaluationSetItemsResponse) ReadField255(iprot thrift.TProtocol) error { _field := base.NewBaseResp() if err := _field.Read(iprot); err != nil { @@ -8709,6 +8761,10 @@ func (p *BatchCreateEvaluationSetItemsResponse) Write(oprot thrift.TProtocol) (e fieldId = 2 goto WriteFieldError } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } if err = p.writeField255(oprot); err != nil { fieldId = 255 goto WriteFieldError @@ -8786,6 +8842,32 @@ WriteFieldBeginError: WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } +func (p *BatchCreateEvaluationSetItemsResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetItemOutputs() { + if err = oprot.WriteFieldBegin("item_outputs", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ItemOutputs)); err != nil { + return err + } + for _, v := range p.ItemOutputs { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} func (p *BatchCreateEvaluationSetItemsResponse) writeField255(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("BaseResp", thrift.STRUCT, 255); err != nil { goto WriteFieldBeginError @@ -8823,6 +8905,9 @@ func (p *BatchCreateEvaluationSetItemsResponse) DeepEqual(ano *BatchCreateEvalua if !p.Field2DeepEqual(ano.Errors) { return false } + if !p.Field3DeepEqual(ano.ItemOutputs) { + return false + } if !p.Field255DeepEqual(ano.BaseResp) { return false } @@ -8855,6 +8940,19 
@@ func (p *BatchCreateEvaluationSetItemsResponse) Field2DeepEqual(src []*dataset.I } return true } +func (p *BatchCreateEvaluationSetItemsResponse) Field3DeepEqual(src []*dataset.CreateDatasetItemOutput) bool { + + if len(p.ItemOutputs) != len(src) { + return false + } + for i, v := range p.ItemOutputs { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} func (p *BatchCreateEvaluationSetItemsResponse) Field255DeepEqual(src *base.BaseResp) bool { if !p.BaseResp.DeepEqual(src) { diff --git a/backend/kitex_gen/coze/loop/evaluation/eval_set/k-coze.loop.evaluation.eval_set.go b/backend/kitex_gen/coze/loop/evaluation/eval_set/k-coze.loop.evaluation.eval_set.go index 3ca04254c..e604829b2 100644 --- a/backend/kitex_gen/coze/loop/evaluation/eval_set/k-coze.loop.evaluation.eval_set.go +++ b/backend/kitex_gen/coze/loop/evaluation/eval_set/k-coze.loop.evaluation.eval_set.go @@ -6216,6 +6216,20 @@ func (p *BatchCreateEvaluationSetItemsResponse) FastRead(buf []byte) (int, error goto SkipFieldError } } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } case 255: if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField255(buf[offset:]) @@ -6305,6 +6319,31 @@ func (p *BatchCreateEvaluationSetItemsResponse) FastReadField2(buf []byte) (int, return offset, nil } +func (p *BatchCreateEvaluationSetItemsResponse) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*dataset.CreateDatasetItemOutput, 0, size) + values := make([]dataset.CreateDatasetItemOutput, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.ItemOutputs = _field + return offset, nil +} + func (p *BatchCreateEvaluationSetItemsResponse) FastReadField255(buf []byte) (int, error) { offset := 0 _field := base.NewBaseResp() @@ -6326,6 +6365,7 @@ func (p *BatchCreateEvaluationSetItemsResponse) FastWriteNocopy(buf []byte, w th if p != nil { offset += p.fastWriteField1(buf[offset:], w) offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) offset += p.fastWriteField255(buf[offset:], w) } offset += thrift.Binary.WriteFieldStop(buf[offset:]) @@ -6337,6 +6377,7 @@ func (p *BatchCreateEvaluationSetItemsResponse) BLength() int { if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() l += p.field255Length() } l += thrift.Binary.FieldStopLength() @@ -6376,6 +6417,22 @@ func (p *BatchCreateEvaluationSetItemsResponse) fastWriteField2(buf []byte, w th return offset } +func (p *BatchCreateEvaluationSetItemsResponse) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetItemOutputs() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 3) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.ItemOutputs { + length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + } + return offset +} + func (p *BatchCreateEvaluationSetItemsResponse) 
fastWriteField255(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) @@ -6407,6 +6464,19 @@ func (p *BatchCreateEvaluationSetItemsResponse) field2Length() int { return l } +func (p *BatchCreateEvaluationSetItemsResponse) field3Length() int { + l := 0 + if p.IsSetItemOutputs() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.ItemOutputs { + _ = v + l += v.BLength() + } + } + return l +} + func (p *BatchCreateEvaluationSetItemsResponse) field255Length() int { l := 0 l += thrift.Binary.FieldBeginLength() @@ -6448,6 +6518,21 @@ func (p *BatchCreateEvaluationSetItemsResponse) DeepCopy(s interface{}) error { } } + if src.ItemOutputs != nil { + p.ItemOutputs = make([]*dataset.CreateDatasetItemOutput, 0, len(src.ItemOutputs)) + for _, elem := range src.ItemOutputs { + var _elem *dataset.CreateDatasetItemOutput + if elem != nil { + _elem = &dataset.CreateDatasetItemOutput{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.ItemOutputs = append(p.ItemOutputs, _elem) + } + } + var _baseResp *base.BaseResp if src.BaseResp != nil { _baseResp = &base.BaseResp{} diff --git a/backend/kitex_gen/coze/loop/evaluation/expt/coze.loop.evaluation.expt.go b/backend/kitex_gen/coze/loop/evaluation/expt/coze.loop.evaluation.expt.go index 5b27810b7..ee7fe7866 100644 --- a/backend/kitex_gen/coze/loop/evaluation/expt/coze.loop.evaluation.expt.go +++ b/backend/kitex_gen/coze/loop/evaluation/expt/coze.loop.evaluation.expt.go @@ -13662,9 +13662,10 @@ func (p *InvokeExperimentRequest) Field255DeepEqual(src *base.Base) bool { type InvokeExperimentResponse struct { // key: item 在 items 中的索引 - AddedItems map[int64]int64 `thrift:"added_items,1,optional" frugal:"1,optional,map" form:"added_items" json:"added_items,omitempty" query:"added_items"` - Errors []*dataset.ItemErrorGroup `thrift:"errors,2,optional" frugal:"2,optional,list" form:"errors" json:"errors,omitempty" query:"errors"` - BaseResp *base.BaseResp `thrift:"BaseResp,255" frugal:"255,default,base.BaseResp" form:"BaseResp" json:"BaseResp" query:"BaseResp"` + AddedItems map[int64]int64 `thrift:"added_items,1,optional" frugal:"1,optional,map" form:"added_items" json:"added_items,omitempty" query:"added_items"` + Errors []*dataset.ItemErrorGroup `thrift:"errors,2,optional" frugal:"2,optional,list" form:"errors" json:"errors,omitempty" query:"errors"` + ItemOutputs []*dataset.CreateDatasetItemOutput `thrift:"item_outputs,3,optional" frugal:"3,optional,list" form:"item_outputs" json:"item_outputs,omitempty" query:"item_outputs"` + BaseResp *base.BaseResp `thrift:"BaseResp,255" frugal:"255,default,base.BaseResp" form:"BaseResp" json:"BaseResp" query:"BaseResp"` } func NewInvokeExperimentResponse() *InvokeExperimentResponse { @@ -13698,6 +13699,18 @@ func (p *InvokeExperimentResponse) GetErrors() (v []*dataset.ItemErrorGroup) { return p.Errors } +var InvokeExperimentResponse_ItemOutputs_DEFAULT []*dataset.CreateDatasetItemOutput + +func (p *InvokeExperimentResponse) GetItemOutputs() (v []*dataset.CreateDatasetItemOutput) { + if p == nil { + return + } + if !p.IsSetItemOutputs() { + return InvokeExperimentResponse_ItemOutputs_DEFAULT + } + return p.ItemOutputs +} + var InvokeExperimentResponse_BaseResp_DEFAULT *base.BaseResp func (p *InvokeExperimentResponse) GetBaseResp() (v *base.BaseResp) { @@ -13715,6 +13728,9 @@ func (p *InvokeExperimentResponse) SetAddedItems(val map[int64]int64) { func (p *InvokeExperimentResponse) 
SetErrors(val []*dataset.ItemErrorGroup) { p.Errors = val } +func (p *InvokeExperimentResponse) SetItemOutputs(val []*dataset.CreateDatasetItemOutput) { + p.ItemOutputs = val +} func (p *InvokeExperimentResponse) SetBaseResp(val *base.BaseResp) { p.BaseResp = val } @@ -13722,6 +13738,7 @@ func (p *InvokeExperimentResponse) SetBaseResp(val *base.BaseResp) { var fieldIDToName_InvokeExperimentResponse = map[int16]string{ 1: "added_items", 2: "errors", + 3: "item_outputs", 255: "BaseResp", } @@ -13733,6 +13750,10 @@ func (p *InvokeExperimentResponse) IsSetErrors() bool { return p.Errors != nil } +func (p *InvokeExperimentResponse) IsSetItemOutputs() bool { + return p.ItemOutputs != nil +} + func (p *InvokeExperimentResponse) IsSetBaseResp() bool { return p.BaseResp != nil } @@ -13771,6 +13792,14 @@ func (p *InvokeExperimentResponse) Read(iprot thrift.TProtocol) (err error) { } else if err = iprot.Skip(fieldTypeId); err != nil { goto SkipFieldError } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } case 255: if fieldTypeId == thrift.STRUCT { if err = p.ReadField255(iprot); err != nil { @@ -13860,6 +13889,29 @@ func (p *InvokeExperimentResponse) ReadField2(iprot thrift.TProtocol) error { p.Errors = _field return nil } +func (p *InvokeExperimentResponse) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*dataset.CreateDatasetItemOutput, 0, size) + values := make([]dataset.CreateDatasetItemOutput, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.ItemOutputs = _field + return nil +} func (p *InvokeExperimentResponse) ReadField255(iprot thrift.TProtocol) error { _field := base.NewBaseResp() if err := _field.Read(iprot); err != nil { @@ -13883,6 +13935,10 @@ func (p *InvokeExperimentResponse) Write(oprot thrift.TProtocol) (err error) { fieldId = 2 goto WriteFieldError } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } if err = p.writeField255(oprot); err != nil { fieldId = 255 goto WriteFieldError @@ -13960,6 +14016,32 @@ WriteFieldBeginError: WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) } +func (p *InvokeExperimentResponse) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetItemOutputs() { + if err = oprot.WriteFieldBegin("item_outputs", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ItemOutputs)); err != nil { + return err + } + for _, v := range p.ItemOutputs { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} func (p *InvokeExperimentResponse) writeField255(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("BaseResp", thrift.STRUCT, 255); err != nil { goto WriteFieldBeginError @@ -13997,6 +14079,9 @@ func 
(p *InvokeExperimentResponse) DeepEqual(ano *InvokeExperimentResponse) bool if !p.Field2DeepEqual(ano.Errors) { return false } + if !p.Field3DeepEqual(ano.ItemOutputs) { + return false + } if !p.Field255DeepEqual(ano.BaseResp) { return false } @@ -14029,6 +14114,19 @@ func (p *InvokeExperimentResponse) Field2DeepEqual(src []*dataset.ItemErrorGroup } return true } +func (p *InvokeExperimentResponse) Field3DeepEqual(src []*dataset.CreateDatasetItemOutput) bool { + + if len(p.ItemOutputs) != len(src) { + return false + } + for i, v := range p.ItemOutputs { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} func (p *InvokeExperimentResponse) Field255DeepEqual(src *base.BaseResp) bool { if !p.BaseResp.DeepEqual(src) { diff --git a/backend/kitex_gen/coze/loop/evaluation/expt/k-coze.loop.evaluation.expt.go b/backend/kitex_gen/coze/loop/evaluation/expt/k-coze.loop.evaluation.expt.go index 4dd14f136..2a61f427f 100644 --- a/backend/kitex_gen/coze/loop/evaluation/expt/k-coze.loop.evaluation.expt.go +++ b/backend/kitex_gen/coze/loop/evaluation/expt/k-coze.loop.evaluation.expt.go @@ -10090,6 +10090,20 @@ func (p *InvokeExperimentResponse) FastRead(buf []byte) (int, error) { goto SkipFieldError } } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } case 255: if fieldTypeId == thrift.STRUCT { l, err = p.FastReadField255(buf[offset:]) @@ -10179,6 +10193,31 @@ func (p *InvokeExperimentResponse) FastReadField2(buf []byte) (int, error) { return offset, nil } +func (p *InvokeExperimentResponse) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*dataset.CreateDatasetItemOutput, 0, size) + values := make([]dataset.CreateDatasetItemOutput, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.ItemOutputs = _field + return offset, nil +} + func (p *InvokeExperimentResponse) FastReadField255(buf []byte) (int, error) { offset := 0 _field := base.NewBaseResp() @@ -10200,6 +10239,7 @@ func (p *InvokeExperimentResponse) FastWriteNocopy(buf []byte, w thrift.NocopyWr if p != nil { offset += p.fastWriteField1(buf[offset:], w) offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) offset += p.fastWriteField255(buf[offset:], w) } offset += thrift.Binary.WriteFieldStop(buf[offset:]) @@ -10211,6 +10251,7 @@ func (p *InvokeExperimentResponse) BLength() int { if p != nil { l += p.field1Length() l += p.field2Length() + l += p.field3Length() l += p.field255Length() } l += thrift.Binary.FieldStopLength() @@ -10250,6 +10291,22 @@ func (p *InvokeExperimentResponse) fastWriteField2(buf []byte, w thrift.NocopyWr return offset } +func (p *InvokeExperimentResponse) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetItemOutputs() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 3) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.ItemOutputs { + length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + 
thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + } + return offset +} + func (p *InvokeExperimentResponse) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) @@ -10281,6 +10338,19 @@ func (p *InvokeExperimentResponse) field2Length() int { return l } +func (p *InvokeExperimentResponse) field3Length() int { + l := 0 + if p.IsSetItemOutputs() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.ItemOutputs { + _ = v + l += v.BLength() + } + } + return l +} + func (p *InvokeExperimentResponse) field255Length() int { l := 0 l += thrift.Binary.FieldBeginLength() @@ -10322,6 +10392,21 @@ func (p *InvokeExperimentResponse) DeepCopy(s interface{}) error { } } + if src.ItemOutputs != nil { + p.ItemOutputs = make([]*dataset.CreateDatasetItemOutput, 0, len(src.ItemOutputs)) + for _, elem := range src.ItemOutputs { + var _elem *dataset.CreateDatasetItemOutput + if elem != nil { + _elem = &dataset.CreateDatasetItemOutput{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.ItemOutputs = append(p.ItemOutputs, _elem) + } + } + var _baseResp *base.BaseResp if src.BaseResp != nil { _baseResp = &base.BaseResp{} diff --git a/backend/kitex_gen/coze/loop/foundation/domain/auth/auth.go b/backend/kitex_gen/coze/loop/foundation/domain/auth/auth.go index 801faa54f..c79d5c6e9 100644 --- a/backend/kitex_gen/coze/loop/foundation/domain/auth/auth.go +++ b/backend/kitex_gen/coze/loop/foundation/domain/auth/auth.go @@ -28,6 +28,8 @@ const ( AuthEntityTypeModel = "Model" AuthEntityTypeAnnotation = "Annotation" + + AuthEntityTypeTraceTask = "Task" ) // 主体类型 diff --git a/backend/kitex_gen/coze/loop/observability/coze.loop.observability.go b/backend/kitex_gen/coze/loop/observability/coze.loop.observability.go index b5816e39c..50831b31c 100644 --- a/backend/kitex_gen/coze/loop/observability/coze.loop.observability.go +++ b/backend/kitex_gen/coze/loop/observability/coze.loop.observability.go @@ -5,6 +5,7 @@ package observability import ( "github.com/apache/thrift/lib/go/thrift" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/openapi" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/trace" ) @@ -60,6 +61,32 @@ func NewObservabilityOpenAPIServiceClient(c thrift.TClient) *ObservabilityOpenAP } } +type ObservabilityTaskService interface { + task.TaskService +} + +type ObservabilityTaskServiceClient struct { + *task.TaskServiceClient +} + +func NewObservabilityTaskServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ObservabilityTaskServiceClient { + return &ObservabilityTaskServiceClient{ + TaskServiceClient: task.NewTaskServiceClientFactory(t, f), + } +} + +func NewObservabilityTaskServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ObservabilityTaskServiceClient { + return &ObservabilityTaskServiceClient{ + TaskServiceClient: task.NewTaskServiceClientProtocol(t, iprot, oprot), + } +} + +func NewObservabilityTaskServiceClient(c thrift.TClient) *ObservabilityTaskServiceClient { + return &ObservabilityTaskServiceClient{ + TaskServiceClient: task.NewTaskServiceClient(c), + } +} + type ObservabilityTraceServiceProcessor struct { *trace.TraceServiceProcessor } @@ -77,3 +104,12 @@ func NewObservabilityOpenAPIServiceProcessor(handler ObservabilityOpenAPIService 
self := &ObservabilityOpenAPIServiceProcessor{openapi.NewOpenAPIServiceProcessor(handler)} return self } + +type ObservabilityTaskServiceProcessor struct { + *task.TaskServiceProcessor +} + +func NewObservabilityTaskServiceProcessor(handler ObservabilityTaskService) *ObservabilityTaskServiceProcessor { + self := &ObservabilityTaskServiceProcessor{task.NewTaskServiceProcessor(handler)} + return self +} diff --git a/backend/kitex_gen/coze/loop/observability/domain/annotation/annotation.go b/backend/kitex_gen/coze/loop/observability/domain/annotation/annotation.go index d2433f40c..3f494f41b 100644 --- a/backend/kitex_gen/coze/loop/observability/domain/annotation/annotation.go +++ b/backend/kitex_gen/coze/loop/observability/domain/annotation/annotation.go @@ -2757,3 +2757,312 @@ func (p *Annotation) Field102DeepEqual(src *ManualFeedback) bool { } return true } + +type AnnotationEvaluator struct { + EvaluatorVersionID int64 `thrift:"evaluator_version_id,1,required" frugal:"1,required,i64" form:"evaluator_version_id,required" json:"evaluator_version_id,required" query:"evaluator_version_id,required"` + EvaluatorName string `thrift:"evaluator_name,2,required" frugal:"2,required,string" form:"evaluator_name,required" json:"evaluator_name,required" query:"evaluator_name,required"` + EvaluatorVersion string `thrift:"evaluator_version,3,required" frugal:"3,required,string" form:"evaluator_version,required" json:"evaluator_version,required" query:"evaluator_version,required"` +} + +func NewAnnotationEvaluator() *AnnotationEvaluator { + return &AnnotationEvaluator{} +} + +func (p *AnnotationEvaluator) InitDefault() { +} + +func (p *AnnotationEvaluator) GetEvaluatorVersionID() (v int64) { + if p != nil { + return p.EvaluatorVersionID + } + return +} + +func (p *AnnotationEvaluator) GetEvaluatorName() (v string) { + if p != nil { + return p.EvaluatorName + } + return +} + +func (p *AnnotationEvaluator) GetEvaluatorVersion() (v string) { + if p != nil { + return p.EvaluatorVersion + } + return +} +func (p *AnnotationEvaluator) SetEvaluatorVersionID(val int64) { + p.EvaluatorVersionID = val +} +func (p *AnnotationEvaluator) SetEvaluatorName(val string) { + p.EvaluatorName = val +} +func (p *AnnotationEvaluator) SetEvaluatorVersion(val string) { + p.EvaluatorVersion = val +} + +var fieldIDToName_AnnotationEvaluator = map[int16]string{ + 1: "evaluator_version_id", + 2: "evaluator_name", + 3: "evaluator_version", +} + +func (p *AnnotationEvaluator) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetEvaluatorVersionID bool = false + var issetEvaluatorName bool = false + var issetEvaluatorVersion bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetEvaluatorVersionID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetEvaluatorName = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetEvaluatorVersion 
= true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetEvaluatorVersionID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetEvaluatorName { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetEvaluatorVersion { + fieldId = 3 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_AnnotationEvaluator[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_AnnotationEvaluator[fieldId])) +} + +func (p *AnnotationEvaluator) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.EvaluatorVersionID = _field + return nil +} +func (p *AnnotationEvaluator) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.EvaluatorName = _field + return nil +} +func (p *AnnotationEvaluator) ReadField3(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.EvaluatorVersion = _field + return nil +} + +func (p *AnnotationEvaluator) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("AnnotationEvaluator"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *AnnotationEvaluator) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("evaluator_version_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.EvaluatorVersionID); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *AnnotationEvaluator) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("evaluator_name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.EvaluatorName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *AnnotationEvaluator) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("evaluator_version", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.EvaluatorVersion); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *AnnotationEvaluator) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AnnotationEvaluator(%+v)", *p) + +} + +func (p *AnnotationEvaluator) DeepEqual(ano *AnnotationEvaluator) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.EvaluatorVersionID) { + return false + } + if !p.Field2DeepEqual(ano.EvaluatorName) { + return false + } + if !p.Field3DeepEqual(ano.EvaluatorVersion) { + return false + } + return true +} + +func (p *AnnotationEvaluator) Field1DeepEqual(src int64) bool { + + if p.EvaluatorVersionID != src { + return false + } + return true +} +func (p *AnnotationEvaluator) Field2DeepEqual(src string) bool { + + if strings.Compare(p.EvaluatorName, src) != 0 { + return false + } + return true +} +func (p *AnnotationEvaluator) Field3DeepEqual(src string) bool { + + if strings.Compare(p.EvaluatorVersion, src) != 0 { + return false + } + return true +} diff --git a/backend/kitex_gen/coze/loop/observability/domain/annotation/annotation_validator.go b/backend/kitex_gen/coze/loop/observability/domain/annotation/annotation_validator.go index 25668286a..76b7d2d3b 100644 --- a/backend/kitex_gen/coze/loop/observability/domain/annotation/annotation_validator.go +++ b/backend/kitex_gen/coze/loop/observability/domain/annotation/annotation_validator.go @@ -96,3 +96,6 @@ func (p *Annotation) IsValid() error { } return nil } +func (p *AnnotationEvaluator) IsValid() error { + return nil +} diff --git a/backend/kitex_gen/coze/loop/observability/domain/annotation/k-annotation.go b/backend/kitex_gen/coze/loop/observability/domain/annotation/k-annotation.go index 4fa587ede..ba5cf5fb2 100644 --- a/backend/kitex_gen/coze/loop/observability/domain/annotation/k-annotation.go +++ b/backend/kitex_gen/coze/loop/observability/domain/annotation/k-annotation.go @@ -1991,3 +1991,231 @@ func (p *Annotation) DeepCopy(s interface{}) error { return nil } + +func (p *AnnotationEvaluator) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId 
thrift.TType + var fieldId int16 + var issetEvaluatorVersionID bool = false + var issetEvaluatorName bool = false + var issetEvaluatorVersion bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetEvaluatorVersionID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetEvaluatorName = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetEvaluatorVersion = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetEvaluatorVersionID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetEvaluatorName { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetEvaluatorVersion { + fieldId = 3 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_AnnotationEvaluator[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_AnnotationEvaluator[fieldId])) +} + +func (p *AnnotationEvaluator) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.EvaluatorVersionID = _field + return offset, nil +} + +func (p *AnnotationEvaluator) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.EvaluatorName = _field + return offset, nil +} + +func (p *AnnotationEvaluator) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.EvaluatorVersion = _field + return offset, nil +} + +func (p *AnnotationEvaluator) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *AnnotationEvaluator) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + } + offset 
+= thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *AnnotationEvaluator) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *AnnotationEvaluator) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.EvaluatorVersionID) + return offset +} + +func (p *AnnotationEvaluator) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.EvaluatorName) + return offset +} + +func (p *AnnotationEvaluator) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 3) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.EvaluatorVersion) + return offset +} + +func (p *AnnotationEvaluator) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *AnnotationEvaluator) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.EvaluatorName) + return l +} + +func (p *AnnotationEvaluator) field3Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.EvaluatorVersion) + return l +} + +func (p *AnnotationEvaluator) DeepCopy(s interface{}) error { + src, ok := s.(*AnnotationEvaluator) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.EvaluatorVersionID = src.EvaluatorVersionID + + if src.EvaluatorName != "" { + p.EvaluatorName = kutils.StringDeepCopy(src.EvaluatorName) + } + + if src.EvaluatorVersion != "" { + p.EvaluatorVersion = kutils.StringDeepCopy(src.EvaluatorVersion) + } + + return nil +} diff --git a/backend/kitex_gen/coze/loop/observability/domain/filter/filter.go b/backend/kitex_gen/coze/loop/observability/domain/filter/filter.go index c8aa7de67..231a1678c 100644 --- a/backend/kitex_gen/coze/loop/observability/domain/filter/filter.go +++ b/backend/kitex_gen/coze/loop/observability/domain/filter/filter.go @@ -5,6 +5,7 @@ package filter import ( "fmt" "github.com/apache/thrift/lib/go/thrift" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" "strings" ) @@ -44,6 +45,16 @@ const ( FieldTypeDouble = "double" FieldTypeBool = "bool" + + TaskFieldNameTaskStatus = "task_status" + + TaskFieldNameTaskName = "task_name" + + TaskFieldNameTaskType = "task_type" + + TaskFieldNameSampleRate = "sample_rate" + + TaskFieldNameCreatedBy = "created_by" ) type QueryType = string @@ -52,6 +63,8 @@ type QueryRelation = string type FieldType = string +type TaskFieldName = string + type FilterFields struct { QueryAndOr *QueryRelation `thrift:"query_and_or,1,optional" frugal:"1,optional,string" form:"query_and_or" json:"query_and_or,omitempty" query:"query_and_or"` FilterFields []*FilterField `thrift:"filter_fields,2,required" frugal:"2,required,list" form:"filter_fields,required" json:"filter_fields,required" query:"filter_fields,required"` @@ -1304,3 +1317,1188 @@ func (p *FieldOptions) Field4DeepEqual(src []string) bool { } return true } + +type TaskFilterFields struct { + QueryAndOr *QueryRelation `thrift:"query_and_or,1,optional" frugal:"1,optional,string" 
form:"query_and_or" json:"query_and_or,omitempty" query:"query_and_or"` + FilterFields []*TaskFilterField `thrift:"filter_fields,2,required" frugal:"2,required,list" form:"filter_fields,required" json:"filter_fields,required" query:"filter_fields,required"` +} + +func NewTaskFilterFields() *TaskFilterFields { + return &TaskFilterFields{} +} + +func (p *TaskFilterFields) InitDefault() { +} + +var TaskFilterFields_QueryAndOr_DEFAULT QueryRelation + +func (p *TaskFilterFields) GetQueryAndOr() (v QueryRelation) { + if p == nil { + return + } + if !p.IsSetQueryAndOr() { + return TaskFilterFields_QueryAndOr_DEFAULT + } + return *p.QueryAndOr +} + +func (p *TaskFilterFields) GetFilterFields() (v []*TaskFilterField) { + if p != nil { + return p.FilterFields + } + return +} +func (p *TaskFilterFields) SetQueryAndOr(val *QueryRelation) { + p.QueryAndOr = val +} +func (p *TaskFilterFields) SetFilterFields(val []*TaskFilterField) { + p.FilterFields = val +} + +var fieldIDToName_TaskFilterFields = map[int16]string{ + 1: "query_and_or", + 2: "filter_fields", +} + +func (p *TaskFilterFields) IsSetQueryAndOr() bool { + return p.QueryAndOr != nil +} + +func (p *TaskFilterFields) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetFilterFields bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetFilterFields = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetFilterFields { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskFilterFields[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TaskFilterFields[fieldId])) +} + +func (p *TaskFilterFields) ReadField1(iprot thrift.TProtocol) error { + + var _field *QueryRelation + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.QueryAndOr = _field + return nil +} +func (p *TaskFilterFields) ReadField2(iprot thrift.TProtocol) error { + _, size, err := 
iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*TaskFilterField, 0, size) + values := make([]TaskFilterField, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FilterFields = _field + return nil +} + +func (p *TaskFilterFields) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TaskFilterFields"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskFilterFields) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryAndOr() { + if err = oprot.WriteFieldBegin("query_and_or", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.QueryAndOr); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *TaskFilterFields) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("filter_fields", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.FilterFields)); err != nil { + return err + } + for _, v := range p.FilterFields { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TaskFilterFields) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskFilterFields(%+v)", *p) + +} + +func (p *TaskFilterFields) DeepEqual(ano *TaskFilterFields) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.QueryAndOr) { + return false + } + if !p.Field2DeepEqual(ano.FilterFields) { + return false + } + return true +} + +func (p *TaskFilterFields) Field1DeepEqual(src *QueryRelation) bool { + + if p.QueryAndOr == src { + return true + } else if p.QueryAndOr == nil || src == nil { + return false + } + if strings.Compare(*p.QueryAndOr, *src) != 0 { + return false + } + return true +} +func (p *TaskFilterFields) Field2DeepEqual(src []*TaskFilterField) bool { + + if 
len(p.FilterFields) != len(src) { + return false + } + for i, v := range p.FilterFields { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type TaskFilterField struct { + FieldName *TaskFieldName `thrift:"field_name,1,optional" frugal:"1,optional,string" form:"field_name" json:"field_name,omitempty" query:"field_name"` + FieldType *FieldType `thrift:"field_type,2,optional" frugal:"2,optional,string" form:"field_type" json:"field_type,omitempty" query:"field_type"` + Values []string `thrift:"values,3,optional" frugal:"3,optional,list" form:"values" json:"values,omitempty" query:"values"` + QueryType *QueryType `thrift:"query_type,4,optional" frugal:"4,optional,string" form:"query_type" json:"query_type,omitempty" query:"query_type"` + QueryAndOr *QueryRelation `thrift:"query_and_or,5,optional" frugal:"5,optional,string" form:"query_and_or" json:"query_and_or,omitempty" query:"query_and_or"` + SubFilter *TaskFilterField `thrift:"sub_filter,6,optional" frugal:"6,optional,TaskFilterField" form:"sub_filter" json:"sub_filter,omitempty" query:"sub_filter"` +} + +func NewTaskFilterField() *TaskFilterField { + return &TaskFilterField{} +} + +func (p *TaskFilterField) InitDefault() { +} + +var TaskFilterField_FieldName_DEFAULT TaskFieldName + +func (p *TaskFilterField) GetFieldName() (v TaskFieldName) { + if p == nil { + return + } + if !p.IsSetFieldName() { + return TaskFilterField_FieldName_DEFAULT + } + return *p.FieldName +} + +var TaskFilterField_FieldType_DEFAULT FieldType + +func (p *TaskFilterField) GetFieldType() (v FieldType) { + if p == nil { + return + } + if !p.IsSetFieldType() { + return TaskFilterField_FieldType_DEFAULT + } + return *p.FieldType +} + +var TaskFilterField_Values_DEFAULT []string + +func (p *TaskFilterField) GetValues() (v []string) { + if p == nil { + return + } + if !p.IsSetValues() { + return TaskFilterField_Values_DEFAULT + } + return p.Values +} + +var TaskFilterField_QueryType_DEFAULT QueryType + +func (p *TaskFilterField) GetQueryType() (v QueryType) { + if p == nil { + return + } + if !p.IsSetQueryType() { + return TaskFilterField_QueryType_DEFAULT + } + return *p.QueryType +} + +var TaskFilterField_QueryAndOr_DEFAULT QueryRelation + +func (p *TaskFilterField) GetQueryAndOr() (v QueryRelation) { + if p == nil { + return + } + if !p.IsSetQueryAndOr() { + return TaskFilterField_QueryAndOr_DEFAULT + } + return *p.QueryAndOr +} + +var TaskFilterField_SubFilter_DEFAULT *TaskFilterField + +func (p *TaskFilterField) GetSubFilter() (v *TaskFilterField) { + if p == nil { + return + } + if !p.IsSetSubFilter() { + return TaskFilterField_SubFilter_DEFAULT + } + return p.SubFilter +} +func (p *TaskFilterField) SetFieldName(val *TaskFieldName) { + p.FieldName = val +} +func (p *TaskFilterField) SetFieldType(val *FieldType) { + p.FieldType = val +} +func (p *TaskFilterField) SetValues(val []string) { + p.Values = val +} +func (p *TaskFilterField) SetQueryType(val *QueryType) { + p.QueryType = val +} +func (p *TaskFilterField) SetQueryAndOr(val *QueryRelation) { + p.QueryAndOr = val +} +func (p *TaskFilterField) SetSubFilter(val *TaskFilterField) { + p.SubFilter = val +} + +var fieldIDToName_TaskFilterField = map[int16]string{ + 1: "field_name", + 2: "field_type", + 3: "values", + 4: "query_type", + 5: "query_and_or", + 6: "sub_filter", +} + +func (p *TaskFilterField) IsSetFieldName() bool { + return p.FieldName != nil +} + +func (p *TaskFilterField) IsSetFieldType() bool { + return p.FieldType != nil +} + +func (p *TaskFilterField) 
IsSetValues() bool { + return p.Values != nil +} + +func (p *TaskFilterField) IsSetQueryType() bool { + return p.QueryType != nil +} + +func (p *TaskFilterField) IsSetQueryAndOr() bool { + return p.QueryAndOr != nil +} + +func (p *TaskFilterField) IsSetSubFilter() bool { + return p.SubFilter != nil +} + +func (p *TaskFilterField) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskFilterField[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskFilterField) ReadField1(iprot thrift.TProtocol) error { + + var _field *TaskFieldName + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.FieldName = _field + return nil +} +func (p *TaskFilterField) ReadField2(iprot thrift.TProtocol) error { + + var _field *FieldType + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.FieldType = _field + return nil +} +func (p *TaskFilterField) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := 
iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Values = _field + return nil +} +func (p *TaskFilterField) ReadField4(iprot thrift.TProtocol) error { + + var _field *QueryType + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.QueryType = _field + return nil +} +func (p *TaskFilterField) ReadField5(iprot thrift.TProtocol) error { + + var _field *QueryRelation + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.QueryAndOr = _field + return nil +} +func (p *TaskFilterField) ReadField6(iprot thrift.TProtocol) error { + _field := NewTaskFilterField() + if err := _field.Read(iprot); err != nil { + return err + } + p.SubFilter = _field + return nil +} + +func (p *TaskFilterField) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TaskFilterField"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskFilterField) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetFieldName() { + if err = oprot.WriteFieldBegin("field_name", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.FieldName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *TaskFilterField) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetFieldType() { + if err = oprot.WriteFieldBegin("field_type", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.FieldType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *TaskFilterField) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetValues() { + if err = oprot.WriteFieldBegin("values", thrift.LIST, 3); 
err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.Values)); err != nil { + return err + } + for _, v := range p.Values { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *TaskFilterField) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryType() { + if err = oprot.WriteFieldBegin("query_type", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.QueryType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} +func (p *TaskFilterField) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetQueryAndOr() { + if err = oprot.WriteFieldBegin("query_and_or", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.QueryAndOr); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} +func (p *TaskFilterField) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetSubFilter() { + if err = oprot.WriteFieldBegin("sub_filter", thrift.STRUCT, 6); err != nil { + goto WriteFieldBeginError + } + if err := p.SubFilter.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *TaskFilterField) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskFilterField(%+v)", *p) + +} + +func (p *TaskFilterField) DeepEqual(ano *TaskFilterField) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.FieldName) { + return false + } + if !p.Field2DeepEqual(ano.FieldType) { + return false + } + if !p.Field3DeepEqual(ano.Values) { + return false + } + if !p.Field4DeepEqual(ano.QueryType) { + return false + } + if !p.Field5DeepEqual(ano.QueryAndOr) { + return false + } + if !p.Field6DeepEqual(ano.SubFilter) { + return false + } + return true +} + +func (p *TaskFilterField) Field1DeepEqual(src *TaskFieldName) bool { + + if p.FieldName == src { + return true + } else if p.FieldName == nil || src == nil { + return false + } + if strings.Compare(*p.FieldName, *src) != 0 { + return false + } + return true +} +func (p *TaskFilterField) Field2DeepEqual(src *FieldType) bool { + + if p.FieldType == src { + return true + } else if p.FieldType == nil || src == nil { + return false + } + if strings.Compare(*p.FieldType, *src) != 0 { + return 
false + } + return true +} +func (p *TaskFilterField) Field3DeepEqual(src []string) bool { + + if len(p.Values) != len(src) { + return false + } + for i, v := range p.Values { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *TaskFilterField) Field4DeepEqual(src *QueryType) bool { + + if p.QueryType == src { + return true + } else if p.QueryType == nil || src == nil { + return false + } + if strings.Compare(*p.QueryType, *src) != 0 { + return false + } + return true +} +func (p *TaskFilterField) Field5DeepEqual(src *QueryRelation) bool { + + if p.QueryAndOr == src { + return true + } else if p.QueryAndOr == nil || src == nil { + return false + } + if strings.Compare(*p.QueryAndOr, *src) != 0 { + return false + } + return true +} +func (p *TaskFilterField) Field6DeepEqual(src *TaskFilterField) bool { + + if !p.SubFilter.DeepEqual(src) { + return false + } + return true +} + +type SpanFilterFields struct { + // Span filter conditions + Filters *FilterFields `thrift:"filters,1,optional" frugal:"1,optional,FilterFields" form:"filters" json:"filters,omitempty" query:"filters"` + // Platform type; defaults to fornax if not set + PlatformType *common.PlatformType `thrift:"platform_type,2,optional" frugal:"2,optional,string" form:"platform_type" json:"platform_type,omitempty" query:"platform_type"` + // Span tab type to query; defaults to root span if not set + SpanListType *common.SpanListType `thrift:"span_list_type,3,optional" frugal:"3,optional,string" form:"span_list_type" json:"span_list_type,omitempty" query:"span_list_type"` +} + +func NewSpanFilterFields() *SpanFilterFields { + return &SpanFilterFields{} +} + +func (p *SpanFilterFields) InitDefault() { +} + +var SpanFilterFields_Filters_DEFAULT *FilterFields + +func (p *SpanFilterFields) GetFilters() (v *FilterFields) { + if p == nil { + return + } + if !p.IsSetFilters() { + return SpanFilterFields_Filters_DEFAULT + } + return p.Filters +} + +var SpanFilterFields_PlatformType_DEFAULT common.PlatformType + +func (p *SpanFilterFields) GetPlatformType() (v common.PlatformType) { + if p == nil { + return + } + if !p.IsSetPlatformType() { + return SpanFilterFields_PlatformType_DEFAULT + } + return *p.PlatformType +} + +var SpanFilterFields_SpanListType_DEFAULT common.SpanListType + +func (p *SpanFilterFields) GetSpanListType() (v common.SpanListType) { + if p == nil { + return + } + if !p.IsSetSpanListType() { + return SpanFilterFields_SpanListType_DEFAULT + } + return *p.SpanListType +} +func (p *SpanFilterFields) SetFilters(val *FilterFields) { + p.Filters = val +} +func (p *SpanFilterFields) SetPlatformType(val *common.PlatformType) { + p.PlatformType = val +} +func (p *SpanFilterFields) SetSpanListType(val *common.SpanListType) { + p.SpanListType = val +} + +var fieldIDToName_SpanFilterFields = map[int16]string{ + 1: "filters", + 2: "platform_type", + 3: "span_list_type", +} + +func (p *SpanFilterFields) IsSetFilters() bool { + return p.Filters != nil +} + +func (p *SpanFilterFields) IsSetPlatformType() bool { + return p.PlatformType != nil +} + +func (p *SpanFilterFields) IsSetSpanListType() bool { + return p.SpanListType != nil +} + +func (p *SpanFilterFields) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId ==
thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_SpanFilterFields[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *SpanFilterFields) ReadField1(iprot thrift.TProtocol) error { + _field := NewFilterFields() + if err := _field.Read(iprot); err != nil { + return err + } + p.Filters = _field + return nil +} +func (p *SpanFilterFields) ReadField2(iprot thrift.TProtocol) error { + + var _field *common.PlatformType + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.PlatformType = _field + return nil +} +func (p *SpanFilterFields) ReadField3(iprot thrift.TProtocol) error { + + var _field *common.SpanListType + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.SpanListType = _field + return nil +} + +func (p *SpanFilterFields) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("SpanFilterFields"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *SpanFilterFields) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetFilters() { + if err = oprot.WriteFieldBegin("filters", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Filters.Write(oprot); err != nil { + return err + } + 
if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *SpanFilterFields) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetPlatformType() { + if err = oprot.WriteFieldBegin("platform_type", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.PlatformType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *SpanFilterFields) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetSpanListType() { + if err = oprot.WriteFieldBegin("span_list_type", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.SpanListType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *SpanFilterFields) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SpanFilterFields(%+v)", *p) + +} + +func (p *SpanFilterFields) DeepEqual(ano *SpanFilterFields) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Filters) { + return false + } + if !p.Field2DeepEqual(ano.PlatformType) { + return false + } + if !p.Field3DeepEqual(ano.SpanListType) { + return false + } + return true +} + +func (p *SpanFilterFields) Field1DeepEqual(src *FilterFields) bool { + + if !p.Filters.DeepEqual(src) { + return false + } + return true +} +func (p *SpanFilterFields) Field2DeepEqual(src *common.PlatformType) bool { + + if p.PlatformType == src { + return true + } else if p.PlatformType == nil || src == nil { + return false + } + if strings.Compare(*p.PlatformType, *src) != 0 { + return false + } + return true +} +func (p *SpanFilterFields) Field3DeepEqual(src *common.SpanListType) bool { + + if p.SpanListType == src { + return true + } else if p.SpanListType == nil || src == nil { + return false + } + if strings.Compare(*p.SpanListType, *src) != 0 { + return false + } + return true +} diff --git a/backend/kitex_gen/coze/loop/observability/domain/filter/filter_validator.go b/backend/kitex_gen/coze/loop/observability/domain/filter/filter_validator.go index 13b71395c..0cf259ee4 100644 --- a/backend/kitex_gen/coze/loop/observability/domain/filter/filter_validator.go +++ b/backend/kitex_gen/coze/loop/observability/domain/filter/filter_validator.go @@ -35,3 +35,22 @@ func (p *FilterField) IsValid() error { func (p *FieldOptions) IsValid() error { return nil } +func (p *TaskFilterFields) IsValid() error { + return nil +} +func (p *TaskFilterField) IsValid() error { + if p.SubFilter != nil { + if err := p.SubFilter.IsValid(); err != nil { + return fmt.Errorf("field SubFilter not valid, %w", err) + } + } + return nil +} +func (p *SpanFilterFields) IsValid() error { + if p.Filters != nil { + if err := p.Filters.IsValid(); 
err != nil { + return fmt.Errorf("field Filters not valid, %w", err) + } + } + return nil +} diff --git a/backend/kitex_gen/coze/loop/observability/domain/filter/k-filter.go b/backend/kitex_gen/coze/loop/observability/domain/filter/k-filter.go index 49424d286..4e4867186 100644 --- a/backend/kitex_gen/coze/loop/observability/domain/filter/k-filter.go +++ b/backend/kitex_gen/coze/loop/observability/domain/filter/k-filter.go @@ -10,6 +10,12 @@ import ( "github.com/cloudwego/gopkg/protocol/thrift" kutils "github.com/cloudwego/kitex/pkg/utils" + + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" +) + +var ( + _ = common.KitexUnusedProtection ) // unused protection @@ -936,3 +942,845 @@ func (p *FieldOptions) DeepCopy(s interface{}) error { return nil } + +func (p *TaskFilterFields) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetFilterFields bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetFilterFields = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetFilterFields { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskFilterFields[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_TaskFilterFields[fieldId])) +} + +func (p *TaskFilterFields) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *QueryRelation + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.QueryAndOr = _field + return offset, nil +} + +func (p *TaskFilterFields) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*TaskFilterField, 0, size) + values := make([]TaskFilterField, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.FilterFields = _field + return offset, nil +} + +func (p *TaskFilterFields) FastWrite(buf []byte) int { + return 
p.FastWriteNocopy(buf, nil) +} + +func (p *TaskFilterFields) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskFilterFields) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskFilterFields) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetQueryAndOr() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 1) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.QueryAndOr) + } + return offset +} + +func (p *TaskFilterFields) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 2) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.FilterFields { + length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + return offset +} + +func (p *TaskFilterFields) field1Length() int { + l := 0 + if p.IsSetQueryAndOr() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.QueryAndOr) + } + return l +} + +func (p *TaskFilterFields) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.FilterFields { + _ = v + l += v.BLength() + } + return l +} + +func (p *TaskFilterFields) DeepCopy(s interface{}) error { + src, ok := s.(*TaskFilterFields) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.QueryAndOr != nil { + tmp := *src.QueryAndOr + p.QueryAndOr = &tmp + } + + if src.FilterFields != nil { + p.FilterFields = make([]*TaskFilterField, 0, len(src.FilterFields)) + for _, elem := range src.FilterFields { + var _elem *TaskFilterField + if elem != nil { + _elem = &TaskFilterField{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.FilterFields = append(p.FilterFields, _elem) + } + } + + return nil +} + +func (p *TaskFilterField) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = 
p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskFilterField[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskFilterField) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *TaskFieldName + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.FieldName = _field + return offset, nil +} + +func (p *TaskFilterField) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field *FieldType + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.FieldType = _field + return offset, nil +} + +func (p *TaskFilterField) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _elem = v + } + + _field = append(_field, _elem) + } + p.Values = _field + return offset, nil +} + +func (p *TaskFilterField) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field *QueryType + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.QueryType = _field + return offset, nil +} + +func (p *TaskFilterField) FastReadField5(buf []byte) (int, error) { + offset := 0 + + var _field *QueryRelation + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.QueryAndOr = _field + return offset, nil +} + +func (p *TaskFilterField) FastReadField6(buf []byte) (int, error) { + offset := 0 + _field := NewTaskFilterField() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.SubFilter = _field + return offset, nil +} + +func (p *TaskFilterField) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskFilterField) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += 
p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField5(buf[offset:], w) + offset += p.fastWriteField6(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskFilterField) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskFilterField) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetFieldName() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 1) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.FieldName) + } + return offset +} + +func (p *TaskFilterField) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetFieldType() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.FieldType) + } + return offset +} + +func (p *TaskFilterField) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetValues() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 3) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.Values { + length++ + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, v) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + } + return offset +} + +func (p *TaskFilterField) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetQueryType() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 4) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.QueryType) + } + return offset +} + +func (p *TaskFilterField) fastWriteField5(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetQueryAndOr() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 5) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.QueryAndOr) + } + return offset +} + +func (p *TaskFilterField) fastWriteField6(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSubFilter() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 6) + offset += p.SubFilter.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskFilterField) field1Length() int { + l := 0 + if p.IsSetFieldName() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.FieldName) + } + return l +} + +func (p *TaskFilterField) field2Length() int { + l := 0 + if p.IsSetFieldType() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.FieldType) + } + return l +} + +func (p *TaskFilterField) field3Length() int { + l := 0 + if p.IsSetValues() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.Values { + _ = v + l += thrift.Binary.StringLengthNocopy(v) + } + } + return l +} + +func (p *TaskFilterField) field4Length() int { + l := 0 + if p.IsSetQueryType() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.QueryType) + } + return l +} + +func (p *TaskFilterField) field5Length() int { + l := 0 + if p.IsSetQueryAndOr() { + l += 
thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.QueryAndOr) + } + return l +} + +func (p *TaskFilterField) field6Length() int { + l := 0 + if p.IsSetSubFilter() { + l += thrift.Binary.FieldBeginLength() + l += p.SubFilter.BLength() + } + return l +} + +func (p *TaskFilterField) DeepCopy(s interface{}) error { + src, ok := s.(*TaskFilterField) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.FieldName != nil { + tmp := *src.FieldName + p.FieldName = &tmp + } + + if src.FieldType != nil { + tmp := *src.FieldType + p.FieldType = &tmp + } + + if src.Values != nil { + p.Values = make([]string, 0, len(src.Values)) + for _, elem := range src.Values { + var _elem string + if elem != "" { + _elem = kutils.StringDeepCopy(elem) + } + p.Values = append(p.Values, _elem) + } + } + + if src.QueryType != nil { + tmp := *src.QueryType + p.QueryType = &tmp + } + + if src.QueryAndOr != nil { + tmp := *src.QueryAndOr + p.QueryAndOr = &tmp + } + + var _subFilter *TaskFilterField + if src.SubFilter != nil { + _subFilter = &TaskFilterField{} + if err := _subFilter.DeepCopy(src.SubFilter); err != nil { + return err + } + } + p.SubFilter = _subFilter + + return nil +} + +func (p *SpanFilterFields) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_SpanFilterFields[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *SpanFilterFields) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := NewFilterFields() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Filters = _field + return offset, nil +} + +func (p *SpanFilterFields) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field *common.PlatformType + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.PlatformType = _field + 
return offset, nil +} + +func (p *SpanFilterFields) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field *common.SpanListType + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.SpanListType = _field + return offset, nil +} + +func (p *SpanFilterFields) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *SpanFilterFields) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *SpanFilterFields) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *SpanFilterFields) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetFilters() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Filters.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *SpanFilterFields) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetPlatformType() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.PlatformType) + } + return offset +} + +func (p *SpanFilterFields) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSpanListType() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 3) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.SpanListType) + } + return offset +} + +func (p *SpanFilterFields) field1Length() int { + l := 0 + if p.IsSetFilters() { + l += thrift.Binary.FieldBeginLength() + l += p.Filters.BLength() + } + return l +} + +func (p *SpanFilterFields) field2Length() int { + l := 0 + if p.IsSetPlatformType() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.PlatformType) + } + return l +} + +func (p *SpanFilterFields) field3Length() int { + l := 0 + if p.IsSetSpanListType() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.SpanListType) + } + return l +} + +func (p *SpanFilterFields) DeepCopy(s interface{}) error { + src, ok := s.(*SpanFilterFields) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _filters *FilterFields + if src.Filters != nil { + _filters = &FilterFields{} + if err := _filters.DeepCopy(src.Filters); err != nil { + return err + } + } + p.Filters = _filters + + if src.PlatformType != nil { + tmp := *src.PlatformType + p.PlatformType = &tmp + } + + if src.SpanListType != nil { + tmp := *src.SpanListType + p.SpanListType = &tmp + } + + return nil +} diff --git a/backend/kitex_gen/coze/loop/observability/domain/task/k-consts.go b/backend/kitex_gen/coze/loop/observability/domain/task/k-consts.go new file mode 100644 index 000000000..658fe4b5e --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/domain/task/k-consts.go @@ -0,0 +1,4 @@ +package task + +// KitexUnusedProtection is used to prevent 'imported and not used' error. 
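Illustrative sketch (not part of the generated patch): the k-filter.go codec above gives TaskFilterField a BLength/FastWrite/FastRead fast path plus DeepCopy/DeepEqual helpers. The snippet below shows how a caller might round-trip one of these structs under that API; the literal values ("status", "in", "running", "success") are made-up placeholders, not constants defined by the IDL.

package main

import (
	"fmt"

	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter"
)

func main() {
	// Placeholder values for illustration only; real TaskFieldName / QueryType
	// values come from the observability IDL.
	var fieldName filter.TaskFieldName = "status"
	var queryType filter.QueryType = "in"

	src := filter.NewTaskFilterField()
	src.SetFieldName(&fieldName)
	src.SetQueryType(&queryType)
	src.SetValues([]string{"running", "success"})

	// Fast-path encode: size the buffer with BLength, then FastWrite fills it
	// and terminates the struct with a field-stop marker.
	buf := make([]byte, src.BLength())
	n := src.FastWrite(buf)

	// Fast-path decode into a fresh struct, then compare field by field.
	dst := filter.NewTaskFilterField()
	if _, err := dst.FastRead(buf[:n]); err != nil {
		panic(err)
	}
	fmt.Println("round-trip equal:", src.DeepEqual(dst))

	// DeepCopy builds an independent copy (no shared pointers or slices).
	cp := filter.NewTaskFilterField()
	if err := cp.DeepCopy(src); err != nil {
		panic(err)
	}
	fmt.Println("deep copy equal:", cp.DeepEqual(src))
}

The BLength-then-FastWrite pairing is what lets callers allocate an exactly sized buffer before encoding, which is the point of the generated fast path.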
+var KitexUnusedProtection = struct{}{} diff --git a/backend/kitex_gen/coze/loop/observability/domain/task/k-task.go b/backend/kitex_gen/coze/loop/observability/domain/task/k-task.go new file mode 100644 index 000000000..93b314663 --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/domain/task/k-task.go @@ -0,0 +1,4943 @@ +// Code generated by Kitex v0.13.1. DO NOT EDIT. + +package task + +import ( + "bytes" + "fmt" + "reflect" + "strings" + + "github.com/cloudwego/gopkg/protocol/thrift" + kutils "github.com/cloudwego/kitex/pkg/utils" + + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/dataset" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" +) + +var ( + _ = common.KitexUnusedProtection + _ = dataset.KitexUnusedProtection + _ = filter.KitexUnusedProtection +) + +// unused protection +var ( + _ = fmt.Formatter(nil) + _ = (*bytes.Buffer)(nil) + _ = (*strings.Builder)(nil) + _ = reflect.Type(nil) + _ = thrift.STOP +) + +func (p *Task) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetName bool = false + var issetTaskType bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetName = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTaskType = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.STRUCT { + l, err = 
p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 100: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField100(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetName { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTaskType { + fieldId = 5 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_Task[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_Task[fieldId])) +} + +func (p *Task) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.ID = _field + return offset, nil +} + +func (p *Task) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.Name = _field + return offset, nil +} + +func (p *Task) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field *string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.Description = _field + return offset, nil +} + +func (p *Task) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.WorkspaceID = _field + return offset, nil +} + +func (p *Task) FastReadField5(buf []byte) (int, error) { + offset := 0 + + var _field TaskType + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.TaskType = _field + return offset, nil +} + +func (p *Task) FastReadField6(buf []byte) (int, error) { + offset := 0 + + var _field *TaskStatus + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.TaskStatus = 
_field + return offset, nil +} + +func (p *Task) FastReadField7(buf []byte) (int, error) { + offset := 0 + _field := NewRule() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Rule = _field + return offset, nil +} + +func (p *Task) FastReadField8(buf []byte) (int, error) { + offset := 0 + _field := NewTaskConfig() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TaskConfig = _field + return offset, nil +} + +func (p *Task) FastReadField9(buf []byte) (int, error) { + offset := 0 + _field := NewRunDetail() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TaskDetail = _field + return offset, nil +} + +func (p *Task) FastReadField10(buf []byte) (int, error) { + offset := 0 + _field := NewRunDetail() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BackfillTaskDetail = _field + return offset, nil +} + +func (p *Task) FastReadField100(buf []byte) (int, error) { + offset := 0 + _field := common.NewBaseInfo() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BaseInfo = _field + return offset, nil +} + +func (p *Task) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *Task) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField5(buf[offset:], w) + offset += p.fastWriteField6(buf[offset:], w) + offset += p.fastWriteField7(buf[offset:], w) + offset += p.fastWriteField8(buf[offset:], w) + offset += p.fastWriteField9(buf[offset:], w) + offset += p.fastWriteField10(buf[offset:], w) + offset += p.fastWriteField100(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *Task) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field100Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *Task) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetID() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], *p.ID) + } + return offset +} + +func (p *Task) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.Name) + return offset +} + +func (p *Task) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetDescription() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 3) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.Description) + } + return offset +} + +func (p *Task) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetWorkspaceID() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 4) + offset += thrift.Binary.WriteI64(buf[offset:], *p.WorkspaceID) + } 
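// Editorial note, not part of the patch: each generated fastWriteFieldN has a
// matching fieldNLength method, and both sit behind the same IsSetXxx guard for
// optional fields, so an unset optional field contributes neither bytes nor
// length. Required fields such as Name and TaskType are written
// unconditionally, and FastWriteNocopy closes the struct with WriteFieldStop,
// which is why BLength can size the buffer exactly before FastWrite runs.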
+ return offset +} + +func (p *Task) fastWriteField5(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 5) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.TaskType) + return offset +} + +func (p *Task) fastWriteField6(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTaskStatus() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 6) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.TaskStatus) + } + return offset +} + +func (p *Task) fastWriteField7(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetRule() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 7) + offset += p.Rule.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *Task) fastWriteField8(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTaskConfig() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 8) + offset += p.TaskConfig.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *Task) fastWriteField9(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTaskDetail() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 9) + offset += p.TaskDetail.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *Task) fastWriteField10(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBackfillTaskDetail() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 10) + offset += p.BackfillTaskDetail.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *Task) fastWriteField100(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBaseInfo() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 100) + offset += p.BaseInfo.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *Task) field1Length() int { + l := 0 + if p.IsSetID() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *Task) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.Name) + return l +} + +func (p *Task) field3Length() int { + l := 0 + if p.IsSetDescription() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.Description) + } + return l +} + +func (p *Task) field4Length() int { + l := 0 + if p.IsSetWorkspaceID() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *Task) field5Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.TaskType) + return l +} + +func (p *Task) field6Length() int { + l := 0 + if p.IsSetTaskStatus() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.TaskStatus) + } + return l +} + +func (p *Task) field7Length() int { + l := 0 + if p.IsSetRule() { + l += thrift.Binary.FieldBeginLength() + l += p.Rule.BLength() + } + return l +} + +func (p *Task) field8Length() int { + l := 0 + if p.IsSetTaskConfig() { + l += thrift.Binary.FieldBeginLength() + l += p.TaskConfig.BLength() + } + return l +} + +func (p *Task) field9Length() int { + l := 0 + if p.IsSetTaskDetail() { + l += thrift.Binary.FieldBeginLength() + l += p.TaskDetail.BLength() + } + return l +} + +func (p *Task) field10Length() int { + l := 0 + if p.IsSetBackfillTaskDetail() { + l += thrift.Binary.FieldBeginLength() + l += 
p.BackfillTaskDetail.BLength() + } + return l +} + +func (p *Task) field100Length() int { + l := 0 + if p.IsSetBaseInfo() { + l += thrift.Binary.FieldBeginLength() + l += p.BaseInfo.BLength() + } + return l +} + +func (p *Task) DeepCopy(s interface{}) error { + src, ok := s.(*Task) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.ID != nil { + tmp := *src.ID + p.ID = &tmp + } + + if src.Name != "" { + p.Name = kutils.StringDeepCopy(src.Name) + } + + if src.Description != nil { + var tmp string + if *src.Description != "" { + tmp = kutils.StringDeepCopy(*src.Description) + } + p.Description = &tmp + } + + if src.WorkspaceID != nil { + tmp := *src.WorkspaceID + p.WorkspaceID = &tmp + } + + p.TaskType = src.TaskType + + if src.TaskStatus != nil { + tmp := *src.TaskStatus + p.TaskStatus = &tmp + } + + var _rule *Rule + if src.Rule != nil { + _rule = &Rule{} + if err := _rule.DeepCopy(src.Rule); err != nil { + return err + } + } + p.Rule = _rule + + var _taskConfig *TaskConfig + if src.TaskConfig != nil { + _taskConfig = &TaskConfig{} + if err := _taskConfig.DeepCopy(src.TaskConfig); err != nil { + return err + } + } + p.TaskConfig = _taskConfig + + var _taskDetail *RunDetail + if src.TaskDetail != nil { + _taskDetail = &RunDetail{} + if err := _taskDetail.DeepCopy(src.TaskDetail); err != nil { + return err + } + } + p.TaskDetail = _taskDetail + + var _backfillTaskDetail *RunDetail + if src.BackfillTaskDetail != nil { + _backfillTaskDetail = &RunDetail{} + if err := _backfillTaskDetail.DeepCopy(src.BackfillTaskDetail); err != nil { + return err + } + } + p.BackfillTaskDetail = _backfillTaskDetail + + var _baseInfo *common.BaseInfo + if src.BaseInfo != nil { + _baseInfo = &common.BaseInfo{} + if err := _baseInfo.DeepCopy(src.BaseInfo); err != nil { + return err + } + } + p.BaseInfo = _baseInfo + + return nil +} + +func (p *Rule) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_Rule[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *Rule) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := filter.NewSpanFilterFields() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.SpanFilters = _field + return offset, nil +} + +func (p *Rule) FastReadField2(buf []byte) (int, error) { + offset := 0 + _field := NewSampler() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Sampler = _field + return offset, nil +} + +func (p *Rule) FastReadField3(buf []byte) (int, error) { + offset := 0 + _field := NewEffectiveTime() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.EffectiveTime = _field + return offset, nil +} + +func (p *Rule) FastReadField4(buf []byte) (int, error) { + offset := 0 + _field := NewEffectiveTime() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BackfillEffectiveTime = _field + return offset, nil +} + +func (p *Rule) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *Rule) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *Rule) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *Rule) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSpanFilters() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.SpanFilters.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *Rule) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSampler() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 2) + offset += p.Sampler.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *Rule) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetEffectiveTime() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 3) + offset += p.EffectiveTime.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *Rule) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBackfillEffectiveTime() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 4) + offset += p.BackfillEffectiveTime.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *Rule) field1Length() int { + l := 0 + if p.IsSetSpanFilters() { + l += thrift.Binary.FieldBeginLength() + l += p.SpanFilters.BLength() + } + return l +} + +func (p *Rule) field2Length() int { + l := 0 + if p.IsSetSampler() { + l += thrift.Binary.FieldBeginLength() + l += p.Sampler.BLength() + } + return l +} + +func (p *Rule) 
field3Length() int { + l := 0 + if p.IsSetEffectiveTime() { + l += thrift.Binary.FieldBeginLength() + l += p.EffectiveTime.BLength() + } + return l +} + +func (p *Rule) field4Length() int { + l := 0 + if p.IsSetBackfillEffectiveTime() { + l += thrift.Binary.FieldBeginLength() + l += p.BackfillEffectiveTime.BLength() + } + return l +} + +func (p *Rule) DeepCopy(s interface{}) error { + src, ok := s.(*Rule) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _spanFilters *filter.SpanFilterFields + if src.SpanFilters != nil { + _spanFilters = &filter.SpanFilterFields{} + if err := _spanFilters.DeepCopy(src.SpanFilters); err != nil { + return err + } + } + p.SpanFilters = _spanFilters + + var _sampler *Sampler + if src.Sampler != nil { + _sampler = &Sampler{} + if err := _sampler.DeepCopy(src.Sampler); err != nil { + return err + } + } + p.Sampler = _sampler + + var _effectiveTime *EffectiveTime + if src.EffectiveTime != nil { + _effectiveTime = &EffectiveTime{} + if err := _effectiveTime.DeepCopy(src.EffectiveTime); err != nil { + return err + } + } + p.EffectiveTime = _effectiveTime + + var _backfillEffectiveTime *EffectiveTime + if src.BackfillEffectiveTime != nil { + _backfillEffectiveTime = &EffectiveTime{} + if err := _backfillEffectiveTime.DeepCopy(src.BackfillEffectiveTime); err != nil { + return err + } + } + p.BackfillEffectiveTime = _backfillEffectiveTime + + return nil +} + +func (p *Sampler) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.DOUBLE { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_Sampler[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *Sampler) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *float64 + if v, l, err := thrift.Binary.ReadDouble(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.SampleRate = _field + return offset, nil +} + +func (p *Sampler) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.SampleSize = _field + return offset, nil +} + +func (p *Sampler) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field *bool + if v, l, err := thrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.IsCycle = _field + return offset, nil +} + +func (p *Sampler) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.CycleCount = _field + return offset, nil +} + +func (p *Sampler) FastReadField5(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.CycleInterval = _field + return offset, nil +} + +func (p *Sampler) FastReadField6(buf []byte) (int, error) { + offset := 0 + + var _field *TimeUnit + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.CycleTimeUnit = _field + return offset, nil +} + +func (p *Sampler) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *Sampler) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField5(buf[offset:], w) + offset += p.fastWriteField6(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *Sampler) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *Sampler) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSampleRate() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.DOUBLE, 1) + offset += thrift.Binary.WriteDouble(buf[offset:], *p.SampleRate) + } + return offset +} + +func (p *Sampler) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSampleSize() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 2) + offset += thrift.Binary.WriteI64(buf[offset:], *p.SampleSize) + } + return offset +} + +func (p *Sampler) 
fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetIsCycle() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.BOOL, 3) + offset += thrift.Binary.WriteBool(buf[offset:], *p.IsCycle) + } + return offset +} + +func (p *Sampler) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetCycleCount() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 4) + offset += thrift.Binary.WriteI64(buf[offset:], *p.CycleCount) + } + return offset +} + +func (p *Sampler) fastWriteField5(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetCycleInterval() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 5) + offset += thrift.Binary.WriteI64(buf[offset:], *p.CycleInterval) + } + return offset +} + +func (p *Sampler) fastWriteField6(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetCycleTimeUnit() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 6) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.CycleTimeUnit) + } + return offset +} + +func (p *Sampler) field1Length() int { + l := 0 + if p.IsSetSampleRate() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.DoubleLength() + } + return l +} + +func (p *Sampler) field2Length() int { + l := 0 + if p.IsSetSampleSize() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *Sampler) field3Length() int { + l := 0 + if p.IsSetIsCycle() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.BoolLength() + } + return l +} + +func (p *Sampler) field4Length() int { + l := 0 + if p.IsSetCycleCount() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *Sampler) field5Length() int { + l := 0 + if p.IsSetCycleInterval() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *Sampler) field6Length() int { + l := 0 + if p.IsSetCycleTimeUnit() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.CycleTimeUnit) + } + return l +} + +func (p *Sampler) DeepCopy(s interface{}) error { + src, ok := s.(*Sampler) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.SampleRate != nil { + tmp := *src.SampleRate + p.SampleRate = &tmp + } + + if src.SampleSize != nil { + tmp := *src.SampleSize + p.SampleSize = &tmp + } + + if src.IsCycle != nil { + tmp := *src.IsCycle + p.IsCycle = &tmp + } + + if src.CycleCount != nil { + tmp := *src.CycleCount + p.CycleCount = &tmp + } + + if src.CycleInterval != nil { + tmp := *src.CycleInterval + p.CycleInterval = &tmp + } + + if src.CycleTimeUnit != nil { + tmp := *src.CycleTimeUnit + p.CycleTimeUnit = &tmp + } + + return nil +} + +func (p *EffectiveTime) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) 
+ offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_EffectiveTime[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *EffectiveTime) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.StartAt = _field + return offset, nil +} + +func (p *EffectiveTime) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.EndAt = _field + return offset, nil +} + +func (p *EffectiveTime) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *EffectiveTime) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *EffectiveTime) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *EffectiveTime) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetStartAt() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], *p.StartAt) + } + return offset +} + +func (p *EffectiveTime) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetEndAt() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 2) + offset += thrift.Binary.WriteI64(buf[offset:], *p.EndAt) + } + return offset +} + +func (p *EffectiveTime) field1Length() int { + l := 0 + if p.IsSetStartAt() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *EffectiveTime) field2Length() int { + l := 0 + if p.IsSetEndAt() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *EffectiveTime) DeepCopy(s interface{}) error { + src, ok := s.(*EffectiveTime) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.StartAt != nil { + tmp := *src.StartAt + p.StartAt = &tmp + } + + if src.EndAt != nil { + tmp := *src.EndAt + p.EndAt = &tmp + } + + return nil +} + +func (p *TaskConfig) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l 
+ if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskConfig[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskConfig) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*AutoEvaluateConfig, 0, size) + values := make([]AutoEvaluateConfig, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.AutoEvaluateConfigs = _field + return offset, nil +} + +func (p *TaskConfig) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*DataReflowConfig, 0, size) + values := make([]DataReflowConfig, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.DataReflowConfig = _field + return offset, nil +} + +func (p *TaskConfig) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskConfig) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskConfig) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskConfig) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetAutoEvaluateConfigs() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 1) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.AutoEvaluateConfigs { + length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + } + return offset +} + +func (p *TaskConfig) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetDataReflowConfig() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 2) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.DataReflowConfig { + length++ + offset += 
v.FastWriteNocopy(buf[offset:], w) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + } + return offset +} + +func (p *TaskConfig) field1Length() int { + l := 0 + if p.IsSetAutoEvaluateConfigs() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.AutoEvaluateConfigs { + _ = v + l += v.BLength() + } + } + return l +} + +func (p *TaskConfig) field2Length() int { + l := 0 + if p.IsSetDataReflowConfig() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.DataReflowConfig { + _ = v + l += v.BLength() + } + } + return l +} + +func (p *TaskConfig) DeepCopy(s interface{}) error { + src, ok := s.(*TaskConfig) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.AutoEvaluateConfigs != nil { + p.AutoEvaluateConfigs = make([]*AutoEvaluateConfig, 0, len(src.AutoEvaluateConfigs)) + for _, elem := range src.AutoEvaluateConfigs { + var _elem *AutoEvaluateConfig + if elem != nil { + _elem = &AutoEvaluateConfig{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.AutoEvaluateConfigs = append(p.AutoEvaluateConfigs, _elem) + } + } + + if src.DataReflowConfig != nil { + p.DataReflowConfig = make([]*DataReflowConfig, 0, len(src.DataReflowConfig)) + for _, elem := range src.DataReflowConfig { + var _elem *DataReflowConfig + if elem != nil { + _elem = &DataReflowConfig{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.DataReflowConfig = append(p.DataReflowConfig, _elem) + } + } + + return nil +} + +func (p *DataReflowConfig) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_DataReflowConfig[fieldId]), err) +SkipFieldError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *DataReflowConfig) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.DatasetID = _field + return offset, nil +} + +func (p *DataReflowConfig) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field *string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.DatasetName = _field + return offset, nil +} + +func (p *DataReflowConfig) FastReadField3(buf []byte) (int, error) { + offset := 0 + _field := dataset.NewDatasetSchema() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.DatasetSchema = _field + return offset, nil +} + +func (p *DataReflowConfig) FastReadField4(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*dataset.FieldMapping, 0, size) + values := make([]dataset.FieldMapping, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.FieldMappings = _field + return offset, nil +} + +func (p *DataReflowConfig) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *DataReflowConfig) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *DataReflowConfig) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *DataReflowConfig) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetDatasetID() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], *p.DatasetID) + } + return offset +} + +func (p *DataReflowConfig) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetDatasetName() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.DatasetName) + } + return offset +} + +func (p *DataReflowConfig) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetDatasetSchema() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 3) + offset += p.DatasetSchema.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *DataReflowConfig) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetFieldMappings() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 4) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.FieldMappings { + length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + 
thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + } + return offset +} + +func (p *DataReflowConfig) field1Length() int { + l := 0 + if p.IsSetDatasetID() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *DataReflowConfig) field2Length() int { + l := 0 + if p.IsSetDatasetName() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.DatasetName) + } + return l +} + +func (p *DataReflowConfig) field3Length() int { + l := 0 + if p.IsSetDatasetSchema() { + l += thrift.Binary.FieldBeginLength() + l += p.DatasetSchema.BLength() + } + return l +} + +func (p *DataReflowConfig) field4Length() int { + l := 0 + if p.IsSetFieldMappings() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.FieldMappings { + _ = v + l += v.BLength() + } + } + return l +} + +func (p *DataReflowConfig) DeepCopy(s interface{}) error { + src, ok := s.(*DataReflowConfig) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.DatasetID != nil { + tmp := *src.DatasetID + p.DatasetID = &tmp + } + + if src.DatasetName != nil { + var tmp string + if *src.DatasetName != "" { + tmp = kutils.StringDeepCopy(*src.DatasetName) + } + p.DatasetName = &tmp + } + + var _datasetSchema *dataset.DatasetSchema + if src.DatasetSchema != nil { + _datasetSchema = &dataset.DatasetSchema{} + if err := _datasetSchema.DeepCopy(src.DatasetSchema); err != nil { + return err + } + } + p.DatasetSchema = _datasetSchema + + if src.FieldMappings != nil { + p.FieldMappings = make([]*dataset.FieldMapping, 0, len(src.FieldMappings)) + for _, elem := range src.FieldMappings { + var _elem *dataset.FieldMapping + if elem != nil { + _elem = &dataset.FieldMapping{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.FieldMappings = append(p.FieldMappings, _elem) + } + } + + return nil +} + +func (p *AutoEvaluateConfig) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetEvaluatorVersionID bool = false + var issetEvaluatorID bool = false + var issetFieldMappings bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetEvaluatorVersionID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetEvaluatorID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetFieldMappings = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetEvaluatorVersionID { + fieldId = 1 + goto 
RequiredFieldNotSetError + } + + if !issetEvaluatorID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetFieldMappings { + fieldId = 3 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_AutoEvaluateConfig[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_AutoEvaluateConfig[fieldId])) +} + +func (p *AutoEvaluateConfig) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.EvaluatorVersionID = _field + return offset, nil +} + +func (p *AutoEvaluateConfig) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.EvaluatorID = _field + return offset, nil +} + +func (p *AutoEvaluateConfig) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*EvaluateFieldMapping, 0, size) + values := make([]EvaluateFieldMapping, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.FieldMappings = _field + return offset, nil +} + +func (p *AutoEvaluateConfig) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *AutoEvaluateConfig) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *AutoEvaluateConfig) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *AutoEvaluateConfig) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.EvaluatorVersionID) + return offset +} + +func (p *AutoEvaluateConfig) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 2) + offset += thrift.Binary.WriteI64(buf[offset:], p.EvaluatorID) + return offset +} + +func (p *AutoEvaluateConfig) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 3) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.FieldMappings { + length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], 
thrift.STRUCT, length) + return offset +} + +func (p *AutoEvaluateConfig) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *AutoEvaluateConfig) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *AutoEvaluateConfig) field3Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.FieldMappings { + _ = v + l += v.BLength() + } + return l +} + +func (p *AutoEvaluateConfig) DeepCopy(s interface{}) error { + src, ok := s.(*AutoEvaluateConfig) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.EvaluatorVersionID = src.EvaluatorVersionID + + p.EvaluatorID = src.EvaluatorID + + if src.FieldMappings != nil { + p.FieldMappings = make([]*EvaluateFieldMapping, 0, len(src.FieldMappings)) + for _, elem := range src.FieldMappings { + var _elem *EvaluateFieldMapping + if elem != nil { + _elem = &EvaluateFieldMapping{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.FieldMappings = append(p.FieldMappings, _elem) + } + } + + return nil +} + +func (p *RunDetail) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_RunDetail[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *RunDetail) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.SuccessCount = _field + return offset, nil +} + +func (p *RunDetail) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.FailedCount = _field + return offset, nil +} + +func (p *RunDetail) 
FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.TotalCount = _field + return offset, nil +} + +func (p *RunDetail) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *RunDetail) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *RunDetail) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *RunDetail) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSuccessCount() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], *p.SuccessCount) + } + return offset +} + +func (p *RunDetail) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetFailedCount() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 2) + offset += thrift.Binary.WriteI64(buf[offset:], *p.FailedCount) + } + return offset +} + +func (p *RunDetail) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTotalCount() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 3) + offset += thrift.Binary.WriteI64(buf[offset:], *p.TotalCount) + } + return offset +} + +func (p *RunDetail) field1Length() int { + l := 0 + if p.IsSetSuccessCount() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *RunDetail) field2Length() int { + l := 0 + if p.IsSetFailedCount() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *RunDetail) field3Length() int { + l := 0 + if p.IsSetTotalCount() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *RunDetail) DeepCopy(s interface{}) error { + src, ok := s.(*RunDetail) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.SuccessCount != nil { + tmp := *src.SuccessCount + p.SuccessCount = &tmp + } + + if src.FailedCount != nil { + tmp := *src.FailedCount + p.FailedCount = &tmp + } + + if src.TotalCount != nil { + tmp := *src.TotalCount + p.TotalCount = &tmp + } + + return nil +} + +func (p *BackfillDetail) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackfillDetail[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *BackfillDetail) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.SuccessCount = _field + return offset, nil +} + +func (p *BackfillDetail) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.FailedCount = _field + return offset, nil +} + +func (p *BackfillDetail) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.TotalCount = _field + return offset, nil +} + +func (p *BackfillDetail) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field *RunStatus + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.BackfillStatus = _field + return offset, nil +} + +func (p *BackfillDetail) FastReadField5(buf []byte) (int, error) { + offset := 0 + + var _field *string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.LastSpanPageToken = _field + return offset, nil +} + +func (p *BackfillDetail) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *BackfillDetail) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField5(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *BackfillDetail) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += 
p.field4Length() + l += p.field5Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *BackfillDetail) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSuccessCount() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], *p.SuccessCount) + } + return offset +} + +func (p *BackfillDetail) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetFailedCount() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 2) + offset += thrift.Binary.WriteI64(buf[offset:], *p.FailedCount) + } + return offset +} + +func (p *BackfillDetail) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTotalCount() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 3) + offset += thrift.Binary.WriteI64(buf[offset:], *p.TotalCount) + } + return offset +} + +func (p *BackfillDetail) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBackfillStatus() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 4) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.BackfillStatus) + } + return offset +} + +func (p *BackfillDetail) fastWriteField5(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetLastSpanPageToken() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 5) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.LastSpanPageToken) + } + return offset +} + +func (p *BackfillDetail) field1Length() int { + l := 0 + if p.IsSetSuccessCount() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *BackfillDetail) field2Length() int { + l := 0 + if p.IsSetFailedCount() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *BackfillDetail) field3Length() int { + l := 0 + if p.IsSetTotalCount() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *BackfillDetail) field4Length() int { + l := 0 + if p.IsSetBackfillStatus() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.BackfillStatus) + } + return l +} + +func (p *BackfillDetail) field5Length() int { + l := 0 + if p.IsSetLastSpanPageToken() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.LastSpanPageToken) + } + return l +} + +func (p *BackfillDetail) DeepCopy(s interface{}) error { + src, ok := s.(*BackfillDetail) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.SuccessCount != nil { + tmp := *src.SuccessCount + p.SuccessCount = &tmp + } + + if src.FailedCount != nil { + tmp := *src.FailedCount + p.FailedCount = &tmp + } + + if src.TotalCount != nil { + tmp := *src.TotalCount + p.TotalCount = &tmp + } + + if src.BackfillStatus != nil { + tmp := *src.BackfillStatus + p.BackfillStatus = &tmp + } + + if src.LastSpanPageToken != nil { + var tmp string + if *src.LastSpanPageToken != "" { + tmp = kutils.StringDeepCopy(*src.LastSpanPageToken) + } + p.LastSpanPageToken = &tmp + } + + return nil +} + +func (p *EvaluateFieldMapping) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetFieldSchema bool = false + var issetTraceFieldKey bool = false + var issetTraceFieldJsonpath bool = false + for { + fieldTypeId, fieldId, 
l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetFieldSchema = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTraceFieldKey = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTraceFieldJsonpath = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetFieldSchema { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetTraceFieldKey { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTraceFieldJsonpath { + fieldId = 3 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_EvaluateFieldMapping[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_EvaluateFieldMapping[fieldId])) +} + +func (p *EvaluateFieldMapping) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := dataset.NewFieldSchema() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.FieldSchema = _field + return offset, nil +} + +func (p *EvaluateFieldMapping) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.TraceFieldKey = _field + return offset, nil +} + +func (p *EvaluateFieldMapping) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.TraceFieldJsonpath = _field + return offset, nil +} + +func (p *EvaluateFieldMapping) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field *string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.EvalSetName = _field + return offset, nil +} + 
+func (p *EvaluateFieldMapping) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *EvaluateFieldMapping) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *EvaluateFieldMapping) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *EvaluateFieldMapping) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.FieldSchema.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *EvaluateFieldMapping) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.TraceFieldKey) + return offset +} + +func (p *EvaluateFieldMapping) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 3) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.TraceFieldJsonpath) + return offset +} + +func (p *EvaluateFieldMapping) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetEvalSetName() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 4) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.EvalSetName) + } + return offset +} + +func (p *EvaluateFieldMapping) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.FieldSchema.BLength() + return l +} + +func (p *EvaluateFieldMapping) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.TraceFieldKey) + return l +} + +func (p *EvaluateFieldMapping) field3Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.TraceFieldJsonpath) + return l +} + +func (p *EvaluateFieldMapping) field4Length() int { + l := 0 + if p.IsSetEvalSetName() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.EvalSetName) + } + return l +} + +func (p *EvaluateFieldMapping) DeepCopy(s interface{}) error { + src, ok := s.(*EvaluateFieldMapping) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _fieldSchema *dataset.FieldSchema + if src.FieldSchema != nil { + _fieldSchema = &dataset.FieldSchema{} + if err := _fieldSchema.DeepCopy(src.FieldSchema); err != nil { + return err + } + } + p.FieldSchema = _fieldSchema + + if src.TraceFieldKey != "" { + p.TraceFieldKey = kutils.StringDeepCopy(src.TraceFieldKey) + } + + if src.TraceFieldJsonpath != "" { + p.TraceFieldJsonpath = kutils.StringDeepCopy(src.TraceFieldJsonpath) + } + + if src.EvalSetName != nil { + var tmp string + if *src.EvalSetName != "" { + tmp = kutils.StringDeepCopy(*src.EvalSetName) + } + p.EvalSetName = &tmp + } + + return nil +} + +func (p *TaskRun) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetID bool = false + var issetWorkspaceID bool = false 
+ var issetTaskID bool = false + var issetTaskType bool = false + var issetRunStatus bool = false + var issetRunStartAt bool = false + var issetRunEndAt bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTaskID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTaskType = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetRunStatus = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetRunStartAt = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetRunEndAt = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 10: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField10(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 100: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField100(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto 
SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetWorkspaceID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTaskID { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetTaskType { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetRunStatus { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetRunStartAt { + fieldId = 8 + goto RequiredFieldNotSetError + } + + if !issetRunEndAt { + fieldId = 9 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskRun[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_TaskRun[fieldId])) +} + +func (p *TaskRun) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.ID = _field + return offset, nil +} + +func (p *TaskRun) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.WorkspaceID = _field + return offset, nil +} + +func (p *TaskRun) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.TaskID = _field + return offset, nil +} + +func (p *TaskRun) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field TaskRunType + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.TaskType = _field + return offset, nil +} + +func (p *TaskRun) FastReadField5(buf []byte) (int, error) { + offset := 0 + + var _field RunStatus + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.RunStatus = _field + return offset, nil +} + +func (p *TaskRun) FastReadField6(buf []byte) (int, error) { + offset := 0 + _field := NewRunDetail() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.RunDetail = _field + return offset, nil +} + +func (p *TaskRun) FastReadField7(buf []byte) (int, error) { + offset := 0 + _field := NewBackfillDetail() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BackfillRunDetail = _field + return offset, nil +} + +func (p *TaskRun) FastReadField8(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.RunStartAt = _field + return offset, nil +} + +func (p *TaskRun) FastReadField9(buf []byte) (int, error) { + offset := 0 + + var 
_field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.RunEndAt = _field + return offset, nil +} + +func (p *TaskRun) FastReadField10(buf []byte) (int, error) { + offset := 0 + _field := NewTaskRunConfig() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TaskRunConfig = _field + return offset, nil +} + +func (p *TaskRun) FastReadField100(buf []byte) (int, error) { + offset := 0 + _field := common.NewBaseInfo() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BaseInfo = _field + return offset, nil +} + +func (p *TaskRun) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskRun) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField8(buf[offset:], w) + offset += p.fastWriteField9(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField5(buf[offset:], w) + offset += p.fastWriteField6(buf[offset:], w) + offset += p.fastWriteField7(buf[offset:], w) + offset += p.fastWriteField10(buf[offset:], w) + offset += p.fastWriteField100(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskRun) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + l += p.field10Length() + l += p.field100Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskRun) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.ID) + return offset +} + +func (p *TaskRun) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 2) + offset += thrift.Binary.WriteI64(buf[offset:], p.WorkspaceID) + return offset +} + +func (p *TaskRun) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 3) + offset += thrift.Binary.WriteI64(buf[offset:], p.TaskID) + return offset +} + +func (p *TaskRun) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 4) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.TaskType) + return offset +} + +func (p *TaskRun) fastWriteField5(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 5) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.RunStatus) + return offset +} + +func (p *TaskRun) fastWriteField6(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetRunDetail() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 6) + offset += p.RunDetail.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskRun) fastWriteField7(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBackfillRunDetail() { + offset 
+= thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 7) + offset += p.BackfillRunDetail.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskRun) fastWriteField8(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 8) + offset += thrift.Binary.WriteI64(buf[offset:], p.RunStartAt) + return offset +} + +func (p *TaskRun) fastWriteField9(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 9) + offset += thrift.Binary.WriteI64(buf[offset:], p.RunEndAt) + return offset +} + +func (p *TaskRun) fastWriteField10(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTaskRunConfig() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 10) + offset += p.TaskRunConfig.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskRun) fastWriteField100(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBaseInfo() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 100) + offset += p.BaseInfo.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskRun) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *TaskRun) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *TaskRun) field3Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *TaskRun) field4Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.TaskType) + return l +} + +func (p *TaskRun) field5Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.RunStatus) + return l +} + +func (p *TaskRun) field6Length() int { + l := 0 + if p.IsSetRunDetail() { + l += thrift.Binary.FieldBeginLength() + l += p.RunDetail.BLength() + } + return l +} + +func (p *TaskRun) field7Length() int { + l := 0 + if p.IsSetBackfillRunDetail() { + l += thrift.Binary.FieldBeginLength() + l += p.BackfillRunDetail.BLength() + } + return l +} + +func (p *TaskRun) field8Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *TaskRun) field9Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *TaskRun) field10Length() int { + l := 0 + if p.IsSetTaskRunConfig() { + l += thrift.Binary.FieldBeginLength() + l += p.TaskRunConfig.BLength() + } + return l +} + +func (p *TaskRun) field100Length() int { + l := 0 + if p.IsSetBaseInfo() { + l += thrift.Binary.FieldBeginLength() + l += p.BaseInfo.BLength() + } + return l +} + +func (p *TaskRun) DeepCopy(s interface{}) error { + src, ok := s.(*TaskRun) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.ID = src.ID + + p.WorkspaceID = src.WorkspaceID + + p.TaskID = src.TaskID + + p.TaskType = src.TaskType + + p.RunStatus = src.RunStatus + + var _runDetail *RunDetail + if src.RunDetail != nil { + _runDetail = &RunDetail{} + if err := _runDetail.DeepCopy(src.RunDetail); err != nil { + return err + } + } + p.RunDetail = _runDetail + + var _backfillRunDetail *BackfillDetail + if src.BackfillRunDetail != nil { + _backfillRunDetail = &BackfillDetail{} + if err := _backfillRunDetail.DeepCopy(src.BackfillRunDetail); 
err != nil { + return err + } + } + p.BackfillRunDetail = _backfillRunDetail + + p.RunStartAt = src.RunStartAt + + p.RunEndAt = src.RunEndAt + + var _taskRunConfig *TaskRunConfig + if src.TaskRunConfig != nil { + _taskRunConfig = &TaskRunConfig{} + if err := _taskRunConfig.DeepCopy(src.TaskRunConfig); err != nil { + return err + } + } + p.TaskRunConfig = _taskRunConfig + + var _baseInfo *common.BaseInfo + if src.BaseInfo != nil { + _baseInfo = &common.BaseInfo{} + if err := _baseInfo.DeepCopy(src.BaseInfo); err != nil { + return err + } + } + p.BaseInfo = _baseInfo + + return nil +} + +func (p *TaskRunConfig) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskRunConfig[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskRunConfig) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := NewAutoEvaluateRunConfig() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.AutoEvaluateRunConfig = _field + return offset, nil +} + +func (p *TaskRunConfig) FastReadField2(buf []byte) (int, error) { + offset := 0 + _field := NewDataReflowRunConfig() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.DataReflowRunConfig = _field + return offset, nil +} + +func (p *TaskRunConfig) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskRunConfig) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskRunConfig) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskRunConfig) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetAutoEvaluateRunConfig() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.AutoEvaluateRunConfig.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskRunConfig) 
fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetDataReflowRunConfig() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 2) + offset += p.DataReflowRunConfig.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskRunConfig) field1Length() int { + l := 0 + if p.IsSetAutoEvaluateRunConfig() { + l += thrift.Binary.FieldBeginLength() + l += p.AutoEvaluateRunConfig.BLength() + } + return l +} + +func (p *TaskRunConfig) field2Length() int { + l := 0 + if p.IsSetDataReflowRunConfig() { + l += thrift.Binary.FieldBeginLength() + l += p.DataReflowRunConfig.BLength() + } + return l +} + +func (p *TaskRunConfig) DeepCopy(s interface{}) error { + src, ok := s.(*TaskRunConfig) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _autoEvaluateRunConfig *AutoEvaluateRunConfig + if src.AutoEvaluateRunConfig != nil { + _autoEvaluateRunConfig = &AutoEvaluateRunConfig{} + if err := _autoEvaluateRunConfig.DeepCopy(src.AutoEvaluateRunConfig); err != nil { + return err + } + } + p.AutoEvaluateRunConfig = _autoEvaluateRunConfig + + var _dataReflowRunConfig *DataReflowRunConfig + if src.DataReflowRunConfig != nil { + _dataReflowRunConfig = &DataReflowRunConfig{} + if err := _dataReflowRunConfig.DeepCopy(src.DataReflowRunConfig); err != nil { + return err + } + } + p.DataReflowRunConfig = _dataReflowRunConfig + + return nil +} + +func (p *AutoEvaluateRunConfig) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetExptID bool = false + var issetExptRunID bool = false + var issetEvalID bool = false + var issetSchemaID bool = false + var issetEndAt bool = false + var issetCycleStartAt bool = false + var issetCycleEndAt bool = false + var issetStatus bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetExptID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetExptRunID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetEvalID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetSchemaID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == 
thrift.I64 { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetEndAt = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetCycleStartAt = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 8: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField8(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetCycleEndAt = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 9: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField9(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetExptID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetExptRunID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetEvalID { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetSchemaID { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetEndAt { + fieldId = 6 + goto RequiredFieldNotSetError + } + + if !issetCycleStartAt { + fieldId = 7 + goto RequiredFieldNotSetError + } + + if !issetCycleEndAt { + fieldId = 8 + goto RequiredFieldNotSetError + } + + if !issetStatus { + fieldId = 9 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_AutoEvaluateRunConfig[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_AutoEvaluateRunConfig[fieldId])) +} + +func (p *AutoEvaluateRunConfig) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.ExptID = _field + return offset, nil +} + +func (p *AutoEvaluateRunConfig) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.ExptRunID = _field + return offset, nil +} + +func (p *AutoEvaluateRunConfig) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.EvalID = _field + return offset, nil +} + +func (p *AutoEvaluateRunConfig) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + 
return offset, err + } else { + offset += l + _field = v + } + p.SchemaID = _field + return offset, nil +} + +func (p *AutoEvaluateRunConfig) FastReadField5(buf []byte) (int, error) { + offset := 0 + + var _field *string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.Schema = _field + return offset, nil +} + +func (p *AutoEvaluateRunConfig) FastReadField6(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.EndAt = _field + return offset, nil +} + +func (p *AutoEvaluateRunConfig) FastReadField7(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.CycleStartAt = _field + return offset, nil +} + +func (p *AutoEvaluateRunConfig) FastReadField8(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.CycleEndAt = _field + return offset, nil +} + +func (p *AutoEvaluateRunConfig) FastReadField9(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.Status = _field + return offset, nil +} + +func (p *AutoEvaluateRunConfig) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *AutoEvaluateRunConfig) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField6(buf[offset:], w) + offset += p.fastWriteField7(buf[offset:], w) + offset += p.fastWriteField8(buf[offset:], w) + offset += p.fastWriteField5(buf[offset:], w) + offset += p.fastWriteField9(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *AutoEvaluateRunConfig) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field7Length() + l += p.field8Length() + l += p.field9Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *AutoEvaluateRunConfig) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.ExptID) + return offset +} + +func (p *AutoEvaluateRunConfig) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 2) + offset += thrift.Binary.WriteI64(buf[offset:], p.ExptRunID) + return offset +} + +func (p *AutoEvaluateRunConfig) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 3) + offset += thrift.Binary.WriteI64(buf[offset:], p.EvalID) + return offset +} + +func (p *AutoEvaluateRunConfig) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += 
thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 4) + offset += thrift.Binary.WriteI64(buf[offset:], p.SchemaID) + return offset +} + +func (p *AutoEvaluateRunConfig) fastWriteField5(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSchema() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 5) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.Schema) + } + return offset +} + +func (p *AutoEvaluateRunConfig) fastWriteField6(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 6) + offset += thrift.Binary.WriteI64(buf[offset:], p.EndAt) + return offset +} + +func (p *AutoEvaluateRunConfig) fastWriteField7(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 7) + offset += thrift.Binary.WriteI64(buf[offset:], p.CycleStartAt) + return offset +} + +func (p *AutoEvaluateRunConfig) fastWriteField8(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 8) + offset += thrift.Binary.WriteI64(buf[offset:], p.CycleEndAt) + return offset +} + +func (p *AutoEvaluateRunConfig) fastWriteField9(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 9) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.Status) + return offset +} + +func (p *AutoEvaluateRunConfig) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *AutoEvaluateRunConfig) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *AutoEvaluateRunConfig) field3Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *AutoEvaluateRunConfig) field4Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *AutoEvaluateRunConfig) field5Length() int { + l := 0 + if p.IsSetSchema() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.Schema) + } + return l +} + +func (p *AutoEvaluateRunConfig) field6Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *AutoEvaluateRunConfig) field7Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *AutoEvaluateRunConfig) field8Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *AutoEvaluateRunConfig) field9Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.Status) + return l +} + +func (p *AutoEvaluateRunConfig) DeepCopy(s interface{}) error { + src, ok := s.(*AutoEvaluateRunConfig) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.ExptID = src.ExptID + + p.ExptRunID = src.ExptRunID + + p.EvalID = src.EvalID + + p.SchemaID = src.SchemaID + + if src.Schema != nil { + var tmp string + if *src.Schema != "" { + tmp = kutils.StringDeepCopy(*src.Schema) + } + p.Schema = &tmp + } + + p.EndAt = src.EndAt + + p.CycleStartAt = src.CycleStartAt + + p.CycleEndAt = src.CycleEndAt + + if src.Status != "" { + p.Status = kutils.StringDeepCopy(src.Status) + } + + return nil +} + +func (p 
*DataReflowRunConfig) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetDatasetID bool = false + var issetDatasetRunID bool = false + var issetEndAt bool = false + var issetCycleStartAt bool = false + var issetCycleEndAt bool = false + var issetStatus bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetDatasetID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetDatasetRunID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetEndAt = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetCycleStartAt = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetCycleEndAt = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStatus = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetDatasetID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetDatasetRunID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetEndAt { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetCycleStartAt { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetCycleEndAt { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetStatus { + fieldId = 6 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_DataReflowRunConfig[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", 
fieldIDToName_DataReflowRunConfig[fieldId])) +} + +func (p *DataReflowRunConfig) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.DatasetID = _field + return offset, nil +} + +func (p *DataReflowRunConfig) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.DatasetRunID = _field + return offset, nil +} + +func (p *DataReflowRunConfig) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.EndAt = _field + return offset, nil +} + +func (p *DataReflowRunConfig) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.CycleStartAt = _field + return offset, nil +} + +func (p *DataReflowRunConfig) FastReadField5(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.CycleEndAt = _field + return offset, nil +} + +func (p *DataReflowRunConfig) FastReadField6(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.Status = _field + return offset, nil +} + +func (p *DataReflowRunConfig) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *DataReflowRunConfig) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField5(buf[offset:], w) + offset += p.fastWriteField6(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *DataReflowRunConfig) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *DataReflowRunConfig) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.DatasetID) + return offset +} + +func (p *DataReflowRunConfig) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 2) + offset += thrift.Binary.WriteI64(buf[offset:], p.DatasetRunID) + return offset +} + +func (p *DataReflowRunConfig) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 3) + offset += thrift.Binary.WriteI64(buf[offset:], p.EndAt) + return offset +} + +func (p *DataReflowRunConfig) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += 
thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 4) + offset += thrift.Binary.WriteI64(buf[offset:], p.CycleStartAt) + return offset +} + +func (p *DataReflowRunConfig) fastWriteField5(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 5) + offset += thrift.Binary.WriteI64(buf[offset:], p.CycleEndAt) + return offset +} + +func (p *DataReflowRunConfig) fastWriteField6(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 6) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.Status) + return offset +} + +func (p *DataReflowRunConfig) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *DataReflowRunConfig) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *DataReflowRunConfig) field3Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *DataReflowRunConfig) field4Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *DataReflowRunConfig) field5Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *DataReflowRunConfig) field6Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.Status) + return l +} + +func (p *DataReflowRunConfig) DeepCopy(s interface{}) error { + src, ok := s.(*DataReflowRunConfig) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.DatasetID = src.DatasetID + + p.DatasetRunID = src.DatasetRunID + + p.EndAt = src.EndAt + + p.CycleStartAt = src.CycleStartAt + + p.CycleEndAt = src.CycleEndAt + + if src.Status != "" { + p.Status = kutils.StringDeepCopy(src.Status) + } + + return nil +} diff --git a/backend/kitex_gen/coze/loop/observability/domain/task/task.go b/backend/kitex_gen/coze/loop/observability/domain/task/task.go new file mode 100644 index 000000000..8f82e0063 --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/domain/task/task.go @@ -0,0 +1,6811 @@ +// Code generated by thriftgo (0.4.1). DO NOT EDIT. 
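Editor's note (not part of the generated diff): every struct in this generated package exposes the same kitex fastpath codec surface shown above — BLength() to size a buffer, FastWrite/FastWriteNocopy to encode, and FastRead to decode. Below is a minimal illustrative round trip, assuming TaskRun and the TaskRunType/RunStatus constants live in this package as the fastpath code suggests (import path inferred from the file path in this diff); the field values are invented for the example.

package main

import (
    "fmt"

    "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task"
)

func main() {
    // Populate all required TaskRun fields; FastRead reports an error if any is missing.
    run := &task.TaskRun{
        ID:          1,
        WorkspaceID: 100,
        TaskID:      42,
        TaskType:    task.TaskRunTypeNewData,
        RunStatus:   task.RunStatusDone,
        RunStartAt:  1700000000,
        RunEndAt:    1700000100,
    }

    // Size the buffer exactly with BLength, then encode with the fastpath writer.
    buf := make([]byte, run.BLength())
    n := run.FastWrite(buf)

    // Decode back into a fresh struct and read a couple of fields.
    decoded := &task.TaskRun{}
    if _, err := decoded.FastRead(buf[:n]); err != nil {
        panic(err)
    }
    fmt.Println(decoded.TaskID, decoded.RunStatus)
}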
+
+package task
+
+import (
+    "fmt"
+    "github.com/apache/thrift/lib/go/thrift"
+    "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common"
+    "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/dataset"
+    "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter"
+    "strings"
+)
+
+const (
+    TimeUnitDay = "day"
+
+    TimeUnitWeek = "week"
+
+    TimeUnitNull = "null"
+
+    TaskTypeAutoEval = "auto_evaluate"
+
+    TaskTypeAutoDataReflow = "auto_data_reflow"
+
+    TaskRunTypeBackFill = "back_fill"
+
+    TaskRunTypeNewData = "new_data"
+
+    TaskStatusUnstarted = "unstarted"
+
+    TaskStatusRunning = "running"
+
+    TaskStatusFailed = "failed"
+
+    TaskStatusSuccess = "success"
+
+    TaskStatusPending = "pending"
+
+    TaskStatusDisabled = "disabled"
+
+    RunStatusRunning = "running"
+
+    RunStatusDone = "done"
+)
+
+type TimeUnit = string
+
+type TaskType = string
+
+type TaskRunType = string
+
+type TaskStatus = string
+
+type RunStatus = string
+
+// Task
+type Task struct {
+    // Task ID
+    ID *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id" form:"id" query:"id"`
+    // Name
+    Name string `thrift:"name,2,required" frugal:"2,required,string" form:"name,required" json:"name,required" query:"name,required"`
+    // Description
+    Description *string `thrift:"description,3,optional" frugal:"3,optional,string" form:"description" json:"description,omitempty" query:"description"`
+    // Workspace the task belongs to
+    WorkspaceID *int64 `thrift:"workspace_id,4,optional" frugal:"4,optional,i64" json:"workspace_id" form:"workspace_id" query:"workspace_id"`
+    // Type
+    TaskType TaskType `thrift:"task_type,5,required" frugal:"5,required,string" form:"task_type,required" json:"task_type,required" query:"task_type,required"`
+    // Status
+    TaskStatus *TaskStatus `thrift:"task_status,6,optional" frugal:"6,optional,string" form:"task_status" json:"task_status,omitempty" query:"task_status"`
+    // Rule
+    Rule *Rule `thrift:"rule,7,optional" frugal:"7,optional,Rule" form:"rule" json:"rule,omitempty" query:"rule"`
+    // Configuration
+    TaskConfig *TaskConfig `thrift:"task_config,8,optional" frugal:"8,optional,TaskConfig" form:"task_config" json:"task_config,omitempty" query:"task_config"`
+    // Task run status details
+    TaskDetail *RunDetail `thrift:"task_detail,9,optional" frugal:"9,optional,RunDetail" form:"task_detail" json:"task_detail,omitempty" query:"task_detail"`
+    // Run details for the task's historical (backfill) data
+    BackfillTaskDetail *RunDetail `thrift:"backfill_task_detail,10,optional" frugal:"10,optional,RunDetail" form:"backfill_task_detail" json:"backfill_task_detail,omitempty" query:"backfill_task_detail"`
+    // Basic info
+    BaseInfo *common.BaseInfo `thrift:"base_info,100,optional" frugal:"100,optional,common.BaseInfo" form:"base_info" json:"base_info,omitempty" query:"base_info"`
+}
+
+func NewTask() *Task {
+    return &Task{}
+}
+
+func (p *Task) InitDefault() {
+}
+
+var Task_ID_DEFAULT int64
+
+func (p *Task) GetID() (v int64) {
+    if p == nil {
+        return
+    }
+    if !p.IsSetID() {
+        return Task_ID_DEFAULT
+    }
+    return *p.ID
+}
+
+func (p *Task) GetName() (v string) {
+    if p != nil {
+        return p.Name
+    }
+    return
+}
+
+var Task_Description_DEFAULT string
+
+func (p *Task) GetDescription() (v string) {
+    if p == nil {
+        return
+    }
+    if !p.IsSetDescription() {
+        return Task_Description_DEFAULT
+    }
+    return *p.Description
+}
+
+var Task_WorkspaceID_DEFAULT int64
+
+func (p *Task) GetWorkspaceID() (v int64) {
+    if p == nil {
+        return
+    }
+    if !p.IsSetWorkspaceID() {
+        return Task_WorkspaceID_DEFAULT
+    }
+    return *p.WorkspaceID
+}
+
+func (p *Task)
GetTaskType() (v TaskType) { + if p != nil { + return p.TaskType + } + return +} + +var Task_TaskStatus_DEFAULT TaskStatus + +func (p *Task) GetTaskStatus() (v TaskStatus) { + if p == nil { + return + } + if !p.IsSetTaskStatus() { + return Task_TaskStatus_DEFAULT + } + return *p.TaskStatus +} + +var Task_Rule_DEFAULT *Rule + +func (p *Task) GetRule() (v *Rule) { + if p == nil { + return + } + if !p.IsSetRule() { + return Task_Rule_DEFAULT + } + return p.Rule +} + +var Task_TaskConfig_DEFAULT *TaskConfig + +func (p *Task) GetTaskConfig() (v *TaskConfig) { + if p == nil { + return + } + if !p.IsSetTaskConfig() { + return Task_TaskConfig_DEFAULT + } + return p.TaskConfig +} + +var Task_TaskDetail_DEFAULT *RunDetail + +func (p *Task) GetTaskDetail() (v *RunDetail) { + if p == nil { + return + } + if !p.IsSetTaskDetail() { + return Task_TaskDetail_DEFAULT + } + return p.TaskDetail +} + +var Task_BackfillTaskDetail_DEFAULT *RunDetail + +func (p *Task) GetBackfillTaskDetail() (v *RunDetail) { + if p == nil { + return + } + if !p.IsSetBackfillTaskDetail() { + return Task_BackfillTaskDetail_DEFAULT + } + return p.BackfillTaskDetail +} + +var Task_BaseInfo_DEFAULT *common.BaseInfo + +func (p *Task) GetBaseInfo() (v *common.BaseInfo) { + if p == nil { + return + } + if !p.IsSetBaseInfo() { + return Task_BaseInfo_DEFAULT + } + return p.BaseInfo +} +func (p *Task) SetID(val *int64) { + p.ID = val +} +func (p *Task) SetName(val string) { + p.Name = val +} +func (p *Task) SetDescription(val *string) { + p.Description = val +} +func (p *Task) SetWorkspaceID(val *int64) { + p.WorkspaceID = val +} +func (p *Task) SetTaskType(val TaskType) { + p.TaskType = val +} +func (p *Task) SetTaskStatus(val *TaskStatus) { + p.TaskStatus = val +} +func (p *Task) SetRule(val *Rule) { + p.Rule = val +} +func (p *Task) SetTaskConfig(val *TaskConfig) { + p.TaskConfig = val +} +func (p *Task) SetTaskDetail(val *RunDetail) { + p.TaskDetail = val +} +func (p *Task) SetBackfillTaskDetail(val *RunDetail) { + p.BackfillTaskDetail = val +} +func (p *Task) SetBaseInfo(val *common.BaseInfo) { + p.BaseInfo = val +} + +var fieldIDToName_Task = map[int16]string{ + 1: "id", + 2: "name", + 3: "description", + 4: "workspace_id", + 5: "task_type", + 6: "task_status", + 7: "rule", + 8: "task_config", + 9: "task_detail", + 10: "backfill_task_detail", + 100: "base_info", +} + +func (p *Task) IsSetID() bool { + return p.ID != nil +} + +func (p *Task) IsSetDescription() bool { + return p.Description != nil +} + +func (p *Task) IsSetWorkspaceID() bool { + return p.WorkspaceID != nil +} + +func (p *Task) IsSetTaskStatus() bool { + return p.TaskStatus != nil +} + +func (p *Task) IsSetRule() bool { + return p.Rule != nil +} + +func (p *Task) IsSetTaskConfig() bool { + return p.TaskConfig != nil +} + +func (p *Task) IsSetTaskDetail() bool { + return p.TaskDetail != nil +} + +func (p *Task) IsSetBackfillTaskDetail() bool { + return p.BackfillTaskDetail != nil +} + +func (p *Task) IsSetBaseInfo() bool { + return p.BaseInfo != nil +} + +func (p *Task) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetName bool = false + var issetTaskType bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil 
{ + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetName = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + issetTaskType = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 100: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField100(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetName { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTaskType { + fieldId = 5 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_Task[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_Task[fieldId])) +} + +func (p *Task) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + 
_field = &v + } + p.ID = _field + return nil +} +func (p *Task) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Name = _field + return nil +} +func (p *Task) ReadField3(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Description = _field + return nil +} +func (p *Task) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.WorkspaceID = _field + return nil +} +func (p *Task) ReadField5(iprot thrift.TProtocol) error { + + var _field TaskType + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.TaskType = _field + return nil +} +func (p *Task) ReadField6(iprot thrift.TProtocol) error { + + var _field *TaskStatus + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TaskStatus = _field + return nil +} +func (p *Task) ReadField7(iprot thrift.TProtocol) error { + _field := NewRule() + if err := _field.Read(iprot); err != nil { + return err + } + p.Rule = _field + return nil +} +func (p *Task) ReadField8(iprot thrift.TProtocol) error { + _field := NewTaskConfig() + if err := _field.Read(iprot); err != nil { + return err + } + p.TaskConfig = _field + return nil +} +func (p *Task) ReadField9(iprot thrift.TProtocol) error { + _field := NewRunDetail() + if err := _field.Read(iprot); err != nil { + return err + } + p.TaskDetail = _field + return nil +} +func (p *Task) ReadField10(iprot thrift.TProtocol) error { + _field := NewRunDetail() + if err := _field.Read(iprot); err != nil { + return err + } + p.BackfillTaskDetail = _field + return nil +} +func (p *Task) ReadField100(iprot thrift.TProtocol) error { + _field := common.NewBaseInfo() + if err := _field.Read(iprot); err != nil { + return err + } + p.BaseInfo = _field + return nil +} + +func (p *Task) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("Task"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto WriteFieldError + } + if err = p.writeField100(oprot); err != nil { + fieldId = 100 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d 
error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *Task) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetID() { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.ID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *Task) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *Task) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDescription() { + if err = oprot.WriteFieldBegin("description", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Description); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *Task) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetWorkspaceID() { + if err = oprot.WriteFieldBegin("workspace_id", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.WorkspaceID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} +func (p *Task) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("task_type", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.TaskType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} +func (p *Task) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetTaskStatus() { + if err = oprot.WriteFieldBegin("task_status", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TaskStatus); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) 
+WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} +func (p *Task) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetRule() { + if err = oprot.WriteFieldBegin("rule", thrift.STRUCT, 7); err != nil { + goto WriteFieldBeginError + } + if err := p.Rule.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} +func (p *Task) writeField8(oprot thrift.TProtocol) (err error) { + if p.IsSetTaskConfig() { + if err = oprot.WriteFieldBegin("task_config", thrift.STRUCT, 8); err != nil { + goto WriteFieldBeginError + } + if err := p.TaskConfig.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} +func (p *Task) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetTaskDetail() { + if err = oprot.WriteFieldBegin("task_detail", thrift.STRUCT, 9); err != nil { + goto WriteFieldBeginError + } + if err := p.TaskDetail.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} +func (p *Task) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetBackfillTaskDetail() { + if err = oprot.WriteFieldBegin("backfill_task_detail", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err := p.BackfillTaskDetail.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} +func (p *Task) writeField100(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseInfo() { + if err = oprot.WriteFieldBegin("base_info", thrift.STRUCT, 100); err != nil { + goto WriteFieldBeginError + } + if err := p.BaseInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 100 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 100 end error: ", p), err) +} + +func (p *Task) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Task(%+v)", *p) + +} + +func (p *Task) DeepEqual(ano *Task) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ID) { + return false + } + if !p.Field2DeepEqual(ano.Name) { + return false + } + if !p.Field3DeepEqual(ano.Description) { + return false + } + if !p.Field4DeepEqual(ano.WorkspaceID) { + return false + } + if !p.Field5DeepEqual(ano.TaskType) { + return false + 
} + if !p.Field6DeepEqual(ano.TaskStatus) { + return false + } + if !p.Field7DeepEqual(ano.Rule) { + return false + } + if !p.Field8DeepEqual(ano.TaskConfig) { + return false + } + if !p.Field9DeepEqual(ano.TaskDetail) { + return false + } + if !p.Field10DeepEqual(ano.BackfillTaskDetail) { + return false + } + if !p.Field100DeepEqual(ano.BaseInfo) { + return false + } + return true +} + +func (p *Task) Field1DeepEqual(src *int64) bool { + + if p.ID == src { + return true + } else if p.ID == nil || src == nil { + return false + } + if *p.ID != *src { + return false + } + return true +} +func (p *Task) Field2DeepEqual(src string) bool { + + if strings.Compare(p.Name, src) != 0 { + return false + } + return true +} +func (p *Task) Field3DeepEqual(src *string) bool { + + if p.Description == src { + return true + } else if p.Description == nil || src == nil { + return false + } + if strings.Compare(*p.Description, *src) != 0 { + return false + } + return true +} +func (p *Task) Field4DeepEqual(src *int64) bool { + + if p.WorkspaceID == src { + return true + } else if p.WorkspaceID == nil || src == nil { + return false + } + if *p.WorkspaceID != *src { + return false + } + return true +} +func (p *Task) Field5DeepEqual(src TaskType) bool { + + if strings.Compare(p.TaskType, src) != 0 { + return false + } + return true +} +func (p *Task) Field6DeepEqual(src *TaskStatus) bool { + + if p.TaskStatus == src { + return true + } else if p.TaskStatus == nil || src == nil { + return false + } + if strings.Compare(*p.TaskStatus, *src) != 0 { + return false + } + return true +} +func (p *Task) Field7DeepEqual(src *Rule) bool { + + if !p.Rule.DeepEqual(src) { + return false + } + return true +} +func (p *Task) Field8DeepEqual(src *TaskConfig) bool { + + if !p.TaskConfig.DeepEqual(src) { + return false + } + return true +} +func (p *Task) Field9DeepEqual(src *RunDetail) bool { + + if !p.TaskDetail.DeepEqual(src) { + return false + } + return true +} +func (p *Task) Field10DeepEqual(src *RunDetail) bool { + + if !p.BackfillTaskDetail.DeepEqual(src) { + return false + } + return true +} +func (p *Task) Field100DeepEqual(src *common.BaseInfo) bool { + + if !p.BaseInfo.DeepEqual(src) { + return false + } + return true +} + +// Rule +type Rule struct { + // Span 过滤条件 + SpanFilters *filter.SpanFilterFields `thrift:"span_filters,1,optional" frugal:"1,optional,filter.SpanFilterFields" form:"span_filters" json:"span_filters,omitempty" query:"span_filters"` + // 采样配置 + Sampler *Sampler `thrift:"sampler,2,optional" frugal:"2,optional,Sampler" form:"sampler" json:"sampler,omitempty" query:"sampler"` + // 生效时间窗口 + EffectiveTime *EffectiveTime `thrift:"effective_time,3,optional" frugal:"3,optional,EffectiveTime" form:"effective_time" json:"effective_time,omitempty" query:"effective_time"` + // 历史数据生效时间窗口 + BackfillEffectiveTime *EffectiveTime `thrift:"backfill_effective_time,4,optional" frugal:"4,optional,EffectiveTime" form:"backfill_effective_time" json:"backfill_effective_time,omitempty" query:"backfill_effective_time"` +} + +func NewRule() *Rule { + return &Rule{} +} + +func (p *Rule) InitDefault() { +} + +var Rule_SpanFilters_DEFAULT *filter.SpanFilterFields + +func (p *Rule) GetSpanFilters() (v *filter.SpanFilterFields) { + if p == nil { + return + } + if !p.IsSetSpanFilters() { + return Rule_SpanFilters_DEFAULT + } + return p.SpanFilters +} + +var Rule_Sampler_DEFAULT *Sampler + +func (p *Rule) GetSampler() (v *Sampler) { + if p == nil { + return + } + if !p.IsSetSampler() { + return Rule_Sampler_DEFAULT + } + 
return p.Sampler +} + +var Rule_EffectiveTime_DEFAULT *EffectiveTime + +func (p *Rule) GetEffectiveTime() (v *EffectiveTime) { + if p == nil { + return + } + if !p.IsSetEffectiveTime() { + return Rule_EffectiveTime_DEFAULT + } + return p.EffectiveTime +} + +var Rule_BackfillEffectiveTime_DEFAULT *EffectiveTime + +func (p *Rule) GetBackfillEffectiveTime() (v *EffectiveTime) { + if p == nil { + return + } + if !p.IsSetBackfillEffectiveTime() { + return Rule_BackfillEffectiveTime_DEFAULT + } + return p.BackfillEffectiveTime +} +func (p *Rule) SetSpanFilters(val *filter.SpanFilterFields) { + p.SpanFilters = val +} +func (p *Rule) SetSampler(val *Sampler) { + p.Sampler = val +} +func (p *Rule) SetEffectiveTime(val *EffectiveTime) { + p.EffectiveTime = val +} +func (p *Rule) SetBackfillEffectiveTime(val *EffectiveTime) { + p.BackfillEffectiveTime = val +} + +var fieldIDToName_Rule = map[int16]string{ + 1: "span_filters", + 2: "sampler", + 3: "effective_time", + 4: "backfill_effective_time", +} + +func (p *Rule) IsSetSpanFilters() bool { + return p.SpanFilters != nil +} + +func (p *Rule) IsSetSampler() bool { + return p.Sampler != nil +} + +func (p *Rule) IsSetEffectiveTime() bool { + return p.EffectiveTime != nil +} + +func (p *Rule) IsSetBackfillEffectiveTime() bool { + return p.BackfillEffectiveTime != nil +} + +func (p *Rule) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_Rule[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *Rule) ReadField1(iprot thrift.TProtocol) error { + _field := filter.NewSpanFilterFields() + 
if err := _field.Read(iprot); err != nil { + return err + } + p.SpanFilters = _field + return nil +} +func (p *Rule) ReadField2(iprot thrift.TProtocol) error { + _field := NewSampler() + if err := _field.Read(iprot); err != nil { + return err + } + p.Sampler = _field + return nil +} +func (p *Rule) ReadField3(iprot thrift.TProtocol) error { + _field := NewEffectiveTime() + if err := _field.Read(iprot); err != nil { + return err + } + p.EffectiveTime = _field + return nil +} +func (p *Rule) ReadField4(iprot thrift.TProtocol) error { + _field := NewEffectiveTime() + if err := _field.Read(iprot); err != nil { + return err + } + p.BackfillEffectiveTime = _field + return nil +} + +func (p *Rule) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("Rule"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *Rule) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetSpanFilters() { + if err = oprot.WriteFieldBegin("span_filters", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.SpanFilters.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *Rule) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetSampler() { + if err = oprot.WriteFieldBegin("sampler", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.Sampler.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *Rule) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetEffectiveTime() { + if err = oprot.WriteFieldBegin("effective_time", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.EffectiveTime.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), 
err) +} +func (p *Rule) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBackfillEffectiveTime() { + if err = oprot.WriteFieldBegin("backfill_effective_time", thrift.STRUCT, 4); err != nil { + goto WriteFieldBeginError + } + if err := p.BackfillEffectiveTime.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *Rule) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Rule(%+v)", *p) + +} + +func (p *Rule) DeepEqual(ano *Rule) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.SpanFilters) { + return false + } + if !p.Field2DeepEqual(ano.Sampler) { + return false + } + if !p.Field3DeepEqual(ano.EffectiveTime) { + return false + } + if !p.Field4DeepEqual(ano.BackfillEffectiveTime) { + return false + } + return true +} + +func (p *Rule) Field1DeepEqual(src *filter.SpanFilterFields) bool { + + if !p.SpanFilters.DeepEqual(src) { + return false + } + return true +} +func (p *Rule) Field2DeepEqual(src *Sampler) bool { + + if !p.Sampler.DeepEqual(src) { + return false + } + return true +} +func (p *Rule) Field3DeepEqual(src *EffectiveTime) bool { + + if !p.EffectiveTime.DeepEqual(src) { + return false + } + return true +} +func (p *Rule) Field4DeepEqual(src *EffectiveTime) bool { + + if !p.BackfillEffectiveTime.DeepEqual(src) { + return false + } + return true +} + +type Sampler struct { + // 采样率 + SampleRate *float64 `thrift:"sample_rate,1,optional" frugal:"1,optional,double" form:"sample_rate" json:"sample_rate,omitempty" query:"sample_rate"` + // 采样上限 + SampleSize *int64 `thrift:"sample_size,2,optional" frugal:"2,optional,i64" form:"sample_size" json:"sample_size,omitempty" query:"sample_size"` + // 是否启动任务循环 + IsCycle *bool `thrift:"is_cycle,3,optional" frugal:"3,optional,bool" form:"is_cycle" json:"is_cycle,omitempty" query:"is_cycle"` + // 采样单次上限 + CycleCount *int64 `thrift:"cycle_count,4,optional" frugal:"4,optional,i64" form:"cycle_count" json:"cycle_count,omitempty" query:"cycle_count"` + // 循环间隔 + CycleInterval *int64 `thrift:"cycle_interval,5,optional" frugal:"5,optional,i64" form:"cycle_interval" json:"cycle_interval,omitempty" query:"cycle_interval"` + // 循环时间单位 + CycleTimeUnit *TimeUnit `thrift:"cycle_time_unit,6,optional" frugal:"6,optional,string" form:"cycle_time_unit" json:"cycle_time_unit,omitempty" query:"cycle_time_unit"` +} + +func NewSampler() *Sampler { + return &Sampler{} +} + +func (p *Sampler) InitDefault() { +} + +var Sampler_SampleRate_DEFAULT float64 + +func (p *Sampler) GetSampleRate() (v float64) { + if p == nil { + return + } + if !p.IsSetSampleRate() { + return Sampler_SampleRate_DEFAULT + } + return *p.SampleRate +} + +var Sampler_SampleSize_DEFAULT int64 + +func (p *Sampler) GetSampleSize() (v int64) { + if p == nil { + return + } + if !p.IsSetSampleSize() { + return Sampler_SampleSize_DEFAULT + } + return *p.SampleSize +} + +var Sampler_IsCycle_DEFAULT bool + +func (p *Sampler) GetIsCycle() (v bool) { + if p == nil { + return + } + if !p.IsSetIsCycle() { + return Sampler_IsCycle_DEFAULT + } + return *p.IsCycle +} + +var Sampler_CycleCount_DEFAULT int64 + +func (p *Sampler) GetCycleCount() (v int64) { + if p == nil { + return + } + if !p.IsSetCycleCount() { 
+ return Sampler_CycleCount_DEFAULT + } + return *p.CycleCount +} + +var Sampler_CycleInterval_DEFAULT int64 + +func (p *Sampler) GetCycleInterval() (v int64) { + if p == nil { + return + } + if !p.IsSetCycleInterval() { + return Sampler_CycleInterval_DEFAULT + } + return *p.CycleInterval +} + +var Sampler_CycleTimeUnit_DEFAULT TimeUnit + +func (p *Sampler) GetCycleTimeUnit() (v TimeUnit) { + if p == nil { + return + } + if !p.IsSetCycleTimeUnit() { + return Sampler_CycleTimeUnit_DEFAULT + } + return *p.CycleTimeUnit +} +func (p *Sampler) SetSampleRate(val *float64) { + p.SampleRate = val +} +func (p *Sampler) SetSampleSize(val *int64) { + p.SampleSize = val +} +func (p *Sampler) SetIsCycle(val *bool) { + p.IsCycle = val +} +func (p *Sampler) SetCycleCount(val *int64) { + p.CycleCount = val +} +func (p *Sampler) SetCycleInterval(val *int64) { + p.CycleInterval = val +} +func (p *Sampler) SetCycleTimeUnit(val *TimeUnit) { + p.CycleTimeUnit = val +} + +var fieldIDToName_Sampler = map[int16]string{ + 1: "sample_rate", + 2: "sample_size", + 3: "is_cycle", + 4: "cycle_count", + 5: "cycle_interval", + 6: "cycle_time_unit", +} + +func (p *Sampler) IsSetSampleRate() bool { + return p.SampleRate != nil +} + +func (p *Sampler) IsSetSampleSize() bool { + return p.SampleSize != nil +} + +func (p *Sampler) IsSetIsCycle() bool { + return p.IsCycle != nil +} + +func (p *Sampler) IsSetCycleCount() bool { + return p.CycleCount != nil +} + +func (p *Sampler) IsSetCycleInterval() bool { + return p.CycleInterval != nil +} + +func (p *Sampler) IsSetCycleTimeUnit() bool { + return p.CycleTimeUnit != nil +} + +func (p *Sampler) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.DOUBLE { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) 
+ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_Sampler[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *Sampler) ReadField1(iprot thrift.TProtocol) error { + + var _field *float64 + if v, err := iprot.ReadDouble(); err != nil { + return err + } else { + _field = &v + } + p.SampleRate = _field + return nil +} +func (p *Sampler) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.SampleSize = _field + return nil +} +func (p *Sampler) ReadField3(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.IsCycle = _field + return nil +} +func (p *Sampler) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CycleCount = _field + return nil +} +func (p *Sampler) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.CycleInterval = _field + return nil +} +func (p *Sampler) ReadField6(iprot thrift.TProtocol) error { + + var _field *TimeUnit + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.CycleTimeUnit = _field + return nil +} + +func (p *Sampler) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("Sampler"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *Sampler) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetSampleRate() { + if err = oprot.WriteFieldBegin("sample_rate", thrift.DOUBLE, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteDouble(*p.SampleRate); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return 
nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *Sampler) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetSampleSize() { + if err = oprot.WriteFieldBegin("sample_size", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.SampleSize); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *Sampler) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetIsCycle() { + if err = oprot.WriteFieldBegin("is_cycle", thrift.BOOL, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.IsCycle); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *Sampler) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetCycleCount() { + if err = oprot.WriteFieldBegin("cycle_count", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CycleCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} +func (p *Sampler) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetCycleInterval() { + if err = oprot.WriteFieldBegin("cycle_interval", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.CycleInterval); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} +func (p *Sampler) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetCycleTimeUnit() { + if err = oprot.WriteFieldBegin("cycle_time_unit", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.CycleTimeUnit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *Sampler) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Sampler(%+v)", *p) + +} + +func (p *Sampler) DeepEqual(ano *Sampler) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.SampleRate) { + return false + } + if !p.Field2DeepEqual(ano.SampleSize) { + return false + } + if 
!p.Field3DeepEqual(ano.IsCycle) { + return false + } + if !p.Field4DeepEqual(ano.CycleCount) { + return false + } + if !p.Field5DeepEqual(ano.CycleInterval) { + return false + } + if !p.Field6DeepEqual(ano.CycleTimeUnit) { + return false + } + return true +} + +func (p *Sampler) Field1DeepEqual(src *float64) bool { + + if p.SampleRate == src { + return true + } else if p.SampleRate == nil || src == nil { + return false + } + if *p.SampleRate != *src { + return false + } + return true +} +func (p *Sampler) Field2DeepEqual(src *int64) bool { + + if p.SampleSize == src { + return true + } else if p.SampleSize == nil || src == nil { + return false + } + if *p.SampleSize != *src { + return false + } + return true +} +func (p *Sampler) Field3DeepEqual(src *bool) bool { + + if p.IsCycle == src { + return true + } else if p.IsCycle == nil || src == nil { + return false + } + if *p.IsCycle != *src { + return false + } + return true +} +func (p *Sampler) Field4DeepEqual(src *int64) bool { + + if p.CycleCount == src { + return true + } else if p.CycleCount == nil || src == nil { + return false + } + if *p.CycleCount != *src { + return false + } + return true +} +func (p *Sampler) Field5DeepEqual(src *int64) bool { + + if p.CycleInterval == src { + return true + } else if p.CycleInterval == nil || src == nil { + return false + } + if *p.CycleInterval != *src { + return false + } + return true +} +func (p *Sampler) Field6DeepEqual(src *TimeUnit) bool { + + if p.CycleTimeUnit == src { + return true + } else if p.CycleTimeUnit == nil || src == nil { + return false + } + if strings.Compare(*p.CycleTimeUnit, *src) != 0 { + return false + } + return true +} + +type EffectiveTime struct { + // ms timestamp + StartAt *int64 `thrift:"start_at,1,optional" frugal:"1,optional,i64" json:"start_at" form:"start_at" query:"start_at"` + // ms timestamp + EndAt *int64 `thrift:"end_at,2,optional" frugal:"2,optional,i64" json:"end_at" form:"end_at" query:"end_at"` +} + +func NewEffectiveTime() *EffectiveTime { + return &EffectiveTime{} +} + +func (p *EffectiveTime) InitDefault() { +} + +var EffectiveTime_StartAt_DEFAULT int64 + +func (p *EffectiveTime) GetStartAt() (v int64) { + if p == nil { + return + } + if !p.IsSetStartAt() { + return EffectiveTime_StartAt_DEFAULT + } + return *p.StartAt +} + +var EffectiveTime_EndAt_DEFAULT int64 + +func (p *EffectiveTime) GetEndAt() (v int64) { + if p == nil { + return + } + if !p.IsSetEndAt() { + return EffectiveTime_EndAt_DEFAULT + } + return *p.EndAt +} +func (p *EffectiveTime) SetStartAt(val *int64) { + p.StartAt = val +} +func (p *EffectiveTime) SetEndAt(val *int64) { + p.EndAt = val +} + +var fieldIDToName_EffectiveTime = map[int16]string{ + 1: "start_at", + 2: "end_at", +} + +func (p *EffectiveTime) IsSetStartAt() bool { + return p.StartAt != nil +} + +func (p *EffectiveTime) IsSetEndAt() bool { + return p.EndAt != nil +} + +func (p *EffectiveTime) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err 
!= nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_EffectiveTime[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *EffectiveTime) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.StartAt = _field + return nil +} +func (p *EffectiveTime) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.EndAt = _field + return nil +} + +func (p *EffectiveTime) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("EffectiveTime"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *EffectiveTime) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetStartAt() { + if err = oprot.WriteFieldBegin("start_at", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.StartAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *EffectiveTime) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetEndAt() { + if err = oprot.WriteFieldBegin("end_at", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.EndAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + 
return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *EffectiveTime) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("EffectiveTime(%+v)", *p) + +} + +func (p *EffectiveTime) DeepEqual(ano *EffectiveTime) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.StartAt) { + return false + } + if !p.Field2DeepEqual(ano.EndAt) { + return false + } + return true +} + +func (p *EffectiveTime) Field1DeepEqual(src *int64) bool { + + if p.StartAt == src { + return true + } else if p.StartAt == nil || src == nil { + return false + } + if *p.StartAt != *src { + return false + } + return true +} +func (p *EffectiveTime) Field2DeepEqual(src *int64) bool { + + if p.EndAt == src { + return true + } else if p.EndAt == nil || src == nil { + return false + } + if *p.EndAt != *src { + return false + } + return true +} + +// TaskConfig +type TaskConfig struct { + // 配置的评测规则信息 + AutoEvaluateConfigs []*AutoEvaluateConfig `thrift:"auto_evaluate_configs,1,optional" frugal:"1,optional,list" form:"auto_evaluate_configs" json:"auto_evaluate_configs,omitempty" query:"auto_evaluate_configs"` + // 配置的数据回流的数据集信息 + DataReflowConfig []*DataReflowConfig `thrift:"data_reflow_config,2,optional" frugal:"2,optional,list" form:"data_reflow_config" json:"data_reflow_config,omitempty" query:"data_reflow_config"` +} + +func NewTaskConfig() *TaskConfig { + return &TaskConfig{} +} + +func (p *TaskConfig) InitDefault() { +} + +var TaskConfig_AutoEvaluateConfigs_DEFAULT []*AutoEvaluateConfig + +func (p *TaskConfig) GetAutoEvaluateConfigs() (v []*AutoEvaluateConfig) { + if p == nil { + return + } + if !p.IsSetAutoEvaluateConfigs() { + return TaskConfig_AutoEvaluateConfigs_DEFAULT + } + return p.AutoEvaluateConfigs +} + +var TaskConfig_DataReflowConfig_DEFAULT []*DataReflowConfig + +func (p *TaskConfig) GetDataReflowConfig() (v []*DataReflowConfig) { + if p == nil { + return + } + if !p.IsSetDataReflowConfig() { + return TaskConfig_DataReflowConfig_DEFAULT + } + return p.DataReflowConfig +} +func (p *TaskConfig) SetAutoEvaluateConfigs(val []*AutoEvaluateConfig) { + p.AutoEvaluateConfigs = val +} +func (p *TaskConfig) SetDataReflowConfig(val []*DataReflowConfig) { + p.DataReflowConfig = val +} + +var fieldIDToName_TaskConfig = map[int16]string{ + 1: "auto_evaluate_configs", + 2: "data_reflow_config", +} + +func (p *TaskConfig) IsSetAutoEvaluateConfigs() bool { + return p.AutoEvaluateConfigs != nil +} + +func (p *TaskConfig) IsSetDataReflowConfig() bool { + return p.DataReflowConfig != nil +} + +func (p *TaskConfig) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto 
ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskConfig[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskConfig) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*AutoEvaluateConfig, 0, size) + values := make([]AutoEvaluateConfig, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.AutoEvaluateConfigs = _field + return nil +} +func (p *TaskConfig) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*DataReflowConfig, 0, size) + values := make([]DataReflowConfig, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.DataReflowConfig = _field + return nil +} + +func (p *TaskConfig) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TaskConfig"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskConfig) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetAutoEvaluateConfigs() { + if err = oprot.WriteFieldBegin("auto_evaluate_configs", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.AutoEvaluateConfigs)); err != nil { + return err + } + for _, v := range p.AutoEvaluateConfigs { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + 
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *TaskConfig) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDataReflowConfig() { + if err = oprot.WriteFieldBegin("data_reflow_config", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.DataReflowConfig)); err != nil { + return err + } + for _, v := range p.DataReflowConfig { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TaskConfig) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskConfig(%+v)", *p) + +} + +func (p *TaskConfig) DeepEqual(ano *TaskConfig) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.AutoEvaluateConfigs) { + return false + } + if !p.Field2DeepEqual(ano.DataReflowConfig) { + return false + } + return true +} + +func (p *TaskConfig) Field1DeepEqual(src []*AutoEvaluateConfig) bool { + + if len(p.AutoEvaluateConfigs) != len(src) { + return false + } + for i, v := range p.AutoEvaluateConfigs { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *TaskConfig) Field2DeepEqual(src []*DataReflowConfig) bool { + + if len(p.DataReflowConfig) != len(src) { + return false + } + for i, v := range p.DataReflowConfig { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type DataReflowConfig struct { + // 数据集id,新增数据集时可为空 + DatasetID *int64 `thrift:"dataset_id,1,optional" frugal:"1,optional,i64" json:"dataset_id" form:"dataset_id" query:"dataset_id"` + // 数据集名称 + DatasetName *string `thrift:"dataset_name,2,optional" frugal:"2,optional,string" form:"dataset_name" json:"dataset_name,omitempty" query:"dataset_name"` + // 数据集列数据schema + DatasetSchema *dataset.DatasetSchema `thrift:"dataset_schema,3,optional" frugal:"3,optional,dataset.DatasetSchema" form:"dataset_schema" json:"dataset_schema,omitempty" query:"dataset_schema"` + FieldMappings []*dataset.FieldMapping `thrift:"field_mappings,4,optional" frugal:"4,optional,list" form:"field_mappings" json:"field_mappings,omitempty" query:"field_mappings"` +} + +func NewDataReflowConfig() *DataReflowConfig { + return &DataReflowConfig{} +} + +func (p *DataReflowConfig) InitDefault() { +} + +var DataReflowConfig_DatasetID_DEFAULT int64 + +func (p *DataReflowConfig) GetDatasetID() (v int64) { + if p == nil { + return + } + if !p.IsSetDatasetID() { + return DataReflowConfig_DatasetID_DEFAULT + } + return *p.DatasetID +} + +var DataReflowConfig_DatasetName_DEFAULT string + +func (p *DataReflowConfig) GetDatasetName() (v string) { + if p == nil { + return + } + if !p.IsSetDatasetName() { + return DataReflowConfig_DatasetName_DEFAULT + } + return *p.DatasetName +} + +var DataReflowConfig_DatasetSchema_DEFAULT *dataset.DatasetSchema + +func (p *DataReflowConfig) GetDatasetSchema() (v *dataset.DatasetSchema) { + if p == nil { + return + } + if !p.IsSetDatasetSchema() { + return DataReflowConfig_DatasetSchema_DEFAULT + } + return p.DatasetSchema +} + +var DataReflowConfig_FieldMappings_DEFAULT []*dataset.FieldMapping + +func 
(p *DataReflowConfig) GetFieldMappings() (v []*dataset.FieldMapping) { + if p == nil { + return + } + if !p.IsSetFieldMappings() { + return DataReflowConfig_FieldMappings_DEFAULT + } + return p.FieldMappings +} +func (p *DataReflowConfig) SetDatasetID(val *int64) { + p.DatasetID = val +} +func (p *DataReflowConfig) SetDatasetName(val *string) { + p.DatasetName = val +} +func (p *DataReflowConfig) SetDatasetSchema(val *dataset.DatasetSchema) { + p.DatasetSchema = val +} +func (p *DataReflowConfig) SetFieldMappings(val []*dataset.FieldMapping) { + p.FieldMappings = val +} + +var fieldIDToName_DataReflowConfig = map[int16]string{ + 1: "dataset_id", + 2: "dataset_name", + 3: "dataset_schema", + 4: "field_mappings", +} + +func (p *DataReflowConfig) IsSetDatasetID() bool { + return p.DatasetID != nil +} + +func (p *DataReflowConfig) IsSetDatasetName() bool { + return p.DatasetName != nil +} + +func (p *DataReflowConfig) IsSetDatasetSchema() bool { + return p.DatasetSchema != nil +} + +func (p *DataReflowConfig) IsSetFieldMappings() bool { + return p.FieldMappings != nil +} + +func (p *DataReflowConfig) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.LIST { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_DataReflowConfig[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *DataReflowConfig) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.DatasetID = _field + return nil +} +func (p *DataReflowConfig) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err 
:= iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.DatasetName = _field + return nil +} +func (p *DataReflowConfig) ReadField3(iprot thrift.TProtocol) error { + _field := dataset.NewDatasetSchema() + if err := _field.Read(iprot); err != nil { + return err + } + p.DatasetSchema = _field + return nil +} +func (p *DataReflowConfig) ReadField4(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*dataset.FieldMapping, 0, size) + values := make([]dataset.FieldMapping, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FieldMappings = _field + return nil +} + +func (p *DataReflowConfig) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("DataReflowConfig"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *DataReflowConfig) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetDatasetID() { + if err = oprot.WriteFieldBegin("dataset_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.DatasetID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *DataReflowConfig) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDatasetName() { + if err = oprot.WriteFieldBegin("dataset_name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.DatasetName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *DataReflowConfig) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetDatasetSchema() { + if err = oprot.WriteFieldBegin("dataset_schema", thrift.STRUCT, 3); err != nil { + goto WriteFieldBeginError + } + if err := p.DatasetSchema.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { 
+ goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *DataReflowConfig) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetFieldMappings() { + if err = oprot.WriteFieldBegin("field_mappings", thrift.LIST, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.FieldMappings)); err != nil { + return err + } + for _, v := range p.FieldMappings { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + +func (p *DataReflowConfig) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DataReflowConfig(%+v)", *p) + +} + +func (p *DataReflowConfig) DeepEqual(ano *DataReflowConfig) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.DatasetID) { + return false + } + if !p.Field2DeepEqual(ano.DatasetName) { + return false + } + if !p.Field3DeepEqual(ano.DatasetSchema) { + return false + } + if !p.Field4DeepEqual(ano.FieldMappings) { + return false + } + return true +} + +func (p *DataReflowConfig) Field1DeepEqual(src *int64) bool { + + if p.DatasetID == src { + return true + } else if p.DatasetID == nil || src == nil { + return false + } + if *p.DatasetID != *src { + return false + } + return true +} +func (p *DataReflowConfig) Field2DeepEqual(src *string) bool { + + if p.DatasetName == src { + return true + } else if p.DatasetName == nil || src == nil { + return false + } + if strings.Compare(*p.DatasetName, *src) != 0 { + return false + } + return true +} +func (p *DataReflowConfig) Field3DeepEqual(src *dataset.DatasetSchema) bool { + + if !p.DatasetSchema.DeepEqual(src) { + return false + } + return true +} +func (p *DataReflowConfig) Field4DeepEqual(src []*dataset.FieldMapping) bool { + + if len(p.FieldMappings) != len(src) { + return false + } + for i, v := range p.FieldMappings { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type AutoEvaluateConfig struct { + EvaluatorVersionID int64 `thrift:"evaluator_version_id,1,required" frugal:"1,required,i64" json:"evaluator_version_id" form:"evaluator_version_id,required" query:"evaluator_version_id,required"` + EvaluatorID int64 `thrift:"evaluator_id,2,required" frugal:"2,required,i64" json:"evaluator_id" form:"evaluator_id,required" query:"evaluator_id,required"` + FieldMappings []*EvaluateFieldMapping `thrift:"field_mappings,3,required" frugal:"3,required,list" form:"field_mappings,required" json:"field_mappings,required" query:"field_mappings,required"` +} + +func NewAutoEvaluateConfig() *AutoEvaluateConfig { + return &AutoEvaluateConfig{} +} + +func (p *AutoEvaluateConfig) InitDefault() { +} + +func (p *AutoEvaluateConfig) GetEvaluatorVersionID() (v int64) { + if p != nil { + return p.EvaluatorVersionID + } + return +} + +func (p *AutoEvaluateConfig) GetEvaluatorID() (v int64) { + if p != nil { + return p.EvaluatorID + } + return +} + +func (p *AutoEvaluateConfig) GetFieldMappings() 
(v []*EvaluateFieldMapping) { + if p != nil { + return p.FieldMappings + } + return +} +func (p *AutoEvaluateConfig) SetEvaluatorVersionID(val int64) { + p.EvaluatorVersionID = val +} +func (p *AutoEvaluateConfig) SetEvaluatorID(val int64) { + p.EvaluatorID = val +} +func (p *AutoEvaluateConfig) SetFieldMappings(val []*EvaluateFieldMapping) { + p.FieldMappings = val +} + +var fieldIDToName_AutoEvaluateConfig = map[int16]string{ + 1: "evaluator_version_id", + 2: "evaluator_id", + 3: "field_mappings", +} + +func (p *AutoEvaluateConfig) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetEvaluatorVersionID bool = false + var issetEvaluatorID bool = false + var issetFieldMappings bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetEvaluatorVersionID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetEvaluatorID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetFieldMappings = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetEvaluatorVersionID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetEvaluatorID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetFieldMappings { + fieldId = 3 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_AutoEvaluateConfig[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_AutoEvaluateConfig[fieldId])) +} + +func (p *AutoEvaluateConfig) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.EvaluatorVersionID = _field + return nil +} +func (p *AutoEvaluateConfig) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.EvaluatorID = _field + return nil +} +func (p 
*AutoEvaluateConfig) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*EvaluateFieldMapping, 0, size) + values := make([]EvaluateFieldMapping, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FieldMappings = _field + return nil +} + +func (p *AutoEvaluateConfig) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("AutoEvaluateConfig"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *AutoEvaluateConfig) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("evaluator_version_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.EvaluatorVersionID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *AutoEvaluateConfig) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("evaluator_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.EvaluatorID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *AutoEvaluateConfig) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("field_mappings", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.FieldMappings)); err != nil { + return err + } + for _, v := range p.FieldMappings { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *AutoEvaluateConfig) String() string { + 
if p == nil { + return "" + } + return fmt.Sprintf("AutoEvaluateConfig(%+v)", *p) + +} + +func (p *AutoEvaluateConfig) DeepEqual(ano *AutoEvaluateConfig) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.EvaluatorVersionID) { + return false + } + if !p.Field2DeepEqual(ano.EvaluatorID) { + return false + } + if !p.Field3DeepEqual(ano.FieldMappings) { + return false + } + return true +} + +func (p *AutoEvaluateConfig) Field1DeepEqual(src int64) bool { + + if p.EvaluatorVersionID != src { + return false + } + return true +} +func (p *AutoEvaluateConfig) Field2DeepEqual(src int64) bool { + + if p.EvaluatorID != src { + return false + } + return true +} +func (p *AutoEvaluateConfig) Field3DeepEqual(src []*EvaluateFieldMapping) bool { + + if len(p.FieldMappings) != len(src) { + return false + } + for i, v := range p.FieldMappings { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +// RunDetail +type RunDetail struct { + SuccessCount *int64 `thrift:"success_count,1,optional" frugal:"1,optional,i64" form:"success_count" json:"success_count,omitempty" query:"success_count"` + FailedCount *int64 `thrift:"failed_count,2,optional" frugal:"2,optional,i64" form:"failed_count" json:"failed_count,omitempty" query:"failed_count"` + TotalCount *int64 `thrift:"total_count,3,optional" frugal:"3,optional,i64" form:"total_count" json:"total_count,omitempty" query:"total_count"` +} + +func NewRunDetail() *RunDetail { + return &RunDetail{} +} + +func (p *RunDetail) InitDefault() { +} + +var RunDetail_SuccessCount_DEFAULT int64 + +func (p *RunDetail) GetSuccessCount() (v int64) { + if p == nil { + return + } + if !p.IsSetSuccessCount() { + return RunDetail_SuccessCount_DEFAULT + } + return *p.SuccessCount +} + +var RunDetail_FailedCount_DEFAULT int64 + +func (p *RunDetail) GetFailedCount() (v int64) { + if p == nil { + return + } + if !p.IsSetFailedCount() { + return RunDetail_FailedCount_DEFAULT + } + return *p.FailedCount +} + +var RunDetail_TotalCount_DEFAULT int64 + +func (p *RunDetail) GetTotalCount() (v int64) { + if p == nil { + return + } + if !p.IsSetTotalCount() { + return RunDetail_TotalCount_DEFAULT + } + return *p.TotalCount +} +func (p *RunDetail) SetSuccessCount(val *int64) { + p.SuccessCount = val +} +func (p *RunDetail) SetFailedCount(val *int64) { + p.FailedCount = val +} +func (p *RunDetail) SetTotalCount(val *int64) { + p.TotalCount = val +} + +var fieldIDToName_RunDetail = map[int16]string{ + 1: "success_count", + 2: "failed_count", + 3: "total_count", +} + +func (p *RunDetail) IsSetSuccessCount() bool { + return p.SuccessCount != nil +} + +func (p *RunDetail) IsSetFailedCount() bool { + return p.FailedCount != nil +} + +func (p *RunDetail) IsSetTotalCount() bool { + return p.TotalCount != nil +} + +func (p *RunDetail) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else 
if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_RunDetail[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *RunDetail) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.SuccessCount = _field + return nil +} +func (p *RunDetail) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FailedCount = _field + return nil +} +func (p *RunDetail) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TotalCount = _field + return nil +} + +func (p *RunDetail) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("RunDetail"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *RunDetail) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccessCount() { + if err = oprot.WriteFieldBegin("success_count", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.SuccessCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *RunDetail) writeField2(oprot 
thrift.TProtocol) (err error) { + if p.IsSetFailedCount() { + if err = oprot.WriteFieldBegin("failed_count", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FailedCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *RunDetail) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTotalCount() { + if err = oprot.WriteFieldBegin("total_count", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TotalCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} + +func (p *RunDetail) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("RunDetail(%+v)", *p) + +} + +func (p *RunDetail) DeepEqual(ano *RunDetail) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.SuccessCount) { + return false + } + if !p.Field2DeepEqual(ano.FailedCount) { + return false + } + if !p.Field3DeepEqual(ano.TotalCount) { + return false + } + return true +} + +func (p *RunDetail) Field1DeepEqual(src *int64) bool { + + if p.SuccessCount == src { + return true + } else if p.SuccessCount == nil || src == nil { + return false + } + if *p.SuccessCount != *src { + return false + } + return true +} +func (p *RunDetail) Field2DeepEqual(src *int64) bool { + + if p.FailedCount == src { + return true + } else if p.FailedCount == nil || src == nil { + return false + } + if *p.FailedCount != *src { + return false + } + return true +} +func (p *RunDetail) Field3DeepEqual(src *int64) bool { + + if p.TotalCount == src { + return true + } else if p.TotalCount == nil || src == nil { + return false + } + if *p.TotalCount != *src { + return false + } + return true +} + +type BackfillDetail struct { + SuccessCount *int64 `thrift:"success_count,1,optional" frugal:"1,optional,i64" form:"success_count" json:"success_count,omitempty" query:"success_count"` + FailedCount *int64 `thrift:"failed_count,2,optional" frugal:"2,optional,i64" form:"failed_count" json:"failed_count,omitempty" query:"failed_count"` + TotalCount *int64 `thrift:"total_count,3,optional" frugal:"3,optional,i64" form:"total_count" json:"total_count,omitempty" query:"total_count"` + BackfillStatus *RunStatus `thrift:"backfill_status,4,optional" frugal:"4,optional,string" form:"backfill_status" json:"backfill_status,omitempty" query:"backfill_status"` + LastSpanPageToken *string `thrift:"last_span_page_token,5,optional" frugal:"5,optional,string" form:"last_span_page_token" json:"last_span_page_token,omitempty" query:"last_span_page_token"` +} + +func NewBackfillDetail() *BackfillDetail { + return &BackfillDetail{} +} + +func (p *BackfillDetail) InitDefault() { +} + +var BackfillDetail_SuccessCount_DEFAULT int64 + +func (p *BackfillDetail) GetSuccessCount() (v int64) { + if p == nil { + return + } + if !p.IsSetSuccessCount() { + return BackfillDetail_SuccessCount_DEFAULT + } + return *p.SuccessCount +} + +var 
BackfillDetail_FailedCount_DEFAULT int64 + +func (p *BackfillDetail) GetFailedCount() (v int64) { + if p == nil { + return + } + if !p.IsSetFailedCount() { + return BackfillDetail_FailedCount_DEFAULT + } + return *p.FailedCount +} + +var BackfillDetail_TotalCount_DEFAULT int64 + +func (p *BackfillDetail) GetTotalCount() (v int64) { + if p == nil { + return + } + if !p.IsSetTotalCount() { + return BackfillDetail_TotalCount_DEFAULT + } + return *p.TotalCount +} + +var BackfillDetail_BackfillStatus_DEFAULT RunStatus + +func (p *BackfillDetail) GetBackfillStatus() (v RunStatus) { + if p == nil { + return + } + if !p.IsSetBackfillStatus() { + return BackfillDetail_BackfillStatus_DEFAULT + } + return *p.BackfillStatus +} + +var BackfillDetail_LastSpanPageToken_DEFAULT string + +func (p *BackfillDetail) GetLastSpanPageToken() (v string) { + if p == nil { + return + } + if !p.IsSetLastSpanPageToken() { + return BackfillDetail_LastSpanPageToken_DEFAULT + } + return *p.LastSpanPageToken +} +func (p *BackfillDetail) SetSuccessCount(val *int64) { + p.SuccessCount = val +} +func (p *BackfillDetail) SetFailedCount(val *int64) { + p.FailedCount = val +} +func (p *BackfillDetail) SetTotalCount(val *int64) { + p.TotalCount = val +} +func (p *BackfillDetail) SetBackfillStatus(val *RunStatus) { + p.BackfillStatus = val +} +func (p *BackfillDetail) SetLastSpanPageToken(val *string) { + p.LastSpanPageToken = val +} + +var fieldIDToName_BackfillDetail = map[int16]string{ + 1: "success_count", + 2: "failed_count", + 3: "total_count", + 4: "backfill_status", + 5: "last_span_page_token", +} + +func (p *BackfillDetail) IsSetSuccessCount() bool { + return p.SuccessCount != nil +} + +func (p *BackfillDetail) IsSetFailedCount() bool { + return p.FailedCount != nil +} + +func (p *BackfillDetail) IsSetTotalCount() bool { + return p.TotalCount != nil +} + +func (p *BackfillDetail) IsSetBackfillStatus() bool { + return p.BackfillStatus != nil +} + +func (p *BackfillDetail) IsSetLastSpanPageToken() bool { + return p.LastSpanPageToken != nil +} + +func (p *BackfillDetail) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != 
nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackfillDetail[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *BackfillDetail) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.SuccessCount = _field + return nil +} +func (p *BackfillDetail) ReadField2(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.FailedCount = _field + return nil +} +func (p *BackfillDetail) ReadField3(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TotalCount = _field + return nil +} +func (p *BackfillDetail) ReadField4(iprot thrift.TProtocol) error { + + var _field *RunStatus + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.BackfillStatus = _field + return nil +} +func (p *BackfillDetail) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.LastSpanPageToken = _field + return nil +} + +func (p *BackfillDetail) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("BackfillDetail"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *BackfillDetail) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccessCount() { + if err = oprot.WriteFieldBegin("success_count", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.SuccessCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + 
} + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *BackfillDetail) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetFailedCount() { + if err = oprot.WriteFieldBegin("failed_count", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.FailedCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *BackfillDetail) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTotalCount() { + if err = oprot.WriteFieldBegin("total_count", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TotalCount); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *BackfillDetail) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetBackfillStatus() { + if err = oprot.WriteFieldBegin("backfill_status", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.BackfillStatus); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} +func (p *BackfillDetail) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetLastSpanPageToken() { + if err = oprot.WriteFieldBegin("last_span_page_token", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.LastSpanPageToken); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} + +func (p *BackfillDetail) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BackfillDetail(%+v)", *p) + +} + +func (p *BackfillDetail) DeepEqual(ano *BackfillDetail) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.SuccessCount) { + return false + } + if !p.Field2DeepEqual(ano.FailedCount) { + return false + } + if !p.Field3DeepEqual(ano.TotalCount) { + return false + } + if !p.Field4DeepEqual(ano.BackfillStatus) { + return false + } + if !p.Field5DeepEqual(ano.LastSpanPageToken) { + return false + } + return true +} + +func (p *BackfillDetail) Field1DeepEqual(src *int64) bool { + + if p.SuccessCount == src { + return true + } else if p.SuccessCount == nil || src == nil { + return false + } + if *p.SuccessCount != *src { + return false + } + return true +} +func (p *BackfillDetail) Field2DeepEqual(src 
*int64) bool { + + if p.FailedCount == src { + return true + } else if p.FailedCount == nil || src == nil { + return false + } + if *p.FailedCount != *src { + return false + } + return true +} +func (p *BackfillDetail) Field3DeepEqual(src *int64) bool { + + if p.TotalCount == src { + return true + } else if p.TotalCount == nil || src == nil { + return false + } + if *p.TotalCount != *src { + return false + } + return true +} +func (p *BackfillDetail) Field4DeepEqual(src *RunStatus) bool { + + if p.BackfillStatus == src { + return true + } else if p.BackfillStatus == nil || src == nil { + return false + } + if strings.Compare(*p.BackfillStatus, *src) != 0 { + return false + } + return true +} +func (p *BackfillDetail) Field5DeepEqual(src *string) bool { + + if p.LastSpanPageToken == src { + return true + } else if p.LastSpanPageToken == nil || src == nil { + return false + } + if strings.Compare(*p.LastSpanPageToken, *src) != 0 { + return false + } + return true +} + +type EvaluateFieldMapping struct { + // Dataset field schema constraint + FieldSchema *dataset.FieldSchema `thrift:"field_schema,1,required" frugal:"1,required,dataset.FieldSchema" form:"field_schema,required" json:"field_schema,required" query:"field_schema,required"` + TraceFieldKey string `thrift:"trace_field_key,2,required" frugal:"2,required,string" form:"trace_field_key,required" json:"trace_field_key,required" query:"trace_field_key,required"` + TraceFieldJsonpath string `thrift:"trace_field_jsonpath,3,required" frugal:"3,required,string" form:"trace_field_jsonpath,required" json:"trace_field_jsonpath,required" query:"trace_field_jsonpath,required"` + EvalSetName *string `thrift:"eval_set_name,4,optional" frugal:"4,optional,string" form:"eval_set_name" json:"eval_set_name,omitempty" query:"eval_set_name"` +} + +func NewEvaluateFieldMapping() *EvaluateFieldMapping { + return &EvaluateFieldMapping{} +} + +func (p *EvaluateFieldMapping) InitDefault() { +} + +var EvaluateFieldMapping_FieldSchema_DEFAULT *dataset.FieldSchema + +func (p *EvaluateFieldMapping) GetFieldSchema() (v *dataset.FieldSchema) { + if p == nil { + return + } + if !p.IsSetFieldSchema() { + return EvaluateFieldMapping_FieldSchema_DEFAULT + } + return p.FieldSchema +} + +func (p *EvaluateFieldMapping) GetTraceFieldKey() (v string) { + if p != nil { + return p.TraceFieldKey + } + return +} + +func (p *EvaluateFieldMapping) GetTraceFieldJsonpath() (v string) { + if p != nil { + return p.TraceFieldJsonpath + } + return +} + +var EvaluateFieldMapping_EvalSetName_DEFAULT string + +func (p *EvaluateFieldMapping) GetEvalSetName() (v string) { + if p == nil { + return + } + if !p.IsSetEvalSetName() { + return EvaluateFieldMapping_EvalSetName_DEFAULT + } + return *p.EvalSetName +} +func (p *EvaluateFieldMapping) SetFieldSchema(val *dataset.FieldSchema) { + p.FieldSchema = val +} +func (p *EvaluateFieldMapping) SetTraceFieldKey(val string) { + p.TraceFieldKey = val +} +func (p *EvaluateFieldMapping) SetTraceFieldJsonpath(val string) { + p.TraceFieldJsonpath = val +} +func (p *EvaluateFieldMapping) SetEvalSetName(val *string) { + p.EvalSetName = val +} + +var fieldIDToName_EvaluateFieldMapping = map[int16]string{ + 1: "field_schema", + 2: "trace_field_key", + 3: "trace_field_jsonpath", + 4: "eval_set_name", +} + +func (p *EvaluateFieldMapping) IsSetFieldSchema() bool { + return p.FieldSchema != nil +} + +func (p *EvaluateFieldMapping) IsSetEvalSetName() bool { + return p.EvalSetName != nil +} + +func (p *EvaluateFieldMapping) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId 
thrift.TType + var fieldId int16 + var issetFieldSchema bool = false + var issetTraceFieldKey bool = false + var issetTraceFieldJsonpath bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetFieldSchema = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetTraceFieldKey = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetTraceFieldJsonpath = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetFieldSchema { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetTraceFieldKey { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTraceFieldJsonpath { + fieldId = 3 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_EvaluateFieldMapping[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_EvaluateFieldMapping[fieldId])) +} + +func (p *EvaluateFieldMapping) ReadField1(iprot thrift.TProtocol) error { + _field := dataset.NewFieldSchema() + if err := _field.Read(iprot); err != nil { + return err + } + p.FieldSchema = _field + return nil +} +func (p *EvaluateFieldMapping) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.TraceFieldKey = _field + return nil +} +func (p *EvaluateFieldMapping) ReadField3(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.TraceFieldJsonpath = _field + return nil +} +func (p *EvaluateFieldMapping) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + 
p.EvalSetName = _field + return nil +} + +func (p *EvaluateFieldMapping) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("EvaluateFieldMapping"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *EvaluateFieldMapping) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("field_schema", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.FieldSchema.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *EvaluateFieldMapping) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("trace_field_key", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.TraceFieldKey); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *EvaluateFieldMapping) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("trace_field_jsonpath", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.TraceFieldJsonpath); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *EvaluateFieldMapping) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetEvalSetName() { + if err = oprot.WriteFieldBegin("eval_set_name", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.EvalSetName); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} + 
+func (p *EvaluateFieldMapping) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("EvaluateFieldMapping(%+v)", *p) + +} + +func (p *EvaluateFieldMapping) DeepEqual(ano *EvaluateFieldMapping) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.FieldSchema) { + return false + } + if !p.Field2DeepEqual(ano.TraceFieldKey) { + return false + } + if !p.Field3DeepEqual(ano.TraceFieldJsonpath) { + return false + } + if !p.Field4DeepEqual(ano.EvalSetName) { + return false + } + return true +} + +func (p *EvaluateFieldMapping) Field1DeepEqual(src *dataset.FieldSchema) bool { + + if !p.FieldSchema.DeepEqual(src) { + return false + } + return true +} +func (p *EvaluateFieldMapping) Field2DeepEqual(src string) bool { + + if strings.Compare(p.TraceFieldKey, src) != 0 { + return false + } + return true +} +func (p *EvaluateFieldMapping) Field3DeepEqual(src string) bool { + + if strings.Compare(p.TraceFieldJsonpath, src) != 0 { + return false + } + return true +} +func (p *EvaluateFieldMapping) Field4DeepEqual(src *string) bool { + + if p.EvalSetName == src { + return true + } else if p.EvalSetName == nil || src == nil { + return false + } + if strings.Compare(*p.EvalSetName, *src) != 0 { + return false + } + return true +} + +// TaskRun +type TaskRun struct { + // Task run ID + ID int64 `thrift:"id,1,required" frugal:"1,required,i64" json:"id" form:"id,required" query:"id,required"` + // Workspace the run belongs to + WorkspaceID int64 `thrift:"workspace_id,2,required" frugal:"2,required,i64" json:"workspace_id" form:"workspace_id,required" query:"workspace_id,required"` + // Task ID + TaskID int64 `thrift:"task_id,3,required" frugal:"3,required,i64" json:"task_id" form:"task_id,required" query:"task_id,required"` + // Type + TaskType TaskRunType `thrift:"task_type,4,required" frugal:"4,required,string" form:"task_type,required" json:"task_type,required" query:"task_type,required"` + // Status + RunStatus RunStatus `thrift:"run_status,5,required" frugal:"5,required,string" form:"run_status,required" json:"run_status,required" query:"run_status,required"` + // Task run status details + RunDetail *RunDetail `thrift:"run_detail,6,optional" frugal:"6,optional,RunDetail" form:"run_detail" json:"run_detail,omitempty" query:"run_detail"` + // Execution details for the task's historical (backfill) data + BackfillRunDetail *BackfillDetail `thrift:"backfill_run_detail,7,optional" frugal:"7,optional,BackfillDetail" form:"backfill_run_detail" json:"backfill_run_detail,omitempty" query:"backfill_run_detail"` + RunStartAt int64 `thrift:"run_start_at,8,required" frugal:"8,required,i64" json:"run_start_at" form:"run_start_at,required" query:"run_start_at,required"` + RunEndAt int64 `thrift:"run_end_at,9,required" frugal:"9,required,i64" json:"run_end_at" form:"run_end_at,required" query:"run_end_at,required"` + // Configuration + TaskRunConfig *TaskRunConfig `thrift:"task_run_config,10,optional" frugal:"10,optional,TaskRunConfig" form:"task_run_config" json:"task_run_config,omitempty" query:"task_run_config"` + // Basic info + BaseInfo *common.BaseInfo `thrift:"base_info,100,optional" frugal:"100,optional,common.BaseInfo" form:"base_info" json:"base_info,omitempty" query:"base_info"` +} + +func NewTaskRun() *TaskRun { + return &TaskRun{} +} + +func (p *TaskRun) InitDefault() { +} + +func (p *TaskRun) GetID() (v int64) { + if p != nil { + return p.ID + } + return +} + +func (p *TaskRun) GetWorkspaceID() (v int64) { + if p != nil { + return p.WorkspaceID + } + return +} + +func (p *TaskRun) GetTaskID() (v int64) { + if p != nil { + return p.TaskID 
+ } + return +} + +func (p *TaskRun) GetTaskType() (v TaskRunType) { + if p != nil { + return p.TaskType + } + return +} + +func (p *TaskRun) GetRunStatus() (v RunStatus) { + if p != nil { + return p.RunStatus + } + return +} + +var TaskRun_RunDetail_DEFAULT *RunDetail + +func (p *TaskRun) GetRunDetail() (v *RunDetail) { + if p == nil { + return + } + if !p.IsSetRunDetail() { + return TaskRun_RunDetail_DEFAULT + } + return p.RunDetail +} + +var TaskRun_BackfillRunDetail_DEFAULT *BackfillDetail + +func (p *TaskRun) GetBackfillRunDetail() (v *BackfillDetail) { + if p == nil { + return + } + if !p.IsSetBackfillRunDetail() { + return TaskRun_BackfillRunDetail_DEFAULT + } + return p.BackfillRunDetail +} + +func (p *TaskRun) GetRunStartAt() (v int64) { + if p != nil { + return p.RunStartAt + } + return +} + +func (p *TaskRun) GetRunEndAt() (v int64) { + if p != nil { + return p.RunEndAt + } + return +} + +var TaskRun_TaskRunConfig_DEFAULT *TaskRunConfig + +func (p *TaskRun) GetTaskRunConfig() (v *TaskRunConfig) { + if p == nil { + return + } + if !p.IsSetTaskRunConfig() { + return TaskRun_TaskRunConfig_DEFAULT + } + return p.TaskRunConfig +} + +var TaskRun_BaseInfo_DEFAULT *common.BaseInfo + +func (p *TaskRun) GetBaseInfo() (v *common.BaseInfo) { + if p == nil { + return + } + if !p.IsSetBaseInfo() { + return TaskRun_BaseInfo_DEFAULT + } + return p.BaseInfo +} +func (p *TaskRun) SetID(val int64) { + p.ID = val +} +func (p *TaskRun) SetWorkspaceID(val int64) { + p.WorkspaceID = val +} +func (p *TaskRun) SetTaskID(val int64) { + p.TaskID = val +} +func (p *TaskRun) SetTaskType(val TaskRunType) { + p.TaskType = val +} +func (p *TaskRun) SetRunStatus(val RunStatus) { + p.RunStatus = val +} +func (p *TaskRun) SetRunDetail(val *RunDetail) { + p.RunDetail = val +} +func (p *TaskRun) SetBackfillRunDetail(val *BackfillDetail) { + p.BackfillRunDetail = val +} +func (p *TaskRun) SetRunStartAt(val int64) { + p.RunStartAt = val +} +func (p *TaskRun) SetRunEndAt(val int64) { + p.RunEndAt = val +} +func (p *TaskRun) SetTaskRunConfig(val *TaskRunConfig) { + p.TaskRunConfig = val +} +func (p *TaskRun) SetBaseInfo(val *common.BaseInfo) { + p.BaseInfo = val +} + +var fieldIDToName_TaskRun = map[int16]string{ + 1: "id", + 2: "workspace_id", + 3: "task_id", + 4: "task_type", + 5: "run_status", + 6: "run_detail", + 7: "backfill_run_detail", + 8: "run_start_at", + 9: "run_end_at", + 10: "task_run_config", + 100: "base_info", +} + +func (p *TaskRun) IsSetRunDetail() bool { + return p.RunDetail != nil +} + +func (p *TaskRun) IsSetBackfillRunDetail() bool { + return p.BackfillRunDetail != nil +} + +func (p *TaskRun) IsSetTaskRunConfig() bool { + return p.TaskRunConfig != nil +} + +func (p *TaskRun) IsSetBaseInfo() bool { + return p.BaseInfo != nil +} + +func (p *TaskRun) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetID bool = false + var issetWorkspaceID bool = false + var issetTaskID bool = false + var issetTaskType bool = false + var issetRunStatus bool = false + var issetRunStartAt bool = false + var issetRunEndAt bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetID = true + } else if err = iprot.Skip(fieldTypeId); 
err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetTaskID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetTaskType = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + issetRunStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + issetRunStartAt = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.I64 { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + issetRunEndAt = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 10: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField10(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 100: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField100(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetWorkspaceID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetTaskID { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetTaskType { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetRunStatus { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetRunStartAt { + fieldId = 8 + goto RequiredFieldNotSetError + } + + if !issetRunEndAt { + fieldId = 9 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskRun[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return 
thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TaskRun[fieldId])) +} + +func (p *TaskRun) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.ID = _field + return nil +} +func (p *TaskRun) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.WorkspaceID = _field + return nil +} +func (p *TaskRun) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TaskID = _field + return nil +} +func (p *TaskRun) ReadField4(iprot thrift.TProtocol) error { + + var _field TaskRunType + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.TaskType = _field + return nil +} +func (p *TaskRun) ReadField5(iprot thrift.TProtocol) error { + + var _field RunStatus + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.RunStatus = _field + return nil +} +func (p *TaskRun) ReadField6(iprot thrift.TProtocol) error { + _field := NewRunDetail() + if err := _field.Read(iprot); err != nil { + return err + } + p.RunDetail = _field + return nil +} +func (p *TaskRun) ReadField7(iprot thrift.TProtocol) error { + _field := NewBackfillDetail() + if err := _field.Read(iprot); err != nil { + return err + } + p.BackfillRunDetail = _field + return nil +} +func (p *TaskRun) ReadField8(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.RunStartAt = _field + return nil +} +func (p *TaskRun) ReadField9(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.RunEndAt = _field + return nil +} +func (p *TaskRun) ReadField10(iprot thrift.TProtocol) error { + _field := NewTaskRunConfig() + if err := _field.Read(iprot); err != nil { + return err + } + p.TaskRunConfig = _field + return nil +} +func (p *TaskRun) ReadField100(iprot thrift.TProtocol) error { + _field := common.NewBaseInfo() + if err := _field.Read(iprot); err != nil { + return err + } + p.BaseInfo = _field + return nil +} + +func (p *TaskRun) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TaskRun"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + if err = p.writeField10(oprot); err != nil { + fieldId = 10 + goto 
WriteFieldError + } + if err = p.writeField100(oprot); err != nil { + fieldId = 100 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskRun) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.ID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *TaskRun) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("workspace_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.WorkspaceID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *TaskRun) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("task_id", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TaskID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *TaskRun) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("task_type", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.TaskType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} +func (p *TaskRun) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("run_status", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.RunStatus); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} +func (p *TaskRun) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetRunDetail() { + 
if err = oprot.WriteFieldBegin("run_detail", thrift.STRUCT, 6); err != nil { + goto WriteFieldBeginError + } + if err := p.RunDetail.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} +func (p *TaskRun) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetBackfillRunDetail() { + if err = oprot.WriteFieldBegin("backfill_run_detail", thrift.STRUCT, 7); err != nil { + goto WriteFieldBeginError + } + if err := p.BackfillRunDetail.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} +func (p *TaskRun) writeField8(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("run_start_at", thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.RunStartAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} +func (p *TaskRun) writeField9(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("run_end_at", thrift.I64, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.RunEndAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} +func (p *TaskRun) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetTaskRunConfig() { + if err = oprot.WriteFieldBegin("task_run_config", thrift.STRUCT, 10); err != nil { + goto WriteFieldBeginError + } + if err := p.TaskRunConfig.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err) +} +func (p *TaskRun) writeField100(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseInfo() { + if err = oprot.WriteFieldBegin("base_info", thrift.STRUCT, 100); err != nil { + goto WriteFieldBeginError + } + if err := p.BaseInfo.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 100 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 100 end error: ", p), err) +} + +func (p *TaskRun) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskRun(%+v)", *p) + +} + +func (p *TaskRun) DeepEqual(ano *TaskRun) bool { + if p 
== ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ID) { + return false + } + if !p.Field2DeepEqual(ano.WorkspaceID) { + return false + } + if !p.Field3DeepEqual(ano.TaskID) { + return false + } + if !p.Field4DeepEqual(ano.TaskType) { + return false + } + if !p.Field5DeepEqual(ano.RunStatus) { + return false + } + if !p.Field6DeepEqual(ano.RunDetail) { + return false + } + if !p.Field7DeepEqual(ano.BackfillRunDetail) { + return false + } + if !p.Field8DeepEqual(ano.RunStartAt) { + return false + } + if !p.Field9DeepEqual(ano.RunEndAt) { + return false + } + if !p.Field10DeepEqual(ano.TaskRunConfig) { + return false + } + if !p.Field100DeepEqual(ano.BaseInfo) { + return false + } + return true +} + +func (p *TaskRun) Field1DeepEqual(src int64) bool { + + if p.ID != src { + return false + } + return true +} +func (p *TaskRun) Field2DeepEqual(src int64) bool { + + if p.WorkspaceID != src { + return false + } + return true +} +func (p *TaskRun) Field3DeepEqual(src int64) bool { + + if p.TaskID != src { + return false + } + return true +} +func (p *TaskRun) Field4DeepEqual(src TaskRunType) bool { + + if strings.Compare(p.TaskType, src) != 0 { + return false + } + return true +} +func (p *TaskRun) Field5DeepEqual(src RunStatus) bool { + + if strings.Compare(p.RunStatus, src) != 0 { + return false + } + return true +} +func (p *TaskRun) Field6DeepEqual(src *RunDetail) bool { + + if !p.RunDetail.DeepEqual(src) { + return false + } + return true +} +func (p *TaskRun) Field7DeepEqual(src *BackfillDetail) bool { + + if !p.BackfillRunDetail.DeepEqual(src) { + return false + } + return true +} +func (p *TaskRun) Field8DeepEqual(src int64) bool { + + if p.RunStartAt != src { + return false + } + return true +} +func (p *TaskRun) Field9DeepEqual(src int64) bool { + + if p.RunEndAt != src { + return false + } + return true +} +func (p *TaskRun) Field10DeepEqual(src *TaskRunConfig) bool { + + if !p.TaskRunConfig.DeepEqual(src) { + return false + } + return true +} +func (p *TaskRun) Field100DeepEqual(src *common.BaseInfo) bool { + + if !p.BaseInfo.DeepEqual(src) { + return false + } + return true +} + +type TaskRunConfig struct { + // Run configuration for auto evaluation + AutoEvaluateRunConfig *AutoEvaluateRunConfig `thrift:"auto_evaluate_run_config,1,optional" frugal:"1,optional,AutoEvaluateRunConfig" form:"auto_evaluate_run_config" json:"auto_evaluate_run_config,omitempty" query:"auto_evaluate_run_config"` + // Run configuration for data reflow + DataReflowRunConfig *DataReflowRunConfig `thrift:"data_reflow_run_config,2,optional" frugal:"2,optional,DataReflowRunConfig" form:"data_reflow_run_config" json:"data_reflow_run_config,omitempty" query:"data_reflow_run_config"` +} + +func NewTaskRunConfig() *TaskRunConfig { + return &TaskRunConfig{} +} + +func (p *TaskRunConfig) InitDefault() { +} + +var TaskRunConfig_AutoEvaluateRunConfig_DEFAULT *AutoEvaluateRunConfig + +func (p *TaskRunConfig) GetAutoEvaluateRunConfig() (v *AutoEvaluateRunConfig) { + if p == nil { + return + } + if !p.IsSetAutoEvaluateRunConfig() { + return TaskRunConfig_AutoEvaluateRunConfig_DEFAULT + } + return p.AutoEvaluateRunConfig +} + +var TaskRunConfig_DataReflowRunConfig_DEFAULT *DataReflowRunConfig + +func (p *TaskRunConfig) GetDataReflowRunConfig() (v *DataReflowRunConfig) { + if p == nil { + return + } + if !p.IsSetDataReflowRunConfig() { + return TaskRunConfig_DataReflowRunConfig_DEFAULT + } + return p.DataReflowRunConfig +} +func (p *TaskRunConfig) SetAutoEvaluateRunConfig(val *AutoEvaluateRunConfig) { + 
p.AutoEvaluateRunConfig = val +} +func (p *TaskRunConfig) SetDataReflowRunConfig(val *DataReflowRunConfig) { + p.DataReflowRunConfig = val +} + +var fieldIDToName_TaskRunConfig = map[int16]string{ + 1: "auto_evaluate_run_config", + 2: "data_reflow_run_config", +} + +func (p *TaskRunConfig) IsSetAutoEvaluateRunConfig() bool { + return p.AutoEvaluateRunConfig != nil +} + +func (p *TaskRunConfig) IsSetDataReflowRunConfig() bool { + return p.DataReflowRunConfig != nil +} + +func (p *TaskRunConfig) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskRunConfig[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskRunConfig) ReadField1(iprot thrift.TProtocol) error { + _field := NewAutoEvaluateRunConfig() + if err := _field.Read(iprot); err != nil { + return err + } + p.AutoEvaluateRunConfig = _field + return nil +} +func (p *TaskRunConfig) ReadField2(iprot thrift.TProtocol) error { + _field := NewDataReflowRunConfig() + if err := _field.Read(iprot); err != nil { + return err + } + p.DataReflowRunConfig = _field + return nil +} + +func (p *TaskRunConfig) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("TaskRunConfig"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return 
thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskRunConfig) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetAutoEvaluateRunConfig() { + if err = oprot.WriteFieldBegin("auto_evaluate_run_config", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.AutoEvaluateRunConfig.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *TaskRunConfig) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetDataReflowRunConfig() { + if err = oprot.WriteFieldBegin("data_reflow_run_config", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.DataReflowRunConfig.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *TaskRunConfig) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskRunConfig(%+v)", *p) + +} + +func (p *TaskRunConfig) DeepEqual(ano *TaskRunConfig) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.AutoEvaluateRunConfig) { + return false + } + if !p.Field2DeepEqual(ano.DataReflowRunConfig) { + return false + } + return true +} + +func (p *TaskRunConfig) Field1DeepEqual(src *AutoEvaluateRunConfig) bool { + + if !p.AutoEvaluateRunConfig.DeepEqual(src) { + return false + } + return true +} +func (p *TaskRunConfig) Field2DeepEqual(src *DataReflowRunConfig) bool { + + if !p.DataReflowRunConfig.DeepEqual(src) { + return false + } + return true +} + +type AutoEvaluateRunConfig struct { + ExptID int64 `thrift:"expt_id,1,required" frugal:"1,required,i64" json:"expt_id" form:"expt_id,required" query:"expt_id,required"` + ExptRunID int64 `thrift:"expt_run_id,2,required" frugal:"2,required,i64" json:"expt_run_id" form:"expt_run_id,required" query:"expt_run_id,required"` + EvalID int64 `thrift:"eval_id,3,required" frugal:"3,required,i64" json:"eval_id" form:"eval_id,required" query:"eval_id,required"` + SchemaID int64 `thrift:"schema_id,4,required" frugal:"4,required,i64" json:"schema_id" form:"schema_id,required" query:"schema_id,required"` + Schema *string `thrift:"schema,5,optional" frugal:"5,optional,string" form:"schema" json:"schema,omitempty" query:"schema"` + EndAt int64 `thrift:"end_at,6,required" frugal:"6,required,i64" json:"end_at" form:"end_at,required" query:"end_at,required"` + CycleStartAt int64 `thrift:"cycle_start_at,7,required" frugal:"7,required,i64" json:"cycle_start_at" form:"cycle_start_at,required" query:"cycle_start_at,required"` + CycleEndAt int64 `thrift:"cycle_end_at,8,required" frugal:"8,required,i64" json:"cycle_end_at" form:"cycle_end_at,required" query:"cycle_end_at,required"` + Status string `thrift:"status,9,required" frugal:"9,required,string" form:"status,required" json:"status,required" query:"status,required"` +} + +func NewAutoEvaluateRunConfig() *AutoEvaluateRunConfig { + return &AutoEvaluateRunConfig{} +} + +func (p *AutoEvaluateRunConfig) InitDefault() { +} + 
+func (p *AutoEvaluateRunConfig) GetExptID() (v int64) { + if p != nil { + return p.ExptID + } + return +} + +func (p *AutoEvaluateRunConfig) GetExptRunID() (v int64) { + if p != nil { + return p.ExptRunID + } + return +} + +func (p *AutoEvaluateRunConfig) GetEvalID() (v int64) { + if p != nil { + return p.EvalID + } + return +} + +func (p *AutoEvaluateRunConfig) GetSchemaID() (v int64) { + if p != nil { + return p.SchemaID + } + return +} + +var AutoEvaluateRunConfig_Schema_DEFAULT string + +func (p *AutoEvaluateRunConfig) GetSchema() (v string) { + if p == nil { + return + } + if !p.IsSetSchema() { + return AutoEvaluateRunConfig_Schema_DEFAULT + } + return *p.Schema +} + +func (p *AutoEvaluateRunConfig) GetEndAt() (v int64) { + if p != nil { + return p.EndAt + } + return +} + +func (p *AutoEvaluateRunConfig) GetCycleStartAt() (v int64) { + if p != nil { + return p.CycleStartAt + } + return +} + +func (p *AutoEvaluateRunConfig) GetCycleEndAt() (v int64) { + if p != nil { + return p.CycleEndAt + } + return +} + +func (p *AutoEvaluateRunConfig) GetStatus() (v string) { + if p != nil { + return p.Status + } + return +} +func (p *AutoEvaluateRunConfig) SetExptID(val int64) { + p.ExptID = val +} +func (p *AutoEvaluateRunConfig) SetExptRunID(val int64) { + p.ExptRunID = val +} +func (p *AutoEvaluateRunConfig) SetEvalID(val int64) { + p.EvalID = val +} +func (p *AutoEvaluateRunConfig) SetSchemaID(val int64) { + p.SchemaID = val +} +func (p *AutoEvaluateRunConfig) SetSchema(val *string) { + p.Schema = val +} +func (p *AutoEvaluateRunConfig) SetEndAt(val int64) { + p.EndAt = val +} +func (p *AutoEvaluateRunConfig) SetCycleStartAt(val int64) { + p.CycleStartAt = val +} +func (p *AutoEvaluateRunConfig) SetCycleEndAt(val int64) { + p.CycleEndAt = val +} +func (p *AutoEvaluateRunConfig) SetStatus(val string) { + p.Status = val +} + +var fieldIDToName_AutoEvaluateRunConfig = map[int16]string{ + 1: "expt_id", + 2: "expt_run_id", + 3: "eval_id", + 4: "schema_id", + 5: "schema", + 6: "end_at", + 7: "cycle_start_at", + 8: "cycle_end_at", + 9: "status", +} + +func (p *AutoEvaluateRunConfig) IsSetSchema() bool { + return p.Schema != nil +} + +func (p *AutoEvaluateRunConfig) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetExptID bool = false + var issetExptRunID bool = false + var issetEvalID bool = false + var issetSchemaID bool = false + var issetEndAt bool = false + var issetCycleStartAt bool = false + var issetCycleEndAt bool = false + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetExptID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetExptRunID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetEvalID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != 
nil { + goto ReadFieldError + } + issetSchemaID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRING { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.I64 { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + issetEndAt = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.I64 { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + issetCycleStartAt = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 8: + if fieldTypeId == thrift.I64 { + if err = p.ReadField8(iprot); err != nil { + goto ReadFieldError + } + issetCycleEndAt = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 9: + if fieldTypeId == thrift.STRING { + if err = p.ReadField9(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetExptID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetExptRunID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetEvalID { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetSchemaID { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetEndAt { + fieldId = 6 + goto RequiredFieldNotSetError + } + + if !issetCycleStartAt { + fieldId = 7 + goto RequiredFieldNotSetError + } + + if !issetCycleEndAt { + fieldId = 8 + goto RequiredFieldNotSetError + } + + if !issetStatus { + fieldId = 9 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_AutoEvaluateRunConfig[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_AutoEvaluateRunConfig[fieldId])) +} + +func (p *AutoEvaluateRunConfig) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.ExptID = _field + return nil +} +func (p *AutoEvaluateRunConfig) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.ExptRunID = _field + return nil +} +func (p *AutoEvaluateRunConfig) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } 
else { + _field = v + } + p.EvalID = _field + return nil +} +func (p *AutoEvaluateRunConfig) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.SchemaID = _field + return nil +} +func (p *AutoEvaluateRunConfig) ReadField5(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Schema = _field + return nil +} +func (p *AutoEvaluateRunConfig) ReadField6(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.EndAt = _field + return nil +} +func (p *AutoEvaluateRunConfig) ReadField7(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.CycleStartAt = _field + return nil +} +func (p *AutoEvaluateRunConfig) ReadField8(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.CycleEndAt = _field + return nil +} +func (p *AutoEvaluateRunConfig) ReadField9(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Status = _field + return nil +} + +func (p *AutoEvaluateRunConfig) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("AutoEvaluateRunConfig"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField8(oprot); err != nil { + fieldId = 8 + goto WriteFieldError + } + if err = p.writeField9(oprot); err != nil { + fieldId = 9 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *AutoEvaluateRunConfig) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("expt_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.ExptID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) 
+} +func (p *AutoEvaluateRunConfig) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("expt_run_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.ExptRunID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *AutoEvaluateRunConfig) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("eval_id", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.EvalID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *AutoEvaluateRunConfig) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("schema_id", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.SchemaID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} +func (p *AutoEvaluateRunConfig) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetSchema() { + if err = oprot.WriteFieldBegin("schema", thrift.STRING, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Schema); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} +func (p *AutoEvaluateRunConfig) writeField6(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("end_at", thrift.I64, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.EndAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} +func (p *AutoEvaluateRunConfig) writeField7(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("cycle_start_at", thrift.I64, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.CycleStartAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} +func (p *AutoEvaluateRunConfig) writeField8(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("cycle_end_at", 
thrift.I64, 8); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.CycleEndAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err) +} +func (p *AutoEvaluateRunConfig) writeField9(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRING, 9); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Status); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err) +} + +func (p *AutoEvaluateRunConfig) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AutoEvaluateRunConfig(%+v)", *p) + +} + +func (p *AutoEvaluateRunConfig) DeepEqual(ano *AutoEvaluateRunConfig) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.ExptID) { + return false + } + if !p.Field2DeepEqual(ano.ExptRunID) { + return false + } + if !p.Field3DeepEqual(ano.EvalID) { + return false + } + if !p.Field4DeepEqual(ano.SchemaID) { + return false + } + if !p.Field5DeepEqual(ano.Schema) { + return false + } + if !p.Field6DeepEqual(ano.EndAt) { + return false + } + if !p.Field7DeepEqual(ano.CycleStartAt) { + return false + } + if !p.Field8DeepEqual(ano.CycleEndAt) { + return false + } + if !p.Field9DeepEqual(ano.Status) { + return false + } + return true +} + +func (p *AutoEvaluateRunConfig) Field1DeepEqual(src int64) bool { + + if p.ExptID != src { + return false + } + return true +} +func (p *AutoEvaluateRunConfig) Field2DeepEqual(src int64) bool { + + if p.ExptRunID != src { + return false + } + return true +} +func (p *AutoEvaluateRunConfig) Field3DeepEqual(src int64) bool { + + if p.EvalID != src { + return false + } + return true +} +func (p *AutoEvaluateRunConfig) Field4DeepEqual(src int64) bool { + + if p.SchemaID != src { + return false + } + return true +} +func (p *AutoEvaluateRunConfig) Field5DeepEqual(src *string) bool { + + if p.Schema == src { + return true + } else if p.Schema == nil || src == nil { + return false + } + if strings.Compare(*p.Schema, *src) != 0 { + return false + } + return true +} +func (p *AutoEvaluateRunConfig) Field6DeepEqual(src int64) bool { + + if p.EndAt != src { + return false + } + return true +} +func (p *AutoEvaluateRunConfig) Field7DeepEqual(src int64) bool { + + if p.CycleStartAt != src { + return false + } + return true +} +func (p *AutoEvaluateRunConfig) Field8DeepEqual(src int64) bool { + + if p.CycleEndAt != src { + return false + } + return true +} +func (p *AutoEvaluateRunConfig) Field9DeepEqual(src string) bool { + + if strings.Compare(p.Status, src) != 0 { + return false + } + return true +} + +type DataReflowRunConfig struct { + DatasetID int64 `thrift:"dataset_id,1,required" frugal:"1,required,i64" json:"dataset_id" form:"dataset_id,required" query:"dataset_id,required"` + DatasetRunID int64 `thrift:"dataset_run_id,2,required" frugal:"2,required,i64" json:"dataset_run_id" form:"dataset_run_id,required" query:"dataset_run_id,required"` + EndAt int64 
`thrift:"end_at,3,required" frugal:"3,required,i64" json:"end_at" form:"end_at,required" query:"end_at,required"` + CycleStartAt int64 `thrift:"cycle_start_at,4,required" frugal:"4,required,i64" json:"cycle_start_at" form:"cycle_start_at,required" query:"cycle_start_at,required"` + CycleEndAt int64 `thrift:"cycle_end_at,5,required" frugal:"5,required,i64" json:"cycle_end_at" form:"cycle_end_at,required" query:"cycle_end_at,required"` + Status string `thrift:"status,6,required" frugal:"6,required,string" form:"status,required" json:"status,required" query:"status,required"` +} + +func NewDataReflowRunConfig() *DataReflowRunConfig { + return &DataReflowRunConfig{} +} + +func (p *DataReflowRunConfig) InitDefault() { +} + +func (p *DataReflowRunConfig) GetDatasetID() (v int64) { + if p != nil { + return p.DatasetID + } + return +} + +func (p *DataReflowRunConfig) GetDatasetRunID() (v int64) { + if p != nil { + return p.DatasetRunID + } + return +} + +func (p *DataReflowRunConfig) GetEndAt() (v int64) { + if p != nil { + return p.EndAt + } + return +} + +func (p *DataReflowRunConfig) GetCycleStartAt() (v int64) { + if p != nil { + return p.CycleStartAt + } + return +} + +func (p *DataReflowRunConfig) GetCycleEndAt() (v int64) { + if p != nil { + return p.CycleEndAt + } + return +} + +func (p *DataReflowRunConfig) GetStatus() (v string) { + if p != nil { + return p.Status + } + return +} +func (p *DataReflowRunConfig) SetDatasetID(val int64) { + p.DatasetID = val +} +func (p *DataReflowRunConfig) SetDatasetRunID(val int64) { + p.DatasetRunID = val +} +func (p *DataReflowRunConfig) SetEndAt(val int64) { + p.EndAt = val +} +func (p *DataReflowRunConfig) SetCycleStartAt(val int64) { + p.CycleStartAt = val +} +func (p *DataReflowRunConfig) SetCycleEndAt(val int64) { + p.CycleEndAt = val +} +func (p *DataReflowRunConfig) SetStatus(val string) { + p.Status = val +} + +var fieldIDToName_DataReflowRunConfig = map[int16]string{ + 1: "dataset_id", + 2: "dataset_run_id", + 3: "end_at", + 4: "cycle_start_at", + 5: "cycle_end_at", + 6: "status", +} + +func (p *DataReflowRunConfig) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetDatasetID bool = false + var issetDatasetRunID bool = false + var issetEndAt bool = false + var issetCycleStartAt bool = false + var issetCycleEndAt bool = false + var issetStatus bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetDatasetID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetDatasetRunID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.I64 { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetEndAt = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetCycleStartAt = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 
5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + issetCycleEndAt = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + issetStatus = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetDatasetID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetDatasetRunID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetEndAt { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetCycleStartAt { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetCycleEndAt { + fieldId = 5 + goto RequiredFieldNotSetError + } + + if !issetStatus { + fieldId = 6 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_DataReflowRunConfig[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_DataReflowRunConfig[fieldId])) +} + +func (p *DataReflowRunConfig) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.DatasetID = _field + return nil +} +func (p *DataReflowRunConfig) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.DatasetRunID = _field + return nil +} +func (p *DataReflowRunConfig) ReadField3(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.EndAt = _field + return nil +} +func (p *DataReflowRunConfig) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.CycleStartAt = _field + return nil +} +func (p *DataReflowRunConfig) ReadField5(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.CycleEndAt = _field + return nil +} +func (p *DataReflowRunConfig) ReadField6(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Status = _field + return nil +} + +func (p *DataReflowRunConfig) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("DataReflowRunConfig"); err != nil { + goto WriteStructBeginError + } + 
if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *DataReflowRunConfig) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("dataset_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.DatasetID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *DataReflowRunConfig) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("dataset_run_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.DatasetRunID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *DataReflowRunConfig) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("end_at", thrift.I64, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.EndAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *DataReflowRunConfig) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("cycle_start_at", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.CycleStartAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} +func (p *DataReflowRunConfig) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("cycle_end_at", thrift.I64, 5); err != nil { + 
goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.CycleEndAt); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} +func (p *DataReflowRunConfig) writeField6(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("status", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Status); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} + +func (p *DataReflowRunConfig) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("DataReflowRunConfig(%+v)", *p) + +} + +func (p *DataReflowRunConfig) DeepEqual(ano *DataReflowRunConfig) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.DatasetID) { + return false + } + if !p.Field2DeepEqual(ano.DatasetRunID) { + return false + } + if !p.Field3DeepEqual(ano.EndAt) { + return false + } + if !p.Field4DeepEqual(ano.CycleStartAt) { + return false + } + if !p.Field5DeepEqual(ano.CycleEndAt) { + return false + } + if !p.Field6DeepEqual(ano.Status) { + return false + } + return true +} + +func (p *DataReflowRunConfig) Field1DeepEqual(src int64) bool { + + if p.DatasetID != src { + return false + } + return true +} +func (p *DataReflowRunConfig) Field2DeepEqual(src int64) bool { + + if p.DatasetRunID != src { + return false + } + return true +} +func (p *DataReflowRunConfig) Field3DeepEqual(src int64) bool { + + if p.EndAt != src { + return false + } + return true +} +func (p *DataReflowRunConfig) Field4DeepEqual(src int64) bool { + + if p.CycleStartAt != src { + return false + } + return true +} +func (p *DataReflowRunConfig) Field5DeepEqual(src int64) bool { + + if p.CycleEndAt != src { + return false + } + return true +} +func (p *DataReflowRunConfig) Field6DeepEqual(src string) bool { + + if strings.Compare(p.Status, src) != 0 { + return false + } + return true +} diff --git a/backend/kitex_gen/coze/loop/observability/domain/task/task_validator.go b/backend/kitex_gen/coze/loop/observability/domain/task/task_validator.go new file mode 100644 index 000000000..c157b3ca8 --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/domain/task/task_validator.go @@ -0,0 +1,157 @@ +// Code generated by Validator v0.2.6. DO NOT EDIT. 
+ +package task + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strings" + "time" +) + +// unused protection +var ( + _ = fmt.Formatter(nil) + _ = (*bytes.Buffer)(nil) + _ = (*strings.Builder)(nil) + _ = reflect.Type(nil) + _ = (*regexp.Regexp)(nil) + _ = time.Nanosecond +) + +func (p *Task) IsValid() error { + if p.Rule != nil { + if err := p.Rule.IsValid(); err != nil { + return fmt.Errorf("field Rule not valid, %w", err) + } + } + if p.TaskConfig != nil { + if err := p.TaskConfig.IsValid(); err != nil { + return fmt.Errorf("field TaskConfig not valid, %w", err) + } + } + if p.TaskDetail != nil { + if err := p.TaskDetail.IsValid(); err != nil { + return fmt.Errorf("field TaskDetail not valid, %w", err) + } + } + if p.BackfillTaskDetail != nil { + if err := p.BackfillTaskDetail.IsValid(); err != nil { + return fmt.Errorf("field BackfillTaskDetail not valid, %w", err) + } + } + if p.BaseInfo != nil { + if err := p.BaseInfo.IsValid(); err != nil { + return fmt.Errorf("field BaseInfo not valid, %w", err) + } + } + return nil +} +func (p *Rule) IsValid() error { + if p.SpanFilters != nil { + if err := p.SpanFilters.IsValid(); err != nil { + return fmt.Errorf("field SpanFilters not valid, %w", err) + } + } + if p.Sampler != nil { + if err := p.Sampler.IsValid(); err != nil { + return fmt.Errorf("field Sampler not valid, %w", err) + } + } + if p.EffectiveTime != nil { + if err := p.EffectiveTime.IsValid(); err != nil { + return fmt.Errorf("field EffectiveTime not valid, %w", err) + } + } + if p.BackfillEffectiveTime != nil { + if err := p.BackfillEffectiveTime.IsValid(); err != nil { + return fmt.Errorf("field BackfillEffectiveTime not valid, %w", err) + } + } + return nil +} +func (p *Sampler) IsValid() error { + return nil +} +func (p *EffectiveTime) IsValid() error { + return nil +} +func (p *TaskConfig) IsValid() error { + return nil +} +func (p *DataReflowConfig) IsValid() error { + if p.DatasetSchema == nil { + return fmt.Errorf("field DatasetSchema not_nil rule failed") + } + if err := p.DatasetSchema.IsValid(); err != nil { + return fmt.Errorf("field DatasetSchema not valid, %w", err) + } + if len(p.FieldMappings) < int(1) { + return fmt.Errorf("field FieldMappings MinLen rule failed, current value: %v", p.FieldMappings) + } + if len(p.FieldMappings) > int(100) { + return fmt.Errorf("field FieldMappings MaxLen rule failed, current value: %v", p.FieldMappings) + } + return nil +} +func (p *AutoEvaluateConfig) IsValid() error { + return nil +} +func (p *RunDetail) IsValid() error { + return nil +} +func (p *BackfillDetail) IsValid() error { + return nil +} +func (p *EvaluateFieldMapping) IsValid() error { + if p.FieldSchema != nil { + if err := p.FieldSchema.IsValid(); err != nil { + return fmt.Errorf("field FieldSchema not valid, %w", err) + } + } + return nil +} +func (p *TaskRun) IsValid() error { + if p.RunDetail != nil { + if err := p.RunDetail.IsValid(); err != nil { + return fmt.Errorf("field RunDetail not valid, %w", err) + } + } + if p.BackfillRunDetail != nil { + if err := p.BackfillRunDetail.IsValid(); err != nil { + return fmt.Errorf("field BackfillRunDetail not valid, %w", err) + } + } + if p.TaskRunConfig != nil { + if err := p.TaskRunConfig.IsValid(); err != nil { + return fmt.Errorf("field TaskRunConfig not valid, %w", err) + } + } + if p.BaseInfo != nil { + if err := p.BaseInfo.IsValid(); err != nil { + return fmt.Errorf("field BaseInfo not valid, %w", err) + } + } + return nil +} +func (p *TaskRunConfig) IsValid() error { + if p.AutoEvaluateRunConfig != nil { + 
if err := p.AutoEvaluateRunConfig.IsValid(); err != nil { + return fmt.Errorf("field AutoEvaluateRunConfig not valid, %w", err) + } + } + if p.DataReflowRunConfig != nil { + if err := p.DataReflowRunConfig.IsValid(); err != nil { + return fmt.Errorf("field DataReflowRunConfig not valid, %w", err) + } + } + return nil +} +func (p *AutoEvaluateRunConfig) IsValid() error { + return nil +} +func (p *DataReflowRunConfig) IsValid() error { + return nil +} diff --git a/backend/kitex_gen/coze/loop/observability/k-coze.loop.observability.go b/backend/kitex_gen/coze/loop/observability/k-coze.loop.observability.go index 30ba0d9c4..04e1f19b8 100644 --- a/backend/kitex_gen/coze/loop/observability/k-coze.loop.observability.go +++ b/backend/kitex_gen/coze/loop/observability/k-coze.loop.observability.go @@ -11,11 +11,13 @@ import ( "github.com/cloudwego/gopkg/protocol/thrift" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/openapi" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/trace" ) var ( _ = openapi.KitexUnusedProtection + _ = task.KitexUnusedProtection _ = trace.KitexUnusedProtection ) diff --git a/backend/kitex_gen/coze/loop/observability/observabilitytaskservice/client.go b/backend/kitex_gen/coze/loop/observability/observabilitytaskservice/client.go new file mode 100644 index 000000000..a8e4d3beb --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/observabilitytaskservice/client.go @@ -0,0 +1,73 @@ +// Code generated by Kitex v0.13.1. DO NOT EDIT. + +package observabilitytaskservice + +import ( + "context" + client "github.com/cloudwego/kitex/client" + callopt "github.com/cloudwego/kitex/client/callopt" + task "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" +) + +// Client is designed to provide IDL-compatible methods with call-option parameter for kitex framework. +type Client interface { + CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest, callOptions ...callopt.Option) (r *task.CheckTaskNameResponse, err error) + CreateTask(ctx context.Context, req *task.CreateTaskRequest, callOptions ...callopt.Option) (r *task.CreateTaskResponse, err error) + UpdateTask(ctx context.Context, req *task.UpdateTaskRequest, callOptions ...callopt.Option) (r *task.UpdateTaskResponse, err error) + ListTasks(ctx context.Context, req *task.ListTasksRequest, callOptions ...callopt.Option) (r *task.ListTasksResponse, err error) + GetTask(ctx context.Context, req *task.GetTaskRequest, callOptions ...callopt.Option) (r *task.GetTaskResponse, err error) +} + +// NewClient creates a client for the service defined in IDL. +func NewClient(destService string, opts ...client.Option) (Client, error) { + var options []client.Option + options = append(options, client.WithDestService(destService)) + + options = append(options, opts...) + + kc, err := client.NewClient(serviceInfo(), options...) + if err != nil { + return nil, err + } + return &kObservabilityTaskServiceClient{ + kClient: newServiceClient(kc), + }, nil +} + +// MustNewClient creates a client for the service defined in IDL. It panics if any error occurs. +func MustNewClient(destService string, opts ...client.Option) Client { + kc, err := NewClient(destService, opts...) 
+ if err != nil { + panic(err) + } + return kc +} + +type kObservabilityTaskServiceClient struct { + *kClient +} + +func (p *kObservabilityTaskServiceClient) CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest, callOptions ...callopt.Option) (r *task.CheckTaskNameResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.CheckTaskName(ctx, req) +} + +func (p *kObservabilityTaskServiceClient) CreateTask(ctx context.Context, req *task.CreateTaskRequest, callOptions ...callopt.Option) (r *task.CreateTaskResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.CreateTask(ctx, req) +} + +func (p *kObservabilityTaskServiceClient) UpdateTask(ctx context.Context, req *task.UpdateTaskRequest, callOptions ...callopt.Option) (r *task.UpdateTaskResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.UpdateTask(ctx, req) +} + +func (p *kObservabilityTaskServiceClient) ListTasks(ctx context.Context, req *task.ListTasksRequest, callOptions ...callopt.Option) (r *task.ListTasksResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ListTasks(ctx, req) +} + +func (p *kObservabilityTaskServiceClient) GetTask(ctx context.Context, req *task.GetTaskRequest, callOptions ...callopt.Option) (r *task.GetTaskResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.GetTask(ctx, req) +} diff --git a/backend/kitex_gen/coze/loop/observability/observabilitytaskservice/observabilitytaskservice.go b/backend/kitex_gen/coze/loop/observability/observabilitytaskservice/observabilitytaskservice.go new file mode 100644 index 000000000..41b4f6709 --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/observabilitytaskservice/observabilitytaskservice.go @@ -0,0 +1,240 @@ +// Code generated by Kitex v0.13.1. DO NOT EDIT. 
+ +package observabilitytaskservice + +import ( + "context" + "errors" + client "github.com/cloudwego/kitex/client" + kitex "github.com/cloudwego/kitex/pkg/serviceinfo" + observability "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability" + task "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" +) + +var errInvalidMessageType = errors.New("invalid message type for service method handler") + +var serviceMethods = map[string]kitex.MethodInfo{ + "CheckTaskName": kitex.NewMethodInfo( + checkTaskNameHandler, + newTaskServiceCheckTaskNameArgs, + newTaskServiceCheckTaskNameResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "CreateTask": kitex.NewMethodInfo( + createTaskHandler, + newTaskServiceCreateTaskArgs, + newTaskServiceCreateTaskResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "UpdateTask": kitex.NewMethodInfo( + updateTaskHandler, + newTaskServiceUpdateTaskArgs, + newTaskServiceUpdateTaskResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "ListTasks": kitex.NewMethodInfo( + listTasksHandler, + newTaskServiceListTasksArgs, + newTaskServiceListTasksResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "GetTask": kitex.NewMethodInfo( + getTaskHandler, + newTaskServiceGetTaskArgs, + newTaskServiceGetTaskResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), +} + +var ( + observabilityTaskServiceServiceInfo = NewServiceInfo() +) + +// for server +func serviceInfo() *kitex.ServiceInfo { + return observabilityTaskServiceServiceInfo +} + +// NewServiceInfo creates a new ServiceInfo +func NewServiceInfo() *kitex.ServiceInfo { + return newServiceInfo() +} + +func newServiceInfo() *kitex.ServiceInfo { + serviceName := "ObservabilityTaskService" + handlerType := (*observability.ObservabilityTaskService)(nil) + extra := map[string]interface{}{ + "PackageName": "observability", + } + svcInfo := &kitex.ServiceInfo{ + ServiceName: serviceName, + HandlerType: handlerType, + Methods: serviceMethods, + PayloadCodec: kitex.Thrift, + KiteXGenVersion: "v0.13.1", + Extra: extra, + } + return svcInfo +} + +func checkTaskNameHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceCheckTaskNameArgs) + realResult := result.(*task.TaskServiceCheckTaskNameResult) + success, err := handler.(task.TaskService).CheckTaskName(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceCheckTaskNameArgs() interface{} { + return task.NewTaskServiceCheckTaskNameArgs() +} + +func newTaskServiceCheckTaskNameResult() interface{} { + return task.NewTaskServiceCheckTaskNameResult() +} + +func createTaskHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceCreateTaskArgs) + realResult := result.(*task.TaskServiceCreateTaskResult) + success, err := handler.(task.TaskService).CreateTask(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceCreateTaskArgs() interface{} { + return task.NewTaskServiceCreateTaskArgs() +} + +func newTaskServiceCreateTaskResult() interface{} { + return task.NewTaskServiceCreateTaskResult() +} + +func updateTaskHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceUpdateTaskArgs) + realResult := result.(*task.TaskServiceUpdateTaskResult) + success, 
err := handler.(task.TaskService).UpdateTask(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceUpdateTaskArgs() interface{} { + return task.NewTaskServiceUpdateTaskArgs() +} + +func newTaskServiceUpdateTaskResult() interface{} { + return task.NewTaskServiceUpdateTaskResult() +} + +func listTasksHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceListTasksArgs) + realResult := result.(*task.TaskServiceListTasksResult) + success, err := handler.(task.TaskService).ListTasks(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceListTasksArgs() interface{} { + return task.NewTaskServiceListTasksArgs() +} + +func newTaskServiceListTasksResult() interface{} { + return task.NewTaskServiceListTasksResult() +} + +func getTaskHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceGetTaskArgs) + realResult := result.(*task.TaskServiceGetTaskResult) + success, err := handler.(task.TaskService).GetTask(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceGetTaskArgs() interface{} { + return task.NewTaskServiceGetTaskArgs() +} + +func newTaskServiceGetTaskResult() interface{} { + return task.NewTaskServiceGetTaskResult() +} + +type kClient struct { + c client.Client + sc client.Streaming +} + +func newServiceClient(c client.Client) *kClient { + return &kClient{ + c: c, + sc: c.(client.Streaming), + } +} + +func (p *kClient) CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest) (r *task.CheckTaskNameResponse, err error) { + var _args task.TaskServiceCheckTaskNameArgs + _args.Req = req + var _result task.TaskServiceCheckTaskNameResult + if err = p.c.Call(ctx, "CheckTaskName", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) CreateTask(ctx context.Context, req *task.CreateTaskRequest) (r *task.CreateTaskResponse, err error) { + var _args task.TaskServiceCreateTaskArgs + _args.Req = req + var _result task.TaskServiceCreateTaskResult + if err = p.c.Call(ctx, "CreateTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) UpdateTask(ctx context.Context, req *task.UpdateTaskRequest) (r *task.UpdateTaskResponse, err error) { + var _args task.TaskServiceUpdateTaskArgs + _args.Req = req + var _result task.TaskServiceUpdateTaskResult + if err = p.c.Call(ctx, "UpdateTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ListTasks(ctx context.Context, req *task.ListTasksRequest) (r *task.ListTasksResponse, err error) { + var _args task.TaskServiceListTasksArgs + _args.Req = req + var _result task.TaskServiceListTasksResult + if err = p.c.Call(ctx, "ListTasks", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) GetTask(ctx context.Context, req *task.GetTaskRequest) (r *task.GetTaskResponse, err error) { + var _args task.TaskServiceGetTaskArgs + _args.Req = req + var _result task.TaskServiceGetTaskResult + if err = p.c.Call(ctx, "GetTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} diff --git a/backend/kitex_gen/coze/loop/observability/observabilitytaskservice/server.go 
b/backend/kitex_gen/coze/loop/observability/observabilitytaskservice/server.go new file mode 100644 index 000000000..f7b67a1a6 --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/observabilitytaskservice/server.go @@ -0,0 +1,25 @@ +// Code generated by Kitex v0.13.1. DO NOT EDIT. +package observabilitytaskservice + +import ( + server "github.com/cloudwego/kitex/server" + observability "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability" +) + +// NewServer creates a server.Server with the given handler and options. +func NewServer(handler observability.ObservabilityTaskService, opts ...server.Option) server.Server { + var options []server.Option + + options = append(options, opts...) + options = append(options, server.WithCompatibleMiddlewareForUnary()) + + svr := server.NewServer(options...) + if err := svr.RegisterService(serviceInfo(), handler); err != nil { + panic(err) + } + return svr +} + +func RegisterService(svr server.Server, handler observability.ObservabilityTaskService, opts ...server.RegisterOption) error { + return svr.RegisterService(serviceInfo(), handler, opts...) +} diff --git a/backend/kitex_gen/coze/loop/observability/observabilitytraceservice/client.go b/backend/kitex_gen/coze/loop/observability/observabilitytraceservice/client.go index f6cf531a1..9079d147d 100644 --- a/backend/kitex_gen/coze/loop/observability/observabilitytraceservice/client.go +++ b/backend/kitex_gen/coze/loop/observability/observabilitytraceservice/client.go @@ -26,6 +26,9 @@ type Client interface { ListAnnotations(ctx context.Context, req *trace.ListAnnotationsRequest, callOptions ...callopt.Option) (r *trace.ListAnnotationsResponse, err error) ExportTracesToDataset(ctx context.Context, req *trace.ExportTracesToDatasetRequest, callOptions ...callopt.Option) (r *trace.ExportTracesToDatasetResponse, err error) PreviewExportTracesToDataset(ctx context.Context, req *trace.PreviewExportTracesToDatasetRequest, callOptions ...callopt.Option) (r *trace.PreviewExportTracesToDatasetResponse, err error) + ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest, callOptions ...callopt.Option) (r *trace.ChangeEvaluatorScoreResponse, err error) + ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest, callOptions ...callopt.Option) (r *trace.ListAnnotationEvaluatorsResponse, err error) + ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest, callOptions ...callopt.Option) (r *trace.ExtractSpanInfoResponse, err error) } // NewClient creates a client for the service defined in IDL. 
@@ -131,3 +134,18 @@ func (p *kObservabilityTraceServiceClient) PreviewExportTracesToDataset(ctx cont ctx = client.NewCtxWithCallOptions(ctx, callOptions) return p.kClient.PreviewExportTracesToDataset(ctx, req) } + +func (p *kObservabilityTraceServiceClient) ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest, callOptions ...callopt.Option) (r *trace.ChangeEvaluatorScoreResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ChangeEvaluatorScore(ctx, req) +} + +func (p *kObservabilityTraceServiceClient) ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest, callOptions ...callopt.Option) (r *trace.ListAnnotationEvaluatorsResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ListAnnotationEvaluators(ctx, req) +} + +func (p *kObservabilityTraceServiceClient) ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest, callOptions ...callopt.Option) (r *trace.ExtractSpanInfoResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ExtractSpanInfo(ctx, req) +} diff --git a/backend/kitex_gen/coze/loop/observability/observabilitytraceservice/observabilitytraceservice.go b/backend/kitex_gen/coze/loop/observability/observabilitytraceservice/observabilitytraceservice.go index c78122eef..30bdc7a2f 100644 --- a/backend/kitex_gen/coze/loop/observability/observabilitytraceservice/observabilitytraceservice.go +++ b/backend/kitex_gen/coze/loop/observability/observabilitytraceservice/observabilitytraceservice.go @@ -119,6 +119,27 @@ var serviceMethods = map[string]kitex.MethodInfo{ false, kitex.WithStreamingMode(kitex.StreamingNone), ), + "ChangeEvaluatorScore": kitex.NewMethodInfo( + changeEvaluatorScoreHandler, + newTraceServiceChangeEvaluatorScoreArgs, + newTraceServiceChangeEvaluatorScoreResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "ListAnnotationEvaluators": kitex.NewMethodInfo( + listAnnotationEvaluatorsHandler, + newTraceServiceListAnnotationEvaluatorsArgs, + newTraceServiceListAnnotationEvaluatorsResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "ExtractSpanInfo": kitex.NewMethodInfo( + extractSpanInfoHandler, + newTraceServiceExtractSpanInfoArgs, + newTraceServiceExtractSpanInfoResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), } var ( @@ -437,6 +458,63 @@ func newTraceServicePreviewExportTracesToDatasetResult() interface{} { return trace.NewTraceServicePreviewExportTracesToDatasetResult() } +func changeEvaluatorScoreHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*trace.TraceServiceChangeEvaluatorScoreArgs) + realResult := result.(*trace.TraceServiceChangeEvaluatorScoreResult) + success, err := handler.(trace.TraceService).ChangeEvaluatorScore(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTraceServiceChangeEvaluatorScoreArgs() interface{} { + return trace.NewTraceServiceChangeEvaluatorScoreArgs() +} + +func newTraceServiceChangeEvaluatorScoreResult() interface{} { + return trace.NewTraceServiceChangeEvaluatorScoreResult() +} + +func listAnnotationEvaluatorsHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*trace.TraceServiceListAnnotationEvaluatorsArgs) + realResult := result.(*trace.TraceServiceListAnnotationEvaluatorsResult) + success, err := 
handler.(trace.TraceService).ListAnnotationEvaluators(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTraceServiceListAnnotationEvaluatorsArgs() interface{} { + return trace.NewTraceServiceListAnnotationEvaluatorsArgs() +} + +func newTraceServiceListAnnotationEvaluatorsResult() interface{} { + return trace.NewTraceServiceListAnnotationEvaluatorsResult() +} + +func extractSpanInfoHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*trace.TraceServiceExtractSpanInfoArgs) + realResult := result.(*trace.TraceServiceExtractSpanInfoResult) + success, err := handler.(trace.TraceService).ExtractSpanInfo(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTraceServiceExtractSpanInfoArgs() interface{} { + return trace.NewTraceServiceExtractSpanInfoArgs() +} + +func newTraceServiceExtractSpanInfoResult() interface{} { + return trace.NewTraceServiceExtractSpanInfoResult() +} + type kClient struct { c client.Client sc client.Streaming @@ -598,3 +676,33 @@ func (p *kClient) PreviewExportTracesToDataset(ctx context.Context, req *trace.P } return _result.GetSuccess(), nil } + +func (p *kClient) ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest) (r *trace.ChangeEvaluatorScoreResponse, err error) { + var _args trace.TraceServiceChangeEvaluatorScoreArgs + _args.Req = req + var _result trace.TraceServiceChangeEvaluatorScoreResult + if err = p.c.Call(ctx, "ChangeEvaluatorScore", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest) (r *trace.ListAnnotationEvaluatorsResponse, err error) { + var _args trace.TraceServiceListAnnotationEvaluatorsArgs + _args.Req = req + var _result trace.TraceServiceListAnnotationEvaluatorsResult + if err = p.c.Call(ctx, "ListAnnotationEvaluators", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest) (r *trace.ExtractSpanInfoResponse, err error) { + var _args trace.TraceServiceExtractSpanInfoArgs + _args.Req = req + var _result trace.TraceServiceExtractSpanInfoResult + if err = p.c.Call(ctx, "ExtractSpanInfo", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} diff --git a/backend/kitex_gen/coze/loop/observability/task/coze.loop.observability.task.go b/backend/kitex_gen/coze/loop/observability/task/coze.loop.observability.task.go new file mode 100644 index 000000000..6d21a1664 --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/task/coze.loop.observability.task.go @@ -0,0 +1,5454 @@ +// Code generated by thriftgo (0.4.1). DO NOT EDIT. 
+ +package task + +import ( + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "github.com/coze-dev/coze-loop/backend/kitex_gen/base" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + "strings" +) + +type CreateTaskRequest struct { + Task *task.Task `thrift:"task,1,required" frugal:"1,required,task.Task" form:"task,required" json:"task,required"` + Base *base.Base `thrift:"base,255,optional" frugal:"255,optional,base.Base" form:"base" json:"base,omitempty" query:"base"` +} + +func NewCreateTaskRequest() *CreateTaskRequest { + return &CreateTaskRequest{} +} + +func (p *CreateTaskRequest) InitDefault() { +} + +var CreateTaskRequest_Task_DEFAULT *task.Task + +func (p *CreateTaskRequest) GetTask() (v *task.Task) { + if p == nil { + return + } + if !p.IsSetTask() { + return CreateTaskRequest_Task_DEFAULT + } + return p.Task +} + +var CreateTaskRequest_Base_DEFAULT *base.Base + +func (p *CreateTaskRequest) GetBase() (v *base.Base) { + if p == nil { + return + } + if !p.IsSetBase() { + return CreateTaskRequest_Base_DEFAULT + } + return p.Base +} +func (p *CreateTaskRequest) SetTask(val *task.Task) { + p.Task = val +} +func (p *CreateTaskRequest) SetBase(val *base.Base) { + p.Base = val +} + +var fieldIDToName_CreateTaskRequest = map[int16]string{ + 1: "task", + 255: "base", +} + +func (p *CreateTaskRequest) IsSetTask() bool { + return p.Task != nil +} + +func (p *CreateTaskRequest) IsSetBase() bool { + return p.Base != nil +} + +func (p *CreateTaskRequest) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetTask bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetTask = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetTask { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_CreateTaskRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) 
+RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_CreateTaskRequest[fieldId])) +} + +func (p *CreateTaskRequest) ReadField1(iprot thrift.TProtocol) error { + _field := task.NewTask() + if err := _field.Read(iprot); err != nil { + return err + } + p.Task = _field + return nil +} +func (p *CreateTaskRequest) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBase() + if err := _field.Read(iprot); err != nil { + return err + } + p.Base = _field + return nil +} + +func (p *CreateTaskRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("CreateTaskRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *CreateTaskRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("task", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Task.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *CreateTaskRequest) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBase() { + if err = oprot.WriteFieldBegin("base", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.Base.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *CreateTaskRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CreateTaskRequest(%+v)", *p) + +} + +func (p *CreateTaskRequest) DeepEqual(ano *CreateTaskRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Task) { + return false + } + if !p.Field255DeepEqual(ano.Base) { + return false + } + return true +} + +func (p *CreateTaskRequest) Field1DeepEqual(src *task.Task) bool { + + if !p.Task.DeepEqual(src) { + return false + } + return true +} +func (p *CreateTaskRequest) Field255DeepEqual(src *base.Base) bool { + + if !p.Base.DeepEqual(src) { + return false + } + return true +} + +type CreateTaskResponse struct { + TaskID *int64 `thrift:"task_id,1,optional" frugal:"1,optional,i64" form:"task_id" json:"task_id,string,omitempty"` + 
BaseResp *base.BaseResp `thrift:"BaseResp,255,optional" frugal:"255,optional,base.BaseResp" form:"BaseResp" json:"BaseResp,omitempty" query:"BaseResp"` +} + +func NewCreateTaskResponse() *CreateTaskResponse { + return &CreateTaskResponse{} +} + +func (p *CreateTaskResponse) InitDefault() { +} + +var CreateTaskResponse_TaskID_DEFAULT int64 + +func (p *CreateTaskResponse) GetTaskID() (v int64) { + if p == nil { + return + } + if !p.IsSetTaskID() { + return CreateTaskResponse_TaskID_DEFAULT + } + return *p.TaskID +} + +var CreateTaskResponse_BaseResp_DEFAULT *base.BaseResp + +func (p *CreateTaskResponse) GetBaseResp() (v *base.BaseResp) { + if p == nil { + return + } + if !p.IsSetBaseResp() { + return CreateTaskResponse_BaseResp_DEFAULT + } + return p.BaseResp +} +func (p *CreateTaskResponse) SetTaskID(val *int64) { + p.TaskID = val +} +func (p *CreateTaskResponse) SetBaseResp(val *base.BaseResp) { + p.BaseResp = val +} + +var fieldIDToName_CreateTaskResponse = map[int16]string{ + 1: "task_id", + 255: "BaseResp", +} + +func (p *CreateTaskResponse) IsSetTaskID() bool { + return p.TaskID != nil +} + +func (p *CreateTaskResponse) IsSetBaseResp() bool { + return p.BaseResp != nil +} + +func (p *CreateTaskResponse) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_CreateTaskResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *CreateTaskResponse) ReadField1(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.TaskID = _field + return nil +} +func (p *CreateTaskResponse) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBaseResp() + if err := _field.Read(iprot); err != nil { + return err + } + p.BaseResp = _field + return nil +} + +func (p *CreateTaskResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("CreateTaskResponse"); err != nil { + goto 
WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *CreateTaskResponse) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTaskID() { + if err = oprot.WriteFieldBegin("task_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.TaskID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *CreateTaskResponse) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseResp() { + if err = oprot.WriteFieldBegin("BaseResp", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.BaseResp.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *CreateTaskResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CreateTaskResponse(%+v)", *p) + +} + +func (p *CreateTaskResponse) DeepEqual(ano *CreateTaskResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TaskID) { + return false + } + if !p.Field255DeepEqual(ano.BaseResp) { + return false + } + return true +} + +func (p *CreateTaskResponse) Field1DeepEqual(src *int64) bool { + + if p.TaskID == src { + return true + } else if p.TaskID == nil || src == nil { + return false + } + if *p.TaskID != *src { + return false + } + return true +} +func (p *CreateTaskResponse) Field255DeepEqual(src *base.BaseResp) bool { + + if !p.BaseResp.DeepEqual(src) { + return false + } + return true +} + +type UpdateTaskRequest struct { + TaskID int64 `thrift:"task_id,1,required" frugal:"1,required,i64" json:"task_id,string,required" path:"task_id,required"` + WorkspaceID int64 `thrift:"workspace_id,2,required" frugal:"2,required,i64" json:"workspace_id" form:"workspace_id,required" ` + TaskStatus *task.TaskStatus `thrift:"task_status,3,optional" frugal:"3,optional,string" form:"task_status" json:"task_status,omitempty"` + Description *string `thrift:"description,4,optional" frugal:"4,optional,string" form:"description" json:"description,omitempty"` + EffectiveTime *task.EffectiveTime `thrift:"effective_time,5,optional" frugal:"5,optional,task.EffectiveTime" form:"effective_time" json:"effective_time,omitempty"` + SampleRate 
*float64 `thrift:"sample_rate,6,optional" frugal:"6,optional,double" form:"sample_rate" json:"sample_rate,omitempty"` + Base *base.Base `thrift:"base,255,optional" frugal:"255,optional,base.Base" form:"base" json:"base,omitempty" query:"base"` +} + +func NewUpdateTaskRequest() *UpdateTaskRequest { + return &UpdateTaskRequest{} +} + +func (p *UpdateTaskRequest) InitDefault() { +} + +func (p *UpdateTaskRequest) GetTaskID() (v int64) { + if p != nil { + return p.TaskID + } + return +} + +func (p *UpdateTaskRequest) GetWorkspaceID() (v int64) { + if p != nil { + return p.WorkspaceID + } + return +} + +var UpdateTaskRequest_TaskStatus_DEFAULT task.TaskStatus + +func (p *UpdateTaskRequest) GetTaskStatus() (v task.TaskStatus) { + if p == nil { + return + } + if !p.IsSetTaskStatus() { + return UpdateTaskRequest_TaskStatus_DEFAULT + } + return *p.TaskStatus +} + +var UpdateTaskRequest_Description_DEFAULT string + +func (p *UpdateTaskRequest) GetDescription() (v string) { + if p == nil { + return + } + if !p.IsSetDescription() { + return UpdateTaskRequest_Description_DEFAULT + } + return *p.Description +} + +var UpdateTaskRequest_EffectiveTime_DEFAULT *task.EffectiveTime + +func (p *UpdateTaskRequest) GetEffectiveTime() (v *task.EffectiveTime) { + if p == nil { + return + } + if !p.IsSetEffectiveTime() { + return UpdateTaskRequest_EffectiveTime_DEFAULT + } + return p.EffectiveTime +} + +var UpdateTaskRequest_SampleRate_DEFAULT float64 + +func (p *UpdateTaskRequest) GetSampleRate() (v float64) { + if p == nil { + return + } + if !p.IsSetSampleRate() { + return UpdateTaskRequest_SampleRate_DEFAULT + } + return *p.SampleRate +} + +var UpdateTaskRequest_Base_DEFAULT *base.Base + +func (p *UpdateTaskRequest) GetBase() (v *base.Base) { + if p == nil { + return + } + if !p.IsSetBase() { + return UpdateTaskRequest_Base_DEFAULT + } + return p.Base +} +func (p *UpdateTaskRequest) SetTaskID(val int64) { + p.TaskID = val +} +func (p *UpdateTaskRequest) SetWorkspaceID(val int64) { + p.WorkspaceID = val +} +func (p *UpdateTaskRequest) SetTaskStatus(val *task.TaskStatus) { + p.TaskStatus = val +} +func (p *UpdateTaskRequest) SetDescription(val *string) { + p.Description = val +} +func (p *UpdateTaskRequest) SetEffectiveTime(val *task.EffectiveTime) { + p.EffectiveTime = val +} +func (p *UpdateTaskRequest) SetSampleRate(val *float64) { + p.SampleRate = val +} +func (p *UpdateTaskRequest) SetBase(val *base.Base) { + p.Base = val +} + +var fieldIDToName_UpdateTaskRequest = map[int16]string{ + 1: "task_id", + 2: "workspace_id", + 3: "task_status", + 4: "description", + 5: "effective_time", + 6: "sample_rate", + 255: "base", +} + +func (p *UpdateTaskRequest) IsSetTaskStatus() bool { + return p.TaskStatus != nil +} + +func (p *UpdateTaskRequest) IsSetDescription() bool { + return p.Description != nil +} + +func (p *UpdateTaskRequest) IsSetEffectiveTime() bool { + return p.EffectiveTime != nil +} + +func (p *UpdateTaskRequest) IsSetSampleRate() bool { + return p.SampleRate != nil +} + +func (p *UpdateTaskRequest) IsSetBase() bool { + return p.Base != nil +} + +func (p *UpdateTaskRequest) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetTaskID bool = false + var issetWorkspaceID bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + 
if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetTaskID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.STRING { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.DOUBLE { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetTaskID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetWorkspaceID { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_UpdateTaskRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_UpdateTaskRequest[fieldId])) +} + +func (p *UpdateTaskRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TaskID = _field + return nil +} +func (p *UpdateTaskRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.WorkspaceID = _field + return nil +} +func (p *UpdateTaskRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field *task.TaskStatus + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.TaskStatus = _field + return nil +} +func (p *UpdateTaskRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + 
_field = &v + } + p.Description = _field + return nil +} +func (p *UpdateTaskRequest) ReadField5(iprot thrift.TProtocol) error { + _field := task.NewEffectiveTime() + if err := _field.Read(iprot); err != nil { + return err + } + p.EffectiveTime = _field + return nil +} +func (p *UpdateTaskRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *float64 + if v, err := iprot.ReadDouble(); err != nil { + return err + } else { + _field = &v + } + p.SampleRate = _field + return nil +} +func (p *UpdateTaskRequest) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBase() + if err := _field.Read(iprot); err != nil { + return err + } + p.Base = _field + return nil +} + +func (p *UpdateTaskRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("UpdateTaskRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *UpdateTaskRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("task_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TaskID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *UpdateTaskRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("workspace_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.WorkspaceID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *UpdateTaskRequest) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetTaskStatus() { + if err = oprot.WriteFieldBegin("task_status", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.TaskStatus); err != nil { + return err + } + if err = 
oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *UpdateTaskRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetDescription() { + if err = oprot.WriteFieldBegin("description", thrift.STRING, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Description); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} +func (p *UpdateTaskRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetEffectiveTime() { + if err = oprot.WriteFieldBegin("effective_time", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.EffectiveTime.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} +func (p *UpdateTaskRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetSampleRate() { + if err = oprot.WriteFieldBegin("sample_rate", thrift.DOUBLE, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteDouble(*p.SampleRate); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} +func (p *UpdateTaskRequest) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBase() { + if err = oprot.WriteFieldBegin("base", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.Base.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *UpdateTaskRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("UpdateTaskRequest(%+v)", *p) + +} + +func (p *UpdateTaskRequest) DeepEqual(ano *UpdateTaskRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TaskID) { + return false + } + if !p.Field2DeepEqual(ano.WorkspaceID) { + return false + } + if !p.Field3DeepEqual(ano.TaskStatus) { + return false + } + if !p.Field4DeepEqual(ano.Description) { + return false + } + if !p.Field5DeepEqual(ano.EffectiveTime) { + return false + } + if !p.Field6DeepEqual(ano.SampleRate) { + return false + } + if !p.Field255DeepEqual(ano.Base) { + return false + } + return true +} + +func (p *UpdateTaskRequest) Field1DeepEqual(src int64) bool { + + if p.TaskID != src { + return false + } + return true +} +func 
(p *UpdateTaskRequest) Field2DeepEqual(src int64) bool { + + if p.WorkspaceID != src { + return false + } + return true +} +func (p *UpdateTaskRequest) Field3DeepEqual(src *task.TaskStatus) bool { + + if p.TaskStatus == src { + return true + } else if p.TaskStatus == nil || src == nil { + return false + } + if strings.Compare(*p.TaskStatus, *src) != 0 { + return false + } + return true +} +func (p *UpdateTaskRequest) Field4DeepEqual(src *string) bool { + + if p.Description == src { + return true + } else if p.Description == nil || src == nil { + return false + } + if strings.Compare(*p.Description, *src) != 0 { + return false + } + return true +} +func (p *UpdateTaskRequest) Field5DeepEqual(src *task.EffectiveTime) bool { + + if !p.EffectiveTime.DeepEqual(src) { + return false + } + return true +} +func (p *UpdateTaskRequest) Field6DeepEqual(src *float64) bool { + + if p.SampleRate == src { + return true + } else if p.SampleRate == nil || src == nil { + return false + } + if *p.SampleRate != *src { + return false + } + return true +} +func (p *UpdateTaskRequest) Field255DeepEqual(src *base.Base) bool { + + if !p.Base.DeepEqual(src) { + return false + } + return true +} + +type UpdateTaskResponse struct { + BaseResp *base.BaseResp `thrift:"BaseResp,255,optional" frugal:"255,optional,base.BaseResp" form:"BaseResp" json:"BaseResp,omitempty" query:"BaseResp"` +} + +func NewUpdateTaskResponse() *UpdateTaskResponse { + return &UpdateTaskResponse{} +} + +func (p *UpdateTaskResponse) InitDefault() { +} + +var UpdateTaskResponse_BaseResp_DEFAULT *base.BaseResp + +func (p *UpdateTaskResponse) GetBaseResp() (v *base.BaseResp) { + if p == nil { + return + } + if !p.IsSetBaseResp() { + return UpdateTaskResponse_BaseResp_DEFAULT + } + return p.BaseResp +} +func (p *UpdateTaskResponse) SetBaseResp(val *base.BaseResp) { + p.BaseResp = val +} + +var fieldIDToName_UpdateTaskResponse = map[int16]string{ + 255: "BaseResp", +} + +func (p *UpdateTaskResponse) IsSetBaseResp() bool { + return p.BaseResp != nil +} + +func (p *UpdateTaskResponse) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_UpdateTaskResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read 
struct end error: ", p), err) +} + +func (p *UpdateTaskResponse) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBaseResp() + if err := _field.Read(iprot); err != nil { + return err + } + p.BaseResp = _field + return nil +} + +func (p *UpdateTaskResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("UpdateTaskResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *UpdateTaskResponse) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseResp() { + if err = oprot.WriteFieldBegin("BaseResp", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.BaseResp.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *UpdateTaskResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("UpdateTaskResponse(%+v)", *p) + +} + +func (p *UpdateTaskResponse) DeepEqual(ano *UpdateTaskResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field255DeepEqual(ano.BaseResp) { + return false + } + return true +} + +func (p *UpdateTaskResponse) Field255DeepEqual(src *base.BaseResp) bool { + + if !p.BaseResp.DeepEqual(src) { + return false + } + return true +} + +type ListTasksRequest struct { + WorkspaceID int64 `thrift:"workspace_id,1,required" frugal:"1,required,i64" json:"workspace_id" form:"workspace_id,required" ` + TaskFilters *filter.TaskFilterFields `thrift:"task_filters,2,optional" frugal:"2,optional,filter.TaskFilterFields" form:"task_filters" json:"task_filters,omitempty"` + /* default 20 max 200 */ + Limit *int32 `thrift:"limit,101,optional" frugal:"101,optional,i32" form:"limit" json:"limit,omitempty"` + Offset *int32 `thrift:"offset,102,optional" frugal:"102,optional,i32" form:"offset" json:"offset,omitempty"` + OrderBy *common.OrderBy `thrift:"order_by,103,optional" frugal:"103,optional,common.OrderBy" form:"order_by" json:"order_by,omitempty"` + Base *base.Base `thrift:"base,255,optional" frugal:"255,optional,base.Base" form:"base" json:"base,omitempty" query:"base"` +} + +func NewListTasksRequest() *ListTasksRequest { + return &ListTasksRequest{} +} + +func (p *ListTasksRequest) InitDefault() { +} + +func (p *ListTasksRequest) GetWorkspaceID() (v int64) { + if p != nil { + return p.WorkspaceID + } + return +} + +var ListTasksRequest_TaskFilters_DEFAULT *filter.TaskFilterFields + +func (p *ListTasksRequest) GetTaskFilters() (v *filter.TaskFilterFields) { + if p == nil { + return + } + if 
!p.IsSetTaskFilters() { + return ListTasksRequest_TaskFilters_DEFAULT + } + return p.TaskFilters +} + +var ListTasksRequest_Limit_DEFAULT int32 + +func (p *ListTasksRequest) GetLimit() (v int32) { + if p == nil { + return + } + if !p.IsSetLimit() { + return ListTasksRequest_Limit_DEFAULT + } + return *p.Limit +} + +var ListTasksRequest_Offset_DEFAULT int32 + +func (p *ListTasksRequest) GetOffset() (v int32) { + if p == nil { + return + } + if !p.IsSetOffset() { + return ListTasksRequest_Offset_DEFAULT + } + return *p.Offset +} + +var ListTasksRequest_OrderBy_DEFAULT *common.OrderBy + +func (p *ListTasksRequest) GetOrderBy() (v *common.OrderBy) { + if p == nil { + return + } + if !p.IsSetOrderBy() { + return ListTasksRequest_OrderBy_DEFAULT + } + return p.OrderBy +} + +var ListTasksRequest_Base_DEFAULT *base.Base + +func (p *ListTasksRequest) GetBase() (v *base.Base) { + if p == nil { + return + } + if !p.IsSetBase() { + return ListTasksRequest_Base_DEFAULT + } + return p.Base +} +func (p *ListTasksRequest) SetWorkspaceID(val int64) { + p.WorkspaceID = val +} +func (p *ListTasksRequest) SetTaskFilters(val *filter.TaskFilterFields) { + p.TaskFilters = val +} +func (p *ListTasksRequest) SetLimit(val *int32) { + p.Limit = val +} +func (p *ListTasksRequest) SetOffset(val *int32) { + p.Offset = val +} +func (p *ListTasksRequest) SetOrderBy(val *common.OrderBy) { + p.OrderBy = val +} +func (p *ListTasksRequest) SetBase(val *base.Base) { + p.Base = val +} + +var fieldIDToName_ListTasksRequest = map[int16]string{ + 1: "workspace_id", + 2: "task_filters", + 101: "limit", + 102: "offset", + 103: "order_by", + 255: "base", +} + +func (p *ListTasksRequest) IsSetTaskFilters() bool { + return p.TaskFilters != nil +} + +func (p *ListTasksRequest) IsSetLimit() bool { + return p.Limit != nil +} + +func (p *ListTasksRequest) IsSetOffset() bool { + return p.Offset != nil +} + +func (p *ListTasksRequest) IsSetOrderBy() bool { + return p.OrderBy != nil +} + +func (p *ListTasksRequest) IsSetBase() bool { + return p.Base != nil +} + +func (p *ListTasksRequest) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetWorkspaceID bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 101: + if fieldTypeId == thrift.I32 { + if err = p.ReadField101(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 102: + if fieldTypeId == thrift.I32 { + if err = p.ReadField102(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 103: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField103(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = 
p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetWorkspaceID { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ListTasksRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_ListTasksRequest[fieldId])) +} + +func (p *ListTasksRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.WorkspaceID = _field + return nil +} +func (p *ListTasksRequest) ReadField2(iprot thrift.TProtocol) error { + _field := filter.NewTaskFilterFields() + if err := _field.Read(iprot); err != nil { + return err + } + p.TaskFilters = _field + return nil +} +func (p *ListTasksRequest) ReadField101(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.Limit = _field + return nil +} +func (p *ListTasksRequest) ReadField102(iprot thrift.TProtocol) error { + + var _field *int32 + if v, err := iprot.ReadI32(); err != nil { + return err + } else { + _field = &v + } + p.Offset = _field + return nil +} +func (p *ListTasksRequest) ReadField103(iprot thrift.TProtocol) error { + _field := common.NewOrderBy() + if err := _field.Read(iprot); err != nil { + return err + } + p.OrderBy = _field + return nil +} +func (p *ListTasksRequest) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBase() + if err := _field.Read(iprot); err != nil { + return err + } + p.Base = _field + return nil +} + +func (p *ListTasksRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ListTasksRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField101(oprot); err != nil { + fieldId = 101 + goto WriteFieldError + } + if err = p.writeField102(oprot); err != nil { + fieldId = 102 + goto WriteFieldError + } + if err = p.writeField103(oprot); err != nil { + fieldId = 103 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil 
+WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *ListTasksRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("workspace_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.WorkspaceID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *ListTasksRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetTaskFilters() { + if err = oprot.WriteFieldBegin("task_filters", thrift.STRUCT, 2); err != nil { + goto WriteFieldBeginError + } + if err := p.TaskFilters.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *ListTasksRequest) writeField101(oprot thrift.TProtocol) (err error) { + if p.IsSetLimit() { + if err = oprot.WriteFieldBegin("limit", thrift.I32, 101); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.Limit); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 101 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 101 end error: ", p), err) +} +func (p *ListTasksRequest) writeField102(oprot thrift.TProtocol) (err error) { + if p.IsSetOffset() { + if err = oprot.WriteFieldBegin("offset", thrift.I32, 102); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI32(*p.Offset); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 102 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 102 end error: ", p), err) +} +func (p *ListTasksRequest) writeField103(oprot thrift.TProtocol) (err error) { + if p.IsSetOrderBy() { + if err = oprot.WriteFieldBegin("order_by", thrift.STRUCT, 103); err != nil { + goto WriteFieldBeginError + } + if err := p.OrderBy.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 103 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 103 end error: ", p), err) +} +func (p *ListTasksRequest) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBase() { + if err = oprot.WriteFieldBegin("base", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError 
+ } + if err := p.Base.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *ListTasksRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ListTasksRequest(%+v)", *p) + +} + +func (p *ListTasksRequest) DeepEqual(ano *ListTasksRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.WorkspaceID) { + return false + } + if !p.Field2DeepEqual(ano.TaskFilters) { + return false + } + if !p.Field101DeepEqual(ano.Limit) { + return false + } + if !p.Field102DeepEqual(ano.Offset) { + return false + } + if !p.Field103DeepEqual(ano.OrderBy) { + return false + } + if !p.Field255DeepEqual(ano.Base) { + return false + } + return true +} + +func (p *ListTasksRequest) Field1DeepEqual(src int64) bool { + + if p.WorkspaceID != src { + return false + } + return true +} +func (p *ListTasksRequest) Field2DeepEqual(src *filter.TaskFilterFields) bool { + + if !p.TaskFilters.DeepEqual(src) { + return false + } + return true +} +func (p *ListTasksRequest) Field101DeepEqual(src *int32) bool { + + if p.Limit == src { + return true + } else if p.Limit == nil || src == nil { + return false + } + if *p.Limit != *src { + return false + } + return true +} +func (p *ListTasksRequest) Field102DeepEqual(src *int32) bool { + + if p.Offset == src { + return true + } else if p.Offset == nil || src == nil { + return false + } + if *p.Offset != *src { + return false + } + return true +} +func (p *ListTasksRequest) Field103DeepEqual(src *common.OrderBy) bool { + + if !p.OrderBy.DeepEqual(src) { + return false + } + return true +} +func (p *ListTasksRequest) Field255DeepEqual(src *base.Base) bool { + + if !p.Base.DeepEqual(src) { + return false + } + return true +} + +type ListTasksResponse struct { + Tasks []*task.Task `thrift:"tasks,1,optional" frugal:"1,optional,list" form:"tasks" json:"tasks,omitempty"` + Total *int64 `thrift:"total,100,optional" frugal:"100,optional,i64" form:"total" json:"total,string,omitempty"` + BaseResp *base.BaseResp `thrift:"BaseResp,255,optional" frugal:"255,optional,base.BaseResp" form:"BaseResp" json:"BaseResp,omitempty" query:"BaseResp"` +} + +func NewListTasksResponse() *ListTasksResponse { + return &ListTasksResponse{} +} + +func (p *ListTasksResponse) InitDefault() { +} + +var ListTasksResponse_Tasks_DEFAULT []*task.Task + +func (p *ListTasksResponse) GetTasks() (v []*task.Task) { + if p == nil { + return + } + if !p.IsSetTasks() { + return ListTasksResponse_Tasks_DEFAULT + } + return p.Tasks +} + +var ListTasksResponse_Total_DEFAULT int64 + +func (p *ListTasksResponse) GetTotal() (v int64) { + if p == nil { + return + } + if !p.IsSetTotal() { + return ListTasksResponse_Total_DEFAULT + } + return *p.Total +} + +var ListTasksResponse_BaseResp_DEFAULT *base.BaseResp + +func (p *ListTasksResponse) GetBaseResp() (v *base.BaseResp) { + if p == nil { + return + } + if !p.IsSetBaseResp() { + return ListTasksResponse_BaseResp_DEFAULT + } + return p.BaseResp +} +func (p *ListTasksResponse) SetTasks(val []*task.Task) { + p.Tasks = val +} +func (p *ListTasksResponse) SetTotal(val *int64) { + p.Total = val +} +func (p *ListTasksResponse) SetBaseResp(val *base.BaseResp) { + p.BaseResp = val +} + +var 
fieldIDToName_ListTasksResponse = map[int16]string{ + 1: "tasks", + 100: "total", + 255: "BaseResp", +} + +func (p *ListTasksResponse) IsSetTasks() bool { + return p.Tasks != nil +} + +func (p *ListTasksResponse) IsSetTotal() bool { + return p.Total != nil +} + +func (p *ListTasksResponse) IsSetBaseResp() bool { + return p.BaseResp != nil +} + +func (p *ListTasksResponse) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 100: + if fieldTypeId == thrift.I64 { + if err = p.ReadField100(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ListTasksResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *ListTasksResponse) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*task.Task, 0, size) + values := make([]task.Task, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Tasks = _field + return nil +} +func (p *ListTasksResponse) ReadField100(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.Total = _field + return nil +} +func (p *ListTasksResponse) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBaseResp() + if err := _field.Read(iprot); err != nil { + return err + } + p.BaseResp = _field + return nil +} + +func (p *ListTasksResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ListTasksResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField100(oprot); 
err != nil { + fieldId = 100 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *ListTasksResponse) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTasks() { + if err = oprot.WriteFieldBegin("tasks", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tasks)); err != nil { + return err + } + for _, v := range p.Tasks { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *ListTasksResponse) writeField100(oprot thrift.TProtocol) (err error) { + if p.IsSetTotal() { + if err = oprot.WriteFieldBegin("total", thrift.I64, 100); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.Total); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 100 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 100 end error: ", p), err) +} +func (p *ListTasksResponse) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseResp() { + if err = oprot.WriteFieldBegin("BaseResp", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.BaseResp.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *ListTasksResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ListTasksResponse(%+v)", *p) + +} + +func (p *ListTasksResponse) DeepEqual(ano *ListTasksResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Tasks) { + return false + } + if !p.Field100DeepEqual(ano.Total) { + return false + } + if !p.Field255DeepEqual(ano.BaseResp) { + return false + } + return true +} + +func (p *ListTasksResponse) Field1DeepEqual(src []*task.Task) bool { + + if len(p.Tasks) != len(src) { + return false + } + for i, v := range p.Tasks { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *ListTasksResponse) Field100DeepEqual(src *int64) bool { + + if p.Total == src { + return true + } else if p.Total == nil || src == 
nil { + return false + } + if *p.Total != *src { + return false + } + return true +} +func (p *ListTasksResponse) Field255DeepEqual(src *base.BaseResp) bool { + + if !p.BaseResp.DeepEqual(src) { + return false + } + return true +} + +type GetTaskRequest struct { + TaskID int64 `thrift:"task_id,1,required" frugal:"1,required,i64" json:"task_id,string,required" path:"task_id,required"` + WorkspaceID int64 `thrift:"workspace_id,2,required" frugal:"2,required,i64" json:"workspace_id" query:"workspace_id,required" ` + Base *base.Base `thrift:"base,255,optional" frugal:"255,optional,base.Base" form:"base" json:"base,omitempty" query:"base"` +} + +func NewGetTaskRequest() *GetTaskRequest { + return &GetTaskRequest{} +} + +func (p *GetTaskRequest) InitDefault() { +} + +func (p *GetTaskRequest) GetTaskID() (v int64) { + if p != nil { + return p.TaskID + } + return +} + +func (p *GetTaskRequest) GetWorkspaceID() (v int64) { + if p != nil { + return p.WorkspaceID + } + return +} + +var GetTaskRequest_Base_DEFAULT *base.Base + +func (p *GetTaskRequest) GetBase() (v *base.Base) { + if p == nil { + return + } + if !p.IsSetBase() { + return GetTaskRequest_Base_DEFAULT + } + return p.Base +} +func (p *GetTaskRequest) SetTaskID(val int64) { + p.TaskID = val +} +func (p *GetTaskRequest) SetWorkspaceID(val int64) { + p.WorkspaceID = val +} +func (p *GetTaskRequest) SetBase(val *base.Base) { + p.Base = val +} + +var fieldIDToName_GetTaskRequest = map[int16]string{ + 1: "task_id", + 2: "workspace_id", + 255: "base", +} + +func (p *GetTaskRequest) IsSetBase() bool { + return p.Base != nil +} + +func (p *GetTaskRequest) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetTaskID bool = false + var issetWorkspaceID bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetTaskID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.I64 { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetTaskID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetWorkspaceID { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_GetTaskRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type 
%d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_GetTaskRequest[fieldId])) +} + +func (p *GetTaskRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.TaskID = _field + return nil +} +func (p *GetTaskRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.WorkspaceID = _field + return nil +} +func (p *GetTaskRequest) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBase() + if err := _field.Read(iprot); err != nil { + return err + } + p.Base = _field + return nil +} + +func (p *GetTaskRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("GetTaskRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *GetTaskRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("task_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.TaskID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *GetTaskRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("workspace_id", thrift.I64, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.WorkspaceID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *GetTaskRequest) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBase() { + if err = oprot.WriteFieldBegin("base", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.Base.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + 
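// Illustrative note, not generated output: the optional pointer fields above are
// compared by value through the FieldNDeepEqual helpers; two nil pointers compare
// equal, while a nil/non-nil pair does not. A minimal sketch, assuming it sits in
// this generated package (the function name is hypothetical):
//
//	func exampleTotalDeepEqual() bool {
//		total := int64(7)
//		withTotal := &ListTasksResponse{Total: &total}
//		withoutTotal := &ListTasksResponse{} // Total left nil
//		return withTotal.Field100DeepEqual(withoutTotal.Total) // false: one side is nil
//	}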
goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *GetTaskRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("GetTaskRequest(%+v)", *p) + +} + +func (p *GetTaskRequest) DeepEqual(ano *GetTaskRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.TaskID) { + return false + } + if !p.Field2DeepEqual(ano.WorkspaceID) { + return false + } + if !p.Field255DeepEqual(ano.Base) { + return false + } + return true +} + +func (p *GetTaskRequest) Field1DeepEqual(src int64) bool { + + if p.TaskID != src { + return false + } + return true +} +func (p *GetTaskRequest) Field2DeepEqual(src int64) bool { + + if p.WorkspaceID != src { + return false + } + return true +} +func (p *GetTaskRequest) Field255DeepEqual(src *base.Base) bool { + + if !p.Base.DeepEqual(src) { + return false + } + return true +} + +type GetTaskResponse struct { + Task *task.Task `thrift:"task,1,optional" frugal:"1,optional,task.Task" form:"task" json:"task,omitempty"` + BaseResp *base.BaseResp `thrift:"BaseResp,255,optional" frugal:"255,optional,base.BaseResp" form:"BaseResp" json:"BaseResp,omitempty" query:"BaseResp"` +} + +func NewGetTaskResponse() *GetTaskResponse { + return &GetTaskResponse{} +} + +func (p *GetTaskResponse) InitDefault() { +} + +var GetTaskResponse_Task_DEFAULT *task.Task + +func (p *GetTaskResponse) GetTask() (v *task.Task) { + if p == nil { + return + } + if !p.IsSetTask() { + return GetTaskResponse_Task_DEFAULT + } + return p.Task +} + +var GetTaskResponse_BaseResp_DEFAULT *base.BaseResp + +func (p *GetTaskResponse) GetBaseResp() (v *base.BaseResp) { + if p == nil { + return + } + if !p.IsSetBaseResp() { + return GetTaskResponse_BaseResp_DEFAULT + } + return p.BaseResp +} +func (p *GetTaskResponse) SetTask(val *task.Task) { + p.Task = val +} +func (p *GetTaskResponse) SetBaseResp(val *base.BaseResp) { + p.BaseResp = val +} + +var fieldIDToName_GetTaskResponse = map[int16]string{ + 1: "task", + 255: "BaseResp", +} + +func (p *GetTaskResponse) IsSetTask() bool { + return p.Task != nil +} + +func (p *GetTaskResponse) IsSetBaseResp() bool { + return p.BaseResp != nil +} + +func (p *GetTaskResponse) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: 
", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_GetTaskResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *GetTaskResponse) ReadField1(iprot thrift.TProtocol) error { + _field := task.NewTask() + if err := _field.Read(iprot); err != nil { + return err + } + p.Task = _field + return nil +} +func (p *GetTaskResponse) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBaseResp() + if err := _field.Read(iprot); err != nil { + return err + } + p.BaseResp = _field + return nil +} + +func (p *GetTaskResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("GetTaskResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *GetTaskResponse) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetTask() { + if err = oprot.WriteFieldBegin("task", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Task.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *GetTaskResponse) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseResp() { + if err = oprot.WriteFieldBegin("BaseResp", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.BaseResp.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *GetTaskResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("GetTaskResponse(%+v)", *p) + +} + +func (p *GetTaskResponse) DeepEqual(ano *GetTaskResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Task) { + return false + } + if !p.Field255DeepEqual(ano.BaseResp) { + return false + 
} + return true +} + +func (p *GetTaskResponse) Field1DeepEqual(src *task.Task) bool { + + if !p.Task.DeepEqual(src) { + return false + } + return true +} +func (p *GetTaskResponse) Field255DeepEqual(src *base.BaseResp) bool { + + if !p.BaseResp.DeepEqual(src) { + return false + } + return true +} + +type CheckTaskNameRequest struct { + WorkspaceID int64 `thrift:"workspace_id,1,required" frugal:"1,required,i64" json:"workspace_id" form:"workspace_id,required" ` + Name string `thrift:"name,2,required" frugal:"2,required,string" form:"name,required" json:"name,required"` + Base *base.Base `thrift:"Base,255,optional" frugal:"255,optional,base.Base" form:"Base" json:"Base,omitempty" query:"Base"` +} + +func NewCheckTaskNameRequest() *CheckTaskNameRequest { + return &CheckTaskNameRequest{} +} + +func (p *CheckTaskNameRequest) InitDefault() { +} + +func (p *CheckTaskNameRequest) GetWorkspaceID() (v int64) { + if p != nil { + return p.WorkspaceID + } + return +} + +func (p *CheckTaskNameRequest) GetName() (v string) { + if p != nil { + return p.Name + } + return +} + +var CheckTaskNameRequest_Base_DEFAULT *base.Base + +func (p *CheckTaskNameRequest) GetBase() (v *base.Base) { + if p == nil { + return + } + if !p.IsSetBase() { + return CheckTaskNameRequest_Base_DEFAULT + } + return p.Base +} +func (p *CheckTaskNameRequest) SetWorkspaceID(val int64) { + p.WorkspaceID = val +} +func (p *CheckTaskNameRequest) SetName(val string) { + p.Name = val +} +func (p *CheckTaskNameRequest) SetBase(val *base.Base) { + p.Base = val +} + +var fieldIDToName_CheckTaskNameRequest = map[int16]string{ + 1: "workspace_id", + 2: "name", + 255: "Base", +} + +func (p *CheckTaskNameRequest) IsSetBase() bool { + return p.Base != nil +} + +func (p *CheckTaskNameRequest) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetWorkspaceID bool = false + var issetName bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetName = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetWorkspaceID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetName { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, 
fieldId, fieldIDToName_CheckTaskNameRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_CheckTaskNameRequest[fieldId])) +} + +func (p *CheckTaskNameRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.WorkspaceID = _field + return nil +} +func (p *CheckTaskNameRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.Name = _field + return nil +} +func (p *CheckTaskNameRequest) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBase() + if err := _field.Read(iprot); err != nil { + return err + } + p.Base = _field + return nil +} + +func (p *CheckTaskNameRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("CheckTaskNameRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *CheckTaskNameRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("workspace_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.WorkspaceID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *CheckTaskNameRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *CheckTaskNameRequest) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBase() { + if err = 
oprot.WriteFieldBegin("Base", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.Base.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *CheckTaskNameRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CheckTaskNameRequest(%+v)", *p) + +} + +func (p *CheckTaskNameRequest) DeepEqual(ano *CheckTaskNameRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.WorkspaceID) { + return false + } + if !p.Field2DeepEqual(ano.Name) { + return false + } + if !p.Field255DeepEqual(ano.Base) { + return false + } + return true +} + +func (p *CheckTaskNameRequest) Field1DeepEqual(src int64) bool { + + if p.WorkspaceID != src { + return false + } + return true +} +func (p *CheckTaskNameRequest) Field2DeepEqual(src string) bool { + + if strings.Compare(p.Name, src) != 0 { + return false + } + return true +} +func (p *CheckTaskNameRequest) Field255DeepEqual(src *base.Base) bool { + + if !p.Base.DeepEqual(src) { + return false + } + return true +} + +type CheckTaskNameResponse struct { + Pass *bool `thrift:"Pass,1,optional" frugal:"1,optional,bool" form:"Pass" json:"Pass,omitempty" query:"Pass"` + Message *string `thrift:"Message,2,optional" frugal:"2,optional,string" form:"Message" json:"Message,omitempty" query:"Message"` + BaseResp *base.BaseResp `thrift:"BaseResp,255" frugal:"255,default,base.BaseResp" form:"BaseResp" json:"BaseResp" query:"BaseResp"` +} + +func NewCheckTaskNameResponse() *CheckTaskNameResponse { + return &CheckTaskNameResponse{} +} + +func (p *CheckTaskNameResponse) InitDefault() { +} + +var CheckTaskNameResponse_Pass_DEFAULT bool + +func (p *CheckTaskNameResponse) GetPass() (v bool) { + if p == nil { + return + } + if !p.IsSetPass() { + return CheckTaskNameResponse_Pass_DEFAULT + } + return *p.Pass +} + +var CheckTaskNameResponse_Message_DEFAULT string + +func (p *CheckTaskNameResponse) GetMessage() (v string) { + if p == nil { + return + } + if !p.IsSetMessage() { + return CheckTaskNameResponse_Message_DEFAULT + } + return *p.Message +} + +var CheckTaskNameResponse_BaseResp_DEFAULT *base.BaseResp + +func (p *CheckTaskNameResponse) GetBaseResp() (v *base.BaseResp) { + if p == nil { + return + } + if !p.IsSetBaseResp() { + return CheckTaskNameResponse_BaseResp_DEFAULT + } + return p.BaseResp +} +func (p *CheckTaskNameResponse) SetPass(val *bool) { + p.Pass = val +} +func (p *CheckTaskNameResponse) SetMessage(val *string) { + p.Message = val +} +func (p *CheckTaskNameResponse) SetBaseResp(val *base.BaseResp) { + p.BaseResp = val +} + +var fieldIDToName_CheckTaskNameResponse = map[int16]string{ + 1: "Pass", + 2: "Message", + 255: "BaseResp", +} + +func (p *CheckTaskNameResponse) IsSetPass() bool { + return p.Pass != nil +} + +func (p *CheckTaskNameResponse) IsSetMessage() bool { + return p.Message != nil +} + +func (p *CheckTaskNameResponse) IsSetBaseResp() bool { + return p.BaseResp != nil +} + +func (p *CheckTaskNameResponse) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = 
iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_CheckTaskNameResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *CheckTaskNameResponse) ReadField1(iprot thrift.TProtocol) error { + + var _field *bool + if v, err := iprot.ReadBool(); err != nil { + return err + } else { + _field = &v + } + p.Pass = _field + return nil +} +func (p *CheckTaskNameResponse) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Message = _field + return nil +} +func (p *CheckTaskNameResponse) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBaseResp() + if err := _field.Read(iprot); err != nil { + return err + } + p.BaseResp = _field + return nil +} + +func (p *CheckTaskNameResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("CheckTaskNameResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *CheckTaskNameResponse) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetPass() { + if 
err = oprot.WriteFieldBegin("Pass", thrift.BOOL, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteBool(*p.Pass); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *CheckTaskNameResponse) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetMessage() { + if err = oprot.WriteFieldBegin("Message", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Message); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *CheckTaskNameResponse) writeField255(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("BaseResp", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.BaseResp.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *CheckTaskNameResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("CheckTaskNameResponse(%+v)", *p) + +} + +func (p *CheckTaskNameResponse) DeepEqual(ano *CheckTaskNameResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Pass) { + return false + } + if !p.Field2DeepEqual(ano.Message) { + return false + } + if !p.Field255DeepEqual(ano.BaseResp) { + return false + } + return true +} + +func (p *CheckTaskNameResponse) Field1DeepEqual(src *bool) bool { + + if p.Pass == src { + return true + } else if p.Pass == nil || src == nil { + return false + } + if *p.Pass != *src { + return false + } + return true +} +func (p *CheckTaskNameResponse) Field2DeepEqual(src *string) bool { + + if p.Message == src { + return true + } else if p.Message == nil || src == nil { + return false + } + if strings.Compare(*p.Message, *src) != 0 { + return false + } + return true +} +func (p *CheckTaskNameResponse) Field255DeepEqual(src *base.BaseResp) bool { + + if !p.BaseResp.DeepEqual(src) { + return false + } + return true +} + +type TaskService interface { + CheckTaskName(ctx context.Context, req *CheckTaskNameRequest) (r *CheckTaskNameResponse, err error) + + CreateTask(ctx context.Context, req *CreateTaskRequest) (r *CreateTaskResponse, err error) + + UpdateTask(ctx context.Context, req *UpdateTaskRequest) (r *UpdateTaskResponse, err error) + + ListTasks(ctx context.Context, req *ListTasksRequest) (r *ListTasksResponse, err error) + + GetTask(ctx context.Context, req *GetTaskRequest) (r *GetTaskResponse, err error) +} + +type TaskServiceClient struct { + c thrift.TClient +} + +func NewTaskServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *TaskServiceClient { + return &TaskServiceClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func 
NewTaskServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *TaskServiceClient { + return &TaskServiceClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewTaskServiceClient(c thrift.TClient) *TaskServiceClient { + return &TaskServiceClient{ + c: c, + } +} + +func (p *TaskServiceClient) Client_() thrift.TClient { + return p.c +} + +func (p *TaskServiceClient) CheckTaskName(ctx context.Context, req *CheckTaskNameRequest) (r *CheckTaskNameResponse, err error) { + var _args TaskServiceCheckTaskNameArgs + _args.Req = req + var _result TaskServiceCheckTaskNameResult + if err = p.Client_().Call(ctx, "CheckTaskName", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TaskServiceClient) CreateTask(ctx context.Context, req *CreateTaskRequest) (r *CreateTaskResponse, err error) { + var _args TaskServiceCreateTaskArgs + _args.Req = req + var _result TaskServiceCreateTaskResult + if err = p.Client_().Call(ctx, "CreateTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TaskServiceClient) UpdateTask(ctx context.Context, req *UpdateTaskRequest) (r *UpdateTaskResponse, err error) { + var _args TaskServiceUpdateTaskArgs + _args.Req = req + var _result TaskServiceUpdateTaskResult + if err = p.Client_().Call(ctx, "UpdateTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TaskServiceClient) ListTasks(ctx context.Context, req *ListTasksRequest) (r *ListTasksResponse, err error) { + var _args TaskServiceListTasksArgs + _args.Req = req + var _result TaskServiceListTasksResult + if err = p.Client_().Call(ctx, "ListTasks", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TaskServiceClient) GetTask(ctx context.Context, req *GetTaskRequest) (r *GetTaskResponse, err error) { + var _args TaskServiceGetTaskArgs + _args.Req = req + var _result TaskServiceGetTaskResult + if err = p.Client_().Call(ctx, "GetTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +type TaskServiceProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler TaskService +} + +func (p *TaskServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *TaskServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *TaskServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewTaskServiceProcessor(handler TaskService) *TaskServiceProcessor { + self := &TaskServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self.AddToProcessorMap("CheckTaskName", &taskServiceProcessorCheckTaskName{handler: handler}) + self.AddToProcessorMap("CreateTask", &taskServiceProcessorCreateTask{handler: handler}) + self.AddToProcessorMap("UpdateTask", &taskServiceProcessorUpdateTask{handler: handler}) + self.AddToProcessorMap("ListTasks", &taskServiceProcessorListTasks{handler: handler}) + self.AddToProcessorMap("GetTask", &taskServiceProcessorGetTask{handler: handler}) + return self +} +func (p *TaskServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + 
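// Illustrative sketch, not generated output: invoking the task API through the
// generated TaskServiceClient over an existing thrift.TClient. The transport value
// tc and the function name are assumptions for illustration only; assumes the same
// generated package:
//
//	func exampleCheckTaskName(ctx context.Context, tc thrift.TClient) (bool, error) {
//		cli := NewTaskServiceClient(tc)
//		req := NewCheckTaskNameRequest()
//		req.SetWorkspaceID(42)
//		req.SetName("span-annotation-task")
//		resp, err := cli.CheckTaskName(ctx, req)
//		if err != nil {
//			return false, err
//		}
//		return resp.GetPass(), nil // GetPass is nil-safe and defaults to false
//	}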
} + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x +} + +type taskServiceProcessorCheckTaskName struct { + handler TaskService +} + +func (p *taskServiceProcessorCheckTaskName) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TaskServiceCheckTaskNameArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("CheckTaskName", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TaskServiceCheckTaskNameResult{} + var retval *CheckTaskNameResponse + if retval, err2 = p.handler.CheckTaskName(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing CheckTaskName: "+err2.Error()) + oprot.WriteMessageBegin("CheckTaskName", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("CheckTaskName", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type taskServiceProcessorCreateTask struct { + handler TaskService +} + +func (p *taskServiceProcessorCreateTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TaskServiceCreateTaskArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("CreateTask", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TaskServiceCreateTaskResult{} + var retval *CreateTaskResponse + if retval, err2 = p.handler.CreateTask(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing CreateTask: "+err2.Error()) + oprot.WriteMessageBegin("CreateTask", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("CreateTask", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type taskServiceProcessorUpdateTask struct { + handler TaskService +} + +func (p *taskServiceProcessorUpdateTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + 
args := TaskServiceUpdateTaskArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("UpdateTask", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TaskServiceUpdateTaskResult{} + var retval *UpdateTaskResponse + if retval, err2 = p.handler.UpdateTask(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing UpdateTask: "+err2.Error()) + oprot.WriteMessageBegin("UpdateTask", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("UpdateTask", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type taskServiceProcessorListTasks struct { + handler TaskService +} + +func (p *taskServiceProcessorListTasks) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TaskServiceListTasksArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ListTasks", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TaskServiceListTasksResult{} + var retval *ListTasksResponse + if retval, err2 = p.handler.ListTasks(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ListTasks: "+err2.Error()) + oprot.WriteMessageBegin("ListTasks", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("ListTasks", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type taskServiceProcessorGetTask struct { + handler TaskService +} + +func (p *taskServiceProcessorGetTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TaskServiceGetTaskArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("GetTask", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TaskServiceGetTaskResult{} + var retval *GetTaskResponse + if retval, err2 = p.handler.GetTask(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetTask: "+err2.Error()) + oprot.WriteMessageBegin("GetTask", thrift.EXCEPTION, 
seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("GetTask", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type TaskServiceCheckTaskNameArgs struct { + Req *CheckTaskNameRequest `thrift:"req,1" frugal:"1,default,CheckTaskNameRequest"` +} + +func NewTaskServiceCheckTaskNameArgs() *TaskServiceCheckTaskNameArgs { + return &TaskServiceCheckTaskNameArgs{} +} + +func (p *TaskServiceCheckTaskNameArgs) InitDefault() { +} + +var TaskServiceCheckTaskNameArgs_Req_DEFAULT *CheckTaskNameRequest + +func (p *TaskServiceCheckTaskNameArgs) GetReq() (v *CheckTaskNameRequest) { + if p == nil { + return + } + if !p.IsSetReq() { + return TaskServiceCheckTaskNameArgs_Req_DEFAULT + } + return p.Req +} +func (p *TaskServiceCheckTaskNameArgs) SetReq(val *CheckTaskNameRequest) { + p.Req = val +} + +var fieldIDToName_TaskServiceCheckTaskNameArgs = map[int16]string{ + 1: "req", +} + +func (p *TaskServiceCheckTaskNameArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TaskServiceCheckTaskNameArgs) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceCheckTaskNameArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskServiceCheckTaskNameArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewCheckTaskNameRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.Req = _field + return nil +} + +func (p *TaskServiceCheckTaskNameArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("CheckTaskName_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + 
if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskServiceCheckTaskNameArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Req.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TaskServiceCheckTaskNameArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskServiceCheckTaskNameArgs(%+v)", *p) + +} + +func (p *TaskServiceCheckTaskNameArgs) DeepEqual(ano *TaskServiceCheckTaskNameArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Req) { + return false + } + return true +} + +func (p *TaskServiceCheckTaskNameArgs) Field1DeepEqual(src *CheckTaskNameRequest) bool { + + if !p.Req.DeepEqual(src) { + return false + } + return true +} + +type TaskServiceCheckTaskNameResult struct { + Success *CheckTaskNameResponse `thrift:"success,0,optional" frugal:"0,optional,CheckTaskNameResponse"` +} + +func NewTaskServiceCheckTaskNameResult() *TaskServiceCheckTaskNameResult { + return &TaskServiceCheckTaskNameResult{} +} + +func (p *TaskServiceCheckTaskNameResult) InitDefault() { +} + +var TaskServiceCheckTaskNameResult_Success_DEFAULT *CheckTaskNameResponse + +func (p *TaskServiceCheckTaskNameResult) GetSuccess() (v *CheckTaskNameResponse) { + if p == nil { + return + } + if !p.IsSetSuccess() { + return TaskServiceCheckTaskNameResult_Success_DEFAULT + } + return p.Success +} +func (p *TaskServiceCheckTaskNameResult) SetSuccess(x interface{}) { + p.Success = x.(*CheckTaskNameResponse) +} + +var fieldIDToName_TaskServiceCheckTaskNameResult = map[int16]string{ + 0: "success", +} + +func (p *TaskServiceCheckTaskNameResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TaskServiceCheckTaskNameResult) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) 
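// Illustrative sketch, not generated output: a stub implementation of the
// TaskService interface above, registered through NewTaskServiceProcessor. The
// stubTaskService type and its trivial behaviour are assumptions for illustration;
// assumes the same generated package:
//
//	type stubTaskService struct{}
//
//	func (stubTaskService) CheckTaskName(ctx context.Context, req *CheckTaskNameRequest) (*CheckTaskNameResponse, error) {
//		pass := true
//		return &CheckTaskNameResponse{Pass: &pass}, nil
//	}
//
//	func (stubTaskService) CreateTask(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) {
//		return &CreateTaskResponse{}, nil
//	}
//
//	func (stubTaskService) UpdateTask(ctx context.Context, req *UpdateTaskRequest) (*UpdateTaskResponse, error) {
//		return &UpdateTaskResponse{}, nil
//	}
//
//	func (stubTaskService) ListTasks(ctx context.Context, req *ListTasksRequest) (*ListTasksResponse, error) {
//		return &ListTasksResponse{}, nil
//	}
//
//	func (stubTaskService) GetTask(ctx context.Context, req *GetTaskRequest) (*GetTaskResponse, error) {
//		return &GetTaskResponse{}, nil
//	}
//
//	var _ TaskService = stubTaskService{} // compile-time interface check
//
//	// Process dispatches each incoming RPC name to the matching handler method.
//	var exampleTaskProcessor = NewTaskServiceProcessor(stubTaskService{})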
+ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceCheckTaskNameResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskServiceCheckTaskNameResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewCheckTaskNameResponse() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *TaskServiceCheckTaskNameResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("CheckTaskName_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskServiceCheckTaskNameResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *TaskServiceCheckTaskNameResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskServiceCheckTaskNameResult(%+v)", *p) + +} + +func (p *TaskServiceCheckTaskNameResult) DeepEqual(ano *TaskServiceCheckTaskNameResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true +} + +func (p *TaskServiceCheckTaskNameResult) Field0DeepEqual(src *CheckTaskNameResponse) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} + +type TaskServiceCreateTaskArgs struct { + Req *CreateTaskRequest `thrift:"req,1" frugal:"1,default,CreateTaskRequest"` +} + +func NewTaskServiceCreateTaskArgs() *TaskServiceCreateTaskArgs { + return &TaskServiceCreateTaskArgs{} +} + +func (p *TaskServiceCreateTaskArgs) InitDefault() { +} + +var TaskServiceCreateTaskArgs_Req_DEFAULT *CreateTaskRequest + +func (p *TaskServiceCreateTaskArgs) GetReq() (v *CreateTaskRequest) { + if p == nil { + return + } + if !p.IsSetReq() { + return TaskServiceCreateTaskArgs_Req_DEFAULT + } + return p.Req +} +func (p *TaskServiceCreateTaskArgs) SetReq(val *CreateTaskRequest) { + 
p.Req = val +} + +var fieldIDToName_TaskServiceCreateTaskArgs = map[int16]string{ + 1: "req", +} + +func (p *TaskServiceCreateTaskArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TaskServiceCreateTaskArgs) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceCreateTaskArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskServiceCreateTaskArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewCreateTaskRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.Req = _field + return nil +} + +func (p *TaskServiceCreateTaskArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("CreateTask_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskServiceCreateTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Req.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TaskServiceCreateTaskArgs) String() string { + if p == nil { + return "" + } + return 
fmt.Sprintf("TaskServiceCreateTaskArgs(%+v)", *p) + +} + +func (p *TaskServiceCreateTaskArgs) DeepEqual(ano *TaskServiceCreateTaskArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Req) { + return false + } + return true +} + +func (p *TaskServiceCreateTaskArgs) Field1DeepEqual(src *CreateTaskRequest) bool { + + if !p.Req.DeepEqual(src) { + return false + } + return true +} + +type TaskServiceCreateTaskResult struct { + Success *CreateTaskResponse `thrift:"success,0,optional" frugal:"0,optional,CreateTaskResponse"` +} + +func NewTaskServiceCreateTaskResult() *TaskServiceCreateTaskResult { + return &TaskServiceCreateTaskResult{} +} + +func (p *TaskServiceCreateTaskResult) InitDefault() { +} + +var TaskServiceCreateTaskResult_Success_DEFAULT *CreateTaskResponse + +func (p *TaskServiceCreateTaskResult) GetSuccess() (v *CreateTaskResponse) { + if p == nil { + return + } + if !p.IsSetSuccess() { + return TaskServiceCreateTaskResult_Success_DEFAULT + } + return p.Success +} +func (p *TaskServiceCreateTaskResult) SetSuccess(x interface{}) { + p.Success = x.(*CreateTaskResponse) +} + +var fieldIDToName_TaskServiceCreateTaskResult = map[int16]string{ + 0: "success", +} + +func (p *TaskServiceCreateTaskResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TaskServiceCreateTaskResult) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceCreateTaskResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskServiceCreateTaskResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewCreateTaskResponse() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *TaskServiceCreateTaskResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("CreateTask_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = 
oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskServiceCreateTaskResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *TaskServiceCreateTaskResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskServiceCreateTaskResult(%+v)", *p) + +} + +func (p *TaskServiceCreateTaskResult) DeepEqual(ano *TaskServiceCreateTaskResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true +} + +func (p *TaskServiceCreateTaskResult) Field0DeepEqual(src *CreateTaskResponse) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} + +type TaskServiceUpdateTaskArgs struct { + Req *UpdateTaskRequest `thrift:"req,1" frugal:"1,default,UpdateTaskRequest"` +} + +func NewTaskServiceUpdateTaskArgs() *TaskServiceUpdateTaskArgs { + return &TaskServiceUpdateTaskArgs{} +} + +func (p *TaskServiceUpdateTaskArgs) InitDefault() { +} + +var TaskServiceUpdateTaskArgs_Req_DEFAULT *UpdateTaskRequest + +func (p *TaskServiceUpdateTaskArgs) GetReq() (v *UpdateTaskRequest) { + if p == nil { + return + } + if !p.IsSetReq() { + return TaskServiceUpdateTaskArgs_Req_DEFAULT + } + return p.Req +} +func (p *TaskServiceUpdateTaskArgs) SetReq(val *UpdateTaskRequest) { + p.Req = val +} + +var fieldIDToName_TaskServiceUpdateTaskArgs = map[int16]string{ + 1: "req", +} + +func (p *TaskServiceUpdateTaskArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TaskServiceUpdateTaskArgs) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) 
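// Illustrative note, not generated output: the *Args/*Result wrappers above carry
// only the request (field 1) and the reply (field 0); GetSuccess returns nil until
// SetSuccess stores a decoded response. A minimal sketch, same generated package
// assumed, with a hypothetical function name:
//
//	func exampleCreateTaskResult() *CreateTaskResponse {
//		var res TaskServiceCreateTaskResult
//		_ = res.GetSuccess()                  // nil: no reply decoded yet
//		res.SetSuccess(&CreateTaskResponse{}) // SetSuccess type-asserts to *CreateTaskResponse
//		return res.GetSuccess()               // now non-nil
//	}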
+ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceUpdateTaskArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskServiceUpdateTaskArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewUpdateTaskRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.Req = _field + return nil +} + +func (p *TaskServiceUpdateTaskArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("UpdateTask_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskServiceUpdateTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Req.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TaskServiceUpdateTaskArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskServiceUpdateTaskArgs(%+v)", *p) + +} + +func (p *TaskServiceUpdateTaskArgs) DeepEqual(ano *TaskServiceUpdateTaskArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Req) { + return false + } + return true +} + +func (p *TaskServiceUpdateTaskArgs) Field1DeepEqual(src *UpdateTaskRequest) bool { + + if !p.Req.DeepEqual(src) { + return false + } + return true +} + +type TaskServiceUpdateTaskResult struct { + Success *UpdateTaskResponse `thrift:"success,0,optional" frugal:"0,optional,UpdateTaskResponse"` +} + +func NewTaskServiceUpdateTaskResult() *TaskServiceUpdateTaskResult { + return &TaskServiceUpdateTaskResult{} +} + +func (p *TaskServiceUpdateTaskResult) InitDefault() { +} + +var TaskServiceUpdateTaskResult_Success_DEFAULT *UpdateTaskResponse + +func (p *TaskServiceUpdateTaskResult) GetSuccess() (v *UpdateTaskResponse) { + if p == nil { + return + } + if !p.IsSetSuccess() { + return TaskServiceUpdateTaskResult_Success_DEFAULT + } + return p.Success +} +func (p *TaskServiceUpdateTaskResult) SetSuccess(x interface{}) { + p.Success = x.(*UpdateTaskResponse) +} + +var fieldIDToName_TaskServiceUpdateTaskResult = map[int16]string{ + 0: "success", +} + +func (p *TaskServiceUpdateTaskResult) 
IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TaskServiceUpdateTaskResult) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceUpdateTaskResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskServiceUpdateTaskResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewUpdateTaskResponse() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *TaskServiceUpdateTaskResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("UpdateTask_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskServiceUpdateTaskResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *TaskServiceUpdateTaskResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskServiceUpdateTaskResult(%+v)", *p) + +} + +func (p *TaskServiceUpdateTaskResult) 
DeepEqual(ano *TaskServiceUpdateTaskResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true +} + +func (p *TaskServiceUpdateTaskResult) Field0DeepEqual(src *UpdateTaskResponse) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} + +type TaskServiceListTasksArgs struct { + Req *ListTasksRequest `thrift:"req,1" frugal:"1,default,ListTasksRequest"` +} + +func NewTaskServiceListTasksArgs() *TaskServiceListTasksArgs { + return &TaskServiceListTasksArgs{} +} + +func (p *TaskServiceListTasksArgs) InitDefault() { +} + +var TaskServiceListTasksArgs_Req_DEFAULT *ListTasksRequest + +func (p *TaskServiceListTasksArgs) GetReq() (v *ListTasksRequest) { + if p == nil { + return + } + if !p.IsSetReq() { + return TaskServiceListTasksArgs_Req_DEFAULT + } + return p.Req +} +func (p *TaskServiceListTasksArgs) SetReq(val *ListTasksRequest) { + p.Req = val +} + +var fieldIDToName_TaskServiceListTasksArgs = map[int16]string{ + 1: "req", +} + +func (p *TaskServiceListTasksArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TaskServiceListTasksArgs) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceListTasksArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskServiceListTasksArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewListTasksRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.Req = _field + return nil +} + +func (p *TaskServiceListTasksArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ListTasks_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return 
thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskServiceListTasksArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Req.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TaskServiceListTasksArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskServiceListTasksArgs(%+v)", *p) + +} + +func (p *TaskServiceListTasksArgs) DeepEqual(ano *TaskServiceListTasksArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Req) { + return false + } + return true +} + +func (p *TaskServiceListTasksArgs) Field1DeepEqual(src *ListTasksRequest) bool { + + if !p.Req.DeepEqual(src) { + return false + } + return true +} + +type TaskServiceListTasksResult struct { + Success *ListTasksResponse `thrift:"success,0,optional" frugal:"0,optional,ListTasksResponse"` +} + +func NewTaskServiceListTasksResult() *TaskServiceListTasksResult { + return &TaskServiceListTasksResult{} +} + +func (p *TaskServiceListTasksResult) InitDefault() { +} + +var TaskServiceListTasksResult_Success_DEFAULT *ListTasksResponse + +func (p *TaskServiceListTasksResult) GetSuccess() (v *ListTasksResponse) { + if p == nil { + return + } + if !p.IsSetSuccess() { + return TaskServiceListTasksResult_Success_DEFAULT + } + return p.Success +} +func (p *TaskServiceListTasksResult) SetSuccess(x interface{}) { + p.Success = x.(*ListTasksResponse) +} + +var fieldIDToName_TaskServiceListTasksResult = map[int16]string{ + 0: "success", +} + +func (p *TaskServiceListTasksResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TaskServiceListTasksResult) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceListTasksResult[fieldId]), err) +SkipFieldError: + return 
thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskServiceListTasksResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewListTasksResponse() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *TaskServiceListTasksResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ListTasks_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskServiceListTasksResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *TaskServiceListTasksResult) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskServiceListTasksResult(%+v)", *p) + +} + +func (p *TaskServiceListTasksResult) DeepEqual(ano *TaskServiceListTasksResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field0DeepEqual(ano.Success) { + return false + } + return true +} + +func (p *TaskServiceListTasksResult) Field0DeepEqual(src *ListTasksResponse) bool { + + if !p.Success.DeepEqual(src) { + return false + } + return true +} + +type TaskServiceGetTaskArgs struct { + Req *GetTaskRequest `thrift:"req,1" frugal:"1,default,GetTaskRequest"` +} + +func NewTaskServiceGetTaskArgs() *TaskServiceGetTaskArgs { + return &TaskServiceGetTaskArgs{} +} + +func (p *TaskServiceGetTaskArgs) InitDefault() { +} + +var TaskServiceGetTaskArgs_Req_DEFAULT *GetTaskRequest + +func (p *TaskServiceGetTaskArgs) GetReq() (v *GetTaskRequest) { + if p == nil { + return + } + if !p.IsSetReq() { + return TaskServiceGetTaskArgs_Req_DEFAULT + } + return p.Req +} +func (p *TaskServiceGetTaskArgs) SetReq(val *GetTaskRequest) { + p.Req = val +} + +var fieldIDToName_TaskServiceGetTaskArgs = map[int16]string{ + 1: "req", +} + +func (p *TaskServiceGetTaskArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TaskServiceGetTaskArgs) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + 
for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceGetTaskArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskServiceGetTaskArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewGetTaskRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.Req = _field + return nil +} + +func (p *TaskServiceGetTaskArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("GetTask_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TaskServiceGetTaskArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Req.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TaskServiceGetTaskArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("TaskServiceGetTaskArgs(%+v)", *p) + +} + +func (p *TaskServiceGetTaskArgs) DeepEqual(ano *TaskServiceGetTaskArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Req) { + return false + } + return true +} + +func (p *TaskServiceGetTaskArgs) Field1DeepEqual(src *GetTaskRequest) bool { + + if !p.Req.DeepEqual(src) { + return false + } + return true +} + +type 
TaskServiceGetTaskResult struct { + Success *GetTaskResponse `thrift:"success,0,optional" frugal:"0,optional,GetTaskResponse"` +} + +func NewTaskServiceGetTaskResult() *TaskServiceGetTaskResult { + return &TaskServiceGetTaskResult{} +} + +func (p *TaskServiceGetTaskResult) InitDefault() { +} + +var TaskServiceGetTaskResult_Success_DEFAULT *GetTaskResponse + +func (p *TaskServiceGetTaskResult) GetSuccess() (v *GetTaskResponse) { + if p == nil { + return + } + if !p.IsSetSuccess() { + return TaskServiceGetTaskResult_Success_DEFAULT + } + return p.Success +} +func (p *TaskServiceGetTaskResult) SetSuccess(x interface{}) { + p.Success = x.(*GetTaskResponse) +} + +var fieldIDToName_TaskServiceGetTaskResult = map[int16]string{ + 0: "success", +} + +func (p *TaskServiceGetTaskResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TaskServiceGetTaskResult) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceGetTaskResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TaskServiceGetTaskResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewGetTaskResponse() + if err := _field.Read(iprot); err != nil { + return err + } + p.Success = _field + return nil +} + +func (p *TaskServiceGetTaskResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("GetTask_result"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p 
*TaskServiceGetTaskResult) writeField0(oprot thrift.TProtocol) (err error) {
+	if p.IsSetSuccess() {
+		if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+			goto WriteFieldBeginError
+		}
+		if err := p.Success.Write(oprot); err != nil {
+			return err
+		}
+		if err = oprot.WriteFieldEnd(); err != nil {
+			goto WriteFieldEndError
+		}
+	}
+	return nil
+WriteFieldBeginError:
+	return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err)
+WriteFieldEndError:
+	return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
+}
+
+func (p *TaskServiceGetTaskResult) String() string {
+	if p == nil {
+		return ""
+	}
+	return fmt.Sprintf("TaskServiceGetTaskResult(%+v)", *p)
+
+}
+
+func (p *TaskServiceGetTaskResult) DeepEqual(ano *TaskServiceGetTaskResult) bool {
+	if p == ano {
+		return true
+	} else if p == nil || ano == nil {
+		return false
+	}
+	if !p.Field0DeepEqual(ano.Success) {
+		return false
+	}
+	return true
+}
+
+func (p *TaskServiceGetTaskResult) Field0DeepEqual(src *GetTaskResponse) bool {
+
+	if !p.Success.DeepEqual(src) {
+		return false
+	}
+	return true
+}
diff --git a/backend/kitex_gen/coze/loop/observability/task/coze.loop.observability.task_validator.go b/backend/kitex_gen/coze/loop/observability/task/coze.loop.observability.task_validator.go
new file mode 100644
index 000000000..94ddf6901
--- /dev/null
+++ b/backend/kitex_gen/coze/loop/observability/task/coze.loop.observability.task_validator.go
@@ -0,0 +1,140 @@
+// Code generated by Validator v0.2.6. DO NOT EDIT.
+
+package task
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+)
+
+// unused protection
+var (
+	_ = fmt.Formatter(nil)
+	_ = (*bytes.Buffer)(nil)
+	_ = (*strings.Builder)(nil)
+	_ = reflect.Type(nil)
+	_ = (*regexp.Regexp)(nil)
+	_ = time.Nanosecond
+)
+
+func (p *CreateTaskRequest) IsValid() error {
+	if p.Task != nil {
+		if err := p.Task.IsValid(); err != nil {
+			return fmt.Errorf("field Task not valid, %w", err)
+		}
+	}
+	if p.Base != nil {
+		if err := p.Base.IsValid(); err != nil {
+			return fmt.Errorf("field Base not valid, %w", err)
+		}
+	}
+	return nil
+}
+func (p *CreateTaskResponse) IsValid() error {
+	if p.BaseResp != nil {
+		if err := p.BaseResp.IsValid(); err != nil {
+			return fmt.Errorf("field BaseResp not valid, %w", err)
+		}
+	}
+	return nil
+}
+func (p *UpdateTaskRequest) IsValid() error {
+	if p.WorkspaceID <= int64(0) {
+		return fmt.Errorf("field WorkspaceID gt rule failed, current value: %v", p.WorkspaceID)
+	}
+	if p.EffectiveTime != nil {
+		if err := p.EffectiveTime.IsValid(); err != nil {
+			return fmt.Errorf("field EffectiveTime not valid, %w", err)
+		}
+	}
+	if p.Base != nil {
+		if err := p.Base.IsValid(); err != nil {
+			return fmt.Errorf("field Base not valid, %w", err)
+		}
+	}
+	return nil
+}
+func (p *UpdateTaskResponse) IsValid() error {
+	if p.BaseResp != nil {
+		if err := p.BaseResp.IsValid(); err != nil {
+			return fmt.Errorf("field BaseResp not valid, %w", err)
+		}
+	}
+	return nil
+}
+func (p *ListTasksRequest) IsValid() error {
+	if p.WorkspaceID <= int64(0) {
+		return fmt.Errorf("field WorkspaceID gt rule failed, current value: %v", p.WorkspaceID)
+	}
+	if p.TaskFilters != nil {
+		if err := p.TaskFilters.IsValid(); err != nil {
+			return fmt.Errorf("field TaskFilters not valid, %w", err)
+		}
+	}
+	if p.OrderBy != nil {
+		if err := p.OrderBy.IsValid(); err != nil {
+			return fmt.Errorf("field OrderBy not valid, %w", err)
+		}
+	}
+	if p.Base != nil {
+		if err := p.Base.IsValid(); err != nil {
+			return fmt.Errorf("field Base not valid, %w", err)
+		}
+	}
+	return nil
+}
+func (p *ListTasksResponse) IsValid() error {
+	if p.BaseResp != nil {
+		if err := p.BaseResp.IsValid(); err != nil {
+			return fmt.Errorf("field BaseResp not valid, %w", err)
+		}
+	}
+	return nil
+}
+func (p *GetTaskRequest) IsValid() error {
+	if p.WorkspaceID <= int64(0) {
+		return fmt.Errorf("field WorkspaceID gt rule failed, current value: %v", p.WorkspaceID)
+	}
+	if p.Base != nil {
+		if err := p.Base.IsValid(); err != nil {
+			return fmt.Errorf("field Base not valid, %w", err)
+		}
+	}
+	return nil
+}
+func (p *GetTaskResponse) IsValid() error {
+	if p.Task != nil {
+		if err := p.Task.IsValid(); err != nil {
+			return fmt.Errorf("field Task not valid, %w", err)
+		}
+	}
+	if p.BaseResp != nil {
+		if err := p.BaseResp.IsValid(); err != nil {
+			return fmt.Errorf("field BaseResp not valid, %w", err)
+		}
+	}
+	return nil
+}
+func (p *CheckTaskNameRequest) IsValid() error {
+	if p.WorkspaceID <= int64(0) {
+		return fmt.Errorf("field WorkspaceID gt rule failed, current value: %v", p.WorkspaceID)
+	}
+	if p.Base != nil {
+		if err := p.Base.IsValid(); err != nil {
+			return fmt.Errorf("field Base not valid, %w", err)
+		}
+	}
+	return nil
+}
+func (p *CheckTaskNameResponse) IsValid() error {
+	if p.BaseResp != nil {
+		if err := p.BaseResp.IsValid(); err != nil {
+			return fmt.Errorf("field BaseResp not valid, %w", err)
+		}
+	}
+	return nil
+}
diff --git a/backend/kitex_gen/coze/loop/observability/task/k-consts.go b/backend/kitex_gen/coze/loop/observability/task/k-consts.go
new file mode 100644
index 000000000..658fe4b5e
--- /dev/null
+++ b/backend/kitex_gen/coze/loop/observability/task/k-consts.go
@@ -0,0 +1,4 @@
+package task
+
+// KitexUnusedProtection is used to prevent 'imported and not used' error.
+var KitexUnusedProtection = struct{}{}
diff --git a/backend/kitex_gen/coze/loop/observability/task/k-coze.loop.observability.task.go b/backend/kitex_gen/coze/loop/observability/task/k-coze.loop.observability.task.go
new file mode 100644
index 000000000..3732777b3
--- /dev/null
+++ b/backend/kitex_gen/coze/loop/observability/task/k-coze.loop.observability.task.go
@@ -0,0 +1,3654 @@
+// Code generated by Kitex v0.13.1. DO NOT EDIT.
+ +package task + +import ( + "bytes" + "fmt" + "reflect" + "strings" + + "github.com/cloudwego/gopkg/protocol/thrift" + kutils "github.com/cloudwego/kitex/pkg/utils" + + "github.com/coze-dev/coze-loop/backend/kitex_gen/base" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" +) + +var ( + _ = base.KitexUnusedProtection + _ = common.KitexUnusedProtection + _ = filter.KitexUnusedProtection + _ = task.KitexUnusedProtection +) + +// unused protection +var ( + _ = fmt.Formatter(nil) + _ = (*bytes.Buffer)(nil) + _ = (*strings.Builder)(nil) + _ = reflect.Type(nil) + _ = thrift.STOP +) + +func (p *CreateTaskRequest) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetTask bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTask = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetTask { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_CreateTaskRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_CreateTaskRequest[fieldId])) +} + +func (p *CreateTaskRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := task.NewTask() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Task = _field + return offset, nil +} + +func (p *CreateTaskRequest) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBase() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Base = _field + return offset, nil +} + +func (p *CreateTaskRequest) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *CreateTaskRequest) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p 
*CreateTaskRequest) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *CreateTaskRequest) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Task.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *CreateTaskRequest) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBase() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.Base.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *CreateTaskRequest) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Task.BLength() + return l +} + +func (p *CreateTaskRequest) field255Length() int { + l := 0 + if p.IsSetBase() { + l += thrift.Binary.FieldBeginLength() + l += p.Base.BLength() + } + return l +} + +func (p *CreateTaskRequest) DeepCopy(s interface{}) error { + src, ok := s.(*CreateTaskRequest) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _task *task.Task + if src.Task != nil { + _task = &task.Task{} + if err := _task.DeepCopy(src.Task); err != nil { + return err + } + } + p.Task = _task + + var _base *base.Base + if src.Base != nil { + _base = &base.Base{} + if err := _base.DeepCopy(src.Base); err != nil { + return err + } + } + p.Base = _base + + return nil +} + +func (p *CreateTaskResponse) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_CreateTaskResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *CreateTaskResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.TaskID = _field + return offset, nil +} + +func (p *CreateTaskResponse) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBaseResp() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BaseResp = _field + 
return offset, nil +} + +func (p *CreateTaskResponse) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *CreateTaskResponse) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *CreateTaskResponse) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *CreateTaskResponse) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTaskID() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], *p.TaskID) + } + return offset +} + +func (p *CreateTaskResponse) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBaseResp() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.BaseResp.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *CreateTaskResponse) field1Length() int { + l := 0 + if p.IsSetTaskID() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *CreateTaskResponse) field255Length() int { + l := 0 + if p.IsSetBaseResp() { + l += thrift.Binary.FieldBeginLength() + l += p.BaseResp.BLength() + } + return l +} + +func (p *CreateTaskResponse) DeepCopy(s interface{}) error { + src, ok := s.(*CreateTaskResponse) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.TaskID != nil { + tmp := *src.TaskID + p.TaskID = &tmp + } + + var _baseResp *base.BaseResp + if src.BaseResp != nil { + _baseResp = &base.BaseResp{} + if err := _baseResp.DeepCopy(src.BaseResp); err != nil { + return err + } + } + p.BaseResp = _baseResp + + return nil +} + +func (p *UpdateTaskRequest) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetTaskID bool = false + var issetWorkspaceID bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTaskID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } 
+ case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.DOUBLE { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetTaskID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetWorkspaceID { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_UpdateTaskRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_UpdateTaskRequest[fieldId])) +} + +func (p *UpdateTaskRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.TaskID = _field + return offset, nil +} + +func (p *UpdateTaskRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.WorkspaceID = _field + return offset, nil +} + +func (p *UpdateTaskRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field *task.TaskStatus + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.TaskStatus = _field + return offset, nil +} + +func (p *UpdateTaskRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field *string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.Description = _field + return offset, nil +} + +func (p *UpdateTaskRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + _field := task.NewEffectiveTime() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.EffectiveTime = _field + return offset, nil +} + +func (p *UpdateTaskRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + var _field *float64 + if v, l, err := thrift.Binary.ReadDouble(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.SampleRate = _field + return offset, nil +} + +func (p *UpdateTaskRequest) FastReadField255(buf []byte) (int, error) { + 
offset := 0 + _field := base.NewBase() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Base = _field + return offset, nil +} + +func (p *UpdateTaskRequest) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *UpdateTaskRequest) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField6(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField5(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *UpdateTaskRequest) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *UpdateTaskRequest) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.TaskID) + return offset +} + +func (p *UpdateTaskRequest) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 2) + offset += thrift.Binary.WriteI64(buf[offset:], p.WorkspaceID) + return offset +} + +func (p *UpdateTaskRequest) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTaskStatus() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 3) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.TaskStatus) + } + return offset +} + +func (p *UpdateTaskRequest) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetDescription() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 4) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.Description) + } + return offset +} + +func (p *UpdateTaskRequest) fastWriteField5(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetEffectiveTime() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 5) + offset += p.EffectiveTime.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *UpdateTaskRequest) fastWriteField6(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSampleRate() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.DOUBLE, 6) + offset += thrift.Binary.WriteDouble(buf[offset:], *p.SampleRate) + } + return offset +} + +func (p *UpdateTaskRequest) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBase() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.Base.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *UpdateTaskRequest) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *UpdateTaskRequest) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *UpdateTaskRequest) field3Length() int { + l := 0 + if p.IsSetTaskStatus() { + l += thrift.Binary.FieldBeginLength() + l += 
thrift.Binary.StringLengthNocopy(*p.TaskStatus) + } + return l +} + +func (p *UpdateTaskRequest) field4Length() int { + l := 0 + if p.IsSetDescription() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.Description) + } + return l +} + +func (p *UpdateTaskRequest) field5Length() int { + l := 0 + if p.IsSetEffectiveTime() { + l += thrift.Binary.FieldBeginLength() + l += p.EffectiveTime.BLength() + } + return l +} + +func (p *UpdateTaskRequest) field6Length() int { + l := 0 + if p.IsSetSampleRate() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.DoubleLength() + } + return l +} + +func (p *UpdateTaskRequest) field255Length() int { + l := 0 + if p.IsSetBase() { + l += thrift.Binary.FieldBeginLength() + l += p.Base.BLength() + } + return l +} + +func (p *UpdateTaskRequest) DeepCopy(s interface{}) error { + src, ok := s.(*UpdateTaskRequest) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.TaskID = src.TaskID + + p.WorkspaceID = src.WorkspaceID + + if src.TaskStatus != nil { + tmp := *src.TaskStatus + p.TaskStatus = &tmp + } + + if src.Description != nil { + var tmp string + if *src.Description != "" { + tmp = kutils.StringDeepCopy(*src.Description) + } + p.Description = &tmp + } + + var _effectiveTime *task.EffectiveTime + if src.EffectiveTime != nil { + _effectiveTime = &task.EffectiveTime{} + if err := _effectiveTime.DeepCopy(src.EffectiveTime); err != nil { + return err + } + } + p.EffectiveTime = _effectiveTime + + if src.SampleRate != nil { + tmp := *src.SampleRate + p.SampleRate = &tmp + } + + var _base *base.Base + if src.Base != nil { + _base = &base.Base{} + if err := _base.DeepCopy(src.Base); err != nil { + return err + } + } + p.Base = _base + + return nil +} + +func (p *UpdateTaskResponse) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_UpdateTaskResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *UpdateTaskResponse) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBaseResp() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BaseResp = _field + return offset, nil +} + +func (p *UpdateTaskResponse) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *UpdateTaskResponse) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField255(buf[offset:], w) + } + 
offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *UpdateTaskResponse) BLength() int { + l := 0 + if p != nil { + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *UpdateTaskResponse) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBaseResp() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.BaseResp.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *UpdateTaskResponse) field255Length() int { + l := 0 + if p.IsSetBaseResp() { + l += thrift.Binary.FieldBeginLength() + l += p.BaseResp.BLength() + } + return l +} + +func (p *UpdateTaskResponse) DeepCopy(s interface{}) error { + src, ok := s.(*UpdateTaskResponse) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _baseResp *base.BaseResp + if src.BaseResp != nil { + _baseResp = &base.BaseResp{} + if err := _baseResp.DeepCopy(src.BaseResp); err != nil { + return err + } + } + p.BaseResp = _baseResp + + return nil +} + +func (p *ListTasksRequest) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetWorkspaceID bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 101: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField101(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 102: + if fieldTypeId == thrift.I32 { + l, err = p.FastReadField102(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 103: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField103(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetWorkspaceID { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return 
offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ListTasksRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_ListTasksRequest[fieldId])) +} + +func (p *ListTasksRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.WorkspaceID = _field + return offset, nil +} + +func (p *ListTasksRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + _field := filter.NewTaskFilterFields() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.TaskFilters = _field + return offset, nil +} + +func (p *ListTasksRequest) FastReadField101(buf []byte) (int, error) { + offset := 0 + + var _field *int32 + if v, l, err := thrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.Limit = _field + return offset, nil +} + +func (p *ListTasksRequest) FastReadField102(buf []byte) (int, error) { + offset := 0 + + var _field *int32 + if v, l, err := thrift.Binary.ReadI32(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.Offset = _field + return offset, nil +} + +func (p *ListTasksRequest) FastReadField103(buf []byte) (int, error) { + offset := 0 + _field := common.NewOrderBy() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.OrderBy = _field + return offset, nil +} + +func (p *ListTasksRequest) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBase() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Base = _field + return offset, nil +} + +func (p *ListTasksRequest) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *ListTasksRequest) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField101(buf[offset:], w) + offset += p.fastWriteField102(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField103(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *ListTasksRequest) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field101Length() + l += p.field102Length() + l += p.field103Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *ListTasksRequest) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.WorkspaceID) + return offset +} + +func (p *ListTasksRequest) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTaskFilters() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 2) + offset += p.TaskFilters.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p 
*ListTasksRequest) fastWriteField101(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetLimit() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I32, 101) + offset += thrift.Binary.WriteI32(buf[offset:], *p.Limit) + } + return offset +} + +func (p *ListTasksRequest) fastWriteField102(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetOffset() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I32, 102) + offset += thrift.Binary.WriteI32(buf[offset:], *p.Offset) + } + return offset +} + +func (p *ListTasksRequest) fastWriteField103(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetOrderBy() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 103) + offset += p.OrderBy.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *ListTasksRequest) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBase() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.Base.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *ListTasksRequest) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *ListTasksRequest) field2Length() int { + l := 0 + if p.IsSetTaskFilters() { + l += thrift.Binary.FieldBeginLength() + l += p.TaskFilters.BLength() + } + return l +} + +func (p *ListTasksRequest) field101Length() int { + l := 0 + if p.IsSetLimit() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I32Length() + } + return l +} + +func (p *ListTasksRequest) field102Length() int { + l := 0 + if p.IsSetOffset() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I32Length() + } + return l +} + +func (p *ListTasksRequest) field103Length() int { + l := 0 + if p.IsSetOrderBy() { + l += thrift.Binary.FieldBeginLength() + l += p.OrderBy.BLength() + } + return l +} + +func (p *ListTasksRequest) field255Length() int { + l := 0 + if p.IsSetBase() { + l += thrift.Binary.FieldBeginLength() + l += p.Base.BLength() + } + return l +} + +func (p *ListTasksRequest) DeepCopy(s interface{}) error { + src, ok := s.(*ListTasksRequest) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.WorkspaceID = src.WorkspaceID + + var _taskFilters *filter.TaskFilterFields + if src.TaskFilters != nil { + _taskFilters = &filter.TaskFilterFields{} + if err := _taskFilters.DeepCopy(src.TaskFilters); err != nil { + return err + } + } + p.TaskFilters = _taskFilters + + if src.Limit != nil { + tmp := *src.Limit + p.Limit = &tmp + } + + if src.Offset != nil { + tmp := *src.Offset + p.Offset = &tmp + } + + var _orderBy *common.OrderBy + if src.OrderBy != nil { + _orderBy = &common.OrderBy{} + if err := _orderBy.DeepCopy(src.OrderBy); err != nil { + return err + } + } + p.OrderBy = _orderBy + + var _base *base.Base + if src.Base != nil { + _base = &base.Base{} + if err := _base.DeepCopy(src.Base); err != nil { + return err + } + } + p.Base = _base + + return nil +} + +func (p *ListTasksResponse) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != 
nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 100: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField100(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ListTasksResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *ListTasksResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*task.Task, 0, size) + values := make([]task.Task, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.Tasks = _field + return offset, nil +} + +func (p *ListTasksResponse) FastReadField100(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.Total = _field + return offset, nil +} + +func (p *ListTasksResponse) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBaseResp() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BaseResp = _field + return offset, nil +} + +func (p *ListTasksResponse) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *ListTasksResponse) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField100(buf[offset:], w) + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *ListTasksResponse) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field100Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *ListTasksResponse) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTasks() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 1) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.Tasks { + length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + } + return offset +} + +func (p 
*ListTasksResponse) fastWriteField100(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTotal() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 100) + offset += thrift.Binary.WriteI64(buf[offset:], *p.Total) + } + return offset +} + +func (p *ListTasksResponse) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBaseResp() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.BaseResp.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *ListTasksResponse) field1Length() int { + l := 0 + if p.IsSetTasks() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.Tasks { + _ = v + l += v.BLength() + } + } + return l +} + +func (p *ListTasksResponse) field100Length() int { + l := 0 + if p.IsSetTotal() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *ListTasksResponse) field255Length() int { + l := 0 + if p.IsSetBaseResp() { + l += thrift.Binary.FieldBeginLength() + l += p.BaseResp.BLength() + } + return l +} + +func (p *ListTasksResponse) DeepCopy(s interface{}) error { + src, ok := s.(*ListTasksResponse) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.Tasks != nil { + p.Tasks = make([]*task.Task, 0, len(src.Tasks)) + for _, elem := range src.Tasks { + var _elem *task.Task + if elem != nil { + _elem = &task.Task{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.Tasks = append(p.Tasks, _elem) + } + } + + if src.Total != nil { + tmp := *src.Total + p.Total = &tmp + } + + var _baseResp *base.BaseResp + if src.BaseResp != nil { + _baseResp = &base.BaseResp{} + if err := _baseResp.DeepCopy(src.BaseResp); err != nil { + return err + } + } + p.BaseResp = _baseResp + + return nil +} + +func (p *GetTaskRequest) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetTaskID bool = false + var issetWorkspaceID bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTaskID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetTaskID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetWorkspaceID { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_GetTaskRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_GetTaskRequest[fieldId])) +} + +func (p *GetTaskRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.TaskID = _field + return offset, nil +} + +func (p *GetTaskRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.WorkspaceID = _field + return offset, nil +} + +func (p *GetTaskRequest) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBase() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Base = _field + return offset, nil +} + +func (p *GetTaskRequest) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *GetTaskRequest) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *GetTaskRequest) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *GetTaskRequest) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.TaskID) + return offset +} + +func (p *GetTaskRequest) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 2) + offset += thrift.Binary.WriteI64(buf[offset:], p.WorkspaceID) + return offset +} + +func (p *GetTaskRequest) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBase() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.Base.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *GetTaskRequest) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *GetTaskRequest) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *GetTaskRequest) field255Length() int { + l := 0 + if p.IsSetBase() { + l += thrift.Binary.FieldBeginLength() + l += p.Base.BLength() + } + return l +} + +func (p *GetTaskRequest) DeepCopy(s interface{}) error { + src, ok := s.(*GetTaskRequest) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.TaskID = src.TaskID + + p.WorkspaceID = src.WorkspaceID + + var _base *base.Base + if src.Base != nil { + _base = &base.Base{} + if err := 
_base.DeepCopy(src.Base); err != nil { + return err + } + } + p.Base = _base + + return nil +} + +func (p *GetTaskResponse) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_GetTaskResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *GetTaskResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := task.NewTask() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Task = _field + return offset, nil +} + +func (p *GetTaskResponse) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBaseResp() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BaseResp = _field + return offset, nil +} + +func (p *GetTaskResponse) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *GetTaskResponse) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *GetTaskResponse) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *GetTaskResponse) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetTask() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Task.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *GetTaskResponse) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBaseResp() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.BaseResp.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *GetTaskResponse) field1Length() int { + l := 0 + if p.IsSetTask() { + l += thrift.Binary.FieldBeginLength() + l += p.Task.BLength() + } + return l +} + +func (p *GetTaskResponse) field255Length() int { + l := 0 + if p.IsSetBaseResp() { + l += thrift.Binary.FieldBeginLength() + l += 
p.BaseResp.BLength() + } + return l +} + +func (p *GetTaskResponse) DeepCopy(s interface{}) error { + src, ok := s.(*GetTaskResponse) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _task *task.Task + if src.Task != nil { + _task = &task.Task{} + if err := _task.DeepCopy(src.Task); err != nil { + return err + } + } + p.Task = _task + + var _baseResp *base.BaseResp + if src.BaseResp != nil { + _baseResp = &base.BaseResp{} + if err := _baseResp.DeepCopy(src.BaseResp); err != nil { + return err + } + } + p.BaseResp = _baseResp + + return nil +} + +func (p *CheckTaskNameRequest) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetWorkspaceID bool = false + var issetName bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetName = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetWorkspaceID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetName { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_CheckTaskNameRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_CheckTaskNameRequest[fieldId])) +} + +func (p *CheckTaskNameRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.WorkspaceID = _field + return offset, nil +} + +func (p *CheckTaskNameRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.Name = _field + return offset, nil +} + +func (p *CheckTaskNameRequest) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBase() + if l, err := _field.FastRead(buf[offset:]); err != 
nil { + return offset, err + } else { + offset += l + } + p.Base = _field + return offset, nil +} + +func (p *CheckTaskNameRequest) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *CheckTaskNameRequest) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *CheckTaskNameRequest) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *CheckTaskNameRequest) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.WorkspaceID) + return offset +} + +func (p *CheckTaskNameRequest) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.Name) + return offset +} + +func (p *CheckTaskNameRequest) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBase() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.Base.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *CheckTaskNameRequest) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *CheckTaskNameRequest) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.Name) + return l +} + +func (p *CheckTaskNameRequest) field255Length() int { + l := 0 + if p.IsSetBase() { + l += thrift.Binary.FieldBeginLength() + l += p.Base.BLength() + } + return l +} + +func (p *CheckTaskNameRequest) DeepCopy(s interface{}) error { + src, ok := s.(*CheckTaskNameRequest) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.WorkspaceID = src.WorkspaceID + + if src.Name != "" { + p.Name = kutils.StringDeepCopy(src.Name) + } + + var _base *base.Base + if src.Base != nil { + _base = &base.Base{} + if err := _base.DeepCopy(src.Base); err != nil { + return err + } + } + p.Base = _base + + return nil +} + +func (p *CheckTaskNameResponse) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.BOOL { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + 
goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_CheckTaskNameResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *CheckTaskNameResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field *bool + if v, l, err := thrift.Binary.ReadBool(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.Pass = _field + return offset, nil +} + +func (p *CheckTaskNameResponse) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field *string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.Message = _field + return offset, nil +} + +func (p *CheckTaskNameResponse) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBaseResp() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BaseResp = _field + return offset, nil +} + +func (p *CheckTaskNameResponse) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *CheckTaskNameResponse) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *CheckTaskNameResponse) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *CheckTaskNameResponse) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetPass() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.BOOL, 1) + offset += thrift.Binary.WriteBool(buf[offset:], *p.Pass) + } + return offset +} + +func (p *CheckTaskNameResponse) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetMessage() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.Message) + } + return offset +} + +func (p *CheckTaskNameResponse) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.BaseResp.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *CheckTaskNameResponse) field1Length() int { + l := 0 + if p.IsSetPass() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.BoolLength() + } + return l +} + +func (p *CheckTaskNameResponse) field2Length() int { + l := 0 + if p.IsSetMessage() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.Message) + } + return l +} + +func (p *CheckTaskNameResponse) field255Length() int { + l := 0 + l += 
thrift.Binary.FieldBeginLength() + l += p.BaseResp.BLength() + return l +} + +func (p *CheckTaskNameResponse) DeepCopy(s interface{}) error { + src, ok := s.(*CheckTaskNameResponse) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.Pass != nil { + tmp := *src.Pass + p.Pass = &tmp + } + + if src.Message != nil { + var tmp string + if *src.Message != "" { + tmp = kutils.StringDeepCopy(*src.Message) + } + p.Message = &tmp + } + + var _baseResp *base.BaseResp + if src.BaseResp != nil { + _baseResp = &base.BaseResp{} + if err := _baseResp.DeepCopy(src.BaseResp); err != nil { + return err + } + } + p.BaseResp = _baseResp + + return nil +} + +func (p *TaskServiceCheckTaskNameArgs) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceCheckTaskNameArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskServiceCheckTaskNameArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := NewCheckTaskNameRequest() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Req = _field + return offset, nil +} + +func (p *TaskServiceCheckTaskNameArgs) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskServiceCheckTaskNameArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskServiceCheckTaskNameArgs) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskServiceCheckTaskNameArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Req.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *TaskServiceCheckTaskNameArgs) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Req.BLength() + return l +} + +func (p *TaskServiceCheckTaskNameArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TaskServiceCheckTaskNameArgs) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _req *CheckTaskNameRequest + if src.Req != nil { + _req = &CheckTaskNameRequest{} + if err := _req.DeepCopy(src.Req); err != nil { + return err + } + } + p.Req = _req + + return nil +} + +func (p 
*TaskServiceCheckTaskNameResult) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceCheckTaskNameResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskServiceCheckTaskNameResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + _field := NewCheckTaskNameResponse() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = _field + return offset, nil +} + +func (p *TaskServiceCheckTaskNameResult) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskServiceCheckTaskNameResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField0(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskServiceCheckTaskNameResult) BLength() int { + l := 0 + if p != nil { + l += p.field0Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskServiceCheckTaskNameResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskServiceCheckTaskNameResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += thrift.Binary.FieldBeginLength() + l += p.Success.BLength() + } + return l +} + +func (p *TaskServiceCheckTaskNameResult) DeepCopy(s interface{}) error { + src, ok := s.(*TaskServiceCheckTaskNameResult) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _success *CheckTaskNameResponse + if src.Success != nil { + _success = &CheckTaskNameResponse{} + if err := _success.DeepCopy(src.Success); err != nil { + return err + } + } + p.Success = _success + + return nil +} + +func (p *TaskServiceCreateTaskArgs) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], 
fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceCreateTaskArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskServiceCreateTaskArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := NewCreateTaskRequest() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Req = _field + return offset, nil +} + +func (p *TaskServiceCreateTaskArgs) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskServiceCreateTaskArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskServiceCreateTaskArgs) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskServiceCreateTaskArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Req.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *TaskServiceCreateTaskArgs) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Req.BLength() + return l +} + +func (p *TaskServiceCreateTaskArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TaskServiceCreateTaskArgs) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _req *CreateTaskRequest + if src.Req != nil { + _req = &CreateTaskRequest{} + if err := _req.DeepCopy(src.Req); err != nil { + return err + } + } + p.Req = _req + + return nil +} + +func (p *TaskServiceCreateTaskResult) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceCreateTaskResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskServiceCreateTaskResult) FastReadField0(buf []byte) (int, 
error) { + offset := 0 + _field := NewCreateTaskResponse() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = _field + return offset, nil +} + +func (p *TaskServiceCreateTaskResult) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskServiceCreateTaskResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField0(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskServiceCreateTaskResult) BLength() int { + l := 0 + if p != nil { + l += p.field0Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskServiceCreateTaskResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskServiceCreateTaskResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += thrift.Binary.FieldBeginLength() + l += p.Success.BLength() + } + return l +} + +func (p *TaskServiceCreateTaskResult) DeepCopy(s interface{}) error { + src, ok := s.(*TaskServiceCreateTaskResult) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _success *CreateTaskResponse + if src.Success != nil { + _success = &CreateTaskResponse{} + if err := _success.DeepCopy(src.Success); err != nil { + return err + } + } + p.Success = _success + + return nil +} + +func (p *TaskServiceUpdateTaskArgs) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceUpdateTaskArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskServiceUpdateTaskArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := NewUpdateTaskRequest() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Req = _field + return offset, nil +} + +func (p *TaskServiceUpdateTaskArgs) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskServiceUpdateTaskArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskServiceUpdateTaskArgs) BLength() int { + l := 
0 + if p != nil { + l += p.field1Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskServiceUpdateTaskArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Req.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *TaskServiceUpdateTaskArgs) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Req.BLength() + return l +} + +func (p *TaskServiceUpdateTaskArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TaskServiceUpdateTaskArgs) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _req *UpdateTaskRequest + if src.Req != nil { + _req = &UpdateTaskRequest{} + if err := _req.DeepCopy(src.Req); err != nil { + return err + } + } + p.Req = _req + + return nil +} + +func (p *TaskServiceUpdateTaskResult) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceUpdateTaskResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskServiceUpdateTaskResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + _field := NewUpdateTaskResponse() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = _field + return offset, nil +} + +func (p *TaskServiceUpdateTaskResult) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskServiceUpdateTaskResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField0(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskServiceUpdateTaskResult) BLength() int { + l := 0 + if p != nil { + l += p.field0Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskServiceUpdateTaskResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskServiceUpdateTaskResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += thrift.Binary.FieldBeginLength() + l += p.Success.BLength() + } + return l +} + +func (p *TaskServiceUpdateTaskResult) DeepCopy(s interface{}) error { + src, ok := s.(*TaskServiceUpdateTaskResult) + if !ok { + return fmt.Errorf("%T's 
type not matched %T", s, p) + } + + var _success *UpdateTaskResponse + if src.Success != nil { + _success = &UpdateTaskResponse{} + if err := _success.DeepCopy(src.Success); err != nil { + return err + } + } + p.Success = _success + + return nil +} + +func (p *TaskServiceListTasksArgs) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceListTasksArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskServiceListTasksArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := NewListTasksRequest() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Req = _field + return offset, nil +} + +func (p *TaskServiceListTasksArgs) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskServiceListTasksArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskServiceListTasksArgs) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskServiceListTasksArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Req.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *TaskServiceListTasksArgs) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Req.BLength() + return l +} + +func (p *TaskServiceListTasksArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TaskServiceListTasksArgs) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _req *ListTasksRequest + if src.Req != nil { + _req = &ListTasksRequest{} + if err := _req.DeepCopy(src.Req); err != nil { + return err + } + } + p.Req = _req + + return nil +} + +func (p *TaskServiceListTasksResult) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil 
{ + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceListTasksResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskServiceListTasksResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + _field := NewListTasksResponse() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = _field + return offset, nil +} + +func (p *TaskServiceListTasksResult) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskServiceListTasksResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField0(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskServiceListTasksResult) BLength() int { + l := 0 + if p != nil { + l += p.field0Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskServiceListTasksResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskServiceListTasksResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += thrift.Binary.FieldBeginLength() + l += p.Success.BLength() + } + return l +} + +func (p *TaskServiceListTasksResult) DeepCopy(s interface{}) error { + src, ok := s.(*TaskServiceListTasksResult) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _success *ListTasksResponse + if src.Success != nil { + _success = &ListTasksResponse{} + if err := _success.DeepCopy(src.Success); err != nil { + return err + } + } + p.Success = _success + + return nil +} + +func (p *TaskServiceGetTaskArgs) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceGetTaskArgs[fieldId]), err) +SkipFieldError: + return offset, 
thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskServiceGetTaskArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := NewGetTaskRequest() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Req = _field + return offset, nil +} + +func (p *TaskServiceGetTaskArgs) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskServiceGetTaskArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TaskServiceGetTaskArgs) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskServiceGetTaskArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Req.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *TaskServiceGetTaskArgs) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Req.BLength() + return l +} + +func (p *TaskServiceGetTaskArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TaskServiceGetTaskArgs) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _req *GetTaskRequest + if src.Req != nil { + _req = &GetTaskRequest{} + if err := _req.DeepCopy(src.Req); err != nil { + return err + } + } + p.Req = _req + + return nil +} + +func (p *TaskServiceGetTaskResult) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TaskServiceGetTaskResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TaskServiceGetTaskResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + _field := NewGetTaskResponse() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = _field + return offset, nil +} + +func (p *TaskServiceGetTaskResult) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TaskServiceGetTaskResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField0(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p 
*TaskServiceGetTaskResult) BLength() int { + l := 0 + if p != nil { + l += p.field0Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TaskServiceGetTaskResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TaskServiceGetTaskResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += thrift.Binary.FieldBeginLength() + l += p.Success.BLength() + } + return l +} + +func (p *TaskServiceGetTaskResult) DeepCopy(s interface{}) error { + src, ok := s.(*TaskServiceGetTaskResult) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _success *GetTaskResponse + if src.Success != nil { + _success = &GetTaskResponse{} + if err := _success.DeepCopy(src.Success); err != nil { + return err + } + } + p.Success = _success + + return nil +} + +func (p *TaskServiceCheckTaskNameArgs) GetFirstArgument() interface{} { + return p.Req +} + +func (p *TaskServiceCheckTaskNameResult) GetResult() interface{} { + return p.Success +} + +func (p *TaskServiceCreateTaskArgs) GetFirstArgument() interface{} { + return p.Req +} + +func (p *TaskServiceCreateTaskResult) GetResult() interface{} { + return p.Success +} + +func (p *TaskServiceUpdateTaskArgs) GetFirstArgument() interface{} { + return p.Req +} + +func (p *TaskServiceUpdateTaskResult) GetResult() interface{} { + return p.Success +} + +func (p *TaskServiceListTasksArgs) GetFirstArgument() interface{} { + return p.Req +} + +func (p *TaskServiceListTasksResult) GetResult() interface{} { + return p.Success +} + +func (p *TaskServiceGetTaskArgs) GetFirstArgument() interface{} { + return p.Req +} + +func (p *TaskServiceGetTaskResult) GetResult() interface{} { + return p.Success +} diff --git a/backend/kitex_gen/coze/loop/observability/task/taskservice/client.go b/backend/kitex_gen/coze/loop/observability/task/taskservice/client.go new file mode 100644 index 000000000..91c5be4b8 --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/task/taskservice/client.go @@ -0,0 +1,73 @@ +// Code generated by Kitex v0.13.1. DO NOT EDIT. + +package taskservice + +import ( + "context" + client "github.com/cloudwego/kitex/client" + callopt "github.com/cloudwego/kitex/client/callopt" + task "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" +) + +// Client is designed to provide IDL-compatible methods with call-option parameter for kitex framework. +type Client interface { + CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest, callOptions ...callopt.Option) (r *task.CheckTaskNameResponse, err error) + CreateTask(ctx context.Context, req *task.CreateTaskRequest, callOptions ...callopt.Option) (r *task.CreateTaskResponse, err error) + UpdateTask(ctx context.Context, req *task.UpdateTaskRequest, callOptions ...callopt.Option) (r *task.UpdateTaskResponse, err error) + ListTasks(ctx context.Context, req *task.ListTasksRequest, callOptions ...callopt.Option) (r *task.ListTasksResponse, err error) + GetTask(ctx context.Context, req *task.GetTaskRequest, callOptions ...callopt.Option) (r *task.GetTaskResponse, err error) +} + +// NewClient creates a client for the service defined in IDL. 
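(For orientation, a minimal caller-side sketch of the generated TaskService client follows. The destination service name, host:port, and timeout are illustrative assumptions and not values taken from this change; only the taskservice/task packages and their signatures come from the generated code in this diff.)

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/cloudwego/kitex/client"
	"github.com/cloudwego/kitex/client/callopt"

	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task"
	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task/taskservice"
)

func main() {
	// Build the generated client; the service name and address here are hypothetical.
	cli, err := taskservice.NewClient(
		"observability.task",
		client.WithHostPorts("127.0.0.1:8888"),
	)
	if err != nil {
		log.Fatal(err)
	}

	// WorkspaceID is the only required field of ListTasksRequest; Limit and Offset are optional pointers.
	limit := int32(20)
	req := &task.ListTasksRequest{
		WorkspaceID: 123,
		Limit:       &limit,
	}

	resp, err := cli.ListTasks(context.Background(), req, callopt.WithRPCTimeout(3*time.Second))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("tasks returned:", len(resp.Tasks))
}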
+func NewClient(destService string, opts ...client.Option) (Client, error) { + var options []client.Option + options = append(options, client.WithDestService(destService)) + + options = append(options, opts...) + + kc, err := client.NewClient(serviceInfo(), options...) + if err != nil { + return nil, err + } + return &kTaskServiceClient{ + kClient: newServiceClient(kc), + }, nil +} + +// MustNewClient creates a client for the service defined in IDL. It panics if any error occurs. +func MustNewClient(destService string, opts ...client.Option) Client { + kc, err := NewClient(destService, opts...) + if err != nil { + panic(err) + } + return kc +} + +type kTaskServiceClient struct { + *kClient +} + +func (p *kTaskServiceClient) CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest, callOptions ...callopt.Option) (r *task.CheckTaskNameResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.CheckTaskName(ctx, req) +} + +func (p *kTaskServiceClient) CreateTask(ctx context.Context, req *task.CreateTaskRequest, callOptions ...callopt.Option) (r *task.CreateTaskResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.CreateTask(ctx, req) +} + +func (p *kTaskServiceClient) UpdateTask(ctx context.Context, req *task.UpdateTaskRequest, callOptions ...callopt.Option) (r *task.UpdateTaskResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.UpdateTask(ctx, req) +} + +func (p *kTaskServiceClient) ListTasks(ctx context.Context, req *task.ListTasksRequest, callOptions ...callopt.Option) (r *task.ListTasksResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ListTasks(ctx, req) +} + +func (p *kTaskServiceClient) GetTask(ctx context.Context, req *task.GetTaskRequest, callOptions ...callopt.Option) (r *task.GetTaskResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.GetTask(ctx, req) +} diff --git a/backend/kitex_gen/coze/loop/observability/task/taskservice/server.go b/backend/kitex_gen/coze/loop/observability/task/taskservice/server.go new file mode 100644 index 000000000..adae7fb69 --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/task/taskservice/server.go @@ -0,0 +1,25 @@ +// Code generated by Kitex v0.13.1. DO NOT EDIT. +package taskservice + +import ( + server "github.com/cloudwego/kitex/server" + task "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" +) + +// NewServer creates a server.Server with the given handler and options. +func NewServer(handler task.TaskService, opts ...server.Option) server.Server { + var options []server.Option + + options = append(options, opts...) + options = append(options, server.WithCompatibleMiddlewareForUnary()) + + svr := server.NewServer(options...) + if err := svr.RegisterService(serviceInfo(), handler); err != nil { + panic(err) + } + return svr +} + +func RegisterService(svr server.Server, handler task.TaskService, opts ...server.RegisterOption) error { + return svr.RegisterService(serviceInfo(), handler, opts...) +} diff --git a/backend/kitex_gen/coze/loop/observability/task/taskservice/taskservice.go b/backend/kitex_gen/coze/loop/observability/task/taskservice/taskservice.go new file mode 100644 index 000000000..a9bad629e --- /dev/null +++ b/backend/kitex_gen/coze/loop/observability/task/taskservice/taskservice.go @@ -0,0 +1,239 @@ +// Code generated by Kitex v0.13.1. DO NOT EDIT. 
+ +package taskservice + +import ( + "context" + "errors" + client "github.com/cloudwego/kitex/client" + kitex "github.com/cloudwego/kitex/pkg/serviceinfo" + task "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" +) + +var errInvalidMessageType = errors.New("invalid message type for service method handler") + +var serviceMethods = map[string]kitex.MethodInfo{ + "CheckTaskName": kitex.NewMethodInfo( + checkTaskNameHandler, + newTaskServiceCheckTaskNameArgs, + newTaskServiceCheckTaskNameResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "CreateTask": kitex.NewMethodInfo( + createTaskHandler, + newTaskServiceCreateTaskArgs, + newTaskServiceCreateTaskResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "UpdateTask": kitex.NewMethodInfo( + updateTaskHandler, + newTaskServiceUpdateTaskArgs, + newTaskServiceUpdateTaskResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "ListTasks": kitex.NewMethodInfo( + listTasksHandler, + newTaskServiceListTasksArgs, + newTaskServiceListTasksResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "GetTask": kitex.NewMethodInfo( + getTaskHandler, + newTaskServiceGetTaskArgs, + newTaskServiceGetTaskResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), +} + +var ( + taskServiceServiceInfo = NewServiceInfo() +) + +// for server +func serviceInfo() *kitex.ServiceInfo { + return taskServiceServiceInfo +} + +// NewServiceInfo creates a new ServiceInfo +func NewServiceInfo() *kitex.ServiceInfo { + return newServiceInfo() +} + +func newServiceInfo() *kitex.ServiceInfo { + serviceName := "TaskService" + handlerType := (*task.TaskService)(nil) + extra := map[string]interface{}{ + "PackageName": "task", + } + svcInfo := &kitex.ServiceInfo{ + ServiceName: serviceName, + HandlerType: handlerType, + Methods: serviceMethods, + PayloadCodec: kitex.Thrift, + KiteXGenVersion: "v0.13.1", + Extra: extra, + } + return svcInfo +} + +func checkTaskNameHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceCheckTaskNameArgs) + realResult := result.(*task.TaskServiceCheckTaskNameResult) + success, err := handler.(task.TaskService).CheckTaskName(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceCheckTaskNameArgs() interface{} { + return task.NewTaskServiceCheckTaskNameArgs() +} + +func newTaskServiceCheckTaskNameResult() interface{} { + return task.NewTaskServiceCheckTaskNameResult() +} + +func createTaskHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceCreateTaskArgs) + realResult := result.(*task.TaskServiceCreateTaskResult) + success, err := handler.(task.TaskService).CreateTask(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceCreateTaskArgs() interface{} { + return task.NewTaskServiceCreateTaskArgs() +} + +func newTaskServiceCreateTaskResult() interface{} { + return task.NewTaskServiceCreateTaskResult() +} + +func updateTaskHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceUpdateTaskArgs) + realResult := result.(*task.TaskServiceUpdateTaskResult) + success, err := handler.(task.TaskService).UpdateTask(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func 
newTaskServiceUpdateTaskArgs() interface{} { + return task.NewTaskServiceUpdateTaskArgs() +} + +func newTaskServiceUpdateTaskResult() interface{} { + return task.NewTaskServiceUpdateTaskResult() +} + +func listTasksHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceListTasksArgs) + realResult := result.(*task.TaskServiceListTasksResult) + success, err := handler.(task.TaskService).ListTasks(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceListTasksArgs() interface{} { + return task.NewTaskServiceListTasksArgs() +} + +func newTaskServiceListTasksResult() interface{} { + return task.NewTaskServiceListTasksResult() +} + +func getTaskHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*task.TaskServiceGetTaskArgs) + realResult := result.(*task.TaskServiceGetTaskResult) + success, err := handler.(task.TaskService).GetTask(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTaskServiceGetTaskArgs() interface{} { + return task.NewTaskServiceGetTaskArgs() +} + +func newTaskServiceGetTaskResult() interface{} { + return task.NewTaskServiceGetTaskResult() +} + +type kClient struct { + c client.Client + sc client.Streaming +} + +func newServiceClient(c client.Client) *kClient { + return &kClient{ + c: c, + sc: c.(client.Streaming), + } +} + +func (p *kClient) CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest) (r *task.CheckTaskNameResponse, err error) { + var _args task.TaskServiceCheckTaskNameArgs + _args.Req = req + var _result task.TaskServiceCheckTaskNameResult + if err = p.c.Call(ctx, "CheckTaskName", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) CreateTask(ctx context.Context, req *task.CreateTaskRequest) (r *task.CreateTaskResponse, err error) { + var _args task.TaskServiceCreateTaskArgs + _args.Req = req + var _result task.TaskServiceCreateTaskResult + if err = p.c.Call(ctx, "CreateTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) UpdateTask(ctx context.Context, req *task.UpdateTaskRequest) (r *task.UpdateTaskResponse, err error) { + var _args task.TaskServiceUpdateTaskArgs + _args.Req = req + var _result task.TaskServiceUpdateTaskResult + if err = p.c.Call(ctx, "UpdateTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ListTasks(ctx context.Context, req *task.ListTasksRequest) (r *task.ListTasksResponse, err error) { + var _args task.TaskServiceListTasksArgs + _args.Req = req + var _result task.TaskServiceListTasksResult + if err = p.c.Call(ctx, "ListTasks", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) GetTask(ctx context.Context, req *task.GetTaskRequest) (r *task.GetTaskResponse, err error) { + var _args task.TaskServiceGetTaskArgs + _args.Req = req + var _result task.TaskServiceGetTaskResult + if err = p.c.Call(ctx, "GetTask", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} diff --git a/backend/kitex_gen/coze/loop/observability/trace/coze.loop.observability.trace.go b/backend/kitex_gen/coze/loop/observability/trace/coze.loop.observability.trace.go index 3d9a22912..99322c8a5 100644 --- 
a/backend/kitex_gen/coze/loop/observability/trace/coze.loop.observability.trace.go +++ b/backend/kitex_gen/coze/loop/observability/trace/coze.loop.observability.trace.go @@ -14564,1010 +14564,4922 @@ func (p *PreviewExportTracesToDatasetResponse) Field257DeepEqual(src *string) bo return true } -type TraceService interface { - ListSpans(ctx context.Context, req *ListSpansRequest) (r *ListSpansResponse, err error) - - GetTrace(ctx context.Context, req *GetTraceRequest) (r *GetTraceResponse, err error) - - BatchGetTracesAdvanceInfo(ctx context.Context, req *BatchGetTracesAdvanceInfoRequest) (r *BatchGetTracesAdvanceInfoResponse, err error) - - IngestTracesInner(ctx context.Context, req *IngestTracesRequest) (r *IngestTracesResponse, err error) - - GetTracesMetaInfo(ctx context.Context, req *GetTracesMetaInfoRequest) (r *GetTracesMetaInfoResponse, err error) - - CreateView(ctx context.Context, req *CreateViewRequest) (r *CreateViewResponse, err error) - - UpdateView(ctx context.Context, req *UpdateViewRequest) (r *UpdateViewResponse, err error) - - DeleteView(ctx context.Context, req *DeleteViewRequest) (r *DeleteViewResponse, err error) - - ListViews(ctx context.Context, req *ListViewsRequest) (r *ListViewsResponse, err error) - - CreateManualAnnotation(ctx context.Context, req *CreateManualAnnotationRequest) (r *CreateManualAnnotationResponse, err error) - - UpdateManualAnnotation(ctx context.Context, req *UpdateManualAnnotationRequest) (r *UpdateManualAnnotationResponse, err error) - - DeleteManualAnnotation(ctx context.Context, req *DeleteManualAnnotationRequest) (r *DeleteManualAnnotationResponse, err error) - - ListAnnotations(ctx context.Context, req *ListAnnotationsRequest) (r *ListAnnotationsResponse, err error) - - ExportTracesToDataset(ctx context.Context, req *ExportTracesToDatasetRequest) (r *ExportTracesToDatasetResponse, err error) - - PreviewExportTracesToDataset(ctx context.Context, req *PreviewExportTracesToDatasetRequest) (r *PreviewExportTracesToDatasetResponse, err error) +type ChangeEvaluatorScoreRequest struct { + WorkspaceID int64 `thrift:"workspace_id,1,required" frugal:"1,required,i64" json:"workspace_id" form:"workspace_id,required" ` + AnnotationID string `thrift:"annotation_id,2,required" frugal:"2,required,string" form:"annotation_id,required" json:"annotation_id,required"` + SpanID string `thrift:"span_id,3,required" frugal:"3,required,string" form:"span_id,required" json:"span_id,required"` + StartTime int64 `thrift:"start_time,4,required" frugal:"4,required,i64" json:"start_time" form:"start_time,required" ` + Correction *annotation.Correction `thrift:"correction,5,required" frugal:"5,required,annotation.Correction" form:"correction,required" json:"correction,required"` + PlatformType *common.PlatformType `thrift:"platform_type,6,optional" frugal:"6,optional,string" form:"platform_type" json:"platform_type,omitempty"` + Base *base.Base `thrift:"Base,255,optional" frugal:"255,optional,base.Base" form:"Base" json:"Base,omitempty" query:"Base"` } -type TraceServiceClient struct { - c thrift.TClient +func NewChangeEvaluatorScoreRequest() *ChangeEvaluatorScoreRequest { + return &ChangeEvaluatorScoreRequest{} } -func NewTraceServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *TraceServiceClient { - return &TraceServiceClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } +func (p *ChangeEvaluatorScoreRequest) InitDefault() { } -func NewTraceServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot 
thrift.TProtocol) *TraceServiceClient { - return &TraceServiceClient{ - c: thrift.NewTStandardClient(iprot, oprot), +func (p *ChangeEvaluatorScoreRequest) GetWorkspaceID() (v int64) { + if p != nil { + return p.WorkspaceID } + return } -func NewTraceServiceClient(c thrift.TClient) *TraceServiceClient { - return &TraceServiceClient{ - c: c, +func (p *ChangeEvaluatorScoreRequest) GetAnnotationID() (v string) { + if p != nil { + return p.AnnotationID } + return } -func (p *TraceServiceClient) Client_() thrift.TClient { - return p.c -} - -func (p *TraceServiceClient) ListSpans(ctx context.Context, req *ListSpansRequest) (r *ListSpansResponse, err error) { - var _args TraceServiceListSpansArgs - _args.Req = req - var _result TraceServiceListSpansResult - if err = p.Client_().Call(ctx, "ListSpans", &_args, &_result); err != nil { - return +func (p *ChangeEvaluatorScoreRequest) GetSpanID() (v string) { + if p != nil { + return p.SpanID } - return _result.GetSuccess(), nil + return } -func (p *TraceServiceClient) GetTrace(ctx context.Context, req *GetTraceRequest) (r *GetTraceResponse, err error) { - var _args TraceServiceGetTraceArgs - _args.Req = req - var _result TraceServiceGetTraceResult - if err = p.Client_().Call(ctx, "GetTrace", &_args, &_result); err != nil { - return + +func (p *ChangeEvaluatorScoreRequest) GetStartTime() (v int64) { + if p != nil { + return p.StartTime } - return _result.GetSuccess(), nil + return } -func (p *TraceServiceClient) BatchGetTracesAdvanceInfo(ctx context.Context, req *BatchGetTracesAdvanceInfoRequest) (r *BatchGetTracesAdvanceInfoResponse, err error) { - var _args TraceServiceBatchGetTracesAdvanceInfoArgs - _args.Req = req - var _result TraceServiceBatchGetTracesAdvanceInfoResult - if err = p.Client_().Call(ctx, "BatchGetTracesAdvanceInfo", &_args, &_result); err != nil { + +var ChangeEvaluatorScoreRequest_Correction_DEFAULT *annotation.Correction + +func (p *ChangeEvaluatorScoreRequest) GetCorrection() (v *annotation.Correction) { + if p == nil { return } - return _result.GetSuccess(), nil -} -func (p *TraceServiceClient) IngestTracesInner(ctx context.Context, req *IngestTracesRequest) (r *IngestTracesResponse, err error) { - var _args TraceServiceIngestTracesInnerArgs - _args.Req = req - var _result TraceServiceIngestTracesInnerResult - if err = p.Client_().Call(ctx, "IngestTracesInner", &_args, &_result); err != nil { - return + if !p.IsSetCorrection() { + return ChangeEvaluatorScoreRequest_Correction_DEFAULT } - return _result.GetSuccess(), nil + return p.Correction } -func (p *TraceServiceClient) GetTracesMetaInfo(ctx context.Context, req *GetTracesMetaInfoRequest) (r *GetTracesMetaInfoResponse, err error) { - var _args TraceServiceGetTracesMetaInfoArgs - _args.Req = req - var _result TraceServiceGetTracesMetaInfoResult - if err = p.Client_().Call(ctx, "GetTracesMetaInfo", &_args, &_result); err != nil { + +var ChangeEvaluatorScoreRequest_PlatformType_DEFAULT common.PlatformType + +func (p *ChangeEvaluatorScoreRequest) GetPlatformType() (v common.PlatformType) { + if p == nil { return } - return _result.GetSuccess(), nil -} -func (p *TraceServiceClient) CreateView(ctx context.Context, req *CreateViewRequest) (r *CreateViewResponse, err error) { - var _args TraceServiceCreateViewArgs - _args.Req = req - var _result TraceServiceCreateViewResult - if err = p.Client_().Call(ctx, "CreateView", &_args, &_result); err != nil { - return + if !p.IsSetPlatformType() { + return ChangeEvaluatorScoreRequest_PlatformType_DEFAULT } - return _result.GetSuccess(), nil + 
return *p.PlatformType } -func (p *TraceServiceClient) UpdateView(ctx context.Context, req *UpdateViewRequest) (r *UpdateViewResponse, err error) { - var _args TraceServiceUpdateViewArgs - _args.Req = req - var _result TraceServiceUpdateViewResult - if err = p.Client_().Call(ctx, "UpdateView", &_args, &_result); err != nil { + +var ChangeEvaluatorScoreRequest_Base_DEFAULT *base.Base + +func (p *ChangeEvaluatorScoreRequest) GetBase() (v *base.Base) { + if p == nil { return } - return _result.GetSuccess(), nil -} -func (p *TraceServiceClient) DeleteView(ctx context.Context, req *DeleteViewRequest) (r *DeleteViewResponse, err error) { - var _args TraceServiceDeleteViewArgs - _args.Req = req - var _result TraceServiceDeleteViewResult - if err = p.Client_().Call(ctx, "DeleteView", &_args, &_result); err != nil { - return + if !p.IsSetBase() { + return ChangeEvaluatorScoreRequest_Base_DEFAULT } - return _result.GetSuccess(), nil + return p.Base } -func (p *TraceServiceClient) ListViews(ctx context.Context, req *ListViewsRequest) (r *ListViewsResponse, err error) { - var _args TraceServiceListViewsArgs - _args.Req = req - var _result TraceServiceListViewsResult - if err = p.Client_().Call(ctx, "ListViews", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil +func (p *ChangeEvaluatorScoreRequest) SetWorkspaceID(val int64) { + p.WorkspaceID = val } -func (p *TraceServiceClient) CreateManualAnnotation(ctx context.Context, req *CreateManualAnnotationRequest) (r *CreateManualAnnotationResponse, err error) { - var _args TraceServiceCreateManualAnnotationArgs - _args.Req = req - var _result TraceServiceCreateManualAnnotationResult - if err = p.Client_().Call(ctx, "CreateManualAnnotation", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil +func (p *ChangeEvaluatorScoreRequest) SetAnnotationID(val string) { + p.AnnotationID = val } -func (p *TraceServiceClient) UpdateManualAnnotation(ctx context.Context, req *UpdateManualAnnotationRequest) (r *UpdateManualAnnotationResponse, err error) { - var _args TraceServiceUpdateManualAnnotationArgs - _args.Req = req - var _result TraceServiceUpdateManualAnnotationResult - if err = p.Client_().Call(ctx, "UpdateManualAnnotation", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil +func (p *ChangeEvaluatorScoreRequest) SetSpanID(val string) { + p.SpanID = val } -func (p *TraceServiceClient) DeleteManualAnnotation(ctx context.Context, req *DeleteManualAnnotationRequest) (r *DeleteManualAnnotationResponse, err error) { - var _args TraceServiceDeleteManualAnnotationArgs - _args.Req = req - var _result TraceServiceDeleteManualAnnotationResult - if err = p.Client_().Call(ctx, "DeleteManualAnnotation", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil +func (p *ChangeEvaluatorScoreRequest) SetStartTime(val int64) { + p.StartTime = val } -func (p *TraceServiceClient) ListAnnotations(ctx context.Context, req *ListAnnotationsRequest) (r *ListAnnotationsResponse, err error) { - var _args TraceServiceListAnnotationsArgs - _args.Req = req - var _result TraceServiceListAnnotationsResult - if err = p.Client_().Call(ctx, "ListAnnotations", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil +func (p *ChangeEvaluatorScoreRequest) SetCorrection(val *annotation.Correction) { + p.Correction = val } -func (p *TraceServiceClient) ExportTracesToDataset(ctx context.Context, req *ExportTracesToDatasetRequest) (r *ExportTracesToDatasetResponse, err 
error) { - var _args TraceServiceExportTracesToDatasetArgs - _args.Req = req - var _result TraceServiceExportTracesToDatasetResult - if err = p.Client_().Call(ctx, "ExportTracesToDataset", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil +func (p *ChangeEvaluatorScoreRequest) SetPlatformType(val *common.PlatformType) { + p.PlatformType = val } -func (p *TraceServiceClient) PreviewExportTracesToDataset(ctx context.Context, req *PreviewExportTracesToDatasetRequest) (r *PreviewExportTracesToDatasetResponse, err error) { - var _args TraceServicePreviewExportTracesToDatasetArgs - _args.Req = req - var _result TraceServicePreviewExportTracesToDatasetResult - if err = p.Client_().Call(ctx, "PreviewExportTracesToDataset", &_args, &_result); err != nil { - return - } - return _result.GetSuccess(), nil +func (p *ChangeEvaluatorScoreRequest) SetBase(val *base.Base) { + p.Base = val } -type TraceServiceProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler TraceService +var fieldIDToName_ChangeEvaluatorScoreRequest = map[int16]string{ + 1: "workspace_id", + 2: "annotation_id", + 3: "span_id", + 4: "start_time", + 5: "correction", + 6: "platform_type", + 255: "Base", } -func (p *TraceServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor +func (p *ChangeEvaluatorScoreRequest) IsSetCorrection() bool { + return p.Correction != nil } -func (p *TraceServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok +func (p *ChangeEvaluatorScoreRequest) IsSetPlatformType() bool { + return p.PlatformType != nil } -func (p *TraceServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap +func (p *ChangeEvaluatorScoreRequest) IsSetBase() bool { + return p.Base != nil } -func NewTraceServiceProcessor(handler TraceService) *TraceServiceProcessor { - self := &TraceServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self.AddToProcessorMap("ListSpans", &traceServiceProcessorListSpans{handler: handler}) - self.AddToProcessorMap("GetTrace", &traceServiceProcessorGetTrace{handler: handler}) - self.AddToProcessorMap("BatchGetTracesAdvanceInfo", &traceServiceProcessorBatchGetTracesAdvanceInfo{handler: handler}) - self.AddToProcessorMap("IngestTracesInner", &traceServiceProcessorIngestTracesInner{handler: handler}) - self.AddToProcessorMap("GetTracesMetaInfo", &traceServiceProcessorGetTracesMetaInfo{handler: handler}) - self.AddToProcessorMap("CreateView", &traceServiceProcessorCreateView{handler: handler}) - self.AddToProcessorMap("UpdateView", &traceServiceProcessorUpdateView{handler: handler}) - self.AddToProcessorMap("DeleteView", &traceServiceProcessorDeleteView{handler: handler}) - self.AddToProcessorMap("ListViews", &traceServiceProcessorListViews{handler: handler}) - self.AddToProcessorMap("CreateManualAnnotation", &traceServiceProcessorCreateManualAnnotation{handler: handler}) - self.AddToProcessorMap("UpdateManualAnnotation", &traceServiceProcessorUpdateManualAnnotation{handler: handler}) - self.AddToProcessorMap("DeleteManualAnnotation", &traceServiceProcessorDeleteManualAnnotation{handler: handler}) - self.AddToProcessorMap("ListAnnotations", &traceServiceProcessorListAnnotations{handler: handler}) - self.AddToProcessorMap("ExportTracesToDataset", &traceServiceProcessorExportTracesToDataset{handler: handler}) - 
self.AddToProcessorMap("PreviewExportTracesToDataset", &traceServiceProcessorPreviewExportTracesToDataset{handler: handler}) - return self -} -func (p *TraceServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, x -} +func (p *ChangeEvaluatorScoreRequest) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetWorkspaceID bool = false + var issetAnnotationID bool = false + var issetSpanID bool = false + var issetStartTime bool = false + var issetCorrection bool = false -type traceServiceProcessorListSpans struct { - handler TraceService -} + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } -func (p *traceServiceProcessorListSpans) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetAnnotationID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.STRING { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetSpanID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + issetStartTime = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + issetCorrection = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetWorkspaceID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetAnnotationID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetSpanID { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if 
!issetStartTime { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetCorrection { + fieldId = 5 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ChangeEvaluatorScoreRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_ChangeEvaluatorScoreRequest[fieldId])) +} + +func (p *ChangeEvaluatorScoreRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.WorkspaceID = _field + return nil +} +func (p *ChangeEvaluatorScoreRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.AnnotationID = _field + return nil +} +func (p *ChangeEvaluatorScoreRequest) ReadField3(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.SpanID = _field + return nil +} +func (p *ChangeEvaluatorScoreRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.StartTime = _field + return nil +} +func (p *ChangeEvaluatorScoreRequest) ReadField5(iprot thrift.TProtocol) error { + _field := annotation.NewCorrection() + if err := _field.Read(iprot); err != nil { + return err + } + p.Correction = _field + return nil +} +func (p *ChangeEvaluatorScoreRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *common.PlatformType + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.PlatformType = _field + return nil +} +func (p *ChangeEvaluatorScoreRequest) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBase() + if err := _field.Read(iprot); err != nil { + return err + } + p.Base = _field + return nil +} + +func (p *ChangeEvaluatorScoreRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ChangeEvaluatorScoreRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != 
nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *ChangeEvaluatorScoreRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("workspace_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.WorkspaceID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *ChangeEvaluatorScoreRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("annotation_id", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.AnnotationID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *ChangeEvaluatorScoreRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("span_id", thrift.STRING, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.SpanID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *ChangeEvaluatorScoreRequest) writeField4(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("start_time", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.StartTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} +func (p *ChangeEvaluatorScoreRequest) writeField5(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("correction", thrift.STRUCT, 5); err != nil { + goto WriteFieldBeginError + } + if err := p.Correction.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} +func (p *ChangeEvaluatorScoreRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetPlatformType() { + if err = 
oprot.WriteFieldBegin("platform_type", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.PlatformType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} +func (p *ChangeEvaluatorScoreRequest) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBase() { + if err = oprot.WriteFieldBegin("Base", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.Base.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *ChangeEvaluatorScoreRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ChangeEvaluatorScoreRequest(%+v)", *p) + +} + +func (p *ChangeEvaluatorScoreRequest) DeepEqual(ano *ChangeEvaluatorScoreRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.WorkspaceID) { + return false + } + if !p.Field2DeepEqual(ano.AnnotationID) { + return false + } + if !p.Field3DeepEqual(ano.SpanID) { + return false + } + if !p.Field4DeepEqual(ano.StartTime) { + return false + } + if !p.Field5DeepEqual(ano.Correction) { + return false + } + if !p.Field6DeepEqual(ano.PlatformType) { + return false + } + if !p.Field255DeepEqual(ano.Base) { + return false + } + return true +} + +func (p *ChangeEvaluatorScoreRequest) Field1DeepEqual(src int64) bool { + + if p.WorkspaceID != src { + return false + } + return true +} +func (p *ChangeEvaluatorScoreRequest) Field2DeepEqual(src string) bool { + + if strings.Compare(p.AnnotationID, src) != 0 { + return false + } + return true +} +func (p *ChangeEvaluatorScoreRequest) Field3DeepEqual(src string) bool { + + if strings.Compare(p.SpanID, src) != 0 { + return false + } + return true +} +func (p *ChangeEvaluatorScoreRequest) Field4DeepEqual(src int64) bool { + + if p.StartTime != src { + return false + } + return true +} +func (p *ChangeEvaluatorScoreRequest) Field5DeepEqual(src *annotation.Correction) bool { + + if !p.Correction.DeepEqual(src) { + return false + } + return true +} +func (p *ChangeEvaluatorScoreRequest) Field6DeepEqual(src *common.PlatformType) bool { + + if p.PlatformType == src { + return true + } else if p.PlatformType == nil || src == nil { + return false + } + if strings.Compare(*p.PlatformType, *src) != 0 { + return false + } + return true +} +func (p *ChangeEvaluatorScoreRequest) Field255DeepEqual(src *base.Base) bool { + + if !p.Base.DeepEqual(src) { + return false + } + return true +} + +type ChangeEvaluatorScoreResponse struct { + Annotation *annotation.Annotation `thrift:"annotation,1,required" frugal:"1,required,annotation.Annotation" form:"annotation,required" json:"annotation,required" query:"annotation,required"` + BaseResp *base.BaseResp `thrift:"BaseResp,255,optional" frugal:"255,optional,base.BaseResp" form:"BaseResp" json:"BaseResp,omitempty" query:"BaseResp"` +} + +func NewChangeEvaluatorScoreResponse() *ChangeEvaluatorScoreResponse { + return 
&ChangeEvaluatorScoreResponse{} +} + +func (p *ChangeEvaluatorScoreResponse) InitDefault() { +} + +var ChangeEvaluatorScoreResponse_Annotation_DEFAULT *annotation.Annotation + +func (p *ChangeEvaluatorScoreResponse) GetAnnotation() (v *annotation.Annotation) { + if p == nil { + return + } + if !p.IsSetAnnotation() { + return ChangeEvaluatorScoreResponse_Annotation_DEFAULT + } + return p.Annotation +} + +var ChangeEvaluatorScoreResponse_BaseResp_DEFAULT *base.BaseResp + +func (p *ChangeEvaluatorScoreResponse) GetBaseResp() (v *base.BaseResp) { + if p == nil { + return + } + if !p.IsSetBaseResp() { + return ChangeEvaluatorScoreResponse_BaseResp_DEFAULT + } + return p.BaseResp +} +func (p *ChangeEvaluatorScoreResponse) SetAnnotation(val *annotation.Annotation) { + p.Annotation = val +} +func (p *ChangeEvaluatorScoreResponse) SetBaseResp(val *base.BaseResp) { + p.BaseResp = val +} + +var fieldIDToName_ChangeEvaluatorScoreResponse = map[int16]string{ + 1: "annotation", + 255: "BaseResp", +} + +func (p *ChangeEvaluatorScoreResponse) IsSetAnnotation() bool { + return p.Annotation != nil +} + +func (p *ChangeEvaluatorScoreResponse) IsSetBaseResp() bool { + return p.BaseResp != nil +} + +func (p *ChangeEvaluatorScoreResponse) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetAnnotation bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetAnnotation = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetAnnotation { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ChangeEvaluatorScoreResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_ChangeEvaluatorScoreResponse[fieldId])) +} + +func (p *ChangeEvaluatorScoreResponse) ReadField1(iprot thrift.TProtocol) error { + _field := annotation.NewAnnotation() + if err := _field.Read(iprot); err != nil { + return err + } + p.Annotation = _field + return nil +} +func (p 
*ChangeEvaluatorScoreResponse) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBaseResp() + if err := _field.Read(iprot); err != nil { + return err + } + p.BaseResp = _field + return nil +} + +func (p *ChangeEvaluatorScoreResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ChangeEvaluatorScoreResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *ChangeEvaluatorScoreResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("annotation", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Annotation.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *ChangeEvaluatorScoreResponse) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseResp() { + if err = oprot.WriteFieldBegin("BaseResp", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.BaseResp.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *ChangeEvaluatorScoreResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ChangeEvaluatorScoreResponse(%+v)", *p) + +} + +func (p *ChangeEvaluatorScoreResponse) DeepEqual(ano *ChangeEvaluatorScoreResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Annotation) { + return false + } + if !p.Field255DeepEqual(ano.BaseResp) { + return false + } + return true +} + +func (p *ChangeEvaluatorScoreResponse) Field1DeepEqual(src *annotation.Annotation) bool { + + if !p.Annotation.DeepEqual(src) { + return false + } + return true +} +func (p *ChangeEvaluatorScoreResponse) Field255DeepEqual(src *base.BaseResp) bool { + + if !p.BaseResp.DeepEqual(src) { + return false + } + return true +} + +type ListAnnotationEvaluatorsRequest struct { + WorkspaceID int64 `thrift:"workspace_id,1,required" frugal:"1,required,i64" json:"workspace_id" query:"workspace_id,required" ` + Name *string `thrift:"name,2,optional" frugal:"2,optional,string" form:"name" json:"name,omitempty"` + Base *base.Base `thrift:"Base,255,optional" 
frugal:"255,optional,base.Base" form:"-" json:"-" query:"-"` +} + +func NewListAnnotationEvaluatorsRequest() *ListAnnotationEvaluatorsRequest { + return &ListAnnotationEvaluatorsRequest{} +} + +func (p *ListAnnotationEvaluatorsRequest) InitDefault() { +} + +func (p *ListAnnotationEvaluatorsRequest) GetWorkspaceID() (v int64) { + if p != nil { + return p.WorkspaceID + } + return +} + +var ListAnnotationEvaluatorsRequest_Name_DEFAULT string + +func (p *ListAnnotationEvaluatorsRequest) GetName() (v string) { + if p == nil { + return + } + if !p.IsSetName() { + return ListAnnotationEvaluatorsRequest_Name_DEFAULT + } + return *p.Name +} + +var ListAnnotationEvaluatorsRequest_Base_DEFAULT *base.Base + +func (p *ListAnnotationEvaluatorsRequest) GetBase() (v *base.Base) { + if p == nil { + return + } + if !p.IsSetBase() { + return ListAnnotationEvaluatorsRequest_Base_DEFAULT + } + return p.Base +} +func (p *ListAnnotationEvaluatorsRequest) SetWorkspaceID(val int64) { + p.WorkspaceID = val +} +func (p *ListAnnotationEvaluatorsRequest) SetName(val *string) { + p.Name = val +} +func (p *ListAnnotationEvaluatorsRequest) SetBase(val *base.Base) { + p.Base = val +} + +var fieldIDToName_ListAnnotationEvaluatorsRequest = map[int16]string{ + 1: "workspace_id", + 2: "name", + 255: "Base", +} + +func (p *ListAnnotationEvaluatorsRequest) IsSetName() bool { + return p.Name != nil +} + +func (p *ListAnnotationEvaluatorsRequest) IsSetBase() bool { + return p.Base != nil +} + +func (p *ListAnnotationEvaluatorsRequest) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetWorkspaceID bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetWorkspaceID { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ListAnnotationEvaluatorsRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T 
read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_ListAnnotationEvaluatorsRequest[fieldId])) +} + +func (p *ListAnnotationEvaluatorsRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.WorkspaceID = _field + return nil +} +func (p *ListAnnotationEvaluatorsRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field *string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.Name = _field + return nil +} +func (p *ListAnnotationEvaluatorsRequest) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBase() + if err := _field.Read(iprot); err != nil { + return err + } + p.Base = _field + return nil +} + +func (p *ListAnnotationEvaluatorsRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ListAnnotationEvaluatorsRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *ListAnnotationEvaluatorsRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("workspace_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.WorkspaceID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *ListAnnotationEvaluatorsRequest) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetName() { + if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.Name); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *ListAnnotationEvaluatorsRequest) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBase() { + if err = oprot.WriteFieldBegin("Base", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.Base.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } 
+ } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *ListAnnotationEvaluatorsRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ListAnnotationEvaluatorsRequest(%+v)", *p) + +} + +func (p *ListAnnotationEvaluatorsRequest) DeepEqual(ano *ListAnnotationEvaluatorsRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.WorkspaceID) { + return false + } + if !p.Field2DeepEqual(ano.Name) { + return false + } + if !p.Field255DeepEqual(ano.Base) { + return false + } + return true +} + +func (p *ListAnnotationEvaluatorsRequest) Field1DeepEqual(src int64) bool { + + if p.WorkspaceID != src { + return false + } + return true +} +func (p *ListAnnotationEvaluatorsRequest) Field2DeepEqual(src *string) bool { + + if p.Name == src { + return true + } else if p.Name == nil || src == nil { + return false + } + if strings.Compare(*p.Name, *src) != 0 { + return false + } + return true +} +func (p *ListAnnotationEvaluatorsRequest) Field255DeepEqual(src *base.Base) bool { + + if !p.Base.DeepEqual(src) { + return false + } + return true +} + +type ListAnnotationEvaluatorsResponse struct { + Evaluators []*annotation.AnnotationEvaluator `thrift:"evaluators,1,required" frugal:"1,required,list" form:"evaluators,required" json:"evaluators,required" query:"evaluators,required"` + BaseResp *base.BaseResp `thrift:"BaseResp,255,optional" frugal:"255,optional,base.BaseResp" form:"BaseResp" json:"BaseResp,omitempty" query:"BaseResp"` +} + +func NewListAnnotationEvaluatorsResponse() *ListAnnotationEvaluatorsResponse { + return &ListAnnotationEvaluatorsResponse{} +} + +func (p *ListAnnotationEvaluatorsResponse) InitDefault() { +} + +func (p *ListAnnotationEvaluatorsResponse) GetEvaluators() (v []*annotation.AnnotationEvaluator) { + if p != nil { + return p.Evaluators + } + return +} + +var ListAnnotationEvaluatorsResponse_BaseResp_DEFAULT *base.BaseResp + +func (p *ListAnnotationEvaluatorsResponse) GetBaseResp() (v *base.BaseResp) { + if p == nil { + return + } + if !p.IsSetBaseResp() { + return ListAnnotationEvaluatorsResponse_BaseResp_DEFAULT + } + return p.BaseResp +} +func (p *ListAnnotationEvaluatorsResponse) SetEvaluators(val []*annotation.AnnotationEvaluator) { + p.Evaluators = val +} +func (p *ListAnnotationEvaluatorsResponse) SetBaseResp(val *base.BaseResp) { + p.BaseResp = val +} + +var fieldIDToName_ListAnnotationEvaluatorsResponse = map[int16]string{ + 1: "evaluators", + 255: "BaseResp", +} + +func (p *ListAnnotationEvaluatorsResponse) IsSetBaseResp() bool { + return p.BaseResp != nil +} + +func (p *ListAnnotationEvaluatorsResponse) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetEvaluators bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetEvaluators = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); 
err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetEvaluators { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ListAnnotationEvaluatorsResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_ListAnnotationEvaluatorsResponse[fieldId])) +} + +func (p *ListAnnotationEvaluatorsResponse) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*annotation.AnnotationEvaluator, 0, size) + values := make([]annotation.AnnotationEvaluator, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.Evaluators = _field + return nil +} +func (p *ListAnnotationEvaluatorsResponse) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBaseResp() + if err := _field.Read(iprot); err != nil { + return err + } + p.BaseResp = _field + return nil +} + +func (p *ListAnnotationEvaluatorsResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ListAnnotationEvaluatorsResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *ListAnnotationEvaluatorsResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("evaluators", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Evaluators)); err != nil { + return err + } + for _, v := range p.Evaluators { + if err := v.Write(oprot); err != nil { + return 
err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *ListAnnotationEvaluatorsResponse) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseResp() { + if err = oprot.WriteFieldBegin("BaseResp", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.BaseResp.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *ListAnnotationEvaluatorsResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ListAnnotationEvaluatorsResponse(%+v)", *p) + +} + +func (p *ListAnnotationEvaluatorsResponse) DeepEqual(ano *ListAnnotationEvaluatorsResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.Evaluators) { + return false + } + if !p.Field255DeepEqual(ano.BaseResp) { + return false + } + return true +} + +func (p *ListAnnotationEvaluatorsResponse) Field1DeepEqual(src []*annotation.AnnotationEvaluator) bool { + + if len(p.Evaluators) != len(src) { + return false + } + for i, v := range p.Evaluators { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *ListAnnotationEvaluatorsResponse) Field255DeepEqual(src *base.BaseResp) bool { + + if !p.BaseResp.DeepEqual(src) { + return false + } + return true +} + +type ExtractSpanInfoRequest struct { + WorkspaceID int64 `thrift:"workspace_id,1,required" frugal:"1,required,i64" json:"workspace_id" form:"workspace_id,required" ` + TraceID string `thrift:"trace_id,2,required" frugal:"2,required,string" form:"trace_id,required" json:"trace_id,required"` + SpanIds []string `thrift:"span_ids,3,required" frugal:"3,required,list" form:"span_ids,required" json:"span_ids,required"` + StartTime *int64 `thrift:"start_time,4,optional" frugal:"4,optional,i64" json:"start_time" form:"start_time" ` + EndTime *int64 `thrift:"end_time,5,optional" frugal:"5,optional,i64" json:"end_time" form:"end_time" ` + PlatformType *common.PlatformType `thrift:"platform_type,6,optional" frugal:"6,optional,string" form:"platform_type" json:"platform_type,omitempty"` + FieldMappings []*dataset0.FieldMapping `thrift:"field_mappings,7,optional" frugal:"7,optional,list" form:"field_mappings" json:"field_mappings,omitempty" query:"field_mappings"` + Base *base.Base `thrift:"Base,255,optional" frugal:"255,optional,base.Base" form:"-" json:"-" query:"-"` +} + +func NewExtractSpanInfoRequest() *ExtractSpanInfoRequest { + return &ExtractSpanInfoRequest{} +} + +func (p *ExtractSpanInfoRequest) InitDefault() { +} + +func (p *ExtractSpanInfoRequest) GetWorkspaceID() (v int64) { + if p != nil { + return p.WorkspaceID + } + return +} + +func (p *ExtractSpanInfoRequest) GetTraceID() (v string) { + if p != nil { + return p.TraceID + } + return +} + +func (p *ExtractSpanInfoRequest) GetSpanIds() (v []string) { + if p != nil { + return p.SpanIds + } + return +} + +var 
ExtractSpanInfoRequest_StartTime_DEFAULT int64 + +func (p *ExtractSpanInfoRequest) GetStartTime() (v int64) { + if p == nil { + return + } + if !p.IsSetStartTime() { + return ExtractSpanInfoRequest_StartTime_DEFAULT + } + return *p.StartTime +} + +var ExtractSpanInfoRequest_EndTime_DEFAULT int64 + +func (p *ExtractSpanInfoRequest) GetEndTime() (v int64) { + if p == nil { + return + } + if !p.IsSetEndTime() { + return ExtractSpanInfoRequest_EndTime_DEFAULT + } + return *p.EndTime +} + +var ExtractSpanInfoRequest_PlatformType_DEFAULT common.PlatformType + +func (p *ExtractSpanInfoRequest) GetPlatformType() (v common.PlatformType) { + if p == nil { + return + } + if !p.IsSetPlatformType() { + return ExtractSpanInfoRequest_PlatformType_DEFAULT + } + return *p.PlatformType +} + +var ExtractSpanInfoRequest_FieldMappings_DEFAULT []*dataset0.FieldMapping + +func (p *ExtractSpanInfoRequest) GetFieldMappings() (v []*dataset0.FieldMapping) { + if p == nil { + return + } + if !p.IsSetFieldMappings() { + return ExtractSpanInfoRequest_FieldMappings_DEFAULT + } + return p.FieldMappings +} + +var ExtractSpanInfoRequest_Base_DEFAULT *base.Base + +func (p *ExtractSpanInfoRequest) GetBase() (v *base.Base) { + if p == nil { + return + } + if !p.IsSetBase() { + return ExtractSpanInfoRequest_Base_DEFAULT + } + return p.Base +} +func (p *ExtractSpanInfoRequest) SetWorkspaceID(val int64) { + p.WorkspaceID = val +} +func (p *ExtractSpanInfoRequest) SetTraceID(val string) { + p.TraceID = val +} +func (p *ExtractSpanInfoRequest) SetSpanIds(val []string) { + p.SpanIds = val +} +func (p *ExtractSpanInfoRequest) SetStartTime(val *int64) { + p.StartTime = val +} +func (p *ExtractSpanInfoRequest) SetEndTime(val *int64) { + p.EndTime = val +} +func (p *ExtractSpanInfoRequest) SetPlatformType(val *common.PlatformType) { + p.PlatformType = val +} +func (p *ExtractSpanInfoRequest) SetFieldMappings(val []*dataset0.FieldMapping) { + p.FieldMappings = val +} +func (p *ExtractSpanInfoRequest) SetBase(val *base.Base) { + p.Base = val +} + +var fieldIDToName_ExtractSpanInfoRequest = map[int16]string{ + 1: "workspace_id", + 2: "trace_id", + 3: "span_ids", + 4: "start_time", + 5: "end_time", + 6: "platform_type", + 7: "field_mappings", + 255: "Base", +} + +func (p *ExtractSpanInfoRequest) IsSetStartTime() bool { + return p.StartTime != nil +} + +func (p *ExtractSpanInfoRequest) IsSetEndTime() bool { + return p.EndTime != nil +} + +func (p *ExtractSpanInfoRequest) IsSetPlatformType() bool { + return p.PlatformType != nil +} + +func (p *ExtractSpanInfoRequest) IsSetFieldMappings() bool { + return p.FieldMappings != nil +} + +func (p *ExtractSpanInfoRequest) IsSetBase() bool { + return p.Base != nil +} + +func (p *ExtractSpanInfoRequest) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetWorkspaceID bool = false + var issetTraceID bool = false + var issetSpanIds bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.STRING { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + 
issetTraceID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 3: + if fieldTypeId == thrift.LIST { + if err = p.ReadField3(iprot); err != nil { + goto ReadFieldError + } + issetSpanIds = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 4: + if fieldTypeId == thrift.I64 { + if err = p.ReadField4(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 5: + if fieldTypeId == thrift.I64 { + if err = p.ReadField5(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 6: + if fieldTypeId == thrift.STRING { + if err = p.ReadField6(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 7: + if fieldTypeId == thrift.LIST { + if err = p.ReadField7(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetWorkspaceID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetTraceID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetSpanIds { + fieldId = 3 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ExtractSpanInfoRequest[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_ExtractSpanInfoRequest[fieldId])) +} + +func (p *ExtractSpanInfoRequest) ReadField1(iprot thrift.TProtocol) error { + + var _field int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = v + } + p.WorkspaceID = _field + return nil +} +func (p *ExtractSpanInfoRequest) ReadField2(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.TraceID = _field + return nil +} +func (p *ExtractSpanInfoRequest) ReadField3(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + + var _elem string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _elem = v + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + 
return err + } + p.SpanIds = _field + return nil +} +func (p *ExtractSpanInfoRequest) ReadField4(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.StartTime = _field + return nil +} +func (p *ExtractSpanInfoRequest) ReadField5(iprot thrift.TProtocol) error { + + var _field *int64 + if v, err := iprot.ReadI64(); err != nil { + return err + } else { + _field = &v + } + p.EndTime = _field + return nil +} +func (p *ExtractSpanInfoRequest) ReadField6(iprot thrift.TProtocol) error { + + var _field *common.PlatformType + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = &v + } + p.PlatformType = _field + return nil +} +func (p *ExtractSpanInfoRequest) ReadField7(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*dataset0.FieldMapping, 0, size) + values := make([]dataset0.FieldMapping, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FieldMappings = _field + return nil +} +func (p *ExtractSpanInfoRequest) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBase() + if err := _field.Read(iprot); err != nil { + return err + } + p.Base = _field + return nil +} + +func (p *ExtractSpanInfoRequest) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ExtractSpanInfoRequest"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + if err = p.writeField3(oprot); err != nil { + fieldId = 3 + goto WriteFieldError + } + if err = p.writeField4(oprot); err != nil { + fieldId = 4 + goto WriteFieldError + } + if err = p.writeField5(oprot); err != nil { + fieldId = 5 + goto WriteFieldError + } + if err = p.writeField6(oprot); err != nil { + fieldId = 6 + goto WriteFieldError + } + if err = p.writeField7(oprot); err != nil { + fieldId = 7 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *ExtractSpanInfoRequest) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("workspace_id", thrift.I64, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(p.WorkspaceID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end 
error: ", p), err) +} +func (p *ExtractSpanInfoRequest) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("trace_id", thrift.STRING, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.TraceID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} +func (p *ExtractSpanInfoRequest) writeField3(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("span_ids", thrift.LIST, 3); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRING, len(p.SpanIds)); err != nil { + return err + } + for _, v := range p.SpanIds { + if err := oprot.WriteString(v); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err) +} +func (p *ExtractSpanInfoRequest) writeField4(oprot thrift.TProtocol) (err error) { + if p.IsSetStartTime() { + if err = oprot.WriteFieldBegin("start_time", thrift.I64, 4); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.StartTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err) +} +func (p *ExtractSpanInfoRequest) writeField5(oprot thrift.TProtocol) (err error) { + if p.IsSetEndTime() { + if err = oprot.WriteFieldBegin("end_time", thrift.I64, 5); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteI64(*p.EndTime); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err) +} +func (p *ExtractSpanInfoRequest) writeField6(oprot thrift.TProtocol) (err error) { + if p.IsSetPlatformType() { + if err = oprot.WriteFieldBegin("platform_type", thrift.STRING, 6); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(*p.PlatformType); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err) +} +func (p *ExtractSpanInfoRequest) writeField7(oprot thrift.TProtocol) (err error) { + if p.IsSetFieldMappings() { + if err = oprot.WriteFieldBegin("field_mappings", thrift.LIST, 7); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.FieldMappings)); err != nil { + return err + } + for _, v := range p.FieldMappings { + if err := v.Write(oprot); err != nil { + 
return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err) +} +func (p *ExtractSpanInfoRequest) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBase() { + if err = oprot.WriteFieldBegin("Base", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.Base.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *ExtractSpanInfoRequest) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ExtractSpanInfoRequest(%+v)", *p) + +} + +func (p *ExtractSpanInfoRequest) DeepEqual(ano *ExtractSpanInfoRequest) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.WorkspaceID) { + return false + } + if !p.Field2DeepEqual(ano.TraceID) { + return false + } + if !p.Field3DeepEqual(ano.SpanIds) { + return false + } + if !p.Field4DeepEqual(ano.StartTime) { + return false + } + if !p.Field5DeepEqual(ano.EndTime) { + return false + } + if !p.Field6DeepEqual(ano.PlatformType) { + return false + } + if !p.Field7DeepEqual(ano.FieldMappings) { + return false + } + if !p.Field255DeepEqual(ano.Base) { + return false + } + return true +} + +func (p *ExtractSpanInfoRequest) Field1DeepEqual(src int64) bool { + + if p.WorkspaceID != src { + return false + } + return true +} +func (p *ExtractSpanInfoRequest) Field2DeepEqual(src string) bool { + + if strings.Compare(p.TraceID, src) != 0 { + return false + } + return true +} +func (p *ExtractSpanInfoRequest) Field3DeepEqual(src []string) bool { + + if len(p.SpanIds) != len(src) { + return false + } + for i, v := range p.SpanIds { + _src := src[i] + if strings.Compare(v, _src) != 0 { + return false + } + } + return true +} +func (p *ExtractSpanInfoRequest) Field4DeepEqual(src *int64) bool { + + if p.StartTime == src { + return true + } else if p.StartTime == nil || src == nil { + return false + } + if *p.StartTime != *src { + return false + } + return true +} +func (p *ExtractSpanInfoRequest) Field5DeepEqual(src *int64) bool { + + if p.EndTime == src { + return true + } else if p.EndTime == nil || src == nil { + return false + } + if *p.EndTime != *src { + return false + } + return true +} +func (p *ExtractSpanInfoRequest) Field6DeepEqual(src *common.PlatformType) bool { + + if p.PlatformType == src { + return true + } else if p.PlatformType == nil || src == nil { + return false + } + if strings.Compare(*p.PlatformType, *src) != 0 { + return false + } + return true +} +func (p *ExtractSpanInfoRequest) Field7DeepEqual(src []*dataset0.FieldMapping) bool { + + if len(p.FieldMappings) != len(src) { + return false + } + for i, v := range p.FieldMappings { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *ExtractSpanInfoRequest) Field255DeepEqual(src *base.Base) bool { + + if !p.Base.DeepEqual(src) { + return false + } + return true +} + +type SpanInfo struct { + SpanID string 
`thrift:"span_id,1,required" frugal:"1,required,string" form:"span_id,required" json:"span_id,required" query:"span_id,required"` + FieldList []*dataset0.FieldData `thrift:"field_list,2,required" frugal:"2,required,list" form:"field_list,required" json:"field_list,required" query:"field_list,required"` +} + +func NewSpanInfo() *SpanInfo { + return &SpanInfo{} +} + +func (p *SpanInfo) InitDefault() { +} + +func (p *SpanInfo) GetSpanID() (v string) { + if p != nil { + return p.SpanID + } + return +} + +func (p *SpanInfo) GetFieldList() (v []*dataset0.FieldData) { + if p != nil { + return p.FieldList + } + return +} +func (p *SpanInfo) SetSpanID(val string) { + p.SpanID = val +} +func (p *SpanInfo) SetFieldList(val []*dataset0.FieldData) { + p.FieldList = val +} + +var fieldIDToName_SpanInfo = map[int16]string{ + 1: "span_id", + 2: "field_list", +} + +func (p *SpanInfo) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetSpanID bool = false + var issetFieldList bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetSpanID = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 2: + if fieldTypeId == thrift.LIST { + if err = p.ReadField2(iprot); err != nil { + goto ReadFieldError + } + issetFieldList = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetSpanID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetFieldList { + fieldId = 2 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_SpanInfo[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_SpanInfo[fieldId])) +} + +func (p *SpanInfo) ReadField1(iprot thrift.TProtocol) error { + + var _field string + if v, err := iprot.ReadString(); err != nil { + return err + } else { + _field = v + } + p.SpanID = _field + return nil +} +func (p *SpanInfo) ReadField2(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*dataset0.FieldData, 0, size) + values := make([]dataset0.FieldData, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := 
_elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.FieldList = _field + return nil +} + +func (p *SpanInfo) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("SpanInfo"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField2(oprot); err != nil { + fieldId = 2 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *SpanInfo) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("span_id", thrift.STRING, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteString(p.SpanID); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *SpanInfo) writeField2(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("field_list", thrift.LIST, 2); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.FieldList)); err != nil { + return err + } + for _, v := range p.FieldList { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err) +} + +func (p *SpanInfo) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("SpanInfo(%+v)", *p) + +} + +func (p *SpanInfo) DeepEqual(ano *SpanInfo) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.SpanID) { + return false + } + if !p.Field2DeepEqual(ano.FieldList) { + return false + } + return true +} + +func (p *SpanInfo) Field1DeepEqual(src string) bool { + + if strings.Compare(p.SpanID, src) != 0 { + return false + } + return true +} +func (p *SpanInfo) Field2DeepEqual(src []*dataset0.FieldData) bool { + + if len(p.FieldList) != len(src) { + return false + } + for i, v := range p.FieldList { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} + +type ExtractSpanInfoResponse struct { + SpanInfos []*SpanInfo `thrift:"span_infos,1,required" frugal:"1,required,list" form:"span_infos,required" json:"span_infos,required" query:"span_infos,required"` + BaseResp *base.BaseResp `thrift:"BaseResp,255,optional" frugal:"255,optional,base.BaseResp" form:"BaseResp" 
json:"BaseResp,omitempty" query:"BaseResp"` +} + +func NewExtractSpanInfoResponse() *ExtractSpanInfoResponse { + return &ExtractSpanInfoResponse{} +} + +func (p *ExtractSpanInfoResponse) InitDefault() { +} + +func (p *ExtractSpanInfoResponse) GetSpanInfos() (v []*SpanInfo) { + if p != nil { + return p.SpanInfos + } + return +} + +var ExtractSpanInfoResponse_BaseResp_DEFAULT *base.BaseResp + +func (p *ExtractSpanInfoResponse) GetBaseResp() (v *base.BaseResp) { + if p == nil { + return + } + if !p.IsSetBaseResp() { + return ExtractSpanInfoResponse_BaseResp_DEFAULT + } + return p.BaseResp +} +func (p *ExtractSpanInfoResponse) SetSpanInfos(val []*SpanInfo) { + p.SpanInfos = val +} +func (p *ExtractSpanInfoResponse) SetBaseResp(val *base.BaseResp) { + p.BaseResp = val +} + +var fieldIDToName_ExtractSpanInfoResponse = map[int16]string{ + 1: "span_infos", + 255: "BaseResp", +} + +func (p *ExtractSpanInfoResponse) IsSetBaseResp() bool { + return p.BaseResp != nil +} + +func (p *ExtractSpanInfoResponse) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + var issetSpanInfos bool = false + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + issetSpanInfos = true + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + case 255: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField255(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + if !issetSpanInfos { + fieldId = 1 + goto RequiredFieldNotSetError + } + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ExtractSpanInfoResponse[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +RequiredFieldNotSetError: + return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_ExtractSpanInfoResponse[fieldId])) +} + +func (p *ExtractSpanInfoResponse) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return err + } + _field := make([]*SpanInfo, 0, size) + values := make([]SpanInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + + if err := _elem.Read(iprot); err != nil { + return err + } + + _field = append(_field, _elem) + } + if err := iprot.ReadListEnd(); err != nil { + return err + } + p.SpanInfos = _field + return 
nil +} +func (p *ExtractSpanInfoResponse) ReadField255(iprot thrift.TProtocol) error { + _field := base.NewBaseResp() + if err := _field.Read(iprot); err != nil { + return err + } + p.BaseResp = _field + return nil +} + +func (p *ExtractSpanInfoResponse) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ExtractSpanInfoResponse"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + if err = p.writeField255(oprot); err != nil { + fieldId = 255 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *ExtractSpanInfoResponse) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("span_infos", thrift.LIST, 1); err != nil { + goto WriteFieldBeginError + } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SpanInfos)); err != nil { + return err + } + for _, v := range p.SpanInfos { + if err := v.Write(oprot); err != nil { + return err + } + } + if err := oprot.WriteListEnd(); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} +func (p *ExtractSpanInfoResponse) writeField255(oprot thrift.TProtocol) (err error) { + if p.IsSetBaseResp() { + if err = oprot.WriteFieldBegin("BaseResp", thrift.STRUCT, 255); err != nil { + goto WriteFieldBeginError + } + if err := p.BaseResp.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 255 end error: ", p), err) +} + +func (p *ExtractSpanInfoResponse) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("ExtractSpanInfoResponse(%+v)", *p) + +} + +func (p *ExtractSpanInfoResponse) DeepEqual(ano *ExtractSpanInfoResponse) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false + } + if !p.Field1DeepEqual(ano.SpanInfos) { + return false + } + if !p.Field255DeepEqual(ano.BaseResp) { + return false + } + return true +} + +func (p *ExtractSpanInfoResponse) Field1DeepEqual(src []*SpanInfo) bool { + + if len(p.SpanInfos) != len(src) { + return false + } + for i, v := range p.SpanInfos { + _src := src[i] + if !v.DeepEqual(_src) { + return false + } + } + return true +} +func (p *ExtractSpanInfoResponse) Field255DeepEqual(src *base.BaseResp) bool { + + if !p.BaseResp.DeepEqual(src) { + return false + } + return true +} + +type TraceService interface { + ListSpans(ctx context.Context, req *ListSpansRequest) (r *ListSpansResponse, err 
error) + + GetTrace(ctx context.Context, req *GetTraceRequest) (r *GetTraceResponse, err error) + + BatchGetTracesAdvanceInfo(ctx context.Context, req *BatchGetTracesAdvanceInfoRequest) (r *BatchGetTracesAdvanceInfoResponse, err error) + + IngestTracesInner(ctx context.Context, req *IngestTracesRequest) (r *IngestTracesResponse, err error) + + GetTracesMetaInfo(ctx context.Context, req *GetTracesMetaInfoRequest) (r *GetTracesMetaInfoResponse, err error) + + CreateView(ctx context.Context, req *CreateViewRequest) (r *CreateViewResponse, err error) + + UpdateView(ctx context.Context, req *UpdateViewRequest) (r *UpdateViewResponse, err error) + + DeleteView(ctx context.Context, req *DeleteViewRequest) (r *DeleteViewResponse, err error) + + ListViews(ctx context.Context, req *ListViewsRequest) (r *ListViewsResponse, err error) + + CreateManualAnnotation(ctx context.Context, req *CreateManualAnnotationRequest) (r *CreateManualAnnotationResponse, err error) + + UpdateManualAnnotation(ctx context.Context, req *UpdateManualAnnotationRequest) (r *UpdateManualAnnotationResponse, err error) + + DeleteManualAnnotation(ctx context.Context, req *DeleteManualAnnotationRequest) (r *DeleteManualAnnotationResponse, err error) + + ListAnnotations(ctx context.Context, req *ListAnnotationsRequest) (r *ListAnnotationsResponse, err error) + + ExportTracesToDataset(ctx context.Context, req *ExportTracesToDatasetRequest) (r *ExportTracesToDatasetResponse, err error) + + PreviewExportTracesToDataset(ctx context.Context, req *PreviewExportTracesToDatasetRequest) (r *PreviewExportTracesToDatasetResponse, err error) + + ChangeEvaluatorScore(ctx context.Context, req *ChangeEvaluatorScoreRequest) (r *ChangeEvaluatorScoreResponse, err error) + + ListAnnotationEvaluators(ctx context.Context, req *ListAnnotationEvaluatorsRequest) (r *ListAnnotationEvaluatorsResponse, err error) + + ExtractSpanInfo(ctx context.Context, req *ExtractSpanInfoRequest) (r *ExtractSpanInfoResponse, err error) +} + +type TraceServiceClient struct { + c thrift.TClient +} + +func NewTraceServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *TraceServiceClient { + return &TraceServiceClient{ + c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), + } +} + +func NewTraceServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *TraceServiceClient { + return &TraceServiceClient{ + c: thrift.NewTStandardClient(iprot, oprot), + } +} + +func NewTraceServiceClient(c thrift.TClient) *TraceServiceClient { + return &TraceServiceClient{ + c: c, + } +} + +func (p *TraceServiceClient) Client_() thrift.TClient { + return p.c +} + +func (p *TraceServiceClient) ListSpans(ctx context.Context, req *ListSpansRequest) (r *ListSpansResponse, err error) { + var _args TraceServiceListSpansArgs + _args.Req = req + var _result TraceServiceListSpansResult + if err = p.Client_().Call(ctx, "ListSpans", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) GetTrace(ctx context.Context, req *GetTraceRequest) (r *GetTraceResponse, err error) { + var _args TraceServiceGetTraceArgs + _args.Req = req + var _result TraceServiceGetTraceResult + if err = p.Client_().Call(ctx, "GetTrace", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) BatchGetTracesAdvanceInfo(ctx context.Context, req *BatchGetTracesAdvanceInfoRequest) (r *BatchGetTracesAdvanceInfoResponse, err error) { + var _args 
TraceServiceBatchGetTracesAdvanceInfoArgs + _args.Req = req + var _result TraceServiceBatchGetTracesAdvanceInfoResult + if err = p.Client_().Call(ctx, "BatchGetTracesAdvanceInfo", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) IngestTracesInner(ctx context.Context, req *IngestTracesRequest) (r *IngestTracesResponse, err error) { + var _args TraceServiceIngestTracesInnerArgs + _args.Req = req + var _result TraceServiceIngestTracesInnerResult + if err = p.Client_().Call(ctx, "IngestTracesInner", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) GetTracesMetaInfo(ctx context.Context, req *GetTracesMetaInfoRequest) (r *GetTracesMetaInfoResponse, err error) { + var _args TraceServiceGetTracesMetaInfoArgs + _args.Req = req + var _result TraceServiceGetTracesMetaInfoResult + if err = p.Client_().Call(ctx, "GetTracesMetaInfo", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) CreateView(ctx context.Context, req *CreateViewRequest) (r *CreateViewResponse, err error) { + var _args TraceServiceCreateViewArgs + _args.Req = req + var _result TraceServiceCreateViewResult + if err = p.Client_().Call(ctx, "CreateView", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) UpdateView(ctx context.Context, req *UpdateViewRequest) (r *UpdateViewResponse, err error) { + var _args TraceServiceUpdateViewArgs + _args.Req = req + var _result TraceServiceUpdateViewResult + if err = p.Client_().Call(ctx, "UpdateView", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) DeleteView(ctx context.Context, req *DeleteViewRequest) (r *DeleteViewResponse, err error) { + var _args TraceServiceDeleteViewArgs + _args.Req = req + var _result TraceServiceDeleteViewResult + if err = p.Client_().Call(ctx, "DeleteView", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) ListViews(ctx context.Context, req *ListViewsRequest) (r *ListViewsResponse, err error) { + var _args TraceServiceListViewsArgs + _args.Req = req + var _result TraceServiceListViewsResult + if err = p.Client_().Call(ctx, "ListViews", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) CreateManualAnnotation(ctx context.Context, req *CreateManualAnnotationRequest) (r *CreateManualAnnotationResponse, err error) { + var _args TraceServiceCreateManualAnnotationArgs + _args.Req = req + var _result TraceServiceCreateManualAnnotationResult + if err = p.Client_().Call(ctx, "CreateManualAnnotation", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) UpdateManualAnnotation(ctx context.Context, req *UpdateManualAnnotationRequest) (r *UpdateManualAnnotationResponse, err error) { + var _args TraceServiceUpdateManualAnnotationArgs + _args.Req = req + var _result TraceServiceUpdateManualAnnotationResult + if err = p.Client_().Call(ctx, "UpdateManualAnnotation", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) DeleteManualAnnotation(ctx context.Context, req *DeleteManualAnnotationRequest) (r *DeleteManualAnnotationResponse, err error) { + var _args TraceServiceDeleteManualAnnotationArgs + _args.Req = req + var _result 
TraceServiceDeleteManualAnnotationResult + if err = p.Client_().Call(ctx, "DeleteManualAnnotation", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) ListAnnotations(ctx context.Context, req *ListAnnotationsRequest) (r *ListAnnotationsResponse, err error) { + var _args TraceServiceListAnnotationsArgs + _args.Req = req + var _result TraceServiceListAnnotationsResult + if err = p.Client_().Call(ctx, "ListAnnotations", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) ExportTracesToDataset(ctx context.Context, req *ExportTracesToDatasetRequest) (r *ExportTracesToDatasetResponse, err error) { + var _args TraceServiceExportTracesToDatasetArgs + _args.Req = req + var _result TraceServiceExportTracesToDatasetResult + if err = p.Client_().Call(ctx, "ExportTracesToDataset", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) PreviewExportTracesToDataset(ctx context.Context, req *PreviewExportTracesToDatasetRequest) (r *PreviewExportTracesToDatasetResponse, err error) { + var _args TraceServicePreviewExportTracesToDatasetArgs + _args.Req = req + var _result TraceServicePreviewExportTracesToDatasetResult + if err = p.Client_().Call(ctx, "PreviewExportTracesToDataset", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) ChangeEvaluatorScore(ctx context.Context, req *ChangeEvaluatorScoreRequest) (r *ChangeEvaluatorScoreResponse, err error) { + var _args TraceServiceChangeEvaluatorScoreArgs + _args.Req = req + var _result TraceServiceChangeEvaluatorScoreResult + if err = p.Client_().Call(ctx, "ChangeEvaluatorScore", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) ListAnnotationEvaluators(ctx context.Context, req *ListAnnotationEvaluatorsRequest) (r *ListAnnotationEvaluatorsResponse, err error) { + var _args TraceServiceListAnnotationEvaluatorsArgs + _args.Req = req + var _result TraceServiceListAnnotationEvaluatorsResult + if err = p.Client_().Call(ctx, "ListAnnotationEvaluators", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} +func (p *TraceServiceClient) ExtractSpanInfo(ctx context.Context, req *ExtractSpanInfoRequest) (r *ExtractSpanInfoResponse, err error) { + var _args TraceServiceExtractSpanInfoArgs + _args.Req = req + var _result TraceServiceExtractSpanInfoResult + if err = p.Client_().Call(ctx, "ExtractSpanInfo", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +type TraceServiceProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler TraceService +} + +func (p *TraceServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *TraceServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *TraceServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewTraceServiceProcessor(handler TraceService) *TraceServiceProcessor { + self := &TraceServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self.AddToProcessorMap("ListSpans", &traceServiceProcessorListSpans{handler: handler}) + self.AddToProcessorMap("GetTrace", 
&traceServiceProcessorGetTrace{handler: handler}) + self.AddToProcessorMap("BatchGetTracesAdvanceInfo", &traceServiceProcessorBatchGetTracesAdvanceInfo{handler: handler}) + self.AddToProcessorMap("IngestTracesInner", &traceServiceProcessorIngestTracesInner{handler: handler}) + self.AddToProcessorMap("GetTracesMetaInfo", &traceServiceProcessorGetTracesMetaInfo{handler: handler}) + self.AddToProcessorMap("CreateView", &traceServiceProcessorCreateView{handler: handler}) + self.AddToProcessorMap("UpdateView", &traceServiceProcessorUpdateView{handler: handler}) + self.AddToProcessorMap("DeleteView", &traceServiceProcessorDeleteView{handler: handler}) + self.AddToProcessorMap("ListViews", &traceServiceProcessorListViews{handler: handler}) + self.AddToProcessorMap("CreateManualAnnotation", &traceServiceProcessorCreateManualAnnotation{handler: handler}) + self.AddToProcessorMap("UpdateManualAnnotation", &traceServiceProcessorUpdateManualAnnotation{handler: handler}) + self.AddToProcessorMap("DeleteManualAnnotation", &traceServiceProcessorDeleteManualAnnotation{handler: handler}) + self.AddToProcessorMap("ListAnnotations", &traceServiceProcessorListAnnotations{handler: handler}) + self.AddToProcessorMap("ExportTracesToDataset", &traceServiceProcessorExportTracesToDataset{handler: handler}) + self.AddToProcessorMap("PreviewExportTracesToDataset", &traceServiceProcessorPreviewExportTracesToDataset{handler: handler}) + self.AddToProcessorMap("ChangeEvaluatorScore", &traceServiceProcessorChangeEvaluatorScore{handler: handler}) + self.AddToProcessorMap("ListAnnotationEvaluators", &traceServiceProcessorListAnnotationEvaluators{handler: handler}) + self.AddToProcessorMap("ExtractSpanInfo", &traceServiceProcessorExtractSpanInfo{handler: handler}) + return self +} +func (p *TraceServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x +} + +type traceServiceProcessorListSpans struct { + handler TraceService +} + +func (p *traceServiceProcessorListSpans) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceListSpansArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ListSpans", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceListSpansResult{} + var retval *ListSpansResponse + if retval, err2 = p.handler.ListSpans(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ListSpans: "+err2.Error()) + oprot.WriteMessageBegin("ListSpans", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("ListSpans",
thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorGetTrace struct { + handler TraceService +} + +func (p *traceServiceProcessorGetTrace) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceGetTraceArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("GetTrace", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceGetTraceResult{} + var retval *GetTraceResponse + if retval, err2 = p.handler.GetTrace(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetTrace: "+err2.Error()) + oprot.WriteMessageBegin("GetTrace", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("GetTrace", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorBatchGetTracesAdvanceInfo struct { + handler TraceService +} + +func (p *traceServiceProcessorBatchGetTracesAdvanceInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceBatchGetTracesAdvanceInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("BatchGetTracesAdvanceInfo", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceBatchGetTracesAdvanceInfoResult{} + var retval *BatchGetTracesAdvanceInfoResponse + if retval, err2 = p.handler.BatchGetTracesAdvanceInfo(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing BatchGetTracesAdvanceInfo: "+err2.Error()) + oprot.WriteMessageBegin("BatchGetTracesAdvanceInfo", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("BatchGetTracesAdvanceInfo", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorIngestTracesInner struct { + handler TraceService +} + +func (p *traceServiceProcessorIngestTracesInner) Process(ctx context.Context, seqId int32, 
iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceIngestTracesInnerArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("IngestTracesInner", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceIngestTracesInnerResult{} + var retval *IngestTracesResponse + if retval, err2 = p.handler.IngestTracesInner(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing IngestTracesInner: "+err2.Error()) + oprot.WriteMessageBegin("IngestTracesInner", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("IngestTracesInner", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorGetTracesMetaInfo struct { + handler TraceService +} + +func (p *traceServiceProcessorGetTracesMetaInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceGetTracesMetaInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("GetTracesMetaInfo", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceGetTracesMetaInfoResult{} + var retval *GetTracesMetaInfoResponse + if retval, err2 = p.handler.GetTracesMetaInfo(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetTracesMetaInfo: "+err2.Error()) + oprot.WriteMessageBegin("GetTracesMetaInfo", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("GetTracesMetaInfo", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorCreateView struct { + handler TraceService +} + +func (p *traceServiceProcessorCreateView) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceCreateViewArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("CreateView", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceCreateViewResult{} + var retval *CreateViewResponse + if 
retval, err2 = p.handler.CreateView(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing CreateView: "+err2.Error()) + oprot.WriteMessageBegin("CreateView", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("CreateView", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorUpdateView struct { + handler TraceService +} + +func (p *traceServiceProcessorUpdateView) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceUpdateViewArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("UpdateView", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceUpdateViewResult{} + var retval *UpdateViewResponse + if retval, err2 = p.handler.UpdateView(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing UpdateView: "+err2.Error()) + oprot.WriteMessageBegin("UpdateView", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("UpdateView", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorDeleteView struct { + handler TraceService +} + +func (p *traceServiceProcessorDeleteView) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceDeleteViewArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("DeleteView", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceDeleteViewResult{} + var retval *DeleteViewResponse + if retval, err2 = p.handler.DeleteView(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing DeleteView: "+err2.Error()) + oprot.WriteMessageBegin("DeleteView", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("DeleteView", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = 
oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorListViews struct { + handler TraceService +} + +func (p *traceServiceProcessorListViews) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceListViewsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ListViews", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceListViewsResult{} + var retval *ListViewsResponse + if retval, err2 = p.handler.ListViews(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ListViews: "+err2.Error()) + oprot.WriteMessageBegin("ListViews", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("ListViews", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorCreateManualAnnotation struct { + handler TraceService +} + +func (p *traceServiceProcessorCreateManualAnnotation) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceCreateManualAnnotationArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("CreateManualAnnotation", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceCreateManualAnnotationResult{} + var retval *CreateManualAnnotationResponse + if retval, err2 = p.handler.CreateManualAnnotation(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing CreateManualAnnotation: "+err2.Error()) + oprot.WriteMessageBegin("CreateManualAnnotation", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("CreateManualAnnotation", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorUpdateManualAnnotation struct { + handler TraceService +} + +func (p *traceServiceProcessorUpdateManualAnnotation) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceUpdateManualAnnotationArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := 
thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("UpdateManualAnnotation", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceUpdateManualAnnotationResult{} + var retval *UpdateManualAnnotationResponse + if retval, err2 = p.handler.UpdateManualAnnotation(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing UpdateManualAnnotation: "+err2.Error()) + oprot.WriteMessageBegin("UpdateManualAnnotation", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("UpdateManualAnnotation", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorDeleteManualAnnotation struct { + handler TraceService +} + +func (p *traceServiceProcessorDeleteManualAnnotation) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceDeleteManualAnnotationArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("DeleteManualAnnotation", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceDeleteManualAnnotationResult{} + var retval *DeleteManualAnnotationResponse + if retval, err2 = p.handler.DeleteManualAnnotation(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing DeleteManualAnnotation: "+err2.Error()) + oprot.WriteMessageBegin("DeleteManualAnnotation", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("DeleteManualAnnotation", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorListAnnotations struct { + handler TraceService +} + +func (p *traceServiceProcessorListAnnotations) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceListAnnotationsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ListAnnotations", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceListAnnotationsResult{} + var retval *ListAnnotationsResponse + if retval, err2 = p.handler.ListAnnotations(ctx, args.Req); err2 != nil { + 
x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ListAnnotations: "+err2.Error()) + oprot.WriteMessageBegin("ListAnnotations", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("ListAnnotations", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorExportTracesToDataset struct { + handler TraceService +} + +func (p *traceServiceProcessorExportTracesToDataset) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceExportTracesToDatasetArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ExportTracesToDataset", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceExportTracesToDatasetResult{} + var retval *ExportTracesToDatasetResponse + if retval, err2 = p.handler.ExportTracesToDataset(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ExportTracesToDataset: "+err2.Error()) + oprot.WriteMessageBegin("ExportTracesToDataset", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("ExportTracesToDataset", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorPreviewExportTracesToDataset struct { + handler TraceService +} + +func (p *traceServiceProcessorPreviewExportTracesToDataset) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServicePreviewExportTracesToDatasetArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("PreviewExportTracesToDataset", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServicePreviewExportTracesToDatasetResult{} + var retval *PreviewExportTracesToDatasetResponse + if retval, err2 = p.handler.PreviewExportTracesToDataset(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing PreviewExportTracesToDataset: "+err2.Error()) + oprot.WriteMessageBegin("PreviewExportTracesToDataset", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = 
oprot.WriteMessageBegin("PreviewExportTracesToDataset", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorChangeEvaluatorScore struct { + handler TraceService +} + +func (p *traceServiceProcessorChangeEvaluatorScore) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceChangeEvaluatorScoreArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ChangeEvaluatorScore", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceChangeEvaluatorScoreResult{} + var retval *ChangeEvaluatorScoreResponse + if retval, err2 = p.handler.ChangeEvaluatorScore(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ChangeEvaluatorScore: "+err2.Error()) + oprot.WriteMessageBegin("ChangeEvaluatorScore", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("ChangeEvaluatorScore", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type traceServiceProcessorListAnnotationEvaluators struct { + handler TraceService +} + +func (p *traceServiceProcessorListAnnotationEvaluators) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceListAnnotationEvaluatorsArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ListAnnotationEvaluators", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceListAnnotationEvaluatorsResult{} + var retval *ListAnnotationEvaluatorsResponse + if retval, err2 = p.handler.ListAnnotationEvaluators(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ListAnnotationEvaluators: "+err2.Error()) + oprot.WriteMessageBegin("ListAnnotationEvaluators", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("ListAnnotationEvaluators", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type 
traceServiceProcessorExtractSpanInfo struct { + handler TraceService +} + +func (p *traceServiceProcessorExtractSpanInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := TraceServiceExtractSpanInfoArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("ExtractSpanInfo", thrift.EXCEPTION, seqId) x.Write(oprot) oprot.WriteMessageEnd() oprot.Flush(ctx) return false, err } - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceListSpansResult{} - var retval *ListSpansResponse - if retval, err2 = p.handler.ListSpans(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ListSpans: "+err2.Error()) - oprot.WriteMessageBegin("ListSpans", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("ListSpans", thrift.REPLY, seqId); err2 != nil { - err = err2 + iprot.ReadMessageEnd() + var err2 error + result := TraceServiceExtractSpanInfoResult{} + var retval *ExtractSpanInfoResponse + if retval, err2 = p.handler.ExtractSpanInfo(ctx, args.Req); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ExtractSpanInfo: "+err2.Error()) + oprot.WriteMessageBegin("ExtractSpanInfo", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return true, err2 + } else { + result.Success = retval + } + if err2 = oprot.WriteMessageBegin("ExtractSpanInfo", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + +type TraceServiceListSpansArgs struct { + Req *ListSpansRequest `thrift:"req,1" frugal:"1,default,ListSpansRequest"` +} + +func NewTraceServiceListSpansArgs() *TraceServiceListSpansArgs { + return &TraceServiceListSpansArgs{} +} + +func (p *TraceServiceListSpansArgs) InitDefault() { +} + +var TraceServiceListSpansArgs_Req_DEFAULT *ListSpansRequest + +func (p *TraceServiceListSpansArgs) GetReq() (v *ListSpansRequest) { + if p == nil { + return + } + if !p.IsSetReq() { + return TraceServiceListSpansArgs_Req_DEFAULT + } + return p.Req +} +func (p *TraceServiceListSpansArgs) SetReq(val *ListSpansRequest) { + p.Req = val +} + +var fieldIDToName_TraceServiceListSpansArgs = map[int16]string{ + 1: "req", +} + +func (p *TraceServiceListSpansArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TraceServiceListSpansArgs) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError + } + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + 
if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } + } + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError + } + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListSpansArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TraceServiceListSpansArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewListSpansRequest() + if err := _field.Read(iprot); err != nil { + return err + } + p.Req = _field + return nil +} + +func (p *TraceServiceListSpansArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ListSpans_args"); err != nil { + goto WriteStructBeginError + } + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TraceServiceListSpansArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError + } + if err := p.Req.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TraceServiceListSpansArgs) String() string { + if p == nil { + return "" } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("TraceServiceListSpansArgs(%+v)", *p) + +} + +func (p *TraceServiceListSpansArgs) DeepEqual(ano *TraceServiceListSpansArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Req) { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + return true +} + +func (p *TraceServiceListSpansArgs) Field1DeepEqual(src *ListSpansRequest) bool { + + if !p.Req.DeepEqual(src) { + return false } - if err != nil { + return true +} + +type TraceServiceListSpansResult struct { + Success *ListSpansResponse `thrift:"success,0,optional" frugal:"0,optional,ListSpansResponse"` +} + +func NewTraceServiceListSpansResult() *TraceServiceListSpansResult { + 
return &TraceServiceListSpansResult{} +} + +func (p *TraceServiceListSpansResult) InitDefault() { +} + +var TraceServiceListSpansResult_Success_DEFAULT *ListSpansResponse + +func (p *TraceServiceListSpansResult) GetSuccess() (v *ListSpansResponse) { + if p == nil { return } - return true, err + if !p.IsSetSuccess() { + return TraceServiceListSpansResult_Success_DEFAULT + } + return p.Success +} +func (p *TraceServiceListSpansResult) SetSuccess(x interface{}) { + p.Success = x.(*ListSpansResponse) } -type traceServiceProcessorGetTrace struct { - handler TraceService +var fieldIDToName_TraceServiceListSpansResult = map[int16]string{ + 0: "success", } -func (p *traceServiceProcessorGetTrace) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceGetTraceArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("GetTrace", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *TraceServiceListSpansResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TraceServiceListSpansResult) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceGetTraceResult{} - var retval *GetTraceResponse - if retval, err2 = p.handler.GetTrace(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetTrace: "+err2.Error()) - oprot.WriteMessageBegin("GetTrace", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err2 = oprot.WriteMessageBegin("GetTrace", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListSpansResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TraceServiceListSpansResult) ReadField0(iprot thrift.TProtocol) error { + _field := 
NewListSpansResponse() + if err := _field.Read(iprot); err != nil { + return err } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + p.Success = _field + return nil +} + +func (p *TraceServiceListSpansResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("ListSpans_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err != nil { - return + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - return true, err -} - -type traceServiceProcessorBatchGetTracesAdvanceInfo struct { - handler TraceService + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *traceServiceProcessorBatchGetTracesAdvanceInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceBatchGetTracesAdvanceInfoArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("BatchGetTracesAdvanceInfo", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *TraceServiceListSpansResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceBatchGetTracesAdvanceInfoResult{} - var retval *BatchGetTracesAdvanceInfoResponse - if retval, err2 = p.handler.BatchGetTracesAdvanceInfo(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing BatchGetTracesAdvanceInfo: "+err2.Error()) - oprot.WriteMessageBegin("BatchGetTracesAdvanceInfo", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("BatchGetTracesAdvanceInfo", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 +func (p *TraceServiceListSpansResult) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("TraceServiceListSpansResult(%+v)", *p) + +} + +func (p *TraceServiceListSpansResult) DeepEqual(ano *TraceServiceListSpansResult) bool { + if p == ano { + return true + } else if p 
== nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *TraceServiceListSpansResult) Field0DeepEqual(src *ListSpansResponse) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type traceServiceProcessorIngestTracesInner struct { - handler TraceService +type TraceServiceGetTraceArgs struct { + Req *GetTraceRequest `thrift:"req,1" frugal:"1,default,GetTraceRequest"` } -func (p *traceServiceProcessorIngestTracesInner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceIngestTracesInnerArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("IngestTracesInner", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewTraceServiceGetTraceArgs() *TraceServiceGetTraceArgs { + return &TraceServiceGetTraceArgs{} +} - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceIngestTracesInnerResult{} - var retval *IngestTracesResponse - if retval, err2 = p.handler.IngestTracesInner(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing IngestTracesInner: "+err2.Error()) - oprot.WriteMessageBegin("IngestTracesInner", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("IngestTracesInner", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { +func (p *TraceServiceGetTraceArgs) InitDefault() { +} + +var TraceServiceGetTraceArgs_Req_DEFAULT *GetTraceRequest + +func (p *TraceServiceGetTraceArgs) GetReq() (v *GetTraceRequest) { + if p == nil { return } - return true, err + if !p.IsSetReq() { + return TraceServiceGetTraceArgs_Req_DEFAULT + } + return p.Req +} +func (p *TraceServiceGetTraceArgs) SetReq(val *GetTraceRequest) { + p.Req = val } -type traceServiceProcessorGetTracesMetaInfo struct { - handler TraceService +var fieldIDToName_TraceServiceGetTraceArgs = map[int16]string{ + 1: "req", } -func (p *traceServiceProcessorGetTracesMetaInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceGetTracesMetaInfoArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("GetTracesMetaInfo", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *TraceServiceGetTraceArgs) IsSetReq() bool { + return p.Req != nil +} + +func (p *TraceServiceGetTraceArgs) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceGetTracesMetaInfoResult{} - var retval 
*GetTracesMetaInfoResponse - if retval, err2 = p.handler.GetTracesMetaInfo(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing GetTracesMetaInfo: "+err2.Error()) - oprot.WriteMessageBegin("GetTracesMetaInfo", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err2 = oprot.WriteMessageBegin("GetTracesMetaInfo", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTraceArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) +} + +func (p *TraceServiceGetTraceArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewGetTraceRequest() + if err := _field.Read(iprot); err != nil { + return err } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + p.Req = _field + return nil +} + +func (p *TraceServiceGetTraceArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("GetTrace_args"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err != nil { - return + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - return true, err -} - -type traceServiceProcessorCreateView struct { - handler TraceService + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *traceServiceProcessorCreateView) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceCreateViewArgs{} - if err = args.Read(iprot); err != nil { - 
iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("CreateView", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *TraceServiceGetTraceArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceCreateViewResult{} - var retval *CreateViewResponse - if retval, err2 = p.handler.CreateView(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing CreateView: "+err2.Error()) - oprot.WriteMessageBegin("CreateView", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval + if err := p.Req.Write(oprot); err != nil { + return err } - if err2 = oprot.WriteMessageBegin("CreateView", thrift.REPLY, seqId); err2 != nil { - err = err2 + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) +} + +func (p *TraceServiceGetTraceArgs) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("TraceServiceGetTraceArgs(%+v)", *p) + +} + +func (p *TraceServiceGetTraceArgs) DeepEqual(ano *TraceServiceGetTraceArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Req) { + return false } - if err != nil { - return + return true +} + +func (p *TraceServiceGetTraceArgs) Field1DeepEqual(src *GetTraceRequest) bool { + + if !p.Req.DeepEqual(src) { + return false } - return true, err + return true } -type traceServiceProcessorUpdateView struct { - handler TraceService +type TraceServiceGetTraceResult struct { + Success *GetTraceResponse `thrift:"success,0,optional" frugal:"0,optional,GetTraceResponse"` } -func (p *traceServiceProcessorUpdateView) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceUpdateViewArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("UpdateView", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewTraceServiceGetTraceResult() *TraceServiceGetTraceResult { + return &TraceServiceGetTraceResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceUpdateViewResult{} - var retval *UpdateViewResponse - if retval, err2 = p.handler.UpdateView(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing UpdateView: "+err2.Error()) - oprot.WriteMessageBegin("UpdateView", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("UpdateView", thrift.REPLY, 
seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { +func (p *TraceServiceGetTraceResult) InitDefault() { +} + +var TraceServiceGetTraceResult_Success_DEFAULT *GetTraceResponse + +func (p *TraceServiceGetTraceResult) GetSuccess() (v *GetTraceResponse) { + if p == nil { return } - return true, err + if !p.IsSetSuccess() { + return TraceServiceGetTraceResult_Success_DEFAULT + } + return p.Success +} +func (p *TraceServiceGetTraceResult) SetSuccess(x interface{}) { + p.Success = x.(*GetTraceResponse) } -type traceServiceProcessorDeleteView struct { - handler TraceService +var fieldIDToName_TraceServiceGetTraceResult = map[int16]string{ + 0: "success", } -func (p *traceServiceProcessorDeleteView) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceDeleteViewArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("DeleteView", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func (p *TraceServiceGetTraceResult) IsSetSuccess() bool { + return p.Success != nil +} - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceDeleteViewResult{} - var retval *DeleteViewResponse - if retval, err2 = p.handler.DeleteView(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing DeleteView: "+err2.Error()) - oprot.WriteMessageBegin("DeleteView", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("DeleteView", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *TraceServiceGetTraceResult) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type traceServiceProcessorListViews struct { - handler TraceService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T 
read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTraceResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *traceServiceProcessorListViews) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceListViewsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("ListViews", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *TraceServiceGetTraceResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewGetTraceResponse() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceListViewsResult{} - var retval *ListViewsResponse - if retval, err2 = p.handler.ListViews(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ListViews: "+err2.Error()) - oprot.WriteMessageBegin("ListViews", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval +func (p *TraceServiceGetTraceResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("GetTrace_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageBegin("ListViews", thrift.REPLY, seqId); err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto WriteFieldError + } } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TraceServiceGetTraceResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } + } + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *TraceServiceGetTraceResult) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("TraceServiceGetTraceResult(%+v)", *p) + +} + +func (p *TraceServiceGetTraceResult) DeepEqual(ano 
*TraceServiceGetTraceResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *TraceServiceGetTraceResult) Field0DeepEqual(src *GetTraceResponse) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type traceServiceProcessorCreateManualAnnotation struct { - handler TraceService +type TraceServiceBatchGetTracesAdvanceInfoArgs struct { + Req *BatchGetTracesAdvanceInfoRequest `thrift:"req,1" frugal:"1,default,BatchGetTracesAdvanceInfoRequest"` } -func (p *traceServiceProcessorCreateManualAnnotation) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceCreateManualAnnotationArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("CreateManualAnnotation", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewTraceServiceBatchGetTracesAdvanceInfoArgs() *TraceServiceBatchGetTracesAdvanceInfoArgs { + return &TraceServiceBatchGetTracesAdvanceInfoArgs{} +} - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceCreateManualAnnotationResult{} - var retval *CreateManualAnnotationResponse - if retval, err2 = p.handler.CreateManualAnnotation(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing CreateManualAnnotation: "+err2.Error()) - oprot.WriteMessageBegin("CreateManualAnnotation", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("CreateManualAnnotation", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) InitDefault() { +} + +var TraceServiceBatchGetTracesAdvanceInfoArgs_Req_DEFAULT *BatchGetTracesAdvanceInfoRequest + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) GetReq() (v *BatchGetTracesAdvanceInfoRequest) { + if p == nil { return } - return true, err + if !p.IsSetReq() { + return TraceServiceBatchGetTracesAdvanceInfoArgs_Req_DEFAULT + } + return p.Req +} +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) SetReq(val *BatchGetTracesAdvanceInfoRequest) { + p.Req = val } -type traceServiceProcessorUpdateManualAnnotation struct { - handler TraceService +var fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoArgs = map[int16]string{ + 1: "req", } -func (p *traceServiceProcessorUpdateManualAnnotation) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceUpdateManualAnnotationArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("UpdateManualAnnotation", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } 
+func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) IsSetReq() bool { + return p.Req != nil +} - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceUpdateManualAnnotationResult{} - var retval *UpdateManualAnnotationResponse - if retval, err2 = p.handler.UpdateManualAnnotation(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing UpdateManualAnnotation: "+err2.Error()) - oprot.WriteMessageBegin("UpdateManualAnnotation", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("UpdateManualAnnotation", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField1(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type traceServiceProcessorDeleteManualAnnotation struct { - handler TraceService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoArgs[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *traceServiceProcessorDeleteManualAnnotation) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceDeleteManualAnnotationArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("DeleteManualAnnotation", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewBatchGetTracesAdvanceInfoRequest() + if err := _field.Read(iprot); err != nil { + return err } + p.Req = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := 
TraceServiceDeleteManualAnnotationResult{} - var retval *DeleteManualAnnotationResponse - if retval, err2 = p.handler.DeleteManualAnnotation(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing DeleteManualAnnotation: "+err2.Error()) - oprot.WriteMessageBegin("DeleteManualAnnotation", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("BatchGetTracesAdvanceInfo_args"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageBegin("DeleteManualAnnotation", thrift.REPLY, seqId); err2 != nil { - err = err2 + if p != nil { + if err = p.writeField1(oprot); err != nil { + fieldId = 1 + goto WriteFieldError + } } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { + goto WriteFieldBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if err := p.Req.Write(oprot); err != nil { + return err } - if err != nil { - return + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError } - return true, err -} - -type traceServiceProcessorListAnnotations struct { - handler TraceService + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *traceServiceProcessorListAnnotations) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceListAnnotationsArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("ListAnnotations", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) String() string { + if p == nil { + return "" } + return fmt.Sprintf("TraceServiceBatchGetTracesAdvanceInfoArgs(%+v)", *p) - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceListAnnotationsResult{} - var retval *ListAnnotationsResponse - if retval, err2 = p.handler.ListAnnotations(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ListAnnotations: "+err2.Error()) - oprot.WriteMessageBegin("ListAnnotations", thrift.EXCEPTION, seqId) - x.Write(oprot) - 
oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("ListAnnotations", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) DeepEqual(ano *TraceServiceBatchGetTracesAdvanceInfoArgs) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field1DeepEqual(ano.Req) { + return false } - if err != nil { - return + return true +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) Field1DeepEqual(src *BatchGetTracesAdvanceInfoRequest) bool { + + if !p.Req.DeepEqual(src) { + return false } - return true, err + return true } -type traceServiceProcessorExportTracesToDataset struct { - handler TraceService +type TraceServiceBatchGetTracesAdvanceInfoResult struct { + Success *BatchGetTracesAdvanceInfoResponse `thrift:"success,0,optional" frugal:"0,optional,BatchGetTracesAdvanceInfoResponse"` } -func (p *traceServiceProcessorExportTracesToDataset) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServiceExportTracesToDatasetArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("ExportTracesToDataset", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } +func NewTraceServiceBatchGetTracesAdvanceInfoResult() *TraceServiceBatchGetTracesAdvanceInfoResult { + return &TraceServiceBatchGetTracesAdvanceInfoResult{} +} - iprot.ReadMessageEnd() - var err2 error - result := TraceServiceExportTracesToDatasetResult{} - var retval *ExportTracesToDatasetResponse - if retval, err2 = p.handler.ExportTracesToDataset(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ExportTracesToDataset: "+err2.Error()) - oprot.WriteMessageBegin("ExportTracesToDataset", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("ExportTracesToDataset", thrift.REPLY, seqId); err2 != nil { - err = err2 +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) InitDefault() { +} + +var TraceServiceBatchGetTracesAdvanceInfoResult_Success_DEFAULT *BatchGetTracesAdvanceInfoResponse + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) GetSuccess() (v *BatchGetTracesAdvanceInfoResponse) { + if p == nil { + return } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + if !p.IsSetSuccess() { + return TraceServiceBatchGetTracesAdvanceInfoResult_Success_DEFAULT } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return p.Success +} +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) SetSuccess(x interface{}) { + p.Success = x.(*BatchGetTracesAdvanceInfoResponse) +} + +var fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoResult = map[int16]string{ + 0: "success", +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) IsSetSuccess() bool { + return p.Success != nil +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) 
Read(iprot thrift.TProtocol) (err error) { + var fieldTypeId thrift.TType + var fieldId int16 + + if _, err = iprot.ReadStructBegin(); err != nil { + goto ReadStructBeginError } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + + for { + _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin() + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + if err = p.ReadField0(iprot); err != nil { + goto ReadFieldError + } + } else if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + default: + if err = iprot.Skip(fieldTypeId); err != nil { + goto SkipFieldError + } + } + if err = iprot.ReadFieldEnd(); err != nil { + goto ReadFieldEndError + } } - if err != nil { - return + if err = iprot.ReadStructEnd(); err != nil { + goto ReadStructEndError } - return true, err -} -type traceServiceProcessorPreviewExportTracesToDataset struct { - handler TraceService + return nil +ReadStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err) +ReadFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoResult[fieldId]), err) +SkipFieldError: + return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) + +ReadFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err) +ReadStructEndError: + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *traceServiceProcessorPreviewExportTracesToDataset) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := TraceServicePreviewExportTracesToDatasetArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("PreviewExportTracesToDataset", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewBatchGetTracesAdvanceInfoResponse() + if err := _field.Read(iprot); err != nil { + return err } + p.Success = _field + return nil +} - iprot.ReadMessageEnd() - var err2 error - result := TraceServicePreviewExportTracesToDatasetResult{} - var retval *PreviewExportTracesToDatasetResponse - if retval, err2 = p.handler.PreviewExportTracesToDataset(ctx, args.Req); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing PreviewExportTracesToDataset: "+err2.Error()) - oprot.WriteMessageBegin("PreviewExportTracesToDataset", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) Write(oprot thrift.TProtocol) (err error) { + var fieldId int16 + if err = oprot.WriteStructBegin("BatchGetTracesAdvanceInfo_result"); err != nil { + goto WriteStructBeginError } - if err2 = oprot.WriteMessageBegin("PreviewExportTracesToDataset", thrift.REPLY, seqId); err2 != nil { - err = err2 + if p != nil { + if err = p.writeField0(oprot); err != nil { + fieldId = 0 + goto 
WriteFieldError + } + } + if err = oprot.WriteFieldStop(); err != nil { + goto WriteFieldStopError + } + if err = oprot.WriteStructEnd(); err != nil { + goto WriteStructEndError + } + return nil +WriteStructBeginError: + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) +WriteFieldError: + return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err) +WriteFieldStopError: + return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err) +WriteStructEndError: + return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) writeField0(oprot thrift.TProtocol) (err error) { + if p.IsSetSuccess() { + if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { + goto WriteFieldBeginError + } + if err := p.Success.Write(oprot); err != nil { + return err + } + if err = oprot.WriteFieldEnd(); err != nil { + goto WriteFieldEndError + } } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 + return nil +WriteFieldBeginError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err) +WriteFieldEndError: + return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) String() string { + if p == nil { + return "" } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 + return fmt.Sprintf("TraceServiceBatchGetTracesAdvanceInfoResult(%+v)", *p) + +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) DeepEqual(ano *TraceServiceBatchGetTracesAdvanceInfoResult) bool { + if p == ano { + return true + } else if p == nil || ano == nil { + return false } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 + if !p.Field0DeepEqual(ano.Success) { + return false } - if err != nil { - return + return true +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) Field0DeepEqual(src *BatchGetTracesAdvanceInfoResponse) bool { + + if !p.Success.DeepEqual(src) { + return false } - return true, err + return true } -type TraceServiceListSpansArgs struct { - Req *ListSpansRequest `thrift:"req,1" frugal:"1,default,ListSpansRequest"` +type TraceServiceIngestTracesInnerArgs struct { + Req *IngestTracesRequest `thrift:"req,1" frugal:"1,default,IngestTracesRequest"` } -func NewTraceServiceListSpansArgs() *TraceServiceListSpansArgs { - return &TraceServiceListSpansArgs{} +func NewTraceServiceIngestTracesInnerArgs() *TraceServiceIngestTracesInnerArgs { + return &TraceServiceIngestTracesInnerArgs{} } -func (p *TraceServiceListSpansArgs) InitDefault() { +func (p *TraceServiceIngestTracesInnerArgs) InitDefault() { } -var TraceServiceListSpansArgs_Req_DEFAULT *ListSpansRequest +var TraceServiceIngestTracesInnerArgs_Req_DEFAULT *IngestTracesRequest -func (p *TraceServiceListSpansArgs) GetReq() (v *ListSpansRequest) { +func (p *TraceServiceIngestTracesInnerArgs) GetReq() (v *IngestTracesRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceListSpansArgs_Req_DEFAULT + return TraceServiceIngestTracesInnerArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceListSpansArgs) SetReq(val *ListSpansRequest) { +func (p *TraceServiceIngestTracesInnerArgs) SetReq(val *IngestTracesRequest) { p.Req = val } -var fieldIDToName_TraceServiceListSpansArgs = map[int16]string{ +var fieldIDToName_TraceServiceIngestTracesInnerArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceListSpansArgs) 
IsSetReq() bool { +func (p *TraceServiceIngestTracesInnerArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceListSpansArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceIngestTracesInnerArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -15612,7 +19524,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListSpansArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceIngestTracesInnerArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15622,8 +19534,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceListSpansArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewListSpansRequest() +func (p *TraceServiceIngestTracesInnerArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewIngestTracesRequest() if err := _field.Read(iprot); err != nil { return err } @@ -15631,9 +19543,9 @@ func (p *TraceServiceListSpansArgs) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *TraceServiceListSpansArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceIngestTracesInnerArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ListSpans_args"); err != nil { + if err = oprot.WriteStructBegin("IngestTracesInner_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -15659,7 +19571,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceListSpansArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceIngestTracesInnerArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -15676,15 +19588,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceListSpansArgs) String() string { +func (p *TraceServiceIngestTracesInnerArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceListSpansArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceIngestTracesInnerArgs(%+v)", *p) } -func (p *TraceServiceListSpansArgs) DeepEqual(ano *TraceServiceListSpansArgs) bool { +func (p *TraceServiceIngestTracesInnerArgs) DeepEqual(ano *TraceServiceIngestTracesInnerArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -15696,7 +19608,7 @@ func (p *TraceServiceListSpansArgs) DeepEqual(ano *TraceServiceListSpansArgs) bo return true } -func (p *TraceServiceListSpansArgs) Field1DeepEqual(src *ListSpansRequest) bool { +func (p *TraceServiceIngestTracesInnerArgs) Field1DeepEqual(src *IngestTracesRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -15704,41 +19616,41 @@ func (p *TraceServiceListSpansArgs) Field1DeepEqual(src *ListSpansRequest) bool return true } -type TraceServiceListSpansResult struct { - Success *ListSpansResponse `thrift:"success,0,optional" frugal:"0,optional,ListSpansResponse"` +type TraceServiceIngestTracesInnerResult struct { + Success *IngestTracesResponse 
`thrift:"success,0,optional" frugal:"0,optional,IngestTracesResponse"` } -func NewTraceServiceListSpansResult() *TraceServiceListSpansResult { - return &TraceServiceListSpansResult{} +func NewTraceServiceIngestTracesInnerResult() *TraceServiceIngestTracesInnerResult { + return &TraceServiceIngestTracesInnerResult{} } -func (p *TraceServiceListSpansResult) InitDefault() { +func (p *TraceServiceIngestTracesInnerResult) InitDefault() { } -var TraceServiceListSpansResult_Success_DEFAULT *ListSpansResponse +var TraceServiceIngestTracesInnerResult_Success_DEFAULT *IngestTracesResponse -func (p *TraceServiceListSpansResult) GetSuccess() (v *ListSpansResponse) { +func (p *TraceServiceIngestTracesInnerResult) GetSuccess() (v *IngestTracesResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceListSpansResult_Success_DEFAULT + return TraceServiceIngestTracesInnerResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceListSpansResult) SetSuccess(x interface{}) { - p.Success = x.(*ListSpansResponse) +func (p *TraceServiceIngestTracesInnerResult) SetSuccess(x interface{}) { + p.Success = x.(*IngestTracesResponse) } -var fieldIDToName_TraceServiceListSpansResult = map[int16]string{ +var fieldIDToName_TraceServiceIngestTracesInnerResult = map[int16]string{ 0: "success", } -func (p *TraceServiceListSpansResult) IsSetSuccess() bool { +func (p *TraceServiceIngestTracesInnerResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceListSpansResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceIngestTracesInnerResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -15783,7 +19695,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListSpansResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceIngestTracesInnerResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15793,8 +19705,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceListSpansResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewListSpansResponse() +func (p *TraceServiceIngestTracesInnerResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewIngestTracesResponse() if err := _field.Read(iprot); err != nil { return err } @@ -15802,9 +19714,9 @@ func (p *TraceServiceListSpansResult) ReadField0(iprot thrift.TProtocol) error { return nil } -func (p *TraceServiceListSpansResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceIngestTracesInnerResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ListSpans_result"); err != nil { + if err = oprot.WriteStructBegin("IngestTracesInner_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -15830,7 +19742,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceListSpansResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceIngestTracesInnerResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = 
oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -15849,15 +19761,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceListSpansResult) String() string { +func (p *TraceServiceIngestTracesInnerResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceListSpansResult(%+v)", *p) + return fmt.Sprintf("TraceServiceIngestTracesInnerResult(%+v)", *p) } -func (p *TraceServiceListSpansResult) DeepEqual(ano *TraceServiceListSpansResult) bool { +func (p *TraceServiceIngestTracesInnerResult) DeepEqual(ano *TraceServiceIngestTracesInnerResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -15869,7 +19781,7 @@ func (p *TraceServiceListSpansResult) DeepEqual(ano *TraceServiceListSpansResult return true } -func (p *TraceServiceListSpansResult) Field0DeepEqual(src *ListSpansResponse) bool { +func (p *TraceServiceIngestTracesInnerResult) Field0DeepEqual(src *IngestTracesResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -15877,41 +19789,41 @@ func (p *TraceServiceListSpansResult) Field0DeepEqual(src *ListSpansResponse) bo return true } -type TraceServiceGetTraceArgs struct { - Req *GetTraceRequest `thrift:"req,1" frugal:"1,default,GetTraceRequest"` +type TraceServiceGetTracesMetaInfoArgs struct { + Req *GetTracesMetaInfoRequest `thrift:"req,1" frugal:"1,default,GetTracesMetaInfoRequest"` } -func NewTraceServiceGetTraceArgs() *TraceServiceGetTraceArgs { - return &TraceServiceGetTraceArgs{} +func NewTraceServiceGetTracesMetaInfoArgs() *TraceServiceGetTracesMetaInfoArgs { + return &TraceServiceGetTracesMetaInfoArgs{} } -func (p *TraceServiceGetTraceArgs) InitDefault() { +func (p *TraceServiceGetTracesMetaInfoArgs) InitDefault() { } -var TraceServiceGetTraceArgs_Req_DEFAULT *GetTraceRequest +var TraceServiceGetTracesMetaInfoArgs_Req_DEFAULT *GetTracesMetaInfoRequest -func (p *TraceServiceGetTraceArgs) GetReq() (v *GetTraceRequest) { +func (p *TraceServiceGetTracesMetaInfoArgs) GetReq() (v *GetTracesMetaInfoRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceGetTraceArgs_Req_DEFAULT + return TraceServiceGetTracesMetaInfoArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceGetTraceArgs) SetReq(val *GetTraceRequest) { +func (p *TraceServiceGetTracesMetaInfoArgs) SetReq(val *GetTracesMetaInfoRequest) { p.Req = val } -var fieldIDToName_TraceServiceGetTraceArgs = map[int16]string{ +var fieldIDToName_TraceServiceGetTracesMetaInfoArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceGetTraceArgs) IsSetReq() bool { +func (p *TraceServiceGetTracesMetaInfoArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceGetTraceArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceGetTracesMetaInfoArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -15956,7 +19868,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTraceArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTracesMetaInfoArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -15966,8 +19878,8 
@@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceGetTraceArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewGetTraceRequest() +func (p *TraceServiceGetTracesMetaInfoArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewGetTracesMetaInfoRequest() if err := _field.Read(iprot); err != nil { return err } @@ -15975,9 +19887,9 @@ func (p *TraceServiceGetTraceArgs) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *TraceServiceGetTraceArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceGetTracesMetaInfoArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("GetTrace_args"); err != nil { + if err = oprot.WriteStructBegin("GetTracesMetaInfo_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -16003,7 +19915,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceGetTraceArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceGetTracesMetaInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -16020,15 +19932,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceGetTraceArgs) String() string { +func (p *TraceServiceGetTracesMetaInfoArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceGetTraceArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceGetTracesMetaInfoArgs(%+v)", *p) } -func (p *TraceServiceGetTraceArgs) DeepEqual(ano *TraceServiceGetTraceArgs) bool { +func (p *TraceServiceGetTracesMetaInfoArgs) DeepEqual(ano *TraceServiceGetTracesMetaInfoArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -16040,7 +19952,7 @@ func (p *TraceServiceGetTraceArgs) DeepEqual(ano *TraceServiceGetTraceArgs) bool return true } -func (p *TraceServiceGetTraceArgs) Field1DeepEqual(src *GetTraceRequest) bool { +func (p *TraceServiceGetTracesMetaInfoArgs) Field1DeepEqual(src *GetTracesMetaInfoRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -16048,41 +19960,41 @@ func (p *TraceServiceGetTraceArgs) Field1DeepEqual(src *GetTraceRequest) bool { return true } -type TraceServiceGetTraceResult struct { - Success *GetTraceResponse `thrift:"success,0,optional" frugal:"0,optional,GetTraceResponse"` +type TraceServiceGetTracesMetaInfoResult struct { + Success *GetTracesMetaInfoResponse `thrift:"success,0,optional" frugal:"0,optional,GetTracesMetaInfoResponse"` } -func NewTraceServiceGetTraceResult() *TraceServiceGetTraceResult { - return &TraceServiceGetTraceResult{} +func NewTraceServiceGetTracesMetaInfoResult() *TraceServiceGetTracesMetaInfoResult { + return &TraceServiceGetTracesMetaInfoResult{} } -func (p *TraceServiceGetTraceResult) InitDefault() { +func (p *TraceServiceGetTracesMetaInfoResult) InitDefault() { } -var TraceServiceGetTraceResult_Success_DEFAULT *GetTraceResponse +var TraceServiceGetTracesMetaInfoResult_Success_DEFAULT *GetTracesMetaInfoResponse -func (p *TraceServiceGetTraceResult) GetSuccess() (v *GetTraceResponse) { +func (p *TraceServiceGetTracesMetaInfoResult) GetSuccess() (v *GetTracesMetaInfoResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceGetTraceResult_Success_DEFAULT + return TraceServiceGetTracesMetaInfoResult_Success_DEFAULT } return p.Success 
} -func (p *TraceServiceGetTraceResult) SetSuccess(x interface{}) { - p.Success = x.(*GetTraceResponse) +func (p *TraceServiceGetTracesMetaInfoResult) SetSuccess(x interface{}) { + p.Success = x.(*GetTracesMetaInfoResponse) } -var fieldIDToName_TraceServiceGetTraceResult = map[int16]string{ +var fieldIDToName_TraceServiceGetTracesMetaInfoResult = map[int16]string{ 0: "success", } -func (p *TraceServiceGetTraceResult) IsSetSuccess() bool { +func (p *TraceServiceGetTracesMetaInfoResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceGetTraceResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceGetTracesMetaInfoResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -16127,7 +20039,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTraceResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTracesMetaInfoResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -16137,8 +20049,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceGetTraceResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewGetTraceResponse() +func (p *TraceServiceGetTracesMetaInfoResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewGetTracesMetaInfoResponse() if err := _field.Read(iprot); err != nil { return err } @@ -16146,9 +20058,9 @@ func (p *TraceServiceGetTraceResult) ReadField0(iprot thrift.TProtocol) error { return nil } -func (p *TraceServiceGetTraceResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceGetTracesMetaInfoResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("GetTrace_result"); err != nil { + if err = oprot.WriteStructBegin("GetTracesMetaInfo_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -16174,7 +20086,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceGetTraceResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceGetTracesMetaInfoResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -16193,15 +20105,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceGetTraceResult) String() string { +func (p *TraceServiceGetTracesMetaInfoResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceGetTraceResult(%+v)", *p) + return fmt.Sprintf("TraceServiceGetTracesMetaInfoResult(%+v)", *p) } -func (p *TraceServiceGetTraceResult) DeepEqual(ano *TraceServiceGetTraceResult) bool { +func (p *TraceServiceGetTracesMetaInfoResult) DeepEqual(ano *TraceServiceGetTracesMetaInfoResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -16213,7 +20125,7 @@ func (p *TraceServiceGetTraceResult) DeepEqual(ano *TraceServiceGetTraceResult) return true } -func (p *TraceServiceGetTraceResult) Field0DeepEqual(src 
*GetTraceResponse) bool { +func (p *TraceServiceGetTracesMetaInfoResult) Field0DeepEqual(src *GetTracesMetaInfoResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -16221,41 +20133,41 @@ func (p *TraceServiceGetTraceResult) Field0DeepEqual(src *GetTraceResponse) bool return true } -type TraceServiceBatchGetTracesAdvanceInfoArgs struct { - Req *BatchGetTracesAdvanceInfoRequest `thrift:"req,1" frugal:"1,default,BatchGetTracesAdvanceInfoRequest"` +type TraceServiceCreateViewArgs struct { + Req *CreateViewRequest `thrift:"req,1" frugal:"1,default,CreateViewRequest"` } -func NewTraceServiceBatchGetTracesAdvanceInfoArgs() *TraceServiceBatchGetTracesAdvanceInfoArgs { - return &TraceServiceBatchGetTracesAdvanceInfoArgs{} +func NewTraceServiceCreateViewArgs() *TraceServiceCreateViewArgs { + return &TraceServiceCreateViewArgs{} } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) InitDefault() { +func (p *TraceServiceCreateViewArgs) InitDefault() { } -var TraceServiceBatchGetTracesAdvanceInfoArgs_Req_DEFAULT *BatchGetTracesAdvanceInfoRequest +var TraceServiceCreateViewArgs_Req_DEFAULT *CreateViewRequest -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) GetReq() (v *BatchGetTracesAdvanceInfoRequest) { +func (p *TraceServiceCreateViewArgs) GetReq() (v *CreateViewRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceBatchGetTracesAdvanceInfoArgs_Req_DEFAULT + return TraceServiceCreateViewArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) SetReq(val *BatchGetTracesAdvanceInfoRequest) { +func (p *TraceServiceCreateViewArgs) SetReq(val *CreateViewRequest) { p.Req = val } -var fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoArgs = map[int16]string{ +var fieldIDToName_TraceServiceCreateViewArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) IsSetReq() bool { +func (p *TraceServiceCreateViewArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateViewArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -16300,7 +20212,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateViewArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -16309,9 +20221,9 @@ ReadFieldEndError: ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } - -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewBatchGetTracesAdvanceInfoRequest() + +func (p *TraceServiceCreateViewArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewCreateViewRequest() if err := _field.Read(iprot); err != nil { return err } @@ -16319,9 +20231,9 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) ReadField1(iprot thrift.TPro return nil } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateViewArgs) Write(oprot thrift.TProtocol) (err error) { var 
fieldId int16 - if err = oprot.WriteStructBegin("BatchGetTracesAdvanceInfo_args"); err != nil { + if err = oprot.WriteStructBegin("CreateView_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -16347,7 +20259,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateViewArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -16364,15 +20276,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) String() string { +func (p *TraceServiceCreateViewArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceBatchGetTracesAdvanceInfoArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceCreateViewArgs(%+v)", *p) } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) DeepEqual(ano *TraceServiceBatchGetTracesAdvanceInfoArgs) bool { +func (p *TraceServiceCreateViewArgs) DeepEqual(ano *TraceServiceCreateViewArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -16384,7 +20296,7 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) DeepEqual(ano *TraceServiceB return true } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) Field1DeepEqual(src *BatchGetTracesAdvanceInfoRequest) bool { +func (p *TraceServiceCreateViewArgs) Field1DeepEqual(src *CreateViewRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -16392,41 +20304,41 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) Field1DeepEqual(src *BatchGe return true } -type TraceServiceBatchGetTracesAdvanceInfoResult struct { - Success *BatchGetTracesAdvanceInfoResponse `thrift:"success,0,optional" frugal:"0,optional,BatchGetTracesAdvanceInfoResponse"` +type TraceServiceCreateViewResult struct { + Success *CreateViewResponse `thrift:"success,0,optional" frugal:"0,optional,CreateViewResponse"` } -func NewTraceServiceBatchGetTracesAdvanceInfoResult() *TraceServiceBatchGetTracesAdvanceInfoResult { - return &TraceServiceBatchGetTracesAdvanceInfoResult{} +func NewTraceServiceCreateViewResult() *TraceServiceCreateViewResult { + return &TraceServiceCreateViewResult{} } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) InitDefault() { +func (p *TraceServiceCreateViewResult) InitDefault() { } -var TraceServiceBatchGetTracesAdvanceInfoResult_Success_DEFAULT *BatchGetTracesAdvanceInfoResponse +var TraceServiceCreateViewResult_Success_DEFAULT *CreateViewResponse -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) GetSuccess() (v *BatchGetTracesAdvanceInfoResponse) { +func (p *TraceServiceCreateViewResult) GetSuccess() (v *CreateViewResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceBatchGetTracesAdvanceInfoResult_Success_DEFAULT + return TraceServiceCreateViewResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) SetSuccess(x interface{}) { - p.Success = x.(*BatchGetTracesAdvanceInfoResponse) +func (p *TraceServiceCreateViewResult) SetSuccess(x interface{}) { + p.Success = x.(*CreateViewResponse) } -var fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoResult = map[int16]string{ +var fieldIDToName_TraceServiceCreateViewResult = map[int16]string{ 0: "success", } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) IsSetSuccess() bool 
{ +func (p *TraceServiceCreateViewResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateViewResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -16471,7 +20383,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateViewResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -16481,8 +20393,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewBatchGetTracesAdvanceInfoResponse() +func (p *TraceServiceCreateViewResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewCreateViewResponse() if err := _field.Read(iprot); err != nil { return err } @@ -16490,9 +20402,9 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoResult) ReadField0(iprot thrift.TP return nil } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateViewResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("BatchGetTracesAdvanceInfo_result"); err != nil { + if err = oprot.WriteStructBegin("CreateView_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -16518,7 +20430,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateViewResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -16537,15 +20449,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) String() string { +func (p *TraceServiceCreateViewResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceBatchGetTracesAdvanceInfoResult(%+v)", *p) + return fmt.Sprintf("TraceServiceCreateViewResult(%+v)", *p) } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) DeepEqual(ano *TraceServiceBatchGetTracesAdvanceInfoResult) bool { +func (p *TraceServiceCreateViewResult) DeepEqual(ano *TraceServiceCreateViewResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -16557,7 +20469,7 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoResult) DeepEqual(ano *TraceServic return true } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) Field0DeepEqual(src *BatchGetTracesAdvanceInfoResponse) bool { +func (p *TraceServiceCreateViewResult) Field0DeepEqual(src *CreateViewResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -16565,41 +20477,41 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoResult) Field0DeepEqual(src *Batch return true } -type 
TraceServiceIngestTracesInnerArgs struct { - Req *IngestTracesRequest `thrift:"req,1" frugal:"1,default,IngestTracesRequest"` +type TraceServiceUpdateViewArgs struct { + Req *UpdateViewRequest `thrift:"req,1" frugal:"1,default,UpdateViewRequest"` } -func NewTraceServiceIngestTracesInnerArgs() *TraceServiceIngestTracesInnerArgs { - return &TraceServiceIngestTracesInnerArgs{} +func NewTraceServiceUpdateViewArgs() *TraceServiceUpdateViewArgs { + return &TraceServiceUpdateViewArgs{} } -func (p *TraceServiceIngestTracesInnerArgs) InitDefault() { +func (p *TraceServiceUpdateViewArgs) InitDefault() { } -var TraceServiceIngestTracesInnerArgs_Req_DEFAULT *IngestTracesRequest +var TraceServiceUpdateViewArgs_Req_DEFAULT *UpdateViewRequest -func (p *TraceServiceIngestTracesInnerArgs) GetReq() (v *IngestTracesRequest) { +func (p *TraceServiceUpdateViewArgs) GetReq() (v *UpdateViewRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceIngestTracesInnerArgs_Req_DEFAULT + return TraceServiceUpdateViewArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceIngestTracesInnerArgs) SetReq(val *IngestTracesRequest) { +func (p *TraceServiceUpdateViewArgs) SetReq(val *UpdateViewRequest) { p.Req = val } -var fieldIDToName_TraceServiceIngestTracesInnerArgs = map[int16]string{ +var fieldIDToName_TraceServiceUpdateViewArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceIngestTracesInnerArgs) IsSetReq() bool { +func (p *TraceServiceUpdateViewArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceIngestTracesInnerArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateViewArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -16644,7 +20556,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceIngestTracesInnerArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateViewArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -16654,8 +20566,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceIngestTracesInnerArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewIngestTracesRequest() +func (p *TraceServiceUpdateViewArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewUpdateViewRequest() if err := _field.Read(iprot); err != nil { return err } @@ -16663,9 +20575,9 @@ func (p *TraceServiceIngestTracesInnerArgs) ReadField1(iprot thrift.TProtocol) e return nil } -func (p *TraceServiceIngestTracesInnerArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateViewArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("IngestTracesInner_args"); err != nil { + if err = oprot.WriteStructBegin("UpdateView_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -16691,7 +20603,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceIngestTracesInnerArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateViewArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = 
oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -16708,15 +20620,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceIngestTracesInnerArgs) String() string { +func (p *TraceServiceUpdateViewArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceIngestTracesInnerArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceUpdateViewArgs(%+v)", *p) } -func (p *TraceServiceIngestTracesInnerArgs) DeepEqual(ano *TraceServiceIngestTracesInnerArgs) bool { +func (p *TraceServiceUpdateViewArgs) DeepEqual(ano *TraceServiceUpdateViewArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -16728,7 +20640,7 @@ func (p *TraceServiceIngestTracesInnerArgs) DeepEqual(ano *TraceServiceIngestTra return true } -func (p *TraceServiceIngestTracesInnerArgs) Field1DeepEqual(src *IngestTracesRequest) bool { +func (p *TraceServiceUpdateViewArgs) Field1DeepEqual(src *UpdateViewRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -16736,41 +20648,41 @@ func (p *TraceServiceIngestTracesInnerArgs) Field1DeepEqual(src *IngestTracesReq return true } -type TraceServiceIngestTracesInnerResult struct { - Success *IngestTracesResponse `thrift:"success,0,optional" frugal:"0,optional,IngestTracesResponse"` +type TraceServiceUpdateViewResult struct { + Success *UpdateViewResponse `thrift:"success,0,optional" frugal:"0,optional,UpdateViewResponse"` } -func NewTraceServiceIngestTracesInnerResult() *TraceServiceIngestTracesInnerResult { - return &TraceServiceIngestTracesInnerResult{} +func NewTraceServiceUpdateViewResult() *TraceServiceUpdateViewResult { + return &TraceServiceUpdateViewResult{} } -func (p *TraceServiceIngestTracesInnerResult) InitDefault() { +func (p *TraceServiceUpdateViewResult) InitDefault() { } -var TraceServiceIngestTracesInnerResult_Success_DEFAULT *IngestTracesResponse +var TraceServiceUpdateViewResult_Success_DEFAULT *UpdateViewResponse -func (p *TraceServiceIngestTracesInnerResult) GetSuccess() (v *IngestTracesResponse) { +func (p *TraceServiceUpdateViewResult) GetSuccess() (v *UpdateViewResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceIngestTracesInnerResult_Success_DEFAULT + return TraceServiceUpdateViewResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceIngestTracesInnerResult) SetSuccess(x interface{}) { - p.Success = x.(*IngestTracesResponse) +func (p *TraceServiceUpdateViewResult) SetSuccess(x interface{}) { + p.Success = x.(*UpdateViewResponse) } -var fieldIDToName_TraceServiceIngestTracesInnerResult = map[int16]string{ +var fieldIDToName_TraceServiceUpdateViewResult = map[int16]string{ 0: "success", } -func (p *TraceServiceIngestTracesInnerResult) IsSetSuccess() bool { +func (p *TraceServiceUpdateViewResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceIngestTracesInnerResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateViewResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -16815,7 +20727,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceIngestTracesInnerResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, 
fieldIDToName_TraceServiceUpdateViewResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -16825,8 +20737,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceIngestTracesInnerResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewIngestTracesResponse() +func (p *TraceServiceUpdateViewResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewUpdateViewResponse() if err := _field.Read(iprot); err != nil { return err } @@ -16834,9 +20746,9 @@ func (p *TraceServiceIngestTracesInnerResult) ReadField0(iprot thrift.TProtocol) return nil } -func (p *TraceServiceIngestTracesInnerResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateViewResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("IngestTracesInner_result"); err != nil { + if err = oprot.WriteStructBegin("UpdateView_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -16862,7 +20774,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceIngestTracesInnerResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateViewResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -16881,15 +20793,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceIngestTracesInnerResult) String() string { +func (p *TraceServiceUpdateViewResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceIngestTracesInnerResult(%+v)", *p) + return fmt.Sprintf("TraceServiceUpdateViewResult(%+v)", *p) } -func (p *TraceServiceIngestTracesInnerResult) DeepEqual(ano *TraceServiceIngestTracesInnerResult) bool { +func (p *TraceServiceUpdateViewResult) DeepEqual(ano *TraceServiceUpdateViewResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -16901,7 +20813,7 @@ func (p *TraceServiceIngestTracesInnerResult) DeepEqual(ano *TraceServiceIngestT return true } -func (p *TraceServiceIngestTracesInnerResult) Field0DeepEqual(src *IngestTracesResponse) bool { +func (p *TraceServiceUpdateViewResult) Field0DeepEqual(src *UpdateViewResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -16909,41 +20821,41 @@ func (p *TraceServiceIngestTracesInnerResult) Field0DeepEqual(src *IngestTracesR return true } -type TraceServiceGetTracesMetaInfoArgs struct { - Req *GetTracesMetaInfoRequest `thrift:"req,1" frugal:"1,default,GetTracesMetaInfoRequest"` +type TraceServiceDeleteViewArgs struct { + Req *DeleteViewRequest `thrift:"req,1" frugal:"1,default,DeleteViewRequest"` } -func NewTraceServiceGetTracesMetaInfoArgs() *TraceServiceGetTracesMetaInfoArgs { - return &TraceServiceGetTracesMetaInfoArgs{} +func NewTraceServiceDeleteViewArgs() *TraceServiceDeleteViewArgs { + return &TraceServiceDeleteViewArgs{} } -func (p *TraceServiceGetTracesMetaInfoArgs) InitDefault() { +func (p *TraceServiceDeleteViewArgs) InitDefault() { } -var TraceServiceGetTracesMetaInfoArgs_Req_DEFAULT *GetTracesMetaInfoRequest +var TraceServiceDeleteViewArgs_Req_DEFAULT *DeleteViewRequest -func (p *TraceServiceGetTracesMetaInfoArgs) GetReq() (v *GetTracesMetaInfoRequest) { +func (p 
*TraceServiceDeleteViewArgs) GetReq() (v *DeleteViewRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceGetTracesMetaInfoArgs_Req_DEFAULT + return TraceServiceDeleteViewArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceGetTracesMetaInfoArgs) SetReq(val *GetTracesMetaInfoRequest) { +func (p *TraceServiceDeleteViewArgs) SetReq(val *DeleteViewRequest) { p.Req = val } -var fieldIDToName_TraceServiceGetTracesMetaInfoArgs = map[int16]string{ +var fieldIDToName_TraceServiceDeleteViewArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceGetTracesMetaInfoArgs) IsSetReq() bool { +func (p *TraceServiceDeleteViewArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceGetTracesMetaInfoArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteViewArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -16988,7 +20900,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTracesMetaInfoArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteViewArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -16998,8 +20910,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceGetTracesMetaInfoArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewGetTracesMetaInfoRequest() +func (p *TraceServiceDeleteViewArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewDeleteViewRequest() if err := _field.Read(iprot); err != nil { return err } @@ -17007,9 +20919,9 @@ func (p *TraceServiceGetTracesMetaInfoArgs) ReadField1(iprot thrift.TProtocol) e return nil } -func (p *TraceServiceGetTracesMetaInfoArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteViewArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("GetTracesMetaInfo_args"); err != nil { + if err = oprot.WriteStructBegin("DeleteView_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17035,7 +20947,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceGetTracesMetaInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteViewArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -17052,15 +20964,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceGetTracesMetaInfoArgs) String() string { +func (p *TraceServiceDeleteViewArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceGetTracesMetaInfoArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceDeleteViewArgs(%+v)", *p) } -func (p *TraceServiceGetTracesMetaInfoArgs) DeepEqual(ano *TraceServiceGetTracesMetaInfoArgs) bool { +func (p *TraceServiceDeleteViewArgs) DeepEqual(ano *TraceServiceDeleteViewArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -17072,7 +20984,7 @@ func (p *TraceServiceGetTracesMetaInfoArgs) DeepEqual(ano 
*TraceServiceGetTraces return true } -func (p *TraceServiceGetTracesMetaInfoArgs) Field1DeepEqual(src *GetTracesMetaInfoRequest) bool { +func (p *TraceServiceDeleteViewArgs) Field1DeepEqual(src *DeleteViewRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -17080,41 +20992,41 @@ func (p *TraceServiceGetTracesMetaInfoArgs) Field1DeepEqual(src *GetTracesMetaIn return true } -type TraceServiceGetTracesMetaInfoResult struct { - Success *GetTracesMetaInfoResponse `thrift:"success,0,optional" frugal:"0,optional,GetTracesMetaInfoResponse"` +type TraceServiceDeleteViewResult struct { + Success *DeleteViewResponse `thrift:"success,0,optional" frugal:"0,optional,DeleteViewResponse"` } -func NewTraceServiceGetTracesMetaInfoResult() *TraceServiceGetTracesMetaInfoResult { - return &TraceServiceGetTracesMetaInfoResult{} +func NewTraceServiceDeleteViewResult() *TraceServiceDeleteViewResult { + return &TraceServiceDeleteViewResult{} } -func (p *TraceServiceGetTracesMetaInfoResult) InitDefault() { +func (p *TraceServiceDeleteViewResult) InitDefault() { } -var TraceServiceGetTracesMetaInfoResult_Success_DEFAULT *GetTracesMetaInfoResponse +var TraceServiceDeleteViewResult_Success_DEFAULT *DeleteViewResponse -func (p *TraceServiceGetTracesMetaInfoResult) GetSuccess() (v *GetTracesMetaInfoResponse) { +func (p *TraceServiceDeleteViewResult) GetSuccess() (v *DeleteViewResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceGetTracesMetaInfoResult_Success_DEFAULT + return TraceServiceDeleteViewResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceGetTracesMetaInfoResult) SetSuccess(x interface{}) { - p.Success = x.(*GetTracesMetaInfoResponse) +func (p *TraceServiceDeleteViewResult) SetSuccess(x interface{}) { + p.Success = x.(*DeleteViewResponse) } -var fieldIDToName_TraceServiceGetTracesMetaInfoResult = map[int16]string{ +var fieldIDToName_TraceServiceDeleteViewResult = map[int16]string{ 0: "success", } -func (p *TraceServiceGetTracesMetaInfoResult) IsSetSuccess() bool { +func (p *TraceServiceDeleteViewResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceGetTracesMetaInfoResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteViewResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -17159,7 +21071,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTracesMetaInfoResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteViewResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17169,8 +21081,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceGetTracesMetaInfoResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewGetTracesMetaInfoResponse() +func (p *TraceServiceDeleteViewResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewDeleteViewResponse() if err := _field.Read(iprot); err != nil { return err } @@ -17178,9 +21090,9 @@ func (p *TraceServiceGetTracesMetaInfoResult) ReadField0(iprot thrift.TProtocol) return nil } -func (p *TraceServiceGetTracesMetaInfoResult) Write(oprot thrift.TProtocol) 
(err error) { +func (p *TraceServiceDeleteViewResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("GetTracesMetaInfo_result"); err != nil { + if err = oprot.WriteStructBegin("DeleteView_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17206,7 +21118,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceGetTracesMetaInfoResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteViewResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -17225,15 +21137,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceGetTracesMetaInfoResult) String() string { +func (p *TraceServiceDeleteViewResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceGetTracesMetaInfoResult(%+v)", *p) + return fmt.Sprintf("TraceServiceDeleteViewResult(%+v)", *p) } -func (p *TraceServiceGetTracesMetaInfoResult) DeepEqual(ano *TraceServiceGetTracesMetaInfoResult) bool { +func (p *TraceServiceDeleteViewResult) DeepEqual(ano *TraceServiceDeleteViewResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -17245,7 +21157,7 @@ func (p *TraceServiceGetTracesMetaInfoResult) DeepEqual(ano *TraceServiceGetTrac return true } -func (p *TraceServiceGetTracesMetaInfoResult) Field0DeepEqual(src *GetTracesMetaInfoResponse) bool { +func (p *TraceServiceDeleteViewResult) Field0DeepEqual(src *DeleteViewResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -17253,41 +21165,41 @@ func (p *TraceServiceGetTracesMetaInfoResult) Field0DeepEqual(src *GetTracesMeta return true } -type TraceServiceCreateViewArgs struct { - Req *CreateViewRequest `thrift:"req,1" frugal:"1,default,CreateViewRequest"` +type TraceServiceListViewsArgs struct { + Req *ListViewsRequest `thrift:"req,1" frugal:"1,default,ListViewsRequest"` } -func NewTraceServiceCreateViewArgs() *TraceServiceCreateViewArgs { - return &TraceServiceCreateViewArgs{} +func NewTraceServiceListViewsArgs() *TraceServiceListViewsArgs { + return &TraceServiceListViewsArgs{} } -func (p *TraceServiceCreateViewArgs) InitDefault() { +func (p *TraceServiceListViewsArgs) InitDefault() { } -var TraceServiceCreateViewArgs_Req_DEFAULT *CreateViewRequest +var TraceServiceListViewsArgs_Req_DEFAULT *ListViewsRequest -func (p *TraceServiceCreateViewArgs) GetReq() (v *CreateViewRequest) { +func (p *TraceServiceListViewsArgs) GetReq() (v *ListViewsRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceCreateViewArgs_Req_DEFAULT + return TraceServiceListViewsArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceCreateViewArgs) SetReq(val *CreateViewRequest) { +func (p *TraceServiceListViewsArgs) SetReq(val *ListViewsRequest) { p.Req = val } -var fieldIDToName_TraceServiceCreateViewArgs = map[int16]string{ +var fieldIDToName_TraceServiceListViewsArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceCreateViewArgs) IsSetReq() bool { +func (p *TraceServiceListViewsArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceCreateViewArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceListViewsArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -17332,7 +21244,7 @@ ReadStructBeginError: 
ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateViewArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListViewsArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17342,8 +21254,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceCreateViewArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewCreateViewRequest() +func (p *TraceServiceListViewsArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewListViewsRequest() if err := _field.Read(iprot); err != nil { return err } @@ -17351,9 +21263,9 @@ func (p *TraceServiceCreateViewArgs) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *TraceServiceCreateViewArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListViewsArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("CreateView_args"); err != nil { + if err = oprot.WriteStructBegin("ListViews_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17379,7 +21291,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceCreateViewArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListViewsArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -17396,15 +21308,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceCreateViewArgs) String() string { +func (p *TraceServiceListViewsArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceCreateViewArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceListViewsArgs(%+v)", *p) } -func (p *TraceServiceCreateViewArgs) DeepEqual(ano *TraceServiceCreateViewArgs) bool { +func (p *TraceServiceListViewsArgs) DeepEqual(ano *TraceServiceListViewsArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -17416,7 +21328,7 @@ func (p *TraceServiceCreateViewArgs) DeepEqual(ano *TraceServiceCreateViewArgs) return true } -func (p *TraceServiceCreateViewArgs) Field1DeepEqual(src *CreateViewRequest) bool { +func (p *TraceServiceListViewsArgs) Field1DeepEqual(src *ListViewsRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -17424,41 +21336,41 @@ func (p *TraceServiceCreateViewArgs) Field1DeepEqual(src *CreateViewRequest) boo return true } -type TraceServiceCreateViewResult struct { - Success *CreateViewResponse `thrift:"success,0,optional" frugal:"0,optional,CreateViewResponse"` +type TraceServiceListViewsResult struct { + Success *ListViewsResponse `thrift:"success,0,optional" frugal:"0,optional,ListViewsResponse"` } -func NewTraceServiceCreateViewResult() *TraceServiceCreateViewResult { - return &TraceServiceCreateViewResult{} +func NewTraceServiceListViewsResult() *TraceServiceListViewsResult { + return &TraceServiceListViewsResult{} } -func (p *TraceServiceCreateViewResult) InitDefault() { +func (p *TraceServiceListViewsResult) InitDefault() { } -var 
TraceServiceCreateViewResult_Success_DEFAULT *CreateViewResponse +var TraceServiceListViewsResult_Success_DEFAULT *ListViewsResponse -func (p *TraceServiceCreateViewResult) GetSuccess() (v *CreateViewResponse) { +func (p *TraceServiceListViewsResult) GetSuccess() (v *ListViewsResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceCreateViewResult_Success_DEFAULT + return TraceServiceListViewsResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceCreateViewResult) SetSuccess(x interface{}) { - p.Success = x.(*CreateViewResponse) +func (p *TraceServiceListViewsResult) SetSuccess(x interface{}) { + p.Success = x.(*ListViewsResponse) } -var fieldIDToName_TraceServiceCreateViewResult = map[int16]string{ +var fieldIDToName_TraceServiceListViewsResult = map[int16]string{ 0: "success", } -func (p *TraceServiceCreateViewResult) IsSetSuccess() bool { +func (p *TraceServiceListViewsResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceCreateViewResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceListViewsResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -17503,7 +21415,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateViewResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListViewsResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17513,8 +21425,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceCreateViewResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewCreateViewResponse() +func (p *TraceServiceListViewsResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewListViewsResponse() if err := _field.Read(iprot); err != nil { return err } @@ -17522,9 +21434,9 @@ func (p *TraceServiceCreateViewResult) ReadField0(iprot thrift.TProtocol) error return nil } -func (p *TraceServiceCreateViewResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListViewsResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("CreateView_result"); err != nil { + if err = oprot.WriteStructBegin("ListViews_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17550,7 +21462,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceCreateViewResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListViewsResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -17569,15 +21481,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceCreateViewResult) String() string { +func (p *TraceServiceListViewsResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceCreateViewResult(%+v)", *p) + return fmt.Sprintf("TraceServiceListViewsResult(%+v)", *p) } -func (p *TraceServiceCreateViewResult) DeepEqual(ano 
*TraceServiceCreateViewResult) bool { +func (p *TraceServiceListViewsResult) DeepEqual(ano *TraceServiceListViewsResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -17589,7 +21501,7 @@ func (p *TraceServiceCreateViewResult) DeepEqual(ano *TraceServiceCreateViewResu return true } -func (p *TraceServiceCreateViewResult) Field0DeepEqual(src *CreateViewResponse) bool { +func (p *TraceServiceListViewsResult) Field0DeepEqual(src *ListViewsResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -17597,41 +21509,41 @@ func (p *TraceServiceCreateViewResult) Field0DeepEqual(src *CreateViewResponse) return true } -type TraceServiceUpdateViewArgs struct { - Req *UpdateViewRequest `thrift:"req,1" frugal:"1,default,UpdateViewRequest"` +type TraceServiceCreateManualAnnotationArgs struct { + Req *CreateManualAnnotationRequest `thrift:"req,1" frugal:"1,default,CreateManualAnnotationRequest"` } -func NewTraceServiceUpdateViewArgs() *TraceServiceUpdateViewArgs { - return &TraceServiceUpdateViewArgs{} +func NewTraceServiceCreateManualAnnotationArgs() *TraceServiceCreateManualAnnotationArgs { + return &TraceServiceCreateManualAnnotationArgs{} } -func (p *TraceServiceUpdateViewArgs) InitDefault() { +func (p *TraceServiceCreateManualAnnotationArgs) InitDefault() { } -var TraceServiceUpdateViewArgs_Req_DEFAULT *UpdateViewRequest +var TraceServiceCreateManualAnnotationArgs_Req_DEFAULT *CreateManualAnnotationRequest -func (p *TraceServiceUpdateViewArgs) GetReq() (v *UpdateViewRequest) { +func (p *TraceServiceCreateManualAnnotationArgs) GetReq() (v *CreateManualAnnotationRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceUpdateViewArgs_Req_DEFAULT + return TraceServiceCreateManualAnnotationArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceUpdateViewArgs) SetReq(val *UpdateViewRequest) { +func (p *TraceServiceCreateManualAnnotationArgs) SetReq(val *CreateManualAnnotationRequest) { p.Req = val } -var fieldIDToName_TraceServiceUpdateViewArgs = map[int16]string{ +var fieldIDToName_TraceServiceCreateManualAnnotationArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceUpdateViewArgs) IsSetReq() bool { +func (p *TraceServiceCreateManualAnnotationArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceUpdateViewArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateManualAnnotationArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -17676,7 +21588,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateViewArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateManualAnnotationArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17686,8 +21598,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceUpdateViewArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewUpdateViewRequest() +func (p *TraceServiceCreateManualAnnotationArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewCreateManualAnnotationRequest() if err := _field.Read(iprot); err != nil { return err } @@ -17695,9 +21607,9 @@ func (p 
*TraceServiceUpdateViewArgs) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *TraceServiceUpdateViewArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateManualAnnotationArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("UpdateView_args"); err != nil { + if err = oprot.WriteStructBegin("CreateManualAnnotation_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17723,7 +21635,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceUpdateViewArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateManualAnnotationArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -17740,15 +21652,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceUpdateViewArgs) String() string { +func (p *TraceServiceCreateManualAnnotationArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceUpdateViewArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceCreateManualAnnotationArgs(%+v)", *p) } -func (p *TraceServiceUpdateViewArgs) DeepEqual(ano *TraceServiceUpdateViewArgs) bool { +func (p *TraceServiceCreateManualAnnotationArgs) DeepEqual(ano *TraceServiceCreateManualAnnotationArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -17760,7 +21672,7 @@ func (p *TraceServiceUpdateViewArgs) DeepEqual(ano *TraceServiceUpdateViewArgs) return true } -func (p *TraceServiceUpdateViewArgs) Field1DeepEqual(src *UpdateViewRequest) bool { +func (p *TraceServiceCreateManualAnnotationArgs) Field1DeepEqual(src *CreateManualAnnotationRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -17768,41 +21680,41 @@ func (p *TraceServiceUpdateViewArgs) Field1DeepEqual(src *UpdateViewRequest) boo return true } -type TraceServiceUpdateViewResult struct { - Success *UpdateViewResponse `thrift:"success,0,optional" frugal:"0,optional,UpdateViewResponse"` +type TraceServiceCreateManualAnnotationResult struct { + Success *CreateManualAnnotationResponse `thrift:"success,0,optional" frugal:"0,optional,CreateManualAnnotationResponse"` } -func NewTraceServiceUpdateViewResult() *TraceServiceUpdateViewResult { - return &TraceServiceUpdateViewResult{} +func NewTraceServiceCreateManualAnnotationResult() *TraceServiceCreateManualAnnotationResult { + return &TraceServiceCreateManualAnnotationResult{} } -func (p *TraceServiceUpdateViewResult) InitDefault() { +func (p *TraceServiceCreateManualAnnotationResult) InitDefault() { } -var TraceServiceUpdateViewResult_Success_DEFAULT *UpdateViewResponse +var TraceServiceCreateManualAnnotationResult_Success_DEFAULT *CreateManualAnnotationResponse -func (p *TraceServiceUpdateViewResult) GetSuccess() (v *UpdateViewResponse) { +func (p *TraceServiceCreateManualAnnotationResult) GetSuccess() (v *CreateManualAnnotationResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceUpdateViewResult_Success_DEFAULT + return TraceServiceCreateManualAnnotationResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceUpdateViewResult) SetSuccess(x interface{}) { - p.Success = x.(*UpdateViewResponse) +func (p *TraceServiceCreateManualAnnotationResult) SetSuccess(x interface{}) { + p.Success = x.(*CreateManualAnnotationResponse) } -var 
fieldIDToName_TraceServiceUpdateViewResult = map[int16]string{ +var fieldIDToName_TraceServiceCreateManualAnnotationResult = map[int16]string{ 0: "success", } -func (p *TraceServiceUpdateViewResult) IsSetSuccess() bool { +func (p *TraceServiceCreateManualAnnotationResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceUpdateViewResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateManualAnnotationResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -17847,7 +21759,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateViewResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateManualAnnotationResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -17857,8 +21769,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceUpdateViewResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewUpdateViewResponse() +func (p *TraceServiceCreateManualAnnotationResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewCreateManualAnnotationResponse() if err := _field.Read(iprot); err != nil { return err } @@ -17866,9 +21778,9 @@ func (p *TraceServiceUpdateViewResult) ReadField0(iprot thrift.TProtocol) error return nil } -func (p *TraceServiceUpdateViewResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateManualAnnotationResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("UpdateView_result"); err != nil { + if err = oprot.WriteStructBegin("CreateManualAnnotation_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -17894,7 +21806,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceUpdateViewResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceCreateManualAnnotationResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -17913,15 +21825,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceUpdateViewResult) String() string { +func (p *TraceServiceCreateManualAnnotationResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceUpdateViewResult(%+v)", *p) + return fmt.Sprintf("TraceServiceCreateManualAnnotationResult(%+v)", *p) } -func (p *TraceServiceUpdateViewResult) DeepEqual(ano *TraceServiceUpdateViewResult) bool { +func (p *TraceServiceCreateManualAnnotationResult) DeepEqual(ano *TraceServiceCreateManualAnnotationResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -17933,7 +21845,7 @@ func (p *TraceServiceUpdateViewResult) DeepEqual(ano *TraceServiceUpdateViewResu return true } -func (p *TraceServiceUpdateViewResult) Field0DeepEqual(src *UpdateViewResponse) bool { +func (p *TraceServiceCreateManualAnnotationResult) Field0DeepEqual(src *CreateManualAnnotationResponse) bool { if 
!p.Success.DeepEqual(src) { return false @@ -17941,41 +21853,41 @@ func (p *TraceServiceUpdateViewResult) Field0DeepEqual(src *UpdateViewResponse) return true } -type TraceServiceDeleteViewArgs struct { - Req *DeleteViewRequest `thrift:"req,1" frugal:"1,default,DeleteViewRequest"` +type TraceServiceUpdateManualAnnotationArgs struct { + Req *UpdateManualAnnotationRequest `thrift:"req,1" frugal:"1,default,UpdateManualAnnotationRequest"` } -func NewTraceServiceDeleteViewArgs() *TraceServiceDeleteViewArgs { - return &TraceServiceDeleteViewArgs{} +func NewTraceServiceUpdateManualAnnotationArgs() *TraceServiceUpdateManualAnnotationArgs { + return &TraceServiceUpdateManualAnnotationArgs{} } -func (p *TraceServiceDeleteViewArgs) InitDefault() { +func (p *TraceServiceUpdateManualAnnotationArgs) InitDefault() { } -var TraceServiceDeleteViewArgs_Req_DEFAULT *DeleteViewRequest +var TraceServiceUpdateManualAnnotationArgs_Req_DEFAULT *UpdateManualAnnotationRequest -func (p *TraceServiceDeleteViewArgs) GetReq() (v *DeleteViewRequest) { +func (p *TraceServiceUpdateManualAnnotationArgs) GetReq() (v *UpdateManualAnnotationRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceDeleteViewArgs_Req_DEFAULT + return TraceServiceUpdateManualAnnotationArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceDeleteViewArgs) SetReq(val *DeleteViewRequest) { +func (p *TraceServiceUpdateManualAnnotationArgs) SetReq(val *UpdateManualAnnotationRequest) { p.Req = val } -var fieldIDToName_TraceServiceDeleteViewArgs = map[int16]string{ +var fieldIDToName_TraceServiceUpdateManualAnnotationArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceDeleteViewArgs) IsSetReq() bool { +func (p *TraceServiceUpdateManualAnnotationArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceDeleteViewArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateManualAnnotationArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -18020,7 +21932,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteViewArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateManualAnnotationArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18030,8 +21942,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceDeleteViewArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewDeleteViewRequest() +func (p *TraceServiceUpdateManualAnnotationArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewUpdateManualAnnotationRequest() if err := _field.Read(iprot); err != nil { return err } @@ -18039,9 +21951,9 @@ func (p *TraceServiceDeleteViewArgs) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *TraceServiceDeleteViewArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateManualAnnotationArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("DeleteView_args"); err != nil { + if err = oprot.WriteStructBegin("UpdateManualAnnotation_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18067,7 +21979,7 
@@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceDeleteViewArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateManualAnnotationArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -18084,15 +21996,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceDeleteViewArgs) String() string { +func (p *TraceServiceUpdateManualAnnotationArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceDeleteViewArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceUpdateManualAnnotationArgs(%+v)", *p) } -func (p *TraceServiceDeleteViewArgs) DeepEqual(ano *TraceServiceDeleteViewArgs) bool { +func (p *TraceServiceUpdateManualAnnotationArgs) DeepEqual(ano *TraceServiceUpdateManualAnnotationArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -18104,7 +22016,7 @@ func (p *TraceServiceDeleteViewArgs) DeepEqual(ano *TraceServiceDeleteViewArgs) return true } -func (p *TraceServiceDeleteViewArgs) Field1DeepEqual(src *DeleteViewRequest) bool { +func (p *TraceServiceUpdateManualAnnotationArgs) Field1DeepEqual(src *UpdateManualAnnotationRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -18112,41 +22024,41 @@ func (p *TraceServiceDeleteViewArgs) Field1DeepEqual(src *DeleteViewRequest) boo return true } -type TraceServiceDeleteViewResult struct { - Success *DeleteViewResponse `thrift:"success,0,optional" frugal:"0,optional,DeleteViewResponse"` +type TraceServiceUpdateManualAnnotationResult struct { + Success *UpdateManualAnnotationResponse `thrift:"success,0,optional" frugal:"0,optional,UpdateManualAnnotationResponse"` } -func NewTraceServiceDeleteViewResult() *TraceServiceDeleteViewResult { - return &TraceServiceDeleteViewResult{} +func NewTraceServiceUpdateManualAnnotationResult() *TraceServiceUpdateManualAnnotationResult { + return &TraceServiceUpdateManualAnnotationResult{} } -func (p *TraceServiceDeleteViewResult) InitDefault() { +func (p *TraceServiceUpdateManualAnnotationResult) InitDefault() { } -var TraceServiceDeleteViewResult_Success_DEFAULT *DeleteViewResponse +var TraceServiceUpdateManualAnnotationResult_Success_DEFAULT *UpdateManualAnnotationResponse -func (p *TraceServiceDeleteViewResult) GetSuccess() (v *DeleteViewResponse) { +func (p *TraceServiceUpdateManualAnnotationResult) GetSuccess() (v *UpdateManualAnnotationResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceDeleteViewResult_Success_DEFAULT + return TraceServiceUpdateManualAnnotationResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceDeleteViewResult) SetSuccess(x interface{}) { - p.Success = x.(*DeleteViewResponse) +func (p *TraceServiceUpdateManualAnnotationResult) SetSuccess(x interface{}) { + p.Success = x.(*UpdateManualAnnotationResponse) } -var fieldIDToName_TraceServiceDeleteViewResult = map[int16]string{ +var fieldIDToName_TraceServiceUpdateManualAnnotationResult = map[int16]string{ 0: "success", } -func (p *TraceServiceDeleteViewResult) IsSetSuccess() bool { +func (p *TraceServiceUpdateManualAnnotationResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceDeleteViewResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateManualAnnotationResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType 
var fieldId int16 @@ -18191,7 +22103,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteViewResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateManualAnnotationResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18201,8 +22113,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceDeleteViewResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewDeleteViewResponse() +func (p *TraceServiceUpdateManualAnnotationResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewUpdateManualAnnotationResponse() if err := _field.Read(iprot); err != nil { return err } @@ -18210,9 +22122,9 @@ func (p *TraceServiceDeleteViewResult) ReadField0(iprot thrift.TProtocol) error return nil } -func (p *TraceServiceDeleteViewResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateManualAnnotationResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("DeleteView_result"); err != nil { + if err = oprot.WriteStructBegin("UpdateManualAnnotation_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18238,7 +22150,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceDeleteViewResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceUpdateManualAnnotationResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -18257,15 +22169,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceDeleteViewResult) String() string { +func (p *TraceServiceUpdateManualAnnotationResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceDeleteViewResult(%+v)", *p) + return fmt.Sprintf("TraceServiceUpdateManualAnnotationResult(%+v)", *p) } -func (p *TraceServiceDeleteViewResult) DeepEqual(ano *TraceServiceDeleteViewResult) bool { +func (p *TraceServiceUpdateManualAnnotationResult) DeepEqual(ano *TraceServiceUpdateManualAnnotationResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -18277,7 +22189,7 @@ func (p *TraceServiceDeleteViewResult) DeepEqual(ano *TraceServiceDeleteViewResu return true } -func (p *TraceServiceDeleteViewResult) Field0DeepEqual(src *DeleteViewResponse) bool { +func (p *TraceServiceUpdateManualAnnotationResult) Field0DeepEqual(src *UpdateManualAnnotationResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -18285,41 +22197,41 @@ func (p *TraceServiceDeleteViewResult) Field0DeepEqual(src *DeleteViewResponse) return true } -type TraceServiceListViewsArgs struct { - Req *ListViewsRequest `thrift:"req,1" frugal:"1,default,ListViewsRequest"` +type TraceServiceDeleteManualAnnotationArgs struct { + Req *DeleteManualAnnotationRequest `thrift:"req,1" frugal:"1,default,DeleteManualAnnotationRequest"` } -func NewTraceServiceListViewsArgs() *TraceServiceListViewsArgs { - return 
&TraceServiceListViewsArgs{} +func NewTraceServiceDeleteManualAnnotationArgs() *TraceServiceDeleteManualAnnotationArgs { + return &TraceServiceDeleteManualAnnotationArgs{} } -func (p *TraceServiceListViewsArgs) InitDefault() { +func (p *TraceServiceDeleteManualAnnotationArgs) InitDefault() { } -var TraceServiceListViewsArgs_Req_DEFAULT *ListViewsRequest +var TraceServiceDeleteManualAnnotationArgs_Req_DEFAULT *DeleteManualAnnotationRequest -func (p *TraceServiceListViewsArgs) GetReq() (v *ListViewsRequest) { +func (p *TraceServiceDeleteManualAnnotationArgs) GetReq() (v *DeleteManualAnnotationRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceListViewsArgs_Req_DEFAULT + return TraceServiceDeleteManualAnnotationArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceListViewsArgs) SetReq(val *ListViewsRequest) { +func (p *TraceServiceDeleteManualAnnotationArgs) SetReq(val *DeleteManualAnnotationRequest) { p.Req = val } -var fieldIDToName_TraceServiceListViewsArgs = map[int16]string{ +var fieldIDToName_TraceServiceDeleteManualAnnotationArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceListViewsArgs) IsSetReq() bool { +func (p *TraceServiceDeleteManualAnnotationArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceListViewsArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteManualAnnotationArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -18364,7 +22276,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListViewsArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteManualAnnotationArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18374,8 +22286,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceListViewsArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewListViewsRequest() +func (p *TraceServiceDeleteManualAnnotationArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewDeleteManualAnnotationRequest() if err := _field.Read(iprot); err != nil { return err } @@ -18383,9 +22295,9 @@ func (p *TraceServiceListViewsArgs) ReadField1(iprot thrift.TProtocol) error { return nil } -func (p *TraceServiceListViewsArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteManualAnnotationArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ListViews_args"); err != nil { + if err = oprot.WriteStructBegin("DeleteManualAnnotation_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18411,7 +22323,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceListViewsArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteManualAnnotationArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -18428,15 +22340,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p 
*TraceServiceListViewsArgs) String() string { +func (p *TraceServiceDeleteManualAnnotationArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceListViewsArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceDeleteManualAnnotationArgs(%+v)", *p) } -func (p *TraceServiceListViewsArgs) DeepEqual(ano *TraceServiceListViewsArgs) bool { +func (p *TraceServiceDeleteManualAnnotationArgs) DeepEqual(ano *TraceServiceDeleteManualAnnotationArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -18448,7 +22360,7 @@ func (p *TraceServiceListViewsArgs) DeepEqual(ano *TraceServiceListViewsArgs) bo return true } -func (p *TraceServiceListViewsArgs) Field1DeepEqual(src *ListViewsRequest) bool { +func (p *TraceServiceDeleteManualAnnotationArgs) Field1DeepEqual(src *DeleteManualAnnotationRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -18456,41 +22368,41 @@ func (p *TraceServiceListViewsArgs) Field1DeepEqual(src *ListViewsRequest) bool return true } -type TraceServiceListViewsResult struct { - Success *ListViewsResponse `thrift:"success,0,optional" frugal:"0,optional,ListViewsResponse"` +type TraceServiceDeleteManualAnnotationResult struct { + Success *DeleteManualAnnotationResponse `thrift:"success,0,optional" frugal:"0,optional,DeleteManualAnnotationResponse"` } -func NewTraceServiceListViewsResult() *TraceServiceListViewsResult { - return &TraceServiceListViewsResult{} +func NewTraceServiceDeleteManualAnnotationResult() *TraceServiceDeleteManualAnnotationResult { + return &TraceServiceDeleteManualAnnotationResult{} } -func (p *TraceServiceListViewsResult) InitDefault() { +func (p *TraceServiceDeleteManualAnnotationResult) InitDefault() { } -var TraceServiceListViewsResult_Success_DEFAULT *ListViewsResponse +var TraceServiceDeleteManualAnnotationResult_Success_DEFAULT *DeleteManualAnnotationResponse -func (p *TraceServiceListViewsResult) GetSuccess() (v *ListViewsResponse) { +func (p *TraceServiceDeleteManualAnnotationResult) GetSuccess() (v *DeleteManualAnnotationResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceListViewsResult_Success_DEFAULT + return TraceServiceDeleteManualAnnotationResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceListViewsResult) SetSuccess(x interface{}) { - p.Success = x.(*ListViewsResponse) +func (p *TraceServiceDeleteManualAnnotationResult) SetSuccess(x interface{}) { + p.Success = x.(*DeleteManualAnnotationResponse) } -var fieldIDToName_TraceServiceListViewsResult = map[int16]string{ +var fieldIDToName_TraceServiceDeleteManualAnnotationResult = map[int16]string{ 0: "success", } -func (p *TraceServiceListViewsResult) IsSetSuccess() bool { +func (p *TraceServiceDeleteManualAnnotationResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceListViewsResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteManualAnnotationResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -18535,7 +22447,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListViewsResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteManualAnnotationResult[fieldId]), err) SkipFieldError: return 
thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18545,8 +22457,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceListViewsResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewListViewsResponse() +func (p *TraceServiceDeleteManualAnnotationResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewDeleteManualAnnotationResponse() if err := _field.Read(iprot); err != nil { return err } @@ -18554,9 +22466,9 @@ func (p *TraceServiceListViewsResult) ReadField0(iprot thrift.TProtocol) error { return nil } -func (p *TraceServiceListViewsResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteManualAnnotationResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ListViews_result"); err != nil { + if err = oprot.WriteStructBegin("DeleteManualAnnotation_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18582,7 +22494,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceListViewsResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceDeleteManualAnnotationResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -18601,15 +22513,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceListViewsResult) String() string { +func (p *TraceServiceDeleteManualAnnotationResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceListViewsResult(%+v)", *p) + return fmt.Sprintf("TraceServiceDeleteManualAnnotationResult(%+v)", *p) } -func (p *TraceServiceListViewsResult) DeepEqual(ano *TraceServiceListViewsResult) bool { +func (p *TraceServiceDeleteManualAnnotationResult) DeepEqual(ano *TraceServiceDeleteManualAnnotationResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -18621,7 +22533,7 @@ func (p *TraceServiceListViewsResult) DeepEqual(ano *TraceServiceListViewsResult return true } -func (p *TraceServiceListViewsResult) Field0DeepEqual(src *ListViewsResponse) bool { +func (p *TraceServiceDeleteManualAnnotationResult) Field0DeepEqual(src *DeleteManualAnnotationResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -18629,41 +22541,41 @@ func (p *TraceServiceListViewsResult) Field0DeepEqual(src *ListViewsResponse) bo return true } -type TraceServiceCreateManualAnnotationArgs struct { - Req *CreateManualAnnotationRequest `thrift:"req,1" frugal:"1,default,CreateManualAnnotationRequest"` +type TraceServiceListAnnotationsArgs struct { + Req *ListAnnotationsRequest `thrift:"req,1" frugal:"1,default,ListAnnotationsRequest"` } -func NewTraceServiceCreateManualAnnotationArgs() *TraceServiceCreateManualAnnotationArgs { - return &TraceServiceCreateManualAnnotationArgs{} +func NewTraceServiceListAnnotationsArgs() *TraceServiceListAnnotationsArgs { + return &TraceServiceListAnnotationsArgs{} } -func (p *TraceServiceCreateManualAnnotationArgs) InitDefault() { +func (p *TraceServiceListAnnotationsArgs) InitDefault() { } -var TraceServiceCreateManualAnnotationArgs_Req_DEFAULT *CreateManualAnnotationRequest +var TraceServiceListAnnotationsArgs_Req_DEFAULT *ListAnnotationsRequest -func (p *TraceServiceCreateManualAnnotationArgs) 
GetReq() (v *CreateManualAnnotationRequest) { +func (p *TraceServiceListAnnotationsArgs) GetReq() (v *ListAnnotationsRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceCreateManualAnnotationArgs_Req_DEFAULT + return TraceServiceListAnnotationsArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceCreateManualAnnotationArgs) SetReq(val *CreateManualAnnotationRequest) { +func (p *TraceServiceListAnnotationsArgs) SetReq(val *ListAnnotationsRequest) { p.Req = val } -var fieldIDToName_TraceServiceCreateManualAnnotationArgs = map[int16]string{ +var fieldIDToName_TraceServiceListAnnotationsArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceCreateManualAnnotationArgs) IsSetReq() bool { +func (p *TraceServiceListAnnotationsArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceCreateManualAnnotationArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationsArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -18708,7 +22620,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateManualAnnotationArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationsArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18718,8 +22630,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceCreateManualAnnotationArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewCreateManualAnnotationRequest() +func (p *TraceServiceListAnnotationsArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewListAnnotationsRequest() if err := _field.Read(iprot); err != nil { return err } @@ -18727,9 +22639,9 @@ func (p *TraceServiceCreateManualAnnotationArgs) ReadField1(iprot thrift.TProtoc return nil } -func (p *TraceServiceCreateManualAnnotationArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationsArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("CreateManualAnnotation_args"); err != nil { + if err = oprot.WriteStructBegin("ListAnnotations_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18755,7 +22667,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceCreateManualAnnotationArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationsArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -18772,15 +22684,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceCreateManualAnnotationArgs) String() string { +func (p *TraceServiceListAnnotationsArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceCreateManualAnnotationArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceListAnnotationsArgs(%+v)", *p) } -func (p *TraceServiceCreateManualAnnotationArgs) DeepEqual(ano *TraceServiceCreateManualAnnotationArgs) bool { +func (p 
*TraceServiceListAnnotationsArgs) DeepEqual(ano *TraceServiceListAnnotationsArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -18792,7 +22704,7 @@ func (p *TraceServiceCreateManualAnnotationArgs) DeepEqual(ano *TraceServiceCrea return true } -func (p *TraceServiceCreateManualAnnotationArgs) Field1DeepEqual(src *CreateManualAnnotationRequest) bool { +func (p *TraceServiceListAnnotationsArgs) Field1DeepEqual(src *ListAnnotationsRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -18800,41 +22712,41 @@ func (p *TraceServiceCreateManualAnnotationArgs) Field1DeepEqual(src *CreateManu return true } -type TraceServiceCreateManualAnnotationResult struct { - Success *CreateManualAnnotationResponse `thrift:"success,0,optional" frugal:"0,optional,CreateManualAnnotationResponse"` +type TraceServiceListAnnotationsResult struct { + Success *ListAnnotationsResponse `thrift:"success,0,optional" frugal:"0,optional,ListAnnotationsResponse"` } -func NewTraceServiceCreateManualAnnotationResult() *TraceServiceCreateManualAnnotationResult { - return &TraceServiceCreateManualAnnotationResult{} +func NewTraceServiceListAnnotationsResult() *TraceServiceListAnnotationsResult { + return &TraceServiceListAnnotationsResult{} } -func (p *TraceServiceCreateManualAnnotationResult) InitDefault() { +func (p *TraceServiceListAnnotationsResult) InitDefault() { } -var TraceServiceCreateManualAnnotationResult_Success_DEFAULT *CreateManualAnnotationResponse +var TraceServiceListAnnotationsResult_Success_DEFAULT *ListAnnotationsResponse -func (p *TraceServiceCreateManualAnnotationResult) GetSuccess() (v *CreateManualAnnotationResponse) { +func (p *TraceServiceListAnnotationsResult) GetSuccess() (v *ListAnnotationsResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceCreateManualAnnotationResult_Success_DEFAULT + return TraceServiceListAnnotationsResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceCreateManualAnnotationResult) SetSuccess(x interface{}) { - p.Success = x.(*CreateManualAnnotationResponse) +func (p *TraceServiceListAnnotationsResult) SetSuccess(x interface{}) { + p.Success = x.(*ListAnnotationsResponse) } -var fieldIDToName_TraceServiceCreateManualAnnotationResult = map[int16]string{ +var fieldIDToName_TraceServiceListAnnotationsResult = map[int16]string{ 0: "success", } -func (p *TraceServiceCreateManualAnnotationResult) IsSetSuccess() bool { +func (p *TraceServiceListAnnotationsResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceCreateManualAnnotationResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationsResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -18879,7 +22791,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateManualAnnotationResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationsResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -18889,8 +22801,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceCreateManualAnnotationResult) ReadField0(iprot 
thrift.TProtocol) error { - _field := NewCreateManualAnnotationResponse() +func (p *TraceServiceListAnnotationsResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewListAnnotationsResponse() if err := _field.Read(iprot); err != nil { return err } @@ -18898,9 +22810,9 @@ func (p *TraceServiceCreateManualAnnotationResult) ReadField0(iprot thrift.TProt return nil } -func (p *TraceServiceCreateManualAnnotationResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationsResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("CreateManualAnnotation_result"); err != nil { + if err = oprot.WriteStructBegin("ListAnnotations_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -18926,7 +22838,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceCreateManualAnnotationResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationsResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -18945,15 +22857,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceCreateManualAnnotationResult) String() string { +func (p *TraceServiceListAnnotationsResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceCreateManualAnnotationResult(%+v)", *p) + return fmt.Sprintf("TraceServiceListAnnotationsResult(%+v)", *p) } -func (p *TraceServiceCreateManualAnnotationResult) DeepEqual(ano *TraceServiceCreateManualAnnotationResult) bool { +func (p *TraceServiceListAnnotationsResult) DeepEqual(ano *TraceServiceListAnnotationsResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -18965,7 +22877,7 @@ func (p *TraceServiceCreateManualAnnotationResult) DeepEqual(ano *TraceServiceCr return true } -func (p *TraceServiceCreateManualAnnotationResult) Field0DeepEqual(src *CreateManualAnnotationResponse) bool { +func (p *TraceServiceListAnnotationsResult) Field0DeepEqual(src *ListAnnotationsResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -18973,41 +22885,41 @@ func (p *TraceServiceCreateManualAnnotationResult) Field0DeepEqual(src *CreateMa return true } -type TraceServiceUpdateManualAnnotationArgs struct { - Req *UpdateManualAnnotationRequest `thrift:"req,1" frugal:"1,default,UpdateManualAnnotationRequest"` +type TraceServiceExportTracesToDatasetArgs struct { + Req *ExportTracesToDatasetRequest `thrift:"Req,1" frugal:"1,default,ExportTracesToDatasetRequest"` } -func NewTraceServiceUpdateManualAnnotationArgs() *TraceServiceUpdateManualAnnotationArgs { - return &TraceServiceUpdateManualAnnotationArgs{} +func NewTraceServiceExportTracesToDatasetArgs() *TraceServiceExportTracesToDatasetArgs { + return &TraceServiceExportTracesToDatasetArgs{} } -func (p *TraceServiceUpdateManualAnnotationArgs) InitDefault() { +func (p *TraceServiceExportTracesToDatasetArgs) InitDefault() { } -var TraceServiceUpdateManualAnnotationArgs_Req_DEFAULT *UpdateManualAnnotationRequest +var TraceServiceExportTracesToDatasetArgs_Req_DEFAULT *ExportTracesToDatasetRequest -func (p *TraceServiceUpdateManualAnnotationArgs) GetReq() (v *UpdateManualAnnotationRequest) { +func (p *TraceServiceExportTracesToDatasetArgs) GetReq() (v *ExportTracesToDatasetRequest) { if p == nil { return } if 
!p.IsSetReq() { - return TraceServiceUpdateManualAnnotationArgs_Req_DEFAULT + return TraceServiceExportTracesToDatasetArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceUpdateManualAnnotationArgs) SetReq(val *UpdateManualAnnotationRequest) { +func (p *TraceServiceExportTracesToDatasetArgs) SetReq(val *ExportTracesToDatasetRequest) { p.Req = val } -var fieldIDToName_TraceServiceUpdateManualAnnotationArgs = map[int16]string{ - 1: "req", +var fieldIDToName_TraceServiceExportTracesToDatasetArgs = map[int16]string{ + 1: "Req", } -func (p *TraceServiceUpdateManualAnnotationArgs) IsSetReq() bool { +func (p *TraceServiceExportTracesToDatasetArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceUpdateManualAnnotationArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceExportTracesToDatasetArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -19052,7 +22964,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateManualAnnotationArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExportTracesToDatasetArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -19062,8 +22974,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceUpdateManualAnnotationArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewUpdateManualAnnotationRequest() +func (p *TraceServiceExportTracesToDatasetArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewExportTracesToDatasetRequest() if err := _field.Read(iprot); err != nil { return err } @@ -19071,9 +22983,9 @@ func (p *TraceServiceUpdateManualAnnotationArgs) ReadField1(iprot thrift.TProtoc return nil } -func (p *TraceServiceUpdateManualAnnotationArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceExportTracesToDatasetArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("UpdateManualAnnotation_args"); err != nil { + if err = oprot.WriteStructBegin("ExportTracesToDataset_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -19099,8 +23011,8 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceUpdateManualAnnotationArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { +func (p *TraceServiceExportTracesToDatasetArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("Req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } if err := p.Req.Write(oprot); err != nil { @@ -19116,15 +23028,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceUpdateManualAnnotationArgs) String() string { +func (p *TraceServiceExportTracesToDatasetArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceUpdateManualAnnotationArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceExportTracesToDatasetArgs(%+v)", *p) } -func (p *TraceServiceUpdateManualAnnotationArgs) DeepEqual(ano 
*TraceServiceUpdateManualAnnotationArgs) bool { +func (p *TraceServiceExportTracesToDatasetArgs) DeepEqual(ano *TraceServiceExportTracesToDatasetArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -19136,7 +23048,7 @@ func (p *TraceServiceUpdateManualAnnotationArgs) DeepEqual(ano *TraceServiceUpda return true } -func (p *TraceServiceUpdateManualAnnotationArgs) Field1DeepEqual(src *UpdateManualAnnotationRequest) bool { +func (p *TraceServiceExportTracesToDatasetArgs) Field1DeepEqual(src *ExportTracesToDatasetRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -19144,41 +23056,41 @@ func (p *TraceServiceUpdateManualAnnotationArgs) Field1DeepEqual(src *UpdateManu return true } -type TraceServiceUpdateManualAnnotationResult struct { - Success *UpdateManualAnnotationResponse `thrift:"success,0,optional" frugal:"0,optional,UpdateManualAnnotationResponse"` +type TraceServiceExportTracesToDatasetResult struct { + Success *ExportTracesToDatasetResponse `thrift:"success,0,optional" frugal:"0,optional,ExportTracesToDatasetResponse"` } -func NewTraceServiceUpdateManualAnnotationResult() *TraceServiceUpdateManualAnnotationResult { - return &TraceServiceUpdateManualAnnotationResult{} +func NewTraceServiceExportTracesToDatasetResult() *TraceServiceExportTracesToDatasetResult { + return &TraceServiceExportTracesToDatasetResult{} } -func (p *TraceServiceUpdateManualAnnotationResult) InitDefault() { +func (p *TraceServiceExportTracesToDatasetResult) InitDefault() { } -var TraceServiceUpdateManualAnnotationResult_Success_DEFAULT *UpdateManualAnnotationResponse +var TraceServiceExportTracesToDatasetResult_Success_DEFAULT *ExportTracesToDatasetResponse -func (p *TraceServiceUpdateManualAnnotationResult) GetSuccess() (v *UpdateManualAnnotationResponse) { +func (p *TraceServiceExportTracesToDatasetResult) GetSuccess() (v *ExportTracesToDatasetResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceUpdateManualAnnotationResult_Success_DEFAULT + return TraceServiceExportTracesToDatasetResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceUpdateManualAnnotationResult) SetSuccess(x interface{}) { - p.Success = x.(*UpdateManualAnnotationResponse) +func (p *TraceServiceExportTracesToDatasetResult) SetSuccess(x interface{}) { + p.Success = x.(*ExportTracesToDatasetResponse) } -var fieldIDToName_TraceServiceUpdateManualAnnotationResult = map[int16]string{ +var fieldIDToName_TraceServiceExportTracesToDatasetResult = map[int16]string{ 0: "success", } -func (p *TraceServiceUpdateManualAnnotationResult) IsSetSuccess() bool { +func (p *TraceServiceExportTracesToDatasetResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceUpdateManualAnnotationResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceExportTracesToDatasetResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -19223,7 +23135,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateManualAnnotationResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExportTracesToDatasetResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ 
-19233,8 +23145,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceUpdateManualAnnotationResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewUpdateManualAnnotationResponse() +func (p *TraceServiceExportTracesToDatasetResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewExportTracesToDatasetResponse() if err := _field.Read(iprot); err != nil { return err } @@ -19242,9 +23154,9 @@ func (p *TraceServiceUpdateManualAnnotationResult) ReadField0(iprot thrift.TProt return nil } -func (p *TraceServiceUpdateManualAnnotationResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceExportTracesToDatasetResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("UpdateManualAnnotation_result"); err != nil { + if err = oprot.WriteStructBegin("ExportTracesToDataset_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -19270,7 +23182,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceUpdateManualAnnotationResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceExportTracesToDatasetResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -19289,15 +23201,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceUpdateManualAnnotationResult) String() string { +func (p *TraceServiceExportTracesToDatasetResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceUpdateManualAnnotationResult(%+v)", *p) + return fmt.Sprintf("TraceServiceExportTracesToDatasetResult(%+v)", *p) } -func (p *TraceServiceUpdateManualAnnotationResult) DeepEqual(ano *TraceServiceUpdateManualAnnotationResult) bool { +func (p *TraceServiceExportTracesToDatasetResult) DeepEqual(ano *TraceServiceExportTracesToDatasetResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -19309,7 +23221,7 @@ func (p *TraceServiceUpdateManualAnnotationResult) DeepEqual(ano *TraceServiceUp return true } -func (p *TraceServiceUpdateManualAnnotationResult) Field0DeepEqual(src *UpdateManualAnnotationResponse) bool { +func (p *TraceServiceExportTracesToDatasetResult) Field0DeepEqual(src *ExportTracesToDatasetResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -19317,41 +23229,41 @@ func (p *TraceServiceUpdateManualAnnotationResult) Field0DeepEqual(src *UpdateMa return true } -type TraceServiceDeleteManualAnnotationArgs struct { - Req *DeleteManualAnnotationRequest `thrift:"req,1" frugal:"1,default,DeleteManualAnnotationRequest"` +type TraceServicePreviewExportTracesToDatasetArgs struct { + Req *PreviewExportTracesToDatasetRequest `thrift:"Req,1" frugal:"1,default,PreviewExportTracesToDatasetRequest"` } -func NewTraceServiceDeleteManualAnnotationArgs() *TraceServiceDeleteManualAnnotationArgs { - return &TraceServiceDeleteManualAnnotationArgs{} +func NewTraceServicePreviewExportTracesToDatasetArgs() *TraceServicePreviewExportTracesToDatasetArgs { + return &TraceServicePreviewExportTracesToDatasetArgs{} } -func (p *TraceServiceDeleteManualAnnotationArgs) InitDefault() { +func (p *TraceServicePreviewExportTracesToDatasetArgs) InitDefault() { } -var TraceServiceDeleteManualAnnotationArgs_Req_DEFAULT *DeleteManualAnnotationRequest +var 
TraceServicePreviewExportTracesToDatasetArgs_Req_DEFAULT *PreviewExportTracesToDatasetRequest -func (p *TraceServiceDeleteManualAnnotationArgs) GetReq() (v *DeleteManualAnnotationRequest) { +func (p *TraceServicePreviewExportTracesToDatasetArgs) GetReq() (v *PreviewExportTracesToDatasetRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceDeleteManualAnnotationArgs_Req_DEFAULT + return TraceServicePreviewExportTracesToDatasetArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceDeleteManualAnnotationArgs) SetReq(val *DeleteManualAnnotationRequest) { +func (p *TraceServicePreviewExportTracesToDatasetArgs) SetReq(val *PreviewExportTracesToDatasetRequest) { p.Req = val } -var fieldIDToName_TraceServiceDeleteManualAnnotationArgs = map[int16]string{ - 1: "req", +var fieldIDToName_TraceServicePreviewExportTracesToDatasetArgs = map[int16]string{ + 1: "Req", } -func (p *TraceServiceDeleteManualAnnotationArgs) IsSetReq() bool { +func (p *TraceServicePreviewExportTracesToDatasetArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceDeleteManualAnnotationArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServicePreviewExportTracesToDatasetArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -19396,7 +23308,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteManualAnnotationArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServicePreviewExportTracesToDatasetArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -19406,8 +23318,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceDeleteManualAnnotationArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewDeleteManualAnnotationRequest() +func (p *TraceServicePreviewExportTracesToDatasetArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewPreviewExportTracesToDatasetRequest() if err := _field.Read(iprot); err != nil { return err } @@ -19415,9 +23327,9 @@ func (p *TraceServiceDeleteManualAnnotationArgs) ReadField1(iprot thrift.TProtoc return nil } -func (p *TraceServiceDeleteManualAnnotationArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServicePreviewExportTracesToDatasetArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("DeleteManualAnnotation_args"); err != nil { + if err = oprot.WriteStructBegin("PreviewExportTracesToDataset_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -19443,8 +23355,8 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceDeleteManualAnnotationArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { +func (p *TraceServicePreviewExportTracesToDatasetArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("Req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } if err := p.Req.Write(oprot); err != nil { @@ -19460,15 +23372,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write 
field 1 end error: ", p), err) } -func (p *TraceServiceDeleteManualAnnotationArgs) String() string { +func (p *TraceServicePreviewExportTracesToDatasetArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceDeleteManualAnnotationArgs(%+v)", *p) + return fmt.Sprintf("TraceServicePreviewExportTracesToDatasetArgs(%+v)", *p) } -func (p *TraceServiceDeleteManualAnnotationArgs) DeepEqual(ano *TraceServiceDeleteManualAnnotationArgs) bool { +func (p *TraceServicePreviewExportTracesToDatasetArgs) DeepEqual(ano *TraceServicePreviewExportTracesToDatasetArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -19480,7 +23392,7 @@ func (p *TraceServiceDeleteManualAnnotationArgs) DeepEqual(ano *TraceServiceDele return true } -func (p *TraceServiceDeleteManualAnnotationArgs) Field1DeepEqual(src *DeleteManualAnnotationRequest) bool { +func (p *TraceServicePreviewExportTracesToDatasetArgs) Field1DeepEqual(src *PreviewExportTracesToDatasetRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -19488,41 +23400,41 @@ func (p *TraceServiceDeleteManualAnnotationArgs) Field1DeepEqual(src *DeleteManu return true } -type TraceServiceDeleteManualAnnotationResult struct { - Success *DeleteManualAnnotationResponse `thrift:"success,0,optional" frugal:"0,optional,DeleteManualAnnotationResponse"` +type TraceServicePreviewExportTracesToDatasetResult struct { + Success *PreviewExportTracesToDatasetResponse `thrift:"success,0,optional" frugal:"0,optional,PreviewExportTracesToDatasetResponse"` } -func NewTraceServiceDeleteManualAnnotationResult() *TraceServiceDeleteManualAnnotationResult { - return &TraceServiceDeleteManualAnnotationResult{} +func NewTraceServicePreviewExportTracesToDatasetResult() *TraceServicePreviewExportTracesToDatasetResult { + return &TraceServicePreviewExportTracesToDatasetResult{} } -func (p *TraceServiceDeleteManualAnnotationResult) InitDefault() { +func (p *TraceServicePreviewExportTracesToDatasetResult) InitDefault() { } -var TraceServiceDeleteManualAnnotationResult_Success_DEFAULT *DeleteManualAnnotationResponse +var TraceServicePreviewExportTracesToDatasetResult_Success_DEFAULT *PreviewExportTracesToDatasetResponse -func (p *TraceServiceDeleteManualAnnotationResult) GetSuccess() (v *DeleteManualAnnotationResponse) { +func (p *TraceServicePreviewExportTracesToDatasetResult) GetSuccess() (v *PreviewExportTracesToDatasetResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceDeleteManualAnnotationResult_Success_DEFAULT + return TraceServicePreviewExportTracesToDatasetResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceDeleteManualAnnotationResult) SetSuccess(x interface{}) { - p.Success = x.(*DeleteManualAnnotationResponse) +func (p *TraceServicePreviewExportTracesToDatasetResult) SetSuccess(x interface{}) { + p.Success = x.(*PreviewExportTracesToDatasetResponse) } -var fieldIDToName_TraceServiceDeleteManualAnnotationResult = map[int16]string{ +var fieldIDToName_TraceServicePreviewExportTracesToDatasetResult = map[int16]string{ 0: "success", } -func (p *TraceServiceDeleteManualAnnotationResult) IsSetSuccess() bool { +func (p *TraceServicePreviewExportTracesToDatasetResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceDeleteManualAnnotationResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServicePreviewExportTracesToDatasetResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -19567,7 +23479,7 @@ ReadStructBeginError: 
ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteManualAnnotationResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServicePreviewExportTracesToDatasetResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -19577,8 +23489,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceDeleteManualAnnotationResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewDeleteManualAnnotationResponse() +func (p *TraceServicePreviewExportTracesToDatasetResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewPreviewExportTracesToDatasetResponse() if err := _field.Read(iprot); err != nil { return err } @@ -19586,9 +23498,9 @@ func (p *TraceServiceDeleteManualAnnotationResult) ReadField0(iprot thrift.TProt return nil } -func (p *TraceServiceDeleteManualAnnotationResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServicePreviewExportTracesToDatasetResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("DeleteManualAnnotation_result"); err != nil { + if err = oprot.WriteStructBegin("PreviewExportTracesToDataset_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -19614,7 +23526,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceDeleteManualAnnotationResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServicePreviewExportTracesToDatasetResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -19633,15 +23545,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceDeleteManualAnnotationResult) String() string { +func (p *TraceServicePreviewExportTracesToDatasetResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceDeleteManualAnnotationResult(%+v)", *p) + return fmt.Sprintf("TraceServicePreviewExportTracesToDatasetResult(%+v)", *p) } -func (p *TraceServiceDeleteManualAnnotationResult) DeepEqual(ano *TraceServiceDeleteManualAnnotationResult) bool { +func (p *TraceServicePreviewExportTracesToDatasetResult) DeepEqual(ano *TraceServicePreviewExportTracesToDatasetResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -19653,7 +23565,7 @@ func (p *TraceServiceDeleteManualAnnotationResult) DeepEqual(ano *TraceServiceDe return true } -func (p *TraceServiceDeleteManualAnnotationResult) Field0DeepEqual(src *DeleteManualAnnotationResponse) bool { +func (p *TraceServicePreviewExportTracesToDatasetResult) Field0DeepEqual(src *PreviewExportTracesToDatasetResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -19661,41 +23573,41 @@ func (p *TraceServiceDeleteManualAnnotationResult) Field0DeepEqual(src *DeleteMa return true } -type TraceServiceListAnnotationsArgs struct { - Req *ListAnnotationsRequest `thrift:"req,1" frugal:"1,default,ListAnnotationsRequest"` +type TraceServiceChangeEvaluatorScoreArgs struct { + Req *ChangeEvaluatorScoreRequest 
`thrift:"req,1" frugal:"1,default,ChangeEvaluatorScoreRequest"` } -func NewTraceServiceListAnnotationsArgs() *TraceServiceListAnnotationsArgs { - return &TraceServiceListAnnotationsArgs{} +func NewTraceServiceChangeEvaluatorScoreArgs() *TraceServiceChangeEvaluatorScoreArgs { + return &TraceServiceChangeEvaluatorScoreArgs{} } -func (p *TraceServiceListAnnotationsArgs) InitDefault() { +func (p *TraceServiceChangeEvaluatorScoreArgs) InitDefault() { } -var TraceServiceListAnnotationsArgs_Req_DEFAULT *ListAnnotationsRequest +var TraceServiceChangeEvaluatorScoreArgs_Req_DEFAULT *ChangeEvaluatorScoreRequest -func (p *TraceServiceListAnnotationsArgs) GetReq() (v *ListAnnotationsRequest) { +func (p *TraceServiceChangeEvaluatorScoreArgs) GetReq() (v *ChangeEvaluatorScoreRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceListAnnotationsArgs_Req_DEFAULT + return TraceServiceChangeEvaluatorScoreArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceListAnnotationsArgs) SetReq(val *ListAnnotationsRequest) { +func (p *TraceServiceChangeEvaluatorScoreArgs) SetReq(val *ChangeEvaluatorScoreRequest) { p.Req = val } -var fieldIDToName_TraceServiceListAnnotationsArgs = map[int16]string{ +var fieldIDToName_TraceServiceChangeEvaluatorScoreArgs = map[int16]string{ 1: "req", } -func (p *TraceServiceListAnnotationsArgs) IsSetReq() bool { +func (p *TraceServiceChangeEvaluatorScoreArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceListAnnotationsArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceChangeEvaluatorScoreArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -19740,7 +23652,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationsArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceChangeEvaluatorScoreArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -19750,8 +23662,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceListAnnotationsArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewListAnnotationsRequest() +func (p *TraceServiceChangeEvaluatorScoreArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewChangeEvaluatorScoreRequest() if err := _field.Read(iprot); err != nil { return err } @@ -19759,9 +23671,9 @@ func (p *TraceServiceListAnnotationsArgs) ReadField1(iprot thrift.TProtocol) err return nil } -func (p *TraceServiceListAnnotationsArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceChangeEvaluatorScoreArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ListAnnotations_args"); err != nil { + if err = oprot.WriteStructBegin("ChangeEvaluatorScore_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -19787,7 +23699,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceListAnnotationsArgs) writeField1(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceChangeEvaluatorScoreArgs) writeField1(oprot thrift.TProtocol) (err error) { if err = 
oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } @@ -19804,15 +23716,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceListAnnotationsArgs) String() string { +func (p *TraceServiceChangeEvaluatorScoreArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceListAnnotationsArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceChangeEvaluatorScoreArgs(%+v)", *p) } -func (p *TraceServiceListAnnotationsArgs) DeepEqual(ano *TraceServiceListAnnotationsArgs) bool { +func (p *TraceServiceChangeEvaluatorScoreArgs) DeepEqual(ano *TraceServiceChangeEvaluatorScoreArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -19824,7 +23736,7 @@ func (p *TraceServiceListAnnotationsArgs) DeepEqual(ano *TraceServiceListAnnotat return true } -func (p *TraceServiceListAnnotationsArgs) Field1DeepEqual(src *ListAnnotationsRequest) bool { +func (p *TraceServiceChangeEvaluatorScoreArgs) Field1DeepEqual(src *ChangeEvaluatorScoreRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -19832,41 +23744,41 @@ func (p *TraceServiceListAnnotationsArgs) Field1DeepEqual(src *ListAnnotationsRe return true } -type TraceServiceListAnnotationsResult struct { - Success *ListAnnotationsResponse `thrift:"success,0,optional" frugal:"0,optional,ListAnnotationsResponse"` +type TraceServiceChangeEvaluatorScoreResult struct { + Success *ChangeEvaluatorScoreResponse `thrift:"success,0,optional" frugal:"0,optional,ChangeEvaluatorScoreResponse"` } -func NewTraceServiceListAnnotationsResult() *TraceServiceListAnnotationsResult { - return &TraceServiceListAnnotationsResult{} +func NewTraceServiceChangeEvaluatorScoreResult() *TraceServiceChangeEvaluatorScoreResult { + return &TraceServiceChangeEvaluatorScoreResult{} } -func (p *TraceServiceListAnnotationsResult) InitDefault() { +func (p *TraceServiceChangeEvaluatorScoreResult) InitDefault() { } -var TraceServiceListAnnotationsResult_Success_DEFAULT *ListAnnotationsResponse +var TraceServiceChangeEvaluatorScoreResult_Success_DEFAULT *ChangeEvaluatorScoreResponse -func (p *TraceServiceListAnnotationsResult) GetSuccess() (v *ListAnnotationsResponse) { +func (p *TraceServiceChangeEvaluatorScoreResult) GetSuccess() (v *ChangeEvaluatorScoreResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceListAnnotationsResult_Success_DEFAULT + return TraceServiceChangeEvaluatorScoreResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceListAnnotationsResult) SetSuccess(x interface{}) { - p.Success = x.(*ListAnnotationsResponse) +func (p *TraceServiceChangeEvaluatorScoreResult) SetSuccess(x interface{}) { + p.Success = x.(*ChangeEvaluatorScoreResponse) } -var fieldIDToName_TraceServiceListAnnotationsResult = map[int16]string{ +var fieldIDToName_TraceServiceChangeEvaluatorScoreResult = map[int16]string{ 0: "success", } -func (p *TraceServiceListAnnotationsResult) IsSetSuccess() bool { +func (p *TraceServiceChangeEvaluatorScoreResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceListAnnotationsResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceChangeEvaluatorScoreResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -19911,7 +23823,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return 
thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationsResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceChangeEvaluatorScoreResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -19921,8 +23833,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceListAnnotationsResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewListAnnotationsResponse() +func (p *TraceServiceChangeEvaluatorScoreResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewChangeEvaluatorScoreResponse() if err := _field.Read(iprot); err != nil { return err } @@ -19930,9 +23842,9 @@ func (p *TraceServiceListAnnotationsResult) ReadField0(iprot thrift.TProtocol) e return nil } -func (p *TraceServiceListAnnotationsResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceChangeEvaluatorScoreResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ListAnnotations_result"); err != nil { + if err = oprot.WriteStructBegin("ChangeEvaluatorScore_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -19958,7 +23870,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceListAnnotationsResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceChangeEvaluatorScoreResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -19977,15 +23889,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceListAnnotationsResult) String() string { +func (p *TraceServiceChangeEvaluatorScoreResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceListAnnotationsResult(%+v)", *p) + return fmt.Sprintf("TraceServiceChangeEvaluatorScoreResult(%+v)", *p) } -func (p *TraceServiceListAnnotationsResult) DeepEqual(ano *TraceServiceListAnnotationsResult) bool { +func (p *TraceServiceChangeEvaluatorScoreResult) DeepEqual(ano *TraceServiceChangeEvaluatorScoreResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -19997,7 +23909,7 @@ func (p *TraceServiceListAnnotationsResult) DeepEqual(ano *TraceServiceListAnnot return true } -func (p *TraceServiceListAnnotationsResult) Field0DeepEqual(src *ListAnnotationsResponse) bool { +func (p *TraceServiceChangeEvaluatorScoreResult) Field0DeepEqual(src *ChangeEvaluatorScoreResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -20005,41 +23917,41 @@ func (p *TraceServiceListAnnotationsResult) Field0DeepEqual(src *ListAnnotations return true } -type TraceServiceExportTracesToDatasetArgs struct { - Req *ExportTracesToDatasetRequest `thrift:"Req,1" frugal:"1,default,ExportTracesToDatasetRequest"` +type TraceServiceListAnnotationEvaluatorsArgs struct { + Req *ListAnnotationEvaluatorsRequest `thrift:"req,1" frugal:"1,default,ListAnnotationEvaluatorsRequest"` } -func NewTraceServiceExportTracesToDatasetArgs() *TraceServiceExportTracesToDatasetArgs { - return &TraceServiceExportTracesToDatasetArgs{} +func NewTraceServiceListAnnotationEvaluatorsArgs() 
*TraceServiceListAnnotationEvaluatorsArgs { + return &TraceServiceListAnnotationEvaluatorsArgs{} } -func (p *TraceServiceExportTracesToDatasetArgs) InitDefault() { +func (p *TraceServiceListAnnotationEvaluatorsArgs) InitDefault() { } -var TraceServiceExportTracesToDatasetArgs_Req_DEFAULT *ExportTracesToDatasetRequest +var TraceServiceListAnnotationEvaluatorsArgs_Req_DEFAULT *ListAnnotationEvaluatorsRequest -func (p *TraceServiceExportTracesToDatasetArgs) GetReq() (v *ExportTracesToDatasetRequest) { +func (p *TraceServiceListAnnotationEvaluatorsArgs) GetReq() (v *ListAnnotationEvaluatorsRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServiceExportTracesToDatasetArgs_Req_DEFAULT + return TraceServiceListAnnotationEvaluatorsArgs_Req_DEFAULT } return p.Req } -func (p *TraceServiceExportTracesToDatasetArgs) SetReq(val *ExportTracesToDatasetRequest) { +func (p *TraceServiceListAnnotationEvaluatorsArgs) SetReq(val *ListAnnotationEvaluatorsRequest) { p.Req = val } -var fieldIDToName_TraceServiceExportTracesToDatasetArgs = map[int16]string{ - 1: "Req", +var fieldIDToName_TraceServiceListAnnotationEvaluatorsArgs = map[int16]string{ + 1: "req", } -func (p *TraceServiceExportTracesToDatasetArgs) IsSetReq() bool { +func (p *TraceServiceListAnnotationEvaluatorsArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServiceExportTracesToDatasetArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationEvaluatorsArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -20084,7 +23996,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExportTracesToDatasetArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationEvaluatorsArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -20094,8 +24006,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceExportTracesToDatasetArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewExportTracesToDatasetRequest() +func (p *TraceServiceListAnnotationEvaluatorsArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewListAnnotationEvaluatorsRequest() if err := _field.Read(iprot); err != nil { return err } @@ -20103,9 +24015,9 @@ func (p *TraceServiceExportTracesToDatasetArgs) ReadField1(iprot thrift.TProtoco return nil } -func (p *TraceServiceExportTracesToDatasetArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationEvaluatorsArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ExportTracesToDataset_args"); err != nil { + if err = oprot.WriteStructBegin("ListAnnotationEvaluators_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -20131,8 +24043,8 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceExportTracesToDatasetArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("Req", thrift.STRUCT, 1); err != nil { +func (p *TraceServiceListAnnotationEvaluatorsArgs) writeField1(oprot thrift.TProtocol) (err 
error) { + if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } if err := p.Req.Write(oprot); err != nil { @@ -20148,15 +24060,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServiceExportTracesToDatasetArgs) String() string { +func (p *TraceServiceListAnnotationEvaluatorsArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceExportTracesToDatasetArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceListAnnotationEvaluatorsArgs(%+v)", *p) } -func (p *TraceServiceExportTracesToDatasetArgs) DeepEqual(ano *TraceServiceExportTracesToDatasetArgs) bool { +func (p *TraceServiceListAnnotationEvaluatorsArgs) DeepEqual(ano *TraceServiceListAnnotationEvaluatorsArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -20168,7 +24080,7 @@ func (p *TraceServiceExportTracesToDatasetArgs) DeepEqual(ano *TraceServiceExpor return true } -func (p *TraceServiceExportTracesToDatasetArgs) Field1DeepEqual(src *ExportTracesToDatasetRequest) bool { +func (p *TraceServiceListAnnotationEvaluatorsArgs) Field1DeepEqual(src *ListAnnotationEvaluatorsRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -20176,41 +24088,41 @@ func (p *TraceServiceExportTracesToDatasetArgs) Field1DeepEqual(src *ExportTrace return true } -type TraceServiceExportTracesToDatasetResult struct { - Success *ExportTracesToDatasetResponse `thrift:"success,0,optional" frugal:"0,optional,ExportTracesToDatasetResponse"` +type TraceServiceListAnnotationEvaluatorsResult struct { + Success *ListAnnotationEvaluatorsResponse `thrift:"success,0,optional" frugal:"0,optional,ListAnnotationEvaluatorsResponse"` } -func NewTraceServiceExportTracesToDatasetResult() *TraceServiceExportTracesToDatasetResult { - return &TraceServiceExportTracesToDatasetResult{} +func NewTraceServiceListAnnotationEvaluatorsResult() *TraceServiceListAnnotationEvaluatorsResult { + return &TraceServiceListAnnotationEvaluatorsResult{} } -func (p *TraceServiceExportTracesToDatasetResult) InitDefault() { +func (p *TraceServiceListAnnotationEvaluatorsResult) InitDefault() { } -var TraceServiceExportTracesToDatasetResult_Success_DEFAULT *ExportTracesToDatasetResponse +var TraceServiceListAnnotationEvaluatorsResult_Success_DEFAULT *ListAnnotationEvaluatorsResponse -func (p *TraceServiceExportTracesToDatasetResult) GetSuccess() (v *ExportTracesToDatasetResponse) { +func (p *TraceServiceListAnnotationEvaluatorsResult) GetSuccess() (v *ListAnnotationEvaluatorsResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServiceExportTracesToDatasetResult_Success_DEFAULT + return TraceServiceListAnnotationEvaluatorsResult_Success_DEFAULT } return p.Success } -func (p *TraceServiceExportTracesToDatasetResult) SetSuccess(x interface{}) { - p.Success = x.(*ExportTracesToDatasetResponse) +func (p *TraceServiceListAnnotationEvaluatorsResult) SetSuccess(x interface{}) { + p.Success = x.(*ListAnnotationEvaluatorsResponse) } -var fieldIDToName_TraceServiceExportTracesToDatasetResult = map[int16]string{ +var fieldIDToName_TraceServiceListAnnotationEvaluatorsResult = map[int16]string{ 0: "success", } -func (p *TraceServiceExportTracesToDatasetResult) IsSetSuccess() bool { +func (p *TraceServiceListAnnotationEvaluatorsResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServiceExportTracesToDatasetResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationEvaluatorsResult) Read(iprot 
thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -20255,7 +24167,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExportTracesToDatasetResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationEvaluatorsResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -20265,8 +24177,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServiceExportTracesToDatasetResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewExportTracesToDatasetResponse() +func (p *TraceServiceListAnnotationEvaluatorsResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewListAnnotationEvaluatorsResponse() if err := _field.Read(iprot); err != nil { return err } @@ -20274,9 +24186,9 @@ func (p *TraceServiceExportTracesToDatasetResult) ReadField0(iprot thrift.TProto return nil } -func (p *TraceServiceExportTracesToDatasetResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationEvaluatorsResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("ExportTracesToDataset_result"); err != nil { + if err = oprot.WriteStructBegin("ListAnnotationEvaluators_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -20302,7 +24214,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServiceExportTracesToDatasetResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceListAnnotationEvaluatorsResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -20321,15 +24233,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServiceExportTracesToDatasetResult) String() string { +func (p *TraceServiceListAnnotationEvaluatorsResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServiceExportTracesToDatasetResult(%+v)", *p) + return fmt.Sprintf("TraceServiceListAnnotationEvaluatorsResult(%+v)", *p) } -func (p *TraceServiceExportTracesToDatasetResult) DeepEqual(ano *TraceServiceExportTracesToDatasetResult) bool { +func (p *TraceServiceListAnnotationEvaluatorsResult) DeepEqual(ano *TraceServiceListAnnotationEvaluatorsResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -20341,7 +24253,7 @@ func (p *TraceServiceExportTracesToDatasetResult) DeepEqual(ano *TraceServiceExp return true } -func (p *TraceServiceExportTracesToDatasetResult) Field0DeepEqual(src *ExportTracesToDatasetResponse) bool { +func (p *TraceServiceListAnnotationEvaluatorsResult) Field0DeepEqual(src *ListAnnotationEvaluatorsResponse) bool { if !p.Success.DeepEqual(src) { return false @@ -20349,41 +24261,41 @@ func (p *TraceServiceExportTracesToDatasetResult) Field0DeepEqual(src *ExportTra return true } -type TraceServicePreviewExportTracesToDatasetArgs struct { - Req *PreviewExportTracesToDatasetRequest `thrift:"Req,1" 
frugal:"1,default,PreviewExportTracesToDatasetRequest"` +type TraceServiceExtractSpanInfoArgs struct { + Req *ExtractSpanInfoRequest `thrift:"req,1" frugal:"1,default,ExtractSpanInfoRequest"` } -func NewTraceServicePreviewExportTracesToDatasetArgs() *TraceServicePreviewExportTracesToDatasetArgs { - return &TraceServicePreviewExportTracesToDatasetArgs{} +func NewTraceServiceExtractSpanInfoArgs() *TraceServiceExtractSpanInfoArgs { + return &TraceServiceExtractSpanInfoArgs{} } -func (p *TraceServicePreviewExportTracesToDatasetArgs) InitDefault() { +func (p *TraceServiceExtractSpanInfoArgs) InitDefault() { } -var TraceServicePreviewExportTracesToDatasetArgs_Req_DEFAULT *PreviewExportTracesToDatasetRequest +var TraceServiceExtractSpanInfoArgs_Req_DEFAULT *ExtractSpanInfoRequest -func (p *TraceServicePreviewExportTracesToDatasetArgs) GetReq() (v *PreviewExportTracesToDatasetRequest) { +func (p *TraceServiceExtractSpanInfoArgs) GetReq() (v *ExtractSpanInfoRequest) { if p == nil { return } if !p.IsSetReq() { - return TraceServicePreviewExportTracesToDatasetArgs_Req_DEFAULT + return TraceServiceExtractSpanInfoArgs_Req_DEFAULT } return p.Req } -func (p *TraceServicePreviewExportTracesToDatasetArgs) SetReq(val *PreviewExportTracesToDatasetRequest) { +func (p *TraceServiceExtractSpanInfoArgs) SetReq(val *ExtractSpanInfoRequest) { p.Req = val } -var fieldIDToName_TraceServicePreviewExportTracesToDatasetArgs = map[int16]string{ - 1: "Req", +var fieldIDToName_TraceServiceExtractSpanInfoArgs = map[int16]string{ + 1: "req", } -func (p *TraceServicePreviewExportTracesToDatasetArgs) IsSetReq() bool { +func (p *TraceServiceExtractSpanInfoArgs) IsSetReq() bool { return p.Req != nil } -func (p *TraceServicePreviewExportTracesToDatasetArgs) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceExtractSpanInfoArgs) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -20428,7 +24340,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServicePreviewExportTracesToDatasetArgs[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExtractSpanInfoArgs[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -20438,8 +24350,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServicePreviewExportTracesToDatasetArgs) ReadField1(iprot thrift.TProtocol) error { - _field := NewPreviewExportTracesToDatasetRequest() +func (p *TraceServiceExtractSpanInfoArgs) ReadField1(iprot thrift.TProtocol) error { + _field := NewExtractSpanInfoRequest() if err := _field.Read(iprot); err != nil { return err } @@ -20447,9 +24359,9 @@ func (p *TraceServicePreviewExportTracesToDatasetArgs) ReadField1(iprot thrift.T return nil } -func (p *TraceServicePreviewExportTracesToDatasetArgs) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceExtractSpanInfoArgs) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("PreviewExportTracesToDataset_args"); err != nil { + if err = oprot.WriteStructBegin("ExtractSpanInfo_args"); err != nil { goto WriteStructBeginError } if p != nil { @@ -20475,8 +24387,8 @@ WriteStructEndError: 
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServicePreviewExportTracesToDatasetArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err = oprot.WriteFieldBegin("Req", thrift.STRUCT, 1); err != nil { +func (p *TraceServiceExtractSpanInfoArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err = oprot.WriteFieldBegin("req", thrift.STRUCT, 1); err != nil { goto WriteFieldBeginError } if err := p.Req.Write(oprot); err != nil { @@ -20492,15 +24404,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err) } -func (p *TraceServicePreviewExportTracesToDatasetArgs) String() string { +func (p *TraceServiceExtractSpanInfoArgs) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServicePreviewExportTracesToDatasetArgs(%+v)", *p) + return fmt.Sprintf("TraceServiceExtractSpanInfoArgs(%+v)", *p) } -func (p *TraceServicePreviewExportTracesToDatasetArgs) DeepEqual(ano *TraceServicePreviewExportTracesToDatasetArgs) bool { +func (p *TraceServiceExtractSpanInfoArgs) DeepEqual(ano *TraceServiceExtractSpanInfoArgs) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -20512,7 +24424,7 @@ func (p *TraceServicePreviewExportTracesToDatasetArgs) DeepEqual(ano *TraceServi return true } -func (p *TraceServicePreviewExportTracesToDatasetArgs) Field1DeepEqual(src *PreviewExportTracesToDatasetRequest) bool { +func (p *TraceServiceExtractSpanInfoArgs) Field1DeepEqual(src *ExtractSpanInfoRequest) bool { if !p.Req.DeepEqual(src) { return false @@ -20520,41 +24432,41 @@ func (p *TraceServicePreviewExportTracesToDatasetArgs) Field1DeepEqual(src *Prev return true } -type TraceServicePreviewExportTracesToDatasetResult struct { - Success *PreviewExportTracesToDatasetResponse `thrift:"success,0,optional" frugal:"0,optional,PreviewExportTracesToDatasetResponse"` +type TraceServiceExtractSpanInfoResult struct { + Success *ExtractSpanInfoResponse `thrift:"success,0,optional" frugal:"0,optional,ExtractSpanInfoResponse"` } -func NewTraceServicePreviewExportTracesToDatasetResult() *TraceServicePreviewExportTracesToDatasetResult { - return &TraceServicePreviewExportTracesToDatasetResult{} +func NewTraceServiceExtractSpanInfoResult() *TraceServiceExtractSpanInfoResult { + return &TraceServiceExtractSpanInfoResult{} } -func (p *TraceServicePreviewExportTracesToDatasetResult) InitDefault() { +func (p *TraceServiceExtractSpanInfoResult) InitDefault() { } -var TraceServicePreviewExportTracesToDatasetResult_Success_DEFAULT *PreviewExportTracesToDatasetResponse +var TraceServiceExtractSpanInfoResult_Success_DEFAULT *ExtractSpanInfoResponse -func (p *TraceServicePreviewExportTracesToDatasetResult) GetSuccess() (v *PreviewExportTracesToDatasetResponse) { +func (p *TraceServiceExtractSpanInfoResult) GetSuccess() (v *ExtractSpanInfoResponse) { if p == nil { return } if !p.IsSetSuccess() { - return TraceServicePreviewExportTracesToDatasetResult_Success_DEFAULT + return TraceServiceExtractSpanInfoResult_Success_DEFAULT } return p.Success } -func (p *TraceServicePreviewExportTracesToDatasetResult) SetSuccess(x interface{}) { - p.Success = x.(*PreviewExportTracesToDatasetResponse) +func (p *TraceServiceExtractSpanInfoResult) SetSuccess(x interface{}) { + p.Success = x.(*ExtractSpanInfoResponse) } -var fieldIDToName_TraceServicePreviewExportTracesToDatasetResult = map[int16]string{ +var fieldIDToName_TraceServiceExtractSpanInfoResult = map[int16]string{ 0: "success", } -func (p 
*TraceServicePreviewExportTracesToDatasetResult) IsSetSuccess() bool { +func (p *TraceServiceExtractSpanInfoResult) IsSetSuccess() bool { return p.Success != nil } -func (p *TraceServicePreviewExportTracesToDatasetResult) Read(iprot thrift.TProtocol) (err error) { +func (p *TraceServiceExtractSpanInfoResult) Read(iprot thrift.TProtocol) (err error) { var fieldTypeId thrift.TType var fieldId int16 @@ -20599,7 +24511,7 @@ ReadStructBeginError: ReadFieldBeginError: return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServicePreviewExportTracesToDatasetResult[fieldId]), err) + return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExtractSpanInfoResult[fieldId]), err) SkipFieldError: return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) @@ -20609,8 +24521,8 @@ ReadStructEndError: return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) } -func (p *TraceServicePreviewExportTracesToDatasetResult) ReadField0(iprot thrift.TProtocol) error { - _field := NewPreviewExportTracesToDatasetResponse() +func (p *TraceServiceExtractSpanInfoResult) ReadField0(iprot thrift.TProtocol) error { + _field := NewExtractSpanInfoResponse() if err := _field.Read(iprot); err != nil { return err } @@ -20618,9 +24530,9 @@ func (p *TraceServicePreviewExportTracesToDatasetResult) ReadField0(iprot thrift return nil } -func (p *TraceServicePreviewExportTracesToDatasetResult) Write(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceExtractSpanInfoResult) Write(oprot thrift.TProtocol) (err error) { var fieldId int16 - if err = oprot.WriteStructBegin("PreviewExportTracesToDataset_result"); err != nil { + if err = oprot.WriteStructBegin("ExtractSpanInfo_result"); err != nil { goto WriteStructBeginError } if p != nil { @@ -20646,7 +24558,7 @@ WriteStructEndError: return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err) } -func (p *TraceServicePreviewExportTracesToDatasetResult) writeField0(oprot thrift.TProtocol) (err error) { +func (p *TraceServiceExtractSpanInfoResult) writeField0(oprot thrift.TProtocol) (err error) { if p.IsSetSuccess() { if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil { goto WriteFieldBeginError @@ -20665,15 +24577,15 @@ WriteFieldEndError: return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err) } -func (p *TraceServicePreviewExportTracesToDatasetResult) String() string { +func (p *TraceServiceExtractSpanInfoResult) String() string { if p == nil { return "" } - return fmt.Sprintf("TraceServicePreviewExportTracesToDatasetResult(%+v)", *p) + return fmt.Sprintf("TraceServiceExtractSpanInfoResult(%+v)", *p) } -func (p *TraceServicePreviewExportTracesToDatasetResult) DeepEqual(ano *TraceServicePreviewExportTracesToDatasetResult) bool { +func (p *TraceServiceExtractSpanInfoResult) DeepEqual(ano *TraceServiceExtractSpanInfoResult) bool { if p == ano { return true } else if p == nil || ano == nil { @@ -20685,7 +24597,7 @@ func (p *TraceServicePreviewExportTracesToDatasetResult) DeepEqual(ano *TraceSer return true } -func (p *TraceServicePreviewExportTracesToDatasetResult) Field0DeepEqual(src *PreviewExportTracesToDatasetResponse) bool { +func (p *TraceServiceExtractSpanInfoResult) Field0DeepEqual(src *ExtractSpanInfoResponse) bool { if 
!p.Success.DeepEqual(src) { return false diff --git a/backend/kitex_gen/coze/loop/observability/trace/coze.loop.observability.trace_validator.go b/backend/kitex_gen/coze/loop/observability/trace/coze.loop.observability.trace_validator.go index 018f84d44..c8976cb21 100644 --- a/backend/kitex_gen/coze/loop/observability/trace/coze.loop.observability.trace_validator.go +++ b/backend/kitex_gen/coze/loop/observability/trace/coze.loop.observability.trace_validator.go @@ -382,3 +382,107 @@ func (p *PreviewExportTracesToDatasetResponse) IsValid() error { } return nil } +func (p *ChangeEvaluatorScoreRequest) IsValid() error { + if p.WorkspaceID <= int64(0) { + return fmt.Errorf("field WorkspaceID gt rule failed, current value: %v", p.WorkspaceID) + } + if len(p.AnnotationID) < int(1) { + return fmt.Errorf("field AnnotationID min_len rule failed, current value: %d", len(p.AnnotationID)) + } + if len(p.SpanID) < int(1) { + return fmt.Errorf("field SpanID min_len rule failed, current value: %d", len(p.SpanID)) + } + if p.StartTime <= int64(0) { + return fmt.Errorf("field StartTime gt rule failed, current value: %v", p.StartTime) + } + if p.Correction != nil { + if err := p.Correction.IsValid(); err != nil { + return fmt.Errorf("field Correction not valid, %w", err) + } + } + if p.Base != nil { + if err := p.Base.IsValid(); err != nil { + return fmt.Errorf("field Base not valid, %w", err) + } + } + return nil +} +func (p *ChangeEvaluatorScoreResponse) IsValid() error { + if p.Annotation != nil { + if err := p.Annotation.IsValid(); err != nil { + return fmt.Errorf("field Annotation not valid, %w", err) + } + } + if p.BaseResp != nil { + if err := p.BaseResp.IsValid(); err != nil { + return fmt.Errorf("field BaseResp not valid, %w", err) + } + } + return nil +} +func (p *ListAnnotationEvaluatorsRequest) IsValid() error { + if p.WorkspaceID <= int64(0) { + return fmt.Errorf("field WorkspaceID gt rule failed, current value: %v", p.WorkspaceID) + } + if p.Base != nil { + if err := p.Base.IsValid(); err != nil { + return fmt.Errorf("field Base not valid, %w", err) + } + } + return nil +} +func (p *ListAnnotationEvaluatorsResponse) IsValid() error { + if p.BaseResp != nil { + if err := p.BaseResp.IsValid(); err != nil { + return fmt.Errorf("field BaseResp not valid, %w", err) + } + } + return nil +} +func (p *ExtractSpanInfoRequest) IsValid() error { + if p.WorkspaceID <= int64(0) { + return fmt.Errorf("field WorkspaceID gt rule failed, current value: %v", p.WorkspaceID) + } + if len(p.TraceID) < int(1) { + return fmt.Errorf("field TraceID min_len rule failed, current value: %d", len(p.TraceID)) + } + if len(p.SpanIds) < int(1) { + return fmt.Errorf("field SpanIds MinLen rule failed, current value: %v", p.SpanIds) + } + if len(p.SpanIds) > int(500) { + return fmt.Errorf("field SpanIds MaxLen rule failed, current value: %v", p.SpanIds) + } + if p.StartTime != nil { + if *p.StartTime <= int64(0) { + return fmt.Errorf("field StartTime gt rule failed, current value: %v", *p.StartTime) + } + } + if p.EndTime != nil { + if *p.EndTime <= int64(0) { + return fmt.Errorf("field EndTime gt rule failed, current value: %v", *p.EndTime) + } + } + if len(p.FieldMappings) < int(1) { + return fmt.Errorf("field FieldMappings MinLen rule failed, current value: %v", p.FieldMappings) + } + if len(p.FieldMappings) > int(100) { + return fmt.Errorf("field FieldMappings MaxLen rule failed, current value: %v", p.FieldMappings) + } + if p.Base != nil { + if err := p.Base.IsValid(); err != nil { + return fmt.Errorf("field Base not 
valid, %w", err) + } + } + return nil +} +func (p *SpanInfo) IsValid() error { + return nil +} +func (p *ExtractSpanInfoResponse) IsValid() error { + if p.BaseResp != nil { + if err := p.BaseResp.IsValid(); err != nil { + return fmt.Errorf("field BaseResp not valid, %w", err) + } + } + return nil +} diff --git a/backend/kitex_gen/coze/loop/observability/trace/k-coze.loop.observability.trace.go b/backend/kitex_gen/coze/loop/observability/trace/k-coze.loop.observability.trace.go index 525b1da01..bada89ab9 100644 --- a/backend/kitex_gen/coze/loop/observability/trace/k-coze.loop.observability.trace.go +++ b/backend/kitex_gen/coze/loop/observability/trace/k-coze.loop.observability.trace.go @@ -18,6 +18,7 @@ import ( dataset0 "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/dataset" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/span" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/view" ) @@ -29,6 +30,7 @@ var ( _ = dataset0.KitexUnusedProtection _ = filter.KitexUnusedProtection _ = span.KitexUnusedProtection + _ = task.KitexUnusedProtection _ = view.KitexUnusedProtection ) @@ -10823,6 +10825,2039 @@ func (p *PreviewExportTracesToDatasetResponse) DeepCopy(s interface{}) error { return nil } +func (p *ChangeEvaluatorScoreRequest) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetWorkspaceID bool = false + var issetAnnotationID bool = false + var issetSpanID bool = false + var issetStartTime bool = false + var issetCorrection bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetAnnotationID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetSpanID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetStartTime = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetCorrection = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if 
fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetWorkspaceID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetAnnotationID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetSpanID { + fieldId = 3 + goto RequiredFieldNotSetError + } + + if !issetStartTime { + fieldId = 4 + goto RequiredFieldNotSetError + } + + if !issetCorrection { + fieldId = 5 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ChangeEvaluatorScoreRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_ChangeEvaluatorScoreRequest[fieldId])) +} + +func (p *ChangeEvaluatorScoreRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.WorkspaceID = _field + return offset, nil +} + +func (p *ChangeEvaluatorScoreRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.AnnotationID = _field + return offset, nil +} + +func (p *ChangeEvaluatorScoreRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.SpanID = _field + return offset, nil +} + +func (p *ChangeEvaluatorScoreRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.StartTime = _field + return offset, nil +} + +func (p *ChangeEvaluatorScoreRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + _field := annotation.NewCorrection() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Correction = _field + return offset, nil +} + +func (p *ChangeEvaluatorScoreRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + var _field *common.PlatformType + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.PlatformType = _field + return offset, nil +} + +func (p *ChangeEvaluatorScoreRequest) 
FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBase() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Base = _field + return offset, nil +} + +func (p *ChangeEvaluatorScoreRequest) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *ChangeEvaluatorScoreRequest) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField5(buf[offset:], w) + offset += p.fastWriteField6(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *ChangeEvaluatorScoreRequest) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *ChangeEvaluatorScoreRequest) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.WorkspaceID) + return offset +} + +func (p *ChangeEvaluatorScoreRequest) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.AnnotationID) + return offset +} + +func (p *ChangeEvaluatorScoreRequest) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 3) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.SpanID) + return offset +} + +func (p *ChangeEvaluatorScoreRequest) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 4) + offset += thrift.Binary.WriteI64(buf[offset:], p.StartTime) + return offset +} + +func (p *ChangeEvaluatorScoreRequest) fastWriteField5(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 5) + offset += p.Correction.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *ChangeEvaluatorScoreRequest) fastWriteField6(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetPlatformType() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 6) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.PlatformType) + } + return offset +} + +func (p *ChangeEvaluatorScoreRequest) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBase() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.Base.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *ChangeEvaluatorScoreRequest) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *ChangeEvaluatorScoreRequest) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.AnnotationID) + return l +} + +func (p *ChangeEvaluatorScoreRequest) field3Length() int { + l := 0 
+ l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.SpanID) + return l +} + +func (p *ChangeEvaluatorScoreRequest) field4Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *ChangeEvaluatorScoreRequest) field5Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Correction.BLength() + return l +} + +func (p *ChangeEvaluatorScoreRequest) field6Length() int { + l := 0 + if p.IsSetPlatformType() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.PlatformType) + } + return l +} + +func (p *ChangeEvaluatorScoreRequest) field255Length() int { + l := 0 + if p.IsSetBase() { + l += thrift.Binary.FieldBeginLength() + l += p.Base.BLength() + } + return l +} + +func (p *ChangeEvaluatorScoreRequest) DeepCopy(s interface{}) error { + src, ok := s.(*ChangeEvaluatorScoreRequest) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.WorkspaceID = src.WorkspaceID + + if src.AnnotationID != "" { + p.AnnotationID = kutils.StringDeepCopy(src.AnnotationID) + } + + if src.SpanID != "" { + p.SpanID = kutils.StringDeepCopy(src.SpanID) + } + + p.StartTime = src.StartTime + + var _correction *annotation.Correction + if src.Correction != nil { + _correction = &annotation.Correction{} + if err := _correction.DeepCopy(src.Correction); err != nil { + return err + } + } + p.Correction = _correction + + if src.PlatformType != nil { + tmp := *src.PlatformType + p.PlatformType = &tmp + } + + var _base *base.Base + if src.Base != nil { + _base = &base.Base{} + if err := _base.DeepCopy(src.Base); err != nil { + return err + } + } + p.Base = _base + + return nil +} + +func (p *ChangeEvaluatorScoreResponse) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetAnnotation bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetAnnotation = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetAnnotation { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ChangeEvaluatorScoreResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", 
fieldIDToName_ChangeEvaluatorScoreResponse[fieldId])) +} + +func (p *ChangeEvaluatorScoreResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := annotation.NewAnnotation() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Annotation = _field + return offset, nil +} + +func (p *ChangeEvaluatorScoreResponse) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBaseResp() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BaseResp = _field + return offset, nil +} + +func (p *ChangeEvaluatorScoreResponse) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *ChangeEvaluatorScoreResponse) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *ChangeEvaluatorScoreResponse) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *ChangeEvaluatorScoreResponse) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Annotation.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *ChangeEvaluatorScoreResponse) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBaseResp() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.BaseResp.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *ChangeEvaluatorScoreResponse) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Annotation.BLength() + return l +} + +func (p *ChangeEvaluatorScoreResponse) field255Length() int { + l := 0 + if p.IsSetBaseResp() { + l += thrift.Binary.FieldBeginLength() + l += p.BaseResp.BLength() + } + return l +} + +func (p *ChangeEvaluatorScoreResponse) DeepCopy(s interface{}) error { + src, ok := s.(*ChangeEvaluatorScoreResponse) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _annotation *annotation.Annotation + if src.Annotation != nil { + _annotation = &annotation.Annotation{} + if err := _annotation.DeepCopy(src.Annotation); err != nil { + return err + } + } + p.Annotation = _annotation + + var _baseResp *base.BaseResp + if src.BaseResp != nil { + _baseResp = &base.BaseResp{} + if err := _baseResp.DeepCopy(src.BaseResp); err != nil { + return err + } + } + p.BaseResp = _baseResp + + return nil +} + +func (p *ListAnnotationEvaluatorsRequest) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetWorkspaceID bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = 
p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetWorkspaceID { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ListAnnotationEvaluatorsRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_ListAnnotationEvaluatorsRequest[fieldId])) +} + +func (p *ListAnnotationEvaluatorsRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.WorkspaceID = _field + return offset, nil +} + +func (p *ListAnnotationEvaluatorsRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field *string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.Name = _field + return offset, nil +} + +func (p *ListAnnotationEvaluatorsRequest) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBase() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Base = _field + return offset, nil +} + +func (p *ListAnnotationEvaluatorsRequest) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *ListAnnotationEvaluatorsRequest) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *ListAnnotationEvaluatorsRequest) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *ListAnnotationEvaluatorsRequest) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.WorkspaceID) + return offset +} + +func (p *ListAnnotationEvaluatorsRequest) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetName() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.Name) + } + return offset +} + +func (p *ListAnnotationEvaluatorsRequest) 
fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBase() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.Base.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *ListAnnotationEvaluatorsRequest) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *ListAnnotationEvaluatorsRequest) field2Length() int { + l := 0 + if p.IsSetName() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.Name) + } + return l +} + +func (p *ListAnnotationEvaluatorsRequest) field255Length() int { + l := 0 + if p.IsSetBase() { + l += thrift.Binary.FieldBeginLength() + l += p.Base.BLength() + } + return l +} + +func (p *ListAnnotationEvaluatorsRequest) DeepCopy(s interface{}) error { + src, ok := s.(*ListAnnotationEvaluatorsRequest) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.WorkspaceID = src.WorkspaceID + + if src.Name != nil { + var tmp string + if *src.Name != "" { + tmp = kutils.StringDeepCopy(*src.Name) + } + p.Name = &tmp + } + + var _base *base.Base + if src.Base != nil { + _base = &base.Base{} + if err := _base.DeepCopy(src.Base); err != nil { + return err + } + } + p.Base = _base + + return nil +} + +func (p *ListAnnotationEvaluatorsResponse) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetEvaluators bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetEvaluators = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetEvaluators { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ListAnnotationEvaluatorsResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_ListAnnotationEvaluatorsResponse[fieldId])) +} + +func (p *ListAnnotationEvaluatorsResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*annotation.AnnotationEvaluator, 0, size) + values := make([]annotation.AnnotationEvaluator, size) + for i := 0; i < 
size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.Evaluators = _field + return offset, nil +} + +func (p *ListAnnotationEvaluatorsResponse) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBaseResp() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BaseResp = _field + return offset, nil +} + +func (p *ListAnnotationEvaluatorsResponse) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *ListAnnotationEvaluatorsResponse) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *ListAnnotationEvaluatorsResponse) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *ListAnnotationEvaluatorsResponse) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 1) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.Evaluators { + length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + return offset +} + +func (p *ListAnnotationEvaluatorsResponse) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBaseResp() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.BaseResp.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *ListAnnotationEvaluatorsResponse) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.Evaluators { + _ = v + l += v.BLength() + } + return l +} + +func (p *ListAnnotationEvaluatorsResponse) field255Length() int { + l := 0 + if p.IsSetBaseResp() { + l += thrift.Binary.FieldBeginLength() + l += p.BaseResp.BLength() + } + return l +} + +func (p *ListAnnotationEvaluatorsResponse) DeepCopy(s interface{}) error { + src, ok := s.(*ListAnnotationEvaluatorsResponse) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.Evaluators != nil { + p.Evaluators = make([]*annotation.AnnotationEvaluator, 0, len(src.Evaluators)) + for _, elem := range src.Evaluators { + var _elem *annotation.AnnotationEvaluator + if elem != nil { + _elem = &annotation.AnnotationEvaluator{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.Evaluators = append(p.Evaluators, _elem) + } + } + + var _baseResp *base.BaseResp + if src.BaseResp != nil { + _baseResp = &base.BaseResp{} + if err := _baseResp.DeepCopy(src.BaseResp); err != nil { + return err + } + } + p.BaseResp = _baseResp + + return nil +} + +func (p *ExtractSpanInfoRequest) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetWorkspaceID bool = false + var issetTraceID bool = false + var issetSpanIds bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetWorkspaceID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetTraceID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 3: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField3(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetSpanIds = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 4: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField4(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 5: + if fieldTypeId == thrift.I64 { + l, err = p.FastReadField5(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 6: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField6(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 7: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField7(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetWorkspaceID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetTraceID { + fieldId = 2 + goto RequiredFieldNotSetError + } + + if !issetSpanIds { + fieldId = 3 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ExtractSpanInfoRequest[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_ExtractSpanInfoRequest[fieldId])) +} + +func (p *ExtractSpanInfoRequest) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else 
{ + offset += l + _field = v + } + p.WorkspaceID = _field + return offset, nil +} + +func (p *ExtractSpanInfoRequest) FastReadField2(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.TraceID = _field + return offset, nil +} + +func (p *ExtractSpanInfoRequest) FastReadField3(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]string, 0, size) + for i := 0; i < size; i++ { + var _elem string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _elem = v + } + + _field = append(_field, _elem) + } + p.SpanIds = _field + return offset, nil +} + +func (p *ExtractSpanInfoRequest) FastReadField4(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.StartTime = _field + return offset, nil +} + +func (p *ExtractSpanInfoRequest) FastReadField5(buf []byte) (int, error) { + offset := 0 + + var _field *int64 + if v, l, err := thrift.Binary.ReadI64(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.EndTime = _field + return offset, nil +} + +func (p *ExtractSpanInfoRequest) FastReadField6(buf []byte) (int, error) { + offset := 0 + + var _field *common.PlatformType + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = &v + } + p.PlatformType = _field + return offset, nil +} + +func (p *ExtractSpanInfoRequest) FastReadField7(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*dataset0.FieldMapping, 0, size) + values := make([]dataset0.FieldMapping, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.FieldMappings = _field + return offset, nil +} + +func (p *ExtractSpanInfoRequest) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBase() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Base = _field + return offset, nil +} + +func (p *ExtractSpanInfoRequest) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *ExtractSpanInfoRequest) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField4(buf[offset:], w) + offset += p.fastWriteField5(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + offset += p.fastWriteField3(buf[offset:], w) + offset += p.fastWriteField6(buf[offset:], w) + offset += p.fastWriteField7(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *ExtractSpanInfoRequest) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + l += p.field3Length() + l += p.field4Length() + l += p.field5Length() + l += p.field6Length() + l 
+= p.field7Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *ExtractSpanInfoRequest) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 1) + offset += thrift.Binary.WriteI64(buf[offset:], p.WorkspaceID) + return offset +} + +func (p *ExtractSpanInfoRequest) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 2) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.TraceID) + return offset +} + +func (p *ExtractSpanInfoRequest) fastWriteField3(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 3) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.SpanIds { + length++ + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, v) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRING, length) + return offset +} + +func (p *ExtractSpanInfoRequest) fastWriteField4(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetStartTime() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 4) + offset += thrift.Binary.WriteI64(buf[offset:], *p.StartTime) + } + return offset +} + +func (p *ExtractSpanInfoRequest) fastWriteField5(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetEndTime() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.I64, 5) + offset += thrift.Binary.WriteI64(buf[offset:], *p.EndTime) + } + return offset +} + +func (p *ExtractSpanInfoRequest) fastWriteField6(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetPlatformType() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 6) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, *p.PlatformType) + } + return offset +} + +func (p *ExtractSpanInfoRequest) fastWriteField7(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetFieldMappings() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 7) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.FieldMappings { + length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + } + return offset +} + +func (p *ExtractSpanInfoRequest) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBase() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.Base.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *ExtractSpanInfoRequest) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + return l +} + +func (p *ExtractSpanInfoRequest) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.TraceID) + return l +} + +func (p *ExtractSpanInfoRequest) field3Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.SpanIds { + _ = v + l += thrift.Binary.StringLengthNocopy(v) + } + return l +} + +func (p *ExtractSpanInfoRequest) field4Length() int { + l := 0 + if p.IsSetStartTime() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p 
*ExtractSpanInfoRequest) field5Length() int { + l := 0 + if p.IsSetEndTime() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.I64Length() + } + return l +} + +func (p *ExtractSpanInfoRequest) field6Length() int { + l := 0 + if p.IsSetPlatformType() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(*p.PlatformType) + } + return l +} + +func (p *ExtractSpanInfoRequest) field7Length() int { + l := 0 + if p.IsSetFieldMappings() { + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.FieldMappings { + _ = v + l += v.BLength() + } + } + return l +} + +func (p *ExtractSpanInfoRequest) field255Length() int { + l := 0 + if p.IsSetBase() { + l += thrift.Binary.FieldBeginLength() + l += p.Base.BLength() + } + return l +} + +func (p *ExtractSpanInfoRequest) DeepCopy(s interface{}) error { + src, ok := s.(*ExtractSpanInfoRequest) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + p.WorkspaceID = src.WorkspaceID + + if src.TraceID != "" { + p.TraceID = kutils.StringDeepCopy(src.TraceID) + } + + if src.SpanIds != nil { + p.SpanIds = make([]string, 0, len(src.SpanIds)) + for _, elem := range src.SpanIds { + var _elem string + if elem != "" { + _elem = kutils.StringDeepCopy(elem) + } + p.SpanIds = append(p.SpanIds, _elem) + } + } + + if src.StartTime != nil { + tmp := *src.StartTime + p.StartTime = &tmp + } + + if src.EndTime != nil { + tmp := *src.EndTime + p.EndTime = &tmp + } + + if src.PlatformType != nil { + tmp := *src.PlatformType + p.PlatformType = &tmp + } + + if src.FieldMappings != nil { + p.FieldMappings = make([]*dataset0.FieldMapping, 0, len(src.FieldMappings)) + for _, elem := range src.FieldMappings { + var _elem *dataset0.FieldMapping + if elem != nil { + _elem = &dataset0.FieldMapping{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.FieldMappings = append(p.FieldMappings, _elem) + } + } + + var _base *base.Base + if src.Base != nil { + _base = &base.Base{} + if err := _base.DeepCopy(src.Base); err != nil { + return err + } + } + p.Base = _base + + return nil +} + +func (p *SpanInfo) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetSpanID bool = false + var issetFieldList bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetSpanID = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 2: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField2(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetFieldList = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetSpanID { + fieldId = 1 + goto RequiredFieldNotSetError + } + + if !issetFieldList { + fieldId = 2 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field 
%d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_SpanInfo[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_SpanInfo[fieldId])) +} + +func (p *SpanInfo) FastReadField1(buf []byte) (int, error) { + offset := 0 + + var _field string + if v, l, err := thrift.Binary.ReadString(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + _field = v + } + p.SpanID = _field + return offset, nil +} + +func (p *SpanInfo) FastReadField2(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*dataset0.FieldData, 0, size) + values := make([]dataset0.FieldData, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.FieldList = _field + return offset, nil +} + +func (p *SpanInfo) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *SpanInfo) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField2(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *SpanInfo) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field2Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *SpanInfo) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRING, 1) + offset += thrift.Binary.WriteStringNocopy(buf[offset:], w, p.SpanID) + return offset +} + +func (p *SpanInfo) fastWriteField2(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 2) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.FieldList { + length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + return offset +} + +func (p *SpanInfo) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.StringLengthNocopy(p.SpanID) + return l +} + +func (p *SpanInfo) field2Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.FieldList { + _ = v + l += v.BLength() + } + return l +} + +func (p *SpanInfo) DeepCopy(s interface{}) error { + src, ok := s.(*SpanInfo) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.SpanID != "" { + p.SpanID = kutils.StringDeepCopy(src.SpanID) + } + + if src.FieldList != nil { + p.FieldList = make([]*dataset0.FieldData, 0, len(src.FieldList)) + for _, elem := range src.FieldList { + var _elem *dataset0.FieldData + if elem != nil { + _elem = &dataset0.FieldData{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.FieldList = append(p.FieldList, _elem) + } + } + + return nil +} + +func (p 
*ExtractSpanInfoResponse) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + var issetSpanInfos bool = false + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + issetSpanInfos = true + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + case 255: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField255(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + if !issetSpanInfos { + fieldId = 1 + goto RequiredFieldNotSetError + } + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_ExtractSpanInfoResponse[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +RequiredFieldNotSetError: + return offset, thrift.NewProtocolException(thrift.INVALID_DATA, fmt.Sprintf("required field %s is not set", fieldIDToName_ExtractSpanInfoResponse[fieldId])) +} + +func (p *ExtractSpanInfoResponse) FastReadField1(buf []byte) (int, error) { + offset := 0 + + _, size, l, err := thrift.Binary.ReadListBegin(buf[offset:]) + offset += l + if err != nil { + return offset, err + } + _field := make([]*SpanInfo, 0, size) + values := make([]SpanInfo, size) + for i := 0; i < size; i++ { + _elem := &values[i] + _elem.InitDefault() + if l, err := _elem.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + + _field = append(_field, _elem) + } + p.SpanInfos = _field + return offset, nil +} + +func (p *ExtractSpanInfoResponse) FastReadField255(buf []byte) (int, error) { + offset := 0 + _field := base.NewBaseResp() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.BaseResp = _field + return offset, nil +} + +func (p *ExtractSpanInfoResponse) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *ExtractSpanInfoResponse) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + offset += p.fastWriteField255(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *ExtractSpanInfoResponse) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + l += p.field255Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *ExtractSpanInfoResponse) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.LIST, 1) + listBeginOffset := offset + offset += thrift.Binary.ListBeginLength() + var length int + for _, v := range p.SpanInfos { + 
length++ + offset += v.FastWriteNocopy(buf[offset:], w) + } + thrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length) + return offset +} + +func (p *ExtractSpanInfoResponse) fastWriteField255(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetBaseResp() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 255) + offset += p.BaseResp.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *ExtractSpanInfoResponse) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += thrift.Binary.ListBeginLength() + for _, v := range p.SpanInfos { + _ = v + l += v.BLength() + } + return l +} + +func (p *ExtractSpanInfoResponse) field255Length() int { + l := 0 + if p.IsSetBaseResp() { + l += thrift.Binary.FieldBeginLength() + l += p.BaseResp.BLength() + } + return l +} + +func (p *ExtractSpanInfoResponse) DeepCopy(s interface{}) error { + src, ok := s.(*ExtractSpanInfoResponse) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + if src.SpanInfos != nil { + p.SpanInfos = make([]*SpanInfo, 0, len(src.SpanInfos)) + for _, elem := range src.SpanInfos { + var _elem *SpanInfo + if elem != nil { + _elem = &SpanInfo{} + if err := _elem.DeepCopy(elem); err != nil { + return err + } + } + + p.SpanInfos = append(p.SpanInfos, _elem) + } + } + + var _baseResp *base.BaseResp + if src.BaseResp != nil { + _baseResp = &base.BaseResp{} + if err := _baseResp.DeepCopy(src.BaseResp); err != nil { + return err + } + } + p.BaseResp = _baseResp + + return nil +} + func (p *TraceServiceListSpansArgs) FastRead(buf []byte) (int, error) { var err error @@ -10867,14 +12902,716 @@ func (p *TraceServiceListSpansArgs) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListSpansArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListSpansArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TraceServiceListSpansArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := NewListSpansRequest() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Req = _field + return offset, nil +} + +func (p *TraceServiceListSpansArgs) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TraceServiceListSpansArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TraceServiceListSpansArgs) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TraceServiceListSpansArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Req.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *TraceServiceListSpansArgs) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Req.BLength() + return l +} + +func (p *TraceServiceListSpansArgs) 
DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceListSpansArgs) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _req *ListSpansRequest + if src.Req != nil { + _req = &ListSpansRequest{} + if err := _req.DeepCopy(src.Req); err != nil { + return err + } + } + p.Req = _req + + return nil +} + +func (p *TraceServiceListSpansResult) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListSpansResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TraceServiceListSpansResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + _field := NewListSpansResponse() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = _field + return offset, nil +} + +func (p *TraceServiceListSpansResult) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TraceServiceListSpansResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField0(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TraceServiceListSpansResult) BLength() int { + l := 0 + if p != nil { + l += p.field0Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TraceServiceListSpansResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TraceServiceListSpansResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += thrift.Binary.FieldBeginLength() + l += p.Success.BLength() + } + return l +} + +func (p *TraceServiceListSpansResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceListSpansResult) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _success *ListSpansResponse + if src.Success != nil { + _success = &ListSpansResponse{} + if err := _success.DeepCopy(src.Success); err != nil { + return err + } + } + p.Success = _success + + return nil +} + +func (p *TraceServiceGetTraceArgs) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto 
ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTraceArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TraceServiceGetTraceArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := NewGetTraceRequest() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Req = _field + return offset, nil +} + +func (p *TraceServiceGetTraceArgs) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TraceServiceGetTraceArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TraceServiceGetTraceArgs) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TraceServiceGetTraceArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Req.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *TraceServiceGetTraceArgs) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Req.BLength() + return l +} + +func (p *TraceServiceGetTraceArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceGetTraceArgs) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _req *GetTraceRequest + if src.Req != nil { + _req = &GetTraceRequest{} + if err := _req.DeepCopy(src.Req); err != nil { + return err + } + } + p.Req = _req + + return nil +} + +func (p *TraceServiceGetTraceResult) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, 
fieldId, fieldIDToName_TraceServiceGetTraceResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TraceServiceGetTraceResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + _field := NewGetTraceResponse() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = _field + return offset, nil +} + +func (p *TraceServiceGetTraceResult) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TraceServiceGetTraceResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField0(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TraceServiceGetTraceResult) BLength() int { + l := 0 + if p != nil { + l += p.field0Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TraceServiceGetTraceResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TraceServiceGetTraceResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += thrift.Binary.FieldBeginLength() + l += p.Success.BLength() + } + return l +} + +func (p *TraceServiceGetTraceResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceGetTraceResult) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _success *GetTraceResponse + if src.Success != nil { + _success = &GetTraceResponse{} + if err := _success.DeepCopy(src.Success); err != nil { + return err + } + } + p.Success = _success + + return nil +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoArgs[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastReadField1(buf []byte) (int, error) { + offset := 0 + _field := NewBatchGetTracesAdvanceInfoRequest() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Req = _field + return offset, nil +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastWrite(buf []byte) int { + return 
p.FastWriteNocopy(buf, nil) +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField1(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) BLength() int { + l := 0 + if p != nil { + l += p.field1Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) + offset += p.Req.FastWriteNocopy(buf[offset:], w) + return offset +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) field1Length() int { + l := 0 + l += thrift.Binary.FieldBeginLength() + l += p.Req.BLength() + return l +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceBatchGetTracesAdvanceInfoArgs) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _req *BatchGetTracesAdvanceInfoRequest + if src.Req != nil { + _req = &BatchGetTracesAdvanceInfoRequest{} + if err := _req.DeepCopy(src.Req); err != nil { + return err + } + } + p.Req = _req + + return nil +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 0: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField0(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoResult[fieldId]), err) +SkipFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastReadField0(buf []byte) (int, error) { + offset := 0 + _field := NewBatchGetTracesAdvanceInfoResponse() + if l, err := _field.FastRead(buf[offset:]); err != nil { + return offset, err + } else { + offset += l + } + p.Success = _field + return offset, nil +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastWrite(buf []byte) int { + return p.FastWriteNocopy(buf, nil) +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p != nil { + offset += p.fastWriteField0(buf[offset:], w) + } + offset += thrift.Binary.WriteFieldStop(buf[offset:]) + return offset +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) BLength() int { + l := 0 + if p != nil { + l += p.field0Length() + } + l += thrift.Binary.FieldStopLength() + return l +} + +func (p 
*TraceServiceBatchGetTracesAdvanceInfoResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { + offset := 0 + if p.IsSetSuccess() { + offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) + offset += p.Success.FastWriteNocopy(buf[offset:], w) + } + return offset +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) field0Length() int { + l := 0 + if p.IsSetSuccess() { + l += thrift.Binary.FieldBeginLength() + l += p.Success.BLength() + } + return l +} + +func (p *TraceServiceBatchGetTracesAdvanceInfoResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceBatchGetTracesAdvanceInfoResult) + if !ok { + return fmt.Errorf("%T's type not matched %T", s, p) + } + + var _success *BatchGetTracesAdvanceInfoResponse + if src.Success != nil { + _success = &BatchGetTracesAdvanceInfoResponse{} + if err := _success.DeepCopy(src.Success); err != nil { + return err + } + } + p.Success = _success + + return nil +} + +func (p *TraceServiceIngestTracesInnerArgs) FastRead(buf []byte) (int, error) { + + var err error + var offset int + var l int + var fieldTypeId thrift.TType + var fieldId int16 + for { + fieldTypeId, fieldId, l, err = thrift.Binary.ReadFieldBegin(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldBeginError + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + l, err = p.FastReadField1(buf[offset:]) + offset += l + if err != nil { + goto ReadFieldError + } + } else { + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + default: + l, err = thrift.Binary.Skip(buf[offset:], fieldTypeId) + offset += l + if err != nil { + goto SkipFieldError + } + } + } + + return offset, nil +ReadFieldBeginError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) +ReadFieldError: + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceIngestTracesInnerArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceListSpansArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceIngestTracesInnerArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewListSpansRequest() + _field := NewIngestTracesRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -10884,11 +13621,11 @@ func (p *TraceServiceListSpansArgs) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceListSpansArgs) FastWrite(buf []byte) int { +func (p *TraceServiceIngestTracesInnerArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceListSpansArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceIngestTracesInnerArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -10897,7 +13634,7 @@ func (p *TraceServiceListSpansArgs) FastWriteNocopy(buf []byte, w thrift.NocopyW return offset } -func (p *TraceServiceListSpansArgs) BLength() int { +func (p *TraceServiceIngestTracesInnerArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -10906,29 +13643,29 @@ func (p *TraceServiceListSpansArgs) BLength() int { return l } -func (p *TraceServiceListSpansArgs) fastWriteField1(buf []byte, w 
thrift.NocopyWriter) int { +func (p *TraceServiceIngestTracesInnerArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceListSpansArgs) field1Length() int { +func (p *TraceServiceIngestTracesInnerArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceListSpansArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceListSpansArgs) +func (p *TraceServiceIngestTracesInnerArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceIngestTracesInnerArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *ListSpansRequest + var _req *IngestTracesRequest if src.Req != nil { - _req = &ListSpansRequest{} + _req = &IngestTracesRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -10938,7 +13675,7 @@ func (p *TraceServiceListSpansArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceListSpansResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceIngestTracesInnerResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -10982,14 +13719,14 @@ func (p *TraceServiceListSpansResult) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListSpansResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceIngestTracesInnerResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceListSpansResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceIngestTracesInnerResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewListSpansResponse() + _field := NewIngestTracesResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -10999,11 +13736,11 @@ func (p *TraceServiceListSpansResult) FastReadField0(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceListSpansResult) FastWrite(buf []byte) int { +func (p *TraceServiceIngestTracesInnerResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceListSpansResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceIngestTracesInnerResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -11012,7 +13749,7 @@ func (p *TraceServiceListSpansResult) FastWriteNocopy(buf []byte, w thrift.Nocop return offset } -func (p *TraceServiceListSpansResult) BLength() int { +func (p *TraceServiceIngestTracesInnerResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -11021,7 +13758,7 @@ func (p *TraceServiceListSpansResult) BLength() int { return l } -func (p *TraceServiceListSpansResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceIngestTracesInnerResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ 
-11030,7 +13767,7 @@ func (p *TraceServiceListSpansResult) fastWriteField0(buf []byte, w thrift.Nocop return offset } -func (p *TraceServiceListSpansResult) field0Length() int { +func (p *TraceServiceIngestTracesInnerResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -11039,15 +13776,15 @@ func (p *TraceServiceListSpansResult) field0Length() int { return l } -func (p *TraceServiceListSpansResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceListSpansResult) +func (p *TraceServiceIngestTracesInnerResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceIngestTracesInnerResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *ListSpansResponse + var _success *IngestTracesResponse if src.Success != nil { - _success = &ListSpansResponse{} + _success = &IngestTracesResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -11057,7 +13794,7 @@ func (p *TraceServiceListSpansResult) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceGetTraceArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceGetTracesMetaInfoArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -11101,14 +13838,14 @@ func (p *TraceServiceGetTraceArgs) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTraceArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTracesMetaInfoArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceGetTraceArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceGetTracesMetaInfoArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewGetTraceRequest() + _field := NewGetTracesMetaInfoRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -11118,11 +13855,11 @@ func (p *TraceServiceGetTraceArgs) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceGetTraceArgs) FastWrite(buf []byte) int { +func (p *TraceServiceGetTracesMetaInfoArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceGetTraceArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceGetTracesMetaInfoArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -11131,7 +13868,7 @@ func (p *TraceServiceGetTraceArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWr return offset } -func (p *TraceServiceGetTraceArgs) BLength() int { +func (p *TraceServiceGetTracesMetaInfoArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -11140,29 +13877,29 @@ func (p *TraceServiceGetTraceArgs) BLength() int { return l } -func (p *TraceServiceGetTraceArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceGetTracesMetaInfoArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p 
*TraceServiceGetTraceArgs) field1Length() int { +func (p *TraceServiceGetTracesMetaInfoArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceGetTraceArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceGetTraceArgs) +func (p *TraceServiceGetTracesMetaInfoArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceGetTracesMetaInfoArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *GetTraceRequest + var _req *GetTracesMetaInfoRequest if src.Req != nil { - _req = &GetTraceRequest{} + _req = &GetTracesMetaInfoRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -11172,7 +13909,7 @@ func (p *TraceServiceGetTraceArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceGetTraceResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceGetTracesMetaInfoResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -11216,14 +13953,14 @@ func (p *TraceServiceGetTraceResult) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTraceResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTracesMetaInfoResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceGetTraceResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceGetTracesMetaInfoResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewGetTraceResponse() + _field := NewGetTracesMetaInfoResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -11233,11 +13970,11 @@ func (p *TraceServiceGetTraceResult) FastReadField0(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceGetTraceResult) FastWrite(buf []byte) int { +func (p *TraceServiceGetTracesMetaInfoResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceGetTraceResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceGetTracesMetaInfoResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -11246,7 +13983,7 @@ func (p *TraceServiceGetTraceResult) FastWriteNocopy(buf []byte, w thrift.Nocopy return offset } -func (p *TraceServiceGetTraceResult) BLength() int { +func (p *TraceServiceGetTracesMetaInfoResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -11255,7 +13992,7 @@ func (p *TraceServiceGetTraceResult) BLength() int { return l } -func (p *TraceServiceGetTraceResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceGetTracesMetaInfoResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -11264,7 +14001,7 @@ func (p *TraceServiceGetTraceResult) fastWriteField0(buf []byte, w thrift.Nocopy return offset } -func (p *TraceServiceGetTraceResult) field0Length() int { +func (p *TraceServiceGetTracesMetaInfoResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += 
thrift.Binary.FieldBeginLength() @@ -11273,15 +14010,15 @@ func (p *TraceServiceGetTraceResult) field0Length() int { return l } -func (p *TraceServiceGetTraceResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceGetTraceResult) +func (p *TraceServiceGetTracesMetaInfoResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceGetTracesMetaInfoResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *GetTraceResponse + var _success *GetTracesMetaInfoResponse if src.Success != nil { - _success = &GetTraceResponse{} + _success = &GetTracesMetaInfoResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -11291,7 +14028,7 @@ func (p *TraceServiceGetTraceResult) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceCreateViewArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -11335,14 +14072,14 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastRead(buf []byte) (int, e ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateViewArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceCreateViewArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewBatchGetTracesAdvanceInfoRequest() + _field := NewCreateViewRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -11352,11 +14089,11 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastReadField1(buf []byte) ( return offset, nil } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastWrite(buf []byte) int { +func (p *TraceServiceCreateViewArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceCreateViewArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -11365,7 +14102,7 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) FastWriteNocopy(buf []byte, return offset } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) BLength() int { +func (p *TraceServiceCreateViewArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -11374,29 +14111,29 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) BLength() int { return l } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceCreateViewArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) field1Length() int { +func (p *TraceServiceCreateViewArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += 
p.Req.BLength() return l } -func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceBatchGetTracesAdvanceInfoArgs) +func (p *TraceServiceCreateViewArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceCreateViewArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *BatchGetTracesAdvanceInfoRequest + var _req *CreateViewRequest if src.Req != nil { - _req = &BatchGetTracesAdvanceInfoRequest{} + _req = &CreateViewRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -11406,7 +14143,7 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoArgs) DeepCopy(s interface{}) erro return nil } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceCreateViewResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -11450,14 +14187,14 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastRead(buf []byte) (int, ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceBatchGetTracesAdvanceInfoResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateViewResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceCreateViewResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewBatchGetTracesAdvanceInfoResponse() + _field := NewCreateViewResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -11467,11 +14204,11 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastReadField0(buf []byte) return offset, nil } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastWrite(buf []byte) int { +func (p *TraceServiceCreateViewResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceCreateViewResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -11480,7 +14217,7 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoResult) FastWriteNocopy(buf []byte return offset } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) BLength() int { +func (p *TraceServiceCreateViewResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -11489,7 +14226,7 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoResult) BLength() int { return l } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceCreateViewResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -11498,7 +14235,7 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoResult) fastWriteField0(buf []byte return offset } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) field0Length() int { +func (p *TraceServiceCreateViewResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += 
thrift.Binary.FieldBeginLength() @@ -11507,15 +14244,15 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoResult) field0Length() int { return l } -func (p *TraceServiceBatchGetTracesAdvanceInfoResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceBatchGetTracesAdvanceInfoResult) +func (p *TraceServiceCreateViewResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceCreateViewResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *BatchGetTracesAdvanceInfoResponse + var _success *CreateViewResponse if src.Success != nil { - _success = &BatchGetTracesAdvanceInfoResponse{} + _success = &CreateViewResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -11525,7 +14262,7 @@ func (p *TraceServiceBatchGetTracesAdvanceInfoResult) DeepCopy(s interface{}) er return nil } -func (p *TraceServiceIngestTracesInnerArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceUpdateViewArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -11569,14 +14306,14 @@ func (p *TraceServiceIngestTracesInnerArgs) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceIngestTracesInnerArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateViewArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceIngestTracesInnerArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceUpdateViewArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewIngestTracesRequest() + _field := NewUpdateViewRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -11586,11 +14323,11 @@ func (p *TraceServiceIngestTracesInnerArgs) FastReadField1(buf []byte) (int, err return offset, nil } -func (p *TraceServiceIngestTracesInnerArgs) FastWrite(buf []byte) int { +func (p *TraceServiceUpdateViewArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceIngestTracesInnerArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceUpdateViewArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -11599,7 +14336,7 @@ func (p *TraceServiceIngestTracesInnerArgs) FastWriteNocopy(buf []byte, w thrift return offset } -func (p *TraceServiceIngestTracesInnerArgs) BLength() int { +func (p *TraceServiceUpdateViewArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -11608,29 +14345,29 @@ func (p *TraceServiceIngestTracesInnerArgs) BLength() int { return l } -func (p *TraceServiceIngestTracesInnerArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceUpdateViewArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceIngestTracesInnerArgs) field1Length() int { +func (p *TraceServiceUpdateViewArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } 
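
The hunks above and below all follow the same generated Args/Result wrapper pattern: each RPC method gets an Args struct that wraps its request, and DeepCopy type-asserts the source wrapper and clones the wrapped request into a fresh allocation. The following is a minimal, hand-written sketch of that pattern for orientation only; the names (viewRequest, updateViewArgs) are simplified stand-ins and not the actual kitex-generated types.

```go
// Sketch of the generated Args-wrapper DeepCopy pattern (illustrative only).
package main

import "fmt"

type viewRequest struct {
	ViewName string
}

// DeepCopy copies src into r field by field.
func (r *viewRequest) DeepCopy(src *viewRequest) error {
	r.ViewName = src.ViewName
	return nil
}

type updateViewArgs struct {
	Req *viewRequest
}

// DeepCopy mirrors the generated pattern: assert the concrete wrapper type,
// then deep-copy a non-nil request into a freshly allocated struct.
func (p *updateViewArgs) DeepCopy(s interface{}) error {
	src, ok := s.(*updateViewArgs)
	if !ok {
		return fmt.Errorf("%T's type not matched %T", s, p)
	}
	if src.Req != nil {
		req := &viewRequest{}
		if err := req.DeepCopy(src.Req); err != nil {
			return err
		}
		p.Req = req
	}
	return nil
}

func main() {
	a := &updateViewArgs{Req: &viewRequest{ViewName: "latency-view"}}
	b := &updateViewArgs{}
	if err := b.DeepCopy(a); err != nil {
		panic(err)
	}
	fmt.Println(b.Req.ViewName) // prints "latency-view"; b.Req is an independent copy
}
```
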
-func (p *TraceServiceIngestTracesInnerArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceIngestTracesInnerArgs) +func (p *TraceServiceUpdateViewArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceUpdateViewArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *IngestTracesRequest + var _req *UpdateViewRequest if src.Req != nil { - _req = &IngestTracesRequest{} + _req = &UpdateViewRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -11640,7 +14377,7 @@ func (p *TraceServiceIngestTracesInnerArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceIngestTracesInnerResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceUpdateViewResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -11684,14 +14421,14 @@ func (p *TraceServiceIngestTracesInnerResult) FastRead(buf []byte) (int, error) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceIngestTracesInnerResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateViewResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceIngestTracesInnerResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceUpdateViewResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewIngestTracesResponse() + _field := NewUpdateViewResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -11701,11 +14438,11 @@ func (p *TraceServiceIngestTracesInnerResult) FastReadField0(buf []byte) (int, e return offset, nil } -func (p *TraceServiceIngestTracesInnerResult) FastWrite(buf []byte) int { +func (p *TraceServiceUpdateViewResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceIngestTracesInnerResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceUpdateViewResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -11714,7 +14451,7 @@ func (p *TraceServiceIngestTracesInnerResult) FastWriteNocopy(buf []byte, w thri return offset } -func (p *TraceServiceIngestTracesInnerResult) BLength() int { +func (p *TraceServiceUpdateViewResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -11723,7 +14460,7 @@ func (p *TraceServiceIngestTracesInnerResult) BLength() int { return l } -func (p *TraceServiceIngestTracesInnerResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceUpdateViewResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -11732,7 +14469,7 @@ func (p *TraceServiceIngestTracesInnerResult) fastWriteField0(buf []byte, w thri return offset } -func (p *TraceServiceIngestTracesInnerResult) field0Length() int { +func (p *TraceServiceUpdateViewResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -11741,15 +14478,15 @@ func (p *TraceServiceIngestTracesInnerResult) field0Length() int { return l } -func (p 
*TraceServiceIngestTracesInnerResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceIngestTracesInnerResult) +func (p *TraceServiceUpdateViewResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceUpdateViewResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *IngestTracesResponse + var _success *UpdateViewResponse if src.Success != nil { - _success = &IngestTracesResponse{} + _success = &UpdateViewResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -11759,7 +14496,7 @@ func (p *TraceServiceIngestTracesInnerResult) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceGetTracesMetaInfoArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceDeleteViewArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -11803,14 +14540,14 @@ func (p *TraceServiceGetTracesMetaInfoArgs) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTracesMetaInfoArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteViewArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceGetTracesMetaInfoArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceDeleteViewArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewGetTracesMetaInfoRequest() + _field := NewDeleteViewRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -11820,11 +14557,11 @@ func (p *TraceServiceGetTracesMetaInfoArgs) FastReadField1(buf []byte) (int, err return offset, nil } -func (p *TraceServiceGetTracesMetaInfoArgs) FastWrite(buf []byte) int { +func (p *TraceServiceDeleteViewArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceGetTracesMetaInfoArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceDeleteViewArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -11833,7 +14570,7 @@ func (p *TraceServiceGetTracesMetaInfoArgs) FastWriteNocopy(buf []byte, w thrift return offset } -func (p *TraceServiceGetTracesMetaInfoArgs) BLength() int { +func (p *TraceServiceDeleteViewArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -11842,29 +14579,29 @@ func (p *TraceServiceGetTracesMetaInfoArgs) BLength() int { return l } -func (p *TraceServiceGetTracesMetaInfoArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceDeleteViewArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceGetTracesMetaInfoArgs) field1Length() int { +func (p *TraceServiceDeleteViewArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceGetTracesMetaInfoArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceGetTracesMetaInfoArgs) +func (p *TraceServiceDeleteViewArgs) DeepCopy(s interface{}) 
error { + src, ok := s.(*TraceServiceDeleteViewArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *GetTracesMetaInfoRequest + var _req *DeleteViewRequest if src.Req != nil { - _req = &GetTracesMetaInfoRequest{} + _req = &DeleteViewRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -11874,7 +14611,7 @@ func (p *TraceServiceGetTracesMetaInfoArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceGetTracesMetaInfoResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceDeleteViewResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -11918,14 +14655,14 @@ func (p *TraceServiceGetTracesMetaInfoResult) FastRead(buf []byte) (int, error) ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceGetTracesMetaInfoResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteViewResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceGetTracesMetaInfoResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceDeleteViewResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewGetTracesMetaInfoResponse() + _field := NewDeleteViewResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -11935,11 +14672,11 @@ func (p *TraceServiceGetTracesMetaInfoResult) FastReadField0(buf []byte) (int, e return offset, nil } -func (p *TraceServiceGetTracesMetaInfoResult) FastWrite(buf []byte) int { +func (p *TraceServiceDeleteViewResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceGetTracesMetaInfoResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceDeleteViewResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -11948,7 +14685,7 @@ func (p *TraceServiceGetTracesMetaInfoResult) FastWriteNocopy(buf []byte, w thri return offset } -func (p *TraceServiceGetTracesMetaInfoResult) BLength() int { +func (p *TraceServiceDeleteViewResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -11957,7 +14694,7 @@ func (p *TraceServiceGetTracesMetaInfoResult) BLength() int { return l } -func (p *TraceServiceGetTracesMetaInfoResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceDeleteViewResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -11966,7 +14703,7 @@ func (p *TraceServiceGetTracesMetaInfoResult) fastWriteField0(buf []byte, w thri return offset } -func (p *TraceServiceGetTracesMetaInfoResult) field0Length() int { +func (p *TraceServiceDeleteViewResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -11975,15 +14712,15 @@ func (p *TraceServiceGetTracesMetaInfoResult) field0Length() int { return l } -func (p *TraceServiceGetTracesMetaInfoResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceGetTracesMetaInfoResult) +func (p *TraceServiceDeleteViewResult) DeepCopy(s interface{}) 
error { + src, ok := s.(*TraceServiceDeleteViewResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *GetTracesMetaInfoResponse + var _success *DeleteViewResponse if src.Success != nil { - _success = &GetTracesMetaInfoResponse{} + _success = &DeleteViewResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -11993,7 +14730,7 @@ func (p *TraceServiceGetTracesMetaInfoResult) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceCreateViewArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceListViewsArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -12037,14 +14774,14 @@ func (p *TraceServiceCreateViewArgs) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateViewArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListViewsArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceCreateViewArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceListViewsArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewCreateViewRequest() + _field := NewListViewsRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -12054,11 +14791,11 @@ func (p *TraceServiceCreateViewArgs) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceCreateViewArgs) FastWrite(buf []byte) int { +func (p *TraceServiceListViewsArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceCreateViewArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListViewsArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -12067,7 +14804,7 @@ func (p *TraceServiceCreateViewArgs) FastWriteNocopy(buf []byte, w thrift.Nocopy return offset } -func (p *TraceServiceCreateViewArgs) BLength() int { +func (p *TraceServiceListViewsArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -12076,29 +14813,29 @@ func (p *TraceServiceCreateViewArgs) BLength() int { return l } -func (p *TraceServiceCreateViewArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListViewsArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceCreateViewArgs) field1Length() int { +func (p *TraceServiceListViewsArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceCreateViewArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceCreateViewArgs) +func (p *TraceServiceListViewsArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceListViewsArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *CreateViewRequest + var _req *ListViewsRequest if src.Req != nil { - _req = &CreateViewRequest{} + _req = &ListViewsRequest{} if err := 
_req.DeepCopy(src.Req); err != nil { return err } @@ -12108,7 +14845,7 @@ func (p *TraceServiceCreateViewArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceCreateViewResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceListViewsResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -12152,14 +14889,14 @@ func (p *TraceServiceCreateViewResult) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateViewResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListViewsResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceCreateViewResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceListViewsResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewCreateViewResponse() + _field := NewListViewsResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -12169,11 +14906,11 @@ func (p *TraceServiceCreateViewResult) FastReadField0(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceCreateViewResult) FastWrite(buf []byte) int { +func (p *TraceServiceListViewsResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceCreateViewResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListViewsResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -12182,7 +14919,7 @@ func (p *TraceServiceCreateViewResult) FastWriteNocopy(buf []byte, w thrift.Noco return offset } -func (p *TraceServiceCreateViewResult) BLength() int { +func (p *TraceServiceListViewsResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -12191,7 +14928,7 @@ func (p *TraceServiceCreateViewResult) BLength() int { return l } -func (p *TraceServiceCreateViewResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListViewsResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -12200,7 +14937,7 @@ func (p *TraceServiceCreateViewResult) fastWriteField0(buf []byte, w thrift.Noco return offset } -func (p *TraceServiceCreateViewResult) field0Length() int { +func (p *TraceServiceListViewsResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -12209,15 +14946,15 @@ func (p *TraceServiceCreateViewResult) field0Length() int { return l } -func (p *TraceServiceCreateViewResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceCreateViewResult) +func (p *TraceServiceListViewsResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceListViewsResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *CreateViewResponse + var _success *ListViewsResponse if src.Success != nil { - _success = &CreateViewResponse{} + _success = &ListViewsResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -12227,7 +14964,7 @@ func (p 
*TraceServiceCreateViewResult) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceUpdateViewArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceCreateManualAnnotationArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -12271,14 +15008,14 @@ func (p *TraceServiceUpdateViewArgs) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateViewArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateManualAnnotationArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceUpdateViewArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceCreateManualAnnotationArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewUpdateViewRequest() + _field := NewCreateManualAnnotationRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -12288,11 +15025,11 @@ func (p *TraceServiceUpdateViewArgs) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceUpdateViewArgs) FastWrite(buf []byte) int { +func (p *TraceServiceCreateManualAnnotationArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceUpdateViewArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceCreateManualAnnotationArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -12301,7 +15038,7 @@ func (p *TraceServiceUpdateViewArgs) FastWriteNocopy(buf []byte, w thrift.Nocopy return offset } -func (p *TraceServiceUpdateViewArgs) BLength() int { +func (p *TraceServiceCreateManualAnnotationArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -12310,29 +15047,29 @@ func (p *TraceServiceUpdateViewArgs) BLength() int { return l } -func (p *TraceServiceUpdateViewArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceCreateManualAnnotationArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceUpdateViewArgs) field1Length() int { +func (p *TraceServiceCreateManualAnnotationArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceUpdateViewArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceUpdateViewArgs) +func (p *TraceServiceCreateManualAnnotationArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceCreateManualAnnotationArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *UpdateViewRequest + var _req *CreateManualAnnotationRequest if src.Req != nil { - _req = &UpdateViewRequest{} + _req = &CreateManualAnnotationRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -12342,7 +15079,7 @@ func (p *TraceServiceUpdateViewArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceUpdateViewResult) FastRead(buf []byte) (int, error) { +func (p 
*TraceServiceCreateManualAnnotationResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -12386,14 +15123,14 @@ func (p *TraceServiceUpdateViewResult) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateViewResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateManualAnnotationResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceUpdateViewResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceCreateManualAnnotationResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewUpdateViewResponse() + _field := NewCreateManualAnnotationResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -12403,11 +15140,11 @@ func (p *TraceServiceUpdateViewResult) FastReadField0(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceUpdateViewResult) FastWrite(buf []byte) int { +func (p *TraceServiceCreateManualAnnotationResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceUpdateViewResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceCreateManualAnnotationResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -12416,7 +15153,7 @@ func (p *TraceServiceUpdateViewResult) FastWriteNocopy(buf []byte, w thrift.Noco return offset } -func (p *TraceServiceUpdateViewResult) BLength() int { +func (p *TraceServiceCreateManualAnnotationResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -12425,7 +15162,7 @@ func (p *TraceServiceUpdateViewResult) BLength() int { return l } -func (p *TraceServiceUpdateViewResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceCreateManualAnnotationResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -12434,7 +15171,7 @@ func (p *TraceServiceUpdateViewResult) fastWriteField0(buf []byte, w thrift.Noco return offset } -func (p *TraceServiceUpdateViewResult) field0Length() int { +func (p *TraceServiceCreateManualAnnotationResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -12443,15 +15180,15 @@ func (p *TraceServiceUpdateViewResult) field0Length() int { return l } -func (p *TraceServiceUpdateViewResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceUpdateViewResult) +func (p *TraceServiceCreateManualAnnotationResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceCreateManualAnnotationResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *UpdateViewResponse + var _success *CreateManualAnnotationResponse if src.Success != nil { - _success = &UpdateViewResponse{} + _success = &CreateManualAnnotationResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -12461,7 +15198,7 @@ func (p *TraceServiceUpdateViewResult) DeepCopy(s interface{}) error { return nil } -func (p 
*TraceServiceDeleteViewArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceUpdateManualAnnotationArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -12505,14 +15242,14 @@ func (p *TraceServiceDeleteViewArgs) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteViewArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateManualAnnotationArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceDeleteViewArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceUpdateManualAnnotationArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewDeleteViewRequest() + _field := NewUpdateManualAnnotationRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -12522,11 +15259,11 @@ func (p *TraceServiceDeleteViewArgs) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceDeleteViewArgs) FastWrite(buf []byte) int { +func (p *TraceServiceUpdateManualAnnotationArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceDeleteViewArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceUpdateManualAnnotationArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -12535,7 +15272,7 @@ func (p *TraceServiceDeleteViewArgs) FastWriteNocopy(buf []byte, w thrift.Nocopy return offset } -func (p *TraceServiceDeleteViewArgs) BLength() int { +func (p *TraceServiceUpdateManualAnnotationArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -12544,29 +15281,29 @@ func (p *TraceServiceDeleteViewArgs) BLength() int { return l } -func (p *TraceServiceDeleteViewArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceUpdateManualAnnotationArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceDeleteViewArgs) field1Length() int { +func (p *TraceServiceUpdateManualAnnotationArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceDeleteViewArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceDeleteViewArgs) +func (p *TraceServiceUpdateManualAnnotationArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceUpdateManualAnnotationArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *DeleteViewRequest + var _req *UpdateManualAnnotationRequest if src.Req != nil { - _req = &DeleteViewRequest{} + _req = &UpdateManualAnnotationRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -12576,7 +15313,7 @@ func (p *TraceServiceDeleteViewArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceDeleteViewResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceUpdateManualAnnotationResult) FastRead(buf []byte) (int, error) { var err error var 
offset int @@ -12620,14 +15357,14 @@ func (p *TraceServiceDeleteViewResult) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteViewResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateManualAnnotationResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceDeleteViewResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceUpdateManualAnnotationResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewDeleteViewResponse() + _field := NewUpdateManualAnnotationResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -12637,11 +15374,11 @@ func (p *TraceServiceDeleteViewResult) FastReadField0(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceDeleteViewResult) FastWrite(buf []byte) int { +func (p *TraceServiceUpdateManualAnnotationResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceDeleteViewResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceUpdateManualAnnotationResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -12650,7 +15387,7 @@ func (p *TraceServiceDeleteViewResult) FastWriteNocopy(buf []byte, w thrift.Noco return offset } -func (p *TraceServiceDeleteViewResult) BLength() int { +func (p *TraceServiceUpdateManualAnnotationResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -12659,7 +15396,7 @@ func (p *TraceServiceDeleteViewResult) BLength() int { return l } -func (p *TraceServiceDeleteViewResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceUpdateManualAnnotationResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -12668,7 +15405,7 @@ func (p *TraceServiceDeleteViewResult) fastWriteField0(buf []byte, w thrift.Noco return offset } -func (p *TraceServiceDeleteViewResult) field0Length() int { +func (p *TraceServiceUpdateManualAnnotationResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -12677,15 +15414,15 @@ func (p *TraceServiceDeleteViewResult) field0Length() int { return l } -func (p *TraceServiceDeleteViewResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceDeleteViewResult) +func (p *TraceServiceUpdateManualAnnotationResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceUpdateManualAnnotationResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *DeleteViewResponse + var _success *UpdateManualAnnotationResponse if src.Success != nil { - _success = &DeleteViewResponse{} + _success = &UpdateManualAnnotationResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -12695,7 +15432,7 @@ func (p *TraceServiceDeleteViewResult) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceListViewsArgs) FastRead(buf []byte) (int, error) { +func (p 
*TraceServiceDeleteManualAnnotationArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -12739,14 +15476,14 @@ func (p *TraceServiceListViewsArgs) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListViewsArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteManualAnnotationArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceListViewsArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceDeleteManualAnnotationArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewListViewsRequest() + _field := NewDeleteManualAnnotationRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -12756,11 +15493,11 @@ func (p *TraceServiceListViewsArgs) FastReadField1(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceListViewsArgs) FastWrite(buf []byte) int { +func (p *TraceServiceDeleteManualAnnotationArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceListViewsArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceDeleteManualAnnotationArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -12769,7 +15506,7 @@ func (p *TraceServiceListViewsArgs) FastWriteNocopy(buf []byte, w thrift.NocopyW return offset } -func (p *TraceServiceListViewsArgs) BLength() int { +func (p *TraceServiceDeleteManualAnnotationArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -12778,29 +15515,29 @@ func (p *TraceServiceListViewsArgs) BLength() int { return l } -func (p *TraceServiceListViewsArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceDeleteManualAnnotationArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceListViewsArgs) field1Length() int { +func (p *TraceServiceDeleteManualAnnotationArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceListViewsArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceListViewsArgs) +func (p *TraceServiceDeleteManualAnnotationArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceDeleteManualAnnotationArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *ListViewsRequest + var _req *DeleteManualAnnotationRequest if src.Req != nil { - _req = &ListViewsRequest{} + _req = &DeleteManualAnnotationRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -12810,7 +15547,7 @@ func (p *TraceServiceListViewsArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceListViewsResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceDeleteManualAnnotationResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -12854,14 +15591,14 @@ func (p *TraceServiceListViewsResult) FastRead(buf 
[]byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListViewsResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteManualAnnotationResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceListViewsResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceDeleteManualAnnotationResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewListViewsResponse() + _field := NewDeleteManualAnnotationResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -12871,11 +15608,11 @@ func (p *TraceServiceListViewsResult) FastReadField0(buf []byte) (int, error) { return offset, nil } -func (p *TraceServiceListViewsResult) FastWrite(buf []byte) int { +func (p *TraceServiceDeleteManualAnnotationResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceListViewsResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceDeleteManualAnnotationResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -12884,7 +15621,7 @@ func (p *TraceServiceListViewsResult) FastWriteNocopy(buf []byte, w thrift.Nocop return offset } -func (p *TraceServiceListViewsResult) BLength() int { +func (p *TraceServiceDeleteManualAnnotationResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -12893,7 +15630,7 @@ func (p *TraceServiceListViewsResult) BLength() int { return l } -func (p *TraceServiceListViewsResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceDeleteManualAnnotationResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -12902,7 +15639,7 @@ func (p *TraceServiceListViewsResult) fastWriteField0(buf []byte, w thrift.Nocop return offset } -func (p *TraceServiceListViewsResult) field0Length() int { +func (p *TraceServiceDeleteManualAnnotationResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -12911,15 +15648,15 @@ func (p *TraceServiceListViewsResult) field0Length() int { return l } -func (p *TraceServiceListViewsResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceListViewsResult) +func (p *TraceServiceDeleteManualAnnotationResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceDeleteManualAnnotationResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *ListViewsResponse + var _success *DeleteManualAnnotationResponse if src.Success != nil { - _success = &ListViewsResponse{} + _success = &DeleteManualAnnotationResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -12929,7 +15666,7 @@ func (p *TraceServiceListViewsResult) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceCreateManualAnnotationArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceListAnnotationsArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -12973,14 +15710,14 
@@ func (p *TraceServiceCreateManualAnnotationArgs) FastRead(buf []byte) (int, erro ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateManualAnnotationArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationsArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceCreateManualAnnotationArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceListAnnotationsArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewCreateManualAnnotationRequest() + _field := NewListAnnotationsRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -12990,11 +15727,11 @@ func (p *TraceServiceCreateManualAnnotationArgs) FastReadField1(buf []byte) (int return offset, nil } -func (p *TraceServiceCreateManualAnnotationArgs) FastWrite(buf []byte) int { +func (p *TraceServiceListAnnotationsArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceCreateManualAnnotationArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListAnnotationsArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -13003,7 +15740,7 @@ func (p *TraceServiceCreateManualAnnotationArgs) FastWriteNocopy(buf []byte, w t return offset } -func (p *TraceServiceCreateManualAnnotationArgs) BLength() int { +func (p *TraceServiceListAnnotationsArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -13012,29 +15749,29 @@ func (p *TraceServiceCreateManualAnnotationArgs) BLength() int { return l } -func (p *TraceServiceCreateManualAnnotationArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListAnnotationsArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceCreateManualAnnotationArgs) field1Length() int { +func (p *TraceServiceListAnnotationsArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceCreateManualAnnotationArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceCreateManualAnnotationArgs) +func (p *TraceServiceListAnnotationsArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceListAnnotationsArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *CreateManualAnnotationRequest + var _req *ListAnnotationsRequest if src.Req != nil { - _req = &CreateManualAnnotationRequest{} + _req = &ListAnnotationsRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -13044,7 +15781,7 @@ func (p *TraceServiceCreateManualAnnotationArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceCreateManualAnnotationResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceListAnnotationsResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -13088,14 +15825,14 @@ func (p *TraceServiceCreateManualAnnotationResult) FastRead(buf 
[]byte) (int, er ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceCreateManualAnnotationResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationsResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceCreateManualAnnotationResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceListAnnotationsResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewCreateManualAnnotationResponse() + _field := NewListAnnotationsResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -13105,11 +15842,11 @@ func (p *TraceServiceCreateManualAnnotationResult) FastReadField0(buf []byte) (i return offset, nil } -func (p *TraceServiceCreateManualAnnotationResult) FastWrite(buf []byte) int { +func (p *TraceServiceListAnnotationsResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceCreateManualAnnotationResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListAnnotationsResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -13118,7 +15855,7 @@ func (p *TraceServiceCreateManualAnnotationResult) FastWriteNocopy(buf []byte, w return offset } -func (p *TraceServiceCreateManualAnnotationResult) BLength() int { +func (p *TraceServiceListAnnotationsResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -13127,7 +15864,7 @@ func (p *TraceServiceCreateManualAnnotationResult) BLength() int { return l } -func (p *TraceServiceCreateManualAnnotationResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListAnnotationsResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -13136,7 +15873,7 @@ func (p *TraceServiceCreateManualAnnotationResult) fastWriteField0(buf []byte, w return offset } -func (p *TraceServiceCreateManualAnnotationResult) field0Length() int { +func (p *TraceServiceListAnnotationsResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -13145,15 +15882,15 @@ func (p *TraceServiceCreateManualAnnotationResult) field0Length() int { return l } -func (p *TraceServiceCreateManualAnnotationResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceCreateManualAnnotationResult) +func (p *TraceServiceListAnnotationsResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceListAnnotationsResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *CreateManualAnnotationResponse + var _success *ListAnnotationsResponse if src.Success != nil { - _success = &CreateManualAnnotationResponse{} + _success = &ListAnnotationsResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -13163,7 +15900,7 @@ func (p *TraceServiceCreateManualAnnotationResult) DeepCopy(s interface{}) error return nil } -func (p *TraceServiceUpdateManualAnnotationArgs) FastRead(buf []byte) (int, error) { +func (p 
*TraceServiceExportTracesToDatasetArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -13207,14 +15944,14 @@ func (p *TraceServiceUpdateManualAnnotationArgs) FastRead(buf []byte) (int, erro ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateManualAnnotationArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExportTracesToDatasetArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceUpdateManualAnnotationArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceExportTracesToDatasetArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewUpdateManualAnnotationRequest() + _field := NewExportTracesToDatasetRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -13224,11 +15961,11 @@ func (p *TraceServiceUpdateManualAnnotationArgs) FastReadField1(buf []byte) (int return offset, nil } -func (p *TraceServiceUpdateManualAnnotationArgs) FastWrite(buf []byte) int { +func (p *TraceServiceExportTracesToDatasetArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceUpdateManualAnnotationArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceExportTracesToDatasetArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -13237,7 +15974,7 @@ func (p *TraceServiceUpdateManualAnnotationArgs) FastWriteNocopy(buf []byte, w t return offset } -func (p *TraceServiceUpdateManualAnnotationArgs) BLength() int { +func (p *TraceServiceExportTracesToDatasetArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -13246,29 +15983,29 @@ func (p *TraceServiceUpdateManualAnnotationArgs) BLength() int { return l } -func (p *TraceServiceUpdateManualAnnotationArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceExportTracesToDatasetArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceUpdateManualAnnotationArgs) field1Length() int { +func (p *TraceServiceExportTracesToDatasetArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceUpdateManualAnnotationArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceUpdateManualAnnotationArgs) +func (p *TraceServiceExportTracesToDatasetArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceExportTracesToDatasetArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *UpdateManualAnnotationRequest + var _req *ExportTracesToDatasetRequest if src.Req != nil { - _req = &UpdateManualAnnotationRequest{} + _req = &ExportTracesToDatasetRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -13278,7 +16015,7 @@ func (p *TraceServiceUpdateManualAnnotationArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceUpdateManualAnnotationResult) FastRead(buf []byte) (int, error) { 
+func (p *TraceServiceExportTracesToDatasetResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -13322,14 +16059,14 @@ func (p *TraceServiceUpdateManualAnnotationResult) FastRead(buf []byte) (int, er ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceUpdateManualAnnotationResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExportTracesToDatasetResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceUpdateManualAnnotationResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceExportTracesToDatasetResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewUpdateManualAnnotationResponse() + _field := NewExportTracesToDatasetResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -13339,11 +16076,11 @@ func (p *TraceServiceUpdateManualAnnotationResult) FastReadField0(buf []byte) (i return offset, nil } -func (p *TraceServiceUpdateManualAnnotationResult) FastWrite(buf []byte) int { +func (p *TraceServiceExportTracesToDatasetResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceUpdateManualAnnotationResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceExportTracesToDatasetResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -13352,7 +16089,7 @@ func (p *TraceServiceUpdateManualAnnotationResult) FastWriteNocopy(buf []byte, w return offset } -func (p *TraceServiceUpdateManualAnnotationResult) BLength() int { +func (p *TraceServiceExportTracesToDatasetResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -13361,7 +16098,7 @@ func (p *TraceServiceUpdateManualAnnotationResult) BLength() int { return l } -func (p *TraceServiceUpdateManualAnnotationResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceExportTracesToDatasetResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -13370,7 +16107,7 @@ func (p *TraceServiceUpdateManualAnnotationResult) fastWriteField0(buf []byte, w return offset } -func (p *TraceServiceUpdateManualAnnotationResult) field0Length() int { +func (p *TraceServiceExportTracesToDatasetResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -13379,15 +16116,15 @@ func (p *TraceServiceUpdateManualAnnotationResult) field0Length() int { return l } -func (p *TraceServiceUpdateManualAnnotationResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceUpdateManualAnnotationResult) +func (p *TraceServiceExportTracesToDatasetResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceExportTracesToDatasetResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *UpdateManualAnnotationResponse + var _success *ExportTracesToDatasetResponse if src.Success != nil { - _success = &UpdateManualAnnotationResponse{} + _success = &ExportTracesToDatasetResponse{} if err := 
_success.DeepCopy(src.Success); err != nil { return err } @@ -13397,7 +16134,7 @@ func (p *TraceServiceUpdateManualAnnotationResult) DeepCopy(s interface{}) error return nil } -func (p *TraceServiceDeleteManualAnnotationArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServicePreviewExportTracesToDatasetArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -13441,14 +16178,14 @@ func (p *TraceServiceDeleteManualAnnotationArgs) FastRead(buf []byte) (int, erro ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteManualAnnotationArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServicePreviewExportTracesToDatasetArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceDeleteManualAnnotationArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServicePreviewExportTracesToDatasetArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewDeleteManualAnnotationRequest() + _field := NewPreviewExportTracesToDatasetRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -13458,11 +16195,11 @@ func (p *TraceServiceDeleteManualAnnotationArgs) FastReadField1(buf []byte) (int return offset, nil } -func (p *TraceServiceDeleteManualAnnotationArgs) FastWrite(buf []byte) int { +func (p *TraceServicePreviewExportTracesToDatasetArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceDeleteManualAnnotationArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServicePreviewExportTracesToDatasetArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -13471,7 +16208,7 @@ func (p *TraceServiceDeleteManualAnnotationArgs) FastWriteNocopy(buf []byte, w t return offset } -func (p *TraceServiceDeleteManualAnnotationArgs) BLength() int { +func (p *TraceServicePreviewExportTracesToDatasetArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -13480,29 +16217,29 @@ func (p *TraceServiceDeleteManualAnnotationArgs) BLength() int { return l } -func (p *TraceServiceDeleteManualAnnotationArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServicePreviewExportTracesToDatasetArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceDeleteManualAnnotationArgs) field1Length() int { +func (p *TraceServicePreviewExportTracesToDatasetArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceDeleteManualAnnotationArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceDeleteManualAnnotationArgs) +func (p *TraceServicePreviewExportTracesToDatasetArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServicePreviewExportTracesToDatasetArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *DeleteManualAnnotationRequest + var _req *PreviewExportTracesToDatasetRequest if src.Req != nil 
{ - _req = &DeleteManualAnnotationRequest{} + _req = &PreviewExportTracesToDatasetRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -13512,7 +16249,7 @@ func (p *TraceServiceDeleteManualAnnotationArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceDeleteManualAnnotationResult) FastRead(buf []byte) (int, error) { +func (p *TraceServicePreviewExportTracesToDatasetResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -13556,14 +16293,14 @@ func (p *TraceServiceDeleteManualAnnotationResult) FastRead(buf []byte) (int, er ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceDeleteManualAnnotationResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServicePreviewExportTracesToDatasetResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceDeleteManualAnnotationResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServicePreviewExportTracesToDatasetResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewDeleteManualAnnotationResponse() + _field := NewPreviewExportTracesToDatasetResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -13573,11 +16310,11 @@ func (p *TraceServiceDeleteManualAnnotationResult) FastReadField0(buf []byte) (i return offset, nil } -func (p *TraceServiceDeleteManualAnnotationResult) FastWrite(buf []byte) int { +func (p *TraceServicePreviewExportTracesToDatasetResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceDeleteManualAnnotationResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServicePreviewExportTracesToDatasetResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -13586,7 +16323,7 @@ func (p *TraceServiceDeleteManualAnnotationResult) FastWriteNocopy(buf []byte, w return offset } -func (p *TraceServiceDeleteManualAnnotationResult) BLength() int { +func (p *TraceServicePreviewExportTracesToDatasetResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -13595,7 +16332,7 @@ func (p *TraceServiceDeleteManualAnnotationResult) BLength() int { return l } -func (p *TraceServiceDeleteManualAnnotationResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServicePreviewExportTracesToDatasetResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -13604,7 +16341,7 @@ func (p *TraceServiceDeleteManualAnnotationResult) fastWriteField0(buf []byte, w return offset } -func (p *TraceServiceDeleteManualAnnotationResult) field0Length() int { +func (p *TraceServicePreviewExportTracesToDatasetResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -13613,15 +16350,15 @@ func (p *TraceServiceDeleteManualAnnotationResult) field0Length() int { return l } -func (p *TraceServiceDeleteManualAnnotationResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceDeleteManualAnnotationResult) +func (p 
*TraceServicePreviewExportTracesToDatasetResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServicePreviewExportTracesToDatasetResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *DeleteManualAnnotationResponse + var _success *PreviewExportTracesToDatasetResponse if src.Success != nil { - _success = &DeleteManualAnnotationResponse{} + _success = &PreviewExportTracesToDatasetResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -13631,7 +16368,7 @@ func (p *TraceServiceDeleteManualAnnotationResult) DeepCopy(s interface{}) error return nil } -func (p *TraceServiceListAnnotationsArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceChangeEvaluatorScoreArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -13675,14 +16412,14 @@ func (p *TraceServiceListAnnotationsArgs) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationsArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceChangeEvaluatorScoreArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceListAnnotationsArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceChangeEvaluatorScoreArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewListAnnotationsRequest() + _field := NewChangeEvaluatorScoreRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -13692,11 +16429,11 @@ func (p *TraceServiceListAnnotationsArgs) FastReadField1(buf []byte) (int, error return offset, nil } -func (p *TraceServiceListAnnotationsArgs) FastWrite(buf []byte) int { +func (p *TraceServiceChangeEvaluatorScoreArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceListAnnotationsArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceChangeEvaluatorScoreArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -13705,7 +16442,7 @@ func (p *TraceServiceListAnnotationsArgs) FastWriteNocopy(buf []byte, w thrift.N return offset } -func (p *TraceServiceListAnnotationsArgs) BLength() int { +func (p *TraceServiceChangeEvaluatorScoreArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -13714,29 +16451,29 @@ func (p *TraceServiceListAnnotationsArgs) BLength() int { return l } -func (p *TraceServiceListAnnotationsArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceChangeEvaluatorScoreArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceListAnnotationsArgs) field1Length() int { +func (p *TraceServiceChangeEvaluatorScoreArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServiceListAnnotationsArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceListAnnotationsArgs) +func (p 
*TraceServiceChangeEvaluatorScoreArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceChangeEvaluatorScoreArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *ListAnnotationsRequest + var _req *ChangeEvaluatorScoreRequest if src.Req != nil { - _req = &ListAnnotationsRequest{} + _req = &ChangeEvaluatorScoreRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -13746,7 +16483,7 @@ func (p *TraceServiceListAnnotationsArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceListAnnotationsResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceChangeEvaluatorScoreResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -13790,14 +16527,14 @@ func (p *TraceServiceListAnnotationsResult) FastRead(buf []byte) (int, error) { ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationsResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceChangeEvaluatorScoreResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceListAnnotationsResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceChangeEvaluatorScoreResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewListAnnotationsResponse() + _field := NewChangeEvaluatorScoreResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -13807,11 +16544,11 @@ func (p *TraceServiceListAnnotationsResult) FastReadField0(buf []byte) (int, err return offset, nil } -func (p *TraceServiceListAnnotationsResult) FastWrite(buf []byte) int { +func (p *TraceServiceChangeEvaluatorScoreResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceListAnnotationsResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceChangeEvaluatorScoreResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -13820,7 +16557,7 @@ func (p *TraceServiceListAnnotationsResult) FastWriteNocopy(buf []byte, w thrift return offset } -func (p *TraceServiceListAnnotationsResult) BLength() int { +func (p *TraceServiceChangeEvaluatorScoreResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -13829,7 +16566,7 @@ func (p *TraceServiceListAnnotationsResult) BLength() int { return l } -func (p *TraceServiceListAnnotationsResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceChangeEvaluatorScoreResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -13838,7 +16575,7 @@ func (p *TraceServiceListAnnotationsResult) fastWriteField0(buf []byte, w thrift return offset } -func (p *TraceServiceListAnnotationsResult) field0Length() int { +func (p *TraceServiceChangeEvaluatorScoreResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -13847,15 +16584,15 @@ func (p *TraceServiceListAnnotationsResult) field0Length() int { return l } -func (p *TraceServiceListAnnotationsResult) 
DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceListAnnotationsResult) +func (p *TraceServiceChangeEvaluatorScoreResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceChangeEvaluatorScoreResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *ListAnnotationsResponse + var _success *ChangeEvaluatorScoreResponse if src.Success != nil { - _success = &ListAnnotationsResponse{} + _success = &ChangeEvaluatorScoreResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -13865,7 +16602,7 @@ func (p *TraceServiceListAnnotationsResult) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceExportTracesToDatasetArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceListAnnotationEvaluatorsArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -13909,14 +16646,14 @@ func (p *TraceServiceExportTracesToDatasetArgs) FastRead(buf []byte) (int, error ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExportTracesToDatasetArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationEvaluatorsArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceExportTracesToDatasetArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceListAnnotationEvaluatorsArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewExportTracesToDatasetRequest() + _field := NewListAnnotationEvaluatorsRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -13926,11 +16663,11 @@ func (p *TraceServiceExportTracesToDatasetArgs) FastReadField1(buf []byte) (int, return offset, nil } -func (p *TraceServiceExportTracesToDatasetArgs) FastWrite(buf []byte) int { +func (p *TraceServiceListAnnotationEvaluatorsArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceExportTracesToDatasetArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListAnnotationEvaluatorsArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -13939,7 +16676,7 @@ func (p *TraceServiceExportTracesToDatasetArgs) FastWriteNocopy(buf []byte, w th return offset } -func (p *TraceServiceExportTracesToDatasetArgs) BLength() int { +func (p *TraceServiceListAnnotationEvaluatorsArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -13948,29 +16685,29 @@ func (p *TraceServiceExportTracesToDatasetArgs) BLength() int { return l } -func (p *TraceServiceExportTracesToDatasetArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListAnnotationEvaluatorsArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServiceExportTracesToDatasetArgs) field1Length() int { +func (p *TraceServiceListAnnotationEvaluatorsArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p 
*TraceServiceExportTracesToDatasetArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceExportTracesToDatasetArgs) +func (p *TraceServiceListAnnotationEvaluatorsArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceListAnnotationEvaluatorsArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *ExportTracesToDatasetRequest + var _req *ListAnnotationEvaluatorsRequest if src.Req != nil { - _req = &ExportTracesToDatasetRequest{} + _req = &ListAnnotationEvaluatorsRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -13980,7 +16717,7 @@ func (p *TraceServiceExportTracesToDatasetArgs) DeepCopy(s interface{}) error { return nil } -func (p *TraceServiceExportTracesToDatasetResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceListAnnotationEvaluatorsResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -14024,14 +16761,14 @@ func (p *TraceServiceExportTracesToDatasetResult) FastRead(buf []byte) (int, err ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExportTracesToDatasetResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceListAnnotationEvaluatorsResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServiceExportTracesToDatasetResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceListAnnotationEvaluatorsResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewExportTracesToDatasetResponse() + _field := NewListAnnotationEvaluatorsResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -14041,11 +16778,11 @@ func (p *TraceServiceExportTracesToDatasetResult) FastReadField0(buf []byte) (in return offset, nil } -func (p *TraceServiceExportTracesToDatasetResult) FastWrite(buf []byte) int { +func (p *TraceServiceListAnnotationEvaluatorsResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServiceExportTracesToDatasetResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListAnnotationEvaluatorsResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -14054,7 +16791,7 @@ func (p *TraceServiceExportTracesToDatasetResult) FastWriteNocopy(buf []byte, w return offset } -func (p *TraceServiceExportTracesToDatasetResult) BLength() int { +func (p *TraceServiceListAnnotationEvaluatorsResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -14063,7 +16800,7 @@ func (p *TraceServiceExportTracesToDatasetResult) BLength() int { return l } -func (p *TraceServiceExportTracesToDatasetResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceListAnnotationEvaluatorsResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -14072,7 +16809,7 @@ func (p *TraceServiceExportTracesToDatasetResult) fastWriteField0(buf []byte, w return offset } -func (p *TraceServiceExportTracesToDatasetResult) field0Length() int { +func (p 
*TraceServiceListAnnotationEvaluatorsResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -14081,15 +16818,15 @@ func (p *TraceServiceExportTracesToDatasetResult) field0Length() int { return l } -func (p *TraceServiceExportTracesToDatasetResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServiceExportTracesToDatasetResult) +func (p *TraceServiceListAnnotationEvaluatorsResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceListAnnotationEvaluatorsResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *ExportTracesToDatasetResponse + var _success *ListAnnotationEvaluatorsResponse if src.Success != nil { - _success = &ExportTracesToDatasetResponse{} + _success = &ListAnnotationEvaluatorsResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -14099,7 +16836,7 @@ func (p *TraceServiceExportTracesToDatasetResult) DeepCopy(s interface{}) error return nil } -func (p *TraceServicePreviewExportTracesToDatasetArgs) FastRead(buf []byte) (int, error) { +func (p *TraceServiceExtractSpanInfoArgs) FastRead(buf []byte) (int, error) { var err error var offset int @@ -14143,14 +16880,14 @@ func (p *TraceServicePreviewExportTracesToDatasetArgs) FastRead(buf []byte) (int ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServicePreviewExportTracesToDatasetArgs[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExtractSpanInfoArgs[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServicePreviewExportTracesToDatasetArgs) FastReadField1(buf []byte) (int, error) { +func (p *TraceServiceExtractSpanInfoArgs) FastReadField1(buf []byte) (int, error) { offset := 0 - _field := NewPreviewExportTracesToDatasetRequest() + _field := NewExtractSpanInfoRequest() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -14160,11 +16897,11 @@ func (p *TraceServicePreviewExportTracesToDatasetArgs) FastReadField1(buf []byte return offset, nil } -func (p *TraceServicePreviewExportTracesToDatasetArgs) FastWrite(buf []byte) int { +func (p *TraceServiceExtractSpanInfoArgs) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServicePreviewExportTracesToDatasetArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceExtractSpanInfoArgs) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField1(buf[offset:], w) @@ -14173,7 +16910,7 @@ func (p *TraceServicePreviewExportTracesToDatasetArgs) FastWriteNocopy(buf []byt return offset } -func (p *TraceServicePreviewExportTracesToDatasetArgs) BLength() int { +func (p *TraceServiceExtractSpanInfoArgs) BLength() int { l := 0 if p != nil { l += p.field1Length() @@ -14182,29 +16919,29 @@ func (p *TraceServicePreviewExportTracesToDatasetArgs) BLength() int { return l } -func (p *TraceServicePreviewExportTracesToDatasetArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceExtractSpanInfoArgs) fastWriteField1(buf []byte, w thrift.NocopyWriter) int { offset := 0 offset += thrift.Binary.WriteFieldBegin(buf[offset:], 
thrift.STRUCT, 1) offset += p.Req.FastWriteNocopy(buf[offset:], w) return offset } -func (p *TraceServicePreviewExportTracesToDatasetArgs) field1Length() int { +func (p *TraceServiceExtractSpanInfoArgs) field1Length() int { l := 0 l += thrift.Binary.FieldBeginLength() l += p.Req.BLength() return l } -func (p *TraceServicePreviewExportTracesToDatasetArgs) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServicePreviewExportTracesToDatasetArgs) +func (p *TraceServiceExtractSpanInfoArgs) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceExtractSpanInfoArgs) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _req *PreviewExportTracesToDatasetRequest + var _req *ExtractSpanInfoRequest if src.Req != nil { - _req = &PreviewExportTracesToDatasetRequest{} + _req = &ExtractSpanInfoRequest{} if err := _req.DeepCopy(src.Req); err != nil { return err } @@ -14214,7 +16951,7 @@ func (p *TraceServicePreviewExportTracesToDatasetArgs) DeepCopy(s interface{}) e return nil } -func (p *TraceServicePreviewExportTracesToDatasetResult) FastRead(buf []byte) (int, error) { +func (p *TraceServiceExtractSpanInfoResult) FastRead(buf []byte) (int, error) { var err error var offset int @@ -14258,14 +16995,14 @@ func (p *TraceServicePreviewExportTracesToDatasetResult) FastRead(buf []byte) (i ReadFieldBeginError: return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err) ReadFieldError: - return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServicePreviewExportTracesToDatasetResult[fieldId]), err) + return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TraceServiceExtractSpanInfoResult[fieldId]), err) SkipFieldError: return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err) } -func (p *TraceServicePreviewExportTracesToDatasetResult) FastReadField0(buf []byte) (int, error) { +func (p *TraceServiceExtractSpanInfoResult) FastReadField0(buf []byte) (int, error) { offset := 0 - _field := NewPreviewExportTracesToDatasetResponse() + _field := NewExtractSpanInfoResponse() if l, err := _field.FastRead(buf[offset:]); err != nil { return offset, err } else { @@ -14275,11 +17012,11 @@ func (p *TraceServicePreviewExportTracesToDatasetResult) FastReadField0(buf []by return offset, nil } -func (p *TraceServicePreviewExportTracesToDatasetResult) FastWrite(buf []byte) int { +func (p *TraceServiceExtractSpanInfoResult) FastWrite(buf []byte) int { return p.FastWriteNocopy(buf, nil) } -func (p *TraceServicePreviewExportTracesToDatasetResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceExtractSpanInfoResult) FastWriteNocopy(buf []byte, w thrift.NocopyWriter) int { offset := 0 if p != nil { offset += p.fastWriteField0(buf[offset:], w) @@ -14288,7 +17025,7 @@ func (p *TraceServicePreviewExportTracesToDatasetResult) FastWriteNocopy(buf []b return offset } -func (p *TraceServicePreviewExportTracesToDatasetResult) BLength() int { +func (p *TraceServiceExtractSpanInfoResult) BLength() int { l := 0 if p != nil { l += p.field0Length() @@ -14297,7 +17034,7 @@ func (p *TraceServicePreviewExportTracesToDatasetResult) BLength() int { return l } -func (p *TraceServicePreviewExportTracesToDatasetResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { +func (p *TraceServiceExtractSpanInfoResult) fastWriteField0(buf []byte, w thrift.NocopyWriter) int { offset := 0 if 
p.IsSetSuccess() { offset += thrift.Binary.WriteFieldBegin(buf[offset:], thrift.STRUCT, 0) @@ -14306,7 +17043,7 @@ func (p *TraceServicePreviewExportTracesToDatasetResult) fastWriteField0(buf []b return offset } -func (p *TraceServicePreviewExportTracesToDatasetResult) field0Length() int { +func (p *TraceServiceExtractSpanInfoResult) field0Length() int { l := 0 if p.IsSetSuccess() { l += thrift.Binary.FieldBeginLength() @@ -14315,15 +17052,15 @@ func (p *TraceServicePreviewExportTracesToDatasetResult) field0Length() int { return l } -func (p *TraceServicePreviewExportTracesToDatasetResult) DeepCopy(s interface{}) error { - src, ok := s.(*TraceServicePreviewExportTracesToDatasetResult) +func (p *TraceServiceExtractSpanInfoResult) DeepCopy(s interface{}) error { + src, ok := s.(*TraceServiceExtractSpanInfoResult) if !ok { return fmt.Errorf("%T's type not matched %T", s, p) } - var _success *PreviewExportTracesToDatasetResponse + var _success *ExtractSpanInfoResponse if src.Success != nil { - _success = &PreviewExportTracesToDatasetResponse{} + _success = &ExtractSpanInfoResponse{} if err := _success.DeepCopy(src.Success); err != nil { return err } @@ -14452,3 +17189,27 @@ func (p *TraceServicePreviewExportTracesToDatasetArgs) GetFirstArgument() interf func (p *TraceServicePreviewExportTracesToDatasetResult) GetResult() interface{} { return p.Success } + +func (p *TraceServiceChangeEvaluatorScoreArgs) GetFirstArgument() interface{} { + return p.Req +} + +func (p *TraceServiceChangeEvaluatorScoreResult) GetResult() interface{} { + return p.Success +} + +func (p *TraceServiceListAnnotationEvaluatorsArgs) GetFirstArgument() interface{} { + return p.Req +} + +func (p *TraceServiceListAnnotationEvaluatorsResult) GetResult() interface{} { + return p.Success +} + +func (p *TraceServiceExtractSpanInfoArgs) GetFirstArgument() interface{} { + return p.Req +} + +func (p *TraceServiceExtractSpanInfoResult) GetResult() interface{} { + return p.Success +} diff --git a/backend/kitex_gen/coze/loop/observability/trace/traceservice/client.go b/backend/kitex_gen/coze/loop/observability/trace/traceservice/client.go index d7e9b385e..da26be9b5 100644 --- a/backend/kitex_gen/coze/loop/observability/trace/traceservice/client.go +++ b/backend/kitex_gen/coze/loop/observability/trace/traceservice/client.go @@ -26,6 +26,9 @@ type Client interface { ListAnnotations(ctx context.Context, req *trace.ListAnnotationsRequest, callOptions ...callopt.Option) (r *trace.ListAnnotationsResponse, err error) ExportTracesToDataset(ctx context.Context, req *trace.ExportTracesToDatasetRequest, callOptions ...callopt.Option) (r *trace.ExportTracesToDatasetResponse, err error) PreviewExportTracesToDataset(ctx context.Context, req *trace.PreviewExportTracesToDatasetRequest, callOptions ...callopt.Option) (r *trace.PreviewExportTracesToDatasetResponse, err error) + ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest, callOptions ...callopt.Option) (r *trace.ChangeEvaluatorScoreResponse, err error) + ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest, callOptions ...callopt.Option) (r *trace.ListAnnotationEvaluatorsResponse, err error) + ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest, callOptions ...callopt.Option) (r *trace.ExtractSpanInfoResponse, err error) } // NewClient creates a client for the service defined in IDL. 
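For orientation, a minimal caller-side sketch of the three methods added to the generated Client interface above. The destination name, address, timeout, and empty request bodies are placeholders rather than values taken from this change; the actual request fields come from the IDL and are omitted here. NewClient is assumed to have the standard Kitex-generated signature.

package main

import (
	"context"
	"log"
	"time"

	"github.com/cloudwego/kitex/client"
	"github.com/cloudwego/kitex/client/callopt"

	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/trace"
	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/trace/traceservice"
)

func main() {
	// Standard Kitex-generated constructor; destination and address are placeholders.
	cli, err := traceservice.NewClient("observability.trace", client.WithHostPorts("127.0.0.1:8888"))
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Populate the request structs according to the IDL; fields are omitted in this sketch.
	if _, err := cli.ChangeEvaluatorScore(ctx, &trace.ChangeEvaluatorScoreRequest{}, callopt.WithRPCTimeout(3*time.Second)); err != nil {
		log.Printf("ChangeEvaluatorScore: %v", err)
	}
	if resp, err := cli.ListAnnotationEvaluators(ctx, &trace.ListAnnotationEvaluatorsRequest{}); err == nil {
		log.Printf("ListAnnotationEvaluators: %+v", resp)
	}
	if resp, err := cli.ExtractSpanInfo(ctx, &trace.ExtractSpanInfoRequest{}); err == nil {
		log.Printf("ExtractSpanInfo: %+v", resp)
	}
}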
@@ -131,3 +134,18 @@ func (p *kTraceServiceClient) PreviewExportTracesToDataset(ctx context.Context, ctx = client.NewCtxWithCallOptions(ctx, callOptions) return p.kClient.PreviewExportTracesToDataset(ctx, req) } + +func (p *kTraceServiceClient) ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest, callOptions ...callopt.Option) (r *trace.ChangeEvaluatorScoreResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ChangeEvaluatorScore(ctx, req) +} + +func (p *kTraceServiceClient) ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest, callOptions ...callopt.Option) (r *trace.ListAnnotationEvaluatorsResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ListAnnotationEvaluators(ctx, req) +} + +func (p *kTraceServiceClient) ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest, callOptions ...callopt.Option) (r *trace.ExtractSpanInfoResponse, err error) { + ctx = client.NewCtxWithCallOptions(ctx, callOptions) + return p.kClient.ExtractSpanInfo(ctx, req) +} diff --git a/backend/kitex_gen/coze/loop/observability/trace/traceservice/traceservice.go b/backend/kitex_gen/coze/loop/observability/trace/traceservice/traceservice.go index 61f1fda71..0aeace78b 100644 --- a/backend/kitex_gen/coze/loop/observability/trace/traceservice/traceservice.go +++ b/backend/kitex_gen/coze/loop/observability/trace/traceservice/traceservice.go @@ -118,6 +118,27 @@ var serviceMethods = map[string]kitex.MethodInfo{ false, kitex.WithStreamingMode(kitex.StreamingNone), ), + "ChangeEvaluatorScore": kitex.NewMethodInfo( + changeEvaluatorScoreHandler, + newTraceServiceChangeEvaluatorScoreArgs, + newTraceServiceChangeEvaluatorScoreResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "ListAnnotationEvaluators": kitex.NewMethodInfo( + listAnnotationEvaluatorsHandler, + newTraceServiceListAnnotationEvaluatorsArgs, + newTraceServiceListAnnotationEvaluatorsResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), + "ExtractSpanInfo": kitex.NewMethodInfo( + extractSpanInfoHandler, + newTraceServiceExtractSpanInfoArgs, + newTraceServiceExtractSpanInfoResult, + false, + kitex.WithStreamingMode(kitex.StreamingNone), + ), } var ( @@ -436,6 +457,63 @@ func newTraceServicePreviewExportTracesToDatasetResult() interface{} { return trace.NewTraceServicePreviewExportTracesToDatasetResult() } +func changeEvaluatorScoreHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*trace.TraceServiceChangeEvaluatorScoreArgs) + realResult := result.(*trace.TraceServiceChangeEvaluatorScoreResult) + success, err := handler.(trace.TraceService).ChangeEvaluatorScore(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTraceServiceChangeEvaluatorScoreArgs() interface{} { + return trace.NewTraceServiceChangeEvaluatorScoreArgs() +} + +func newTraceServiceChangeEvaluatorScoreResult() interface{} { + return trace.NewTraceServiceChangeEvaluatorScoreResult() +} + +func listAnnotationEvaluatorsHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*trace.TraceServiceListAnnotationEvaluatorsArgs) + realResult := result.(*trace.TraceServiceListAnnotationEvaluatorsResult) + success, err := handler.(trace.TraceService).ListAnnotationEvaluators(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + 
return nil +} + +func newTraceServiceListAnnotationEvaluatorsArgs() interface{} { + return trace.NewTraceServiceListAnnotationEvaluatorsArgs() +} + +func newTraceServiceListAnnotationEvaluatorsResult() interface{} { + return trace.NewTraceServiceListAnnotationEvaluatorsResult() +} + +func extractSpanInfoHandler(ctx context.Context, handler interface{}, arg, result interface{}) error { + realArg := arg.(*trace.TraceServiceExtractSpanInfoArgs) + realResult := result.(*trace.TraceServiceExtractSpanInfoResult) + success, err := handler.(trace.TraceService).ExtractSpanInfo(ctx, realArg.Req) + if err != nil { + return err + } + realResult.Success = success + return nil +} + +func newTraceServiceExtractSpanInfoArgs() interface{} { + return trace.NewTraceServiceExtractSpanInfoArgs() +} + +func newTraceServiceExtractSpanInfoResult() interface{} { + return trace.NewTraceServiceExtractSpanInfoResult() +} + type kClient struct { c client.Client sc client.Streaming @@ -597,3 +675,33 @@ func (p *kClient) PreviewExportTracesToDataset(ctx context.Context, req *trace.P } return _result.GetSuccess(), nil } + +func (p *kClient) ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest) (r *trace.ChangeEvaluatorScoreResponse, err error) { + var _args trace.TraceServiceChangeEvaluatorScoreArgs + _args.Req = req + var _result trace.TraceServiceChangeEvaluatorScoreResult + if err = p.c.Call(ctx, "ChangeEvaluatorScore", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest) (r *trace.ListAnnotationEvaluatorsResponse, err error) { + var _args trace.TraceServiceListAnnotationEvaluatorsArgs + _args.Req = req + var _result trace.TraceServiceListAnnotationEvaluatorsResult + if err = p.c.Call(ctx, "ListAnnotationEvaluators", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} + +func (p *kClient) ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest) (r *trace.ExtractSpanInfoResponse, err error) { + var _args trace.TraceServiceExtractSpanInfoArgs + _args.Req = req + var _result trace.TraceServiceExtractSpanInfoResult + if err = p.c.Call(ctx, "ExtractSpanInfo", &_args, &_result); err != nil { + return + } + return _result.GetSuccess(), nil +} diff --git a/backend/loop_gen/coze/loop/observability/lotask/local_taskservice.go b/backend/loop_gen/coze/loop/observability/lotask/local_taskservice.go new file mode 100644 index 000000000..4119e4def --- /dev/null +++ b/backend/loop_gen/coze/loop/observability/lotask/local_taskservice.go @@ -0,0 +1,140 @@ +// Code generated by cozeloop. DO NOT EDIT. 
+package lotask // import github.com/coze-dev/coze-loop/backend/lotask + +import ( + "context" + + "github.com/cloudwego/kitex/client/callopt" + "github.com/cloudwego/kitex/pkg/endpoint" + "github.com/cloudwego/kitex/pkg/rpcinfo" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" +) + +type LocalTaskService struct { + impl task.TaskService // the service implementation + mds endpoint.Middleware +} + +func NewLocalTaskService(impl task.TaskService, mds ...endpoint.Middleware) *LocalTaskService { + return &LocalTaskService{ + impl: impl, + mds: endpoint.Chain(mds...), + } +} + +func (l *LocalTaskService) CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest, callOptions ...callopt.Option) (*task.CheckTaskNameResponse, error) { + chain := l.mds(func(ctx context.Context, in, out interface{}) error { + arg := in.(*task.TaskServiceCheckTaskNameArgs) + result := out.(*task.TaskServiceCheckTaskNameResult) + resp, err := l.impl.CheckTaskName(ctx, arg.Req) + if err != nil { + return err + } + result.SetSuccess(resp) + return nil + }) + + arg := &task.TaskServiceCheckTaskNameArgs{Req: req} + result := &task.TaskServiceCheckTaskNameResult{} + ctx = l.injectRPCInfo(ctx, "CheckTaskName") + if err := chain(ctx, arg, result); err != nil { + return nil, err + } + return result.GetSuccess(), nil +} + +func (l *LocalTaskService) CreateTask(ctx context.Context, req *task.CreateTaskRequest, callOptions ...callopt.Option) (*task.CreateTaskResponse, error) { + chain := l.mds(func(ctx context.Context, in, out interface{}) error { + arg := in.(*task.TaskServiceCreateTaskArgs) + result := out.(*task.TaskServiceCreateTaskResult) + resp, err := l.impl.CreateTask(ctx, arg.Req) + if err != nil { + return err + } + result.SetSuccess(resp) + return nil + }) + + arg := &task.TaskServiceCreateTaskArgs{Req: req} + result := &task.TaskServiceCreateTaskResult{} + ctx = l.injectRPCInfo(ctx, "CreateTask") + if err := chain(ctx, arg, result); err != nil { + return nil, err + } + return result.GetSuccess(), nil +} + +func (l *LocalTaskService) UpdateTask(ctx context.Context, req *task.UpdateTaskRequest, callOptions ...callopt.Option) (*task.UpdateTaskResponse, error) { + chain := l.mds(func(ctx context.Context, in, out interface{}) error { + arg := in.(*task.TaskServiceUpdateTaskArgs) + result := out.(*task.TaskServiceUpdateTaskResult) + resp, err := l.impl.UpdateTask(ctx, arg.Req) + if err != nil { + return err + } + result.SetSuccess(resp) + return nil + }) + + arg := &task.TaskServiceUpdateTaskArgs{Req: req} + result := &task.TaskServiceUpdateTaskResult{} + ctx = l.injectRPCInfo(ctx, "UpdateTask") + if err := chain(ctx, arg, result); err != nil { + return nil, err + } + return result.GetSuccess(), nil +} + +func (l *LocalTaskService) ListTasks(ctx context.Context, req *task.ListTasksRequest, callOptions ...callopt.Option) (*task.ListTasksResponse, error) { + chain := l.mds(func(ctx context.Context, in, out interface{}) error { + arg := in.(*task.TaskServiceListTasksArgs) + result := out.(*task.TaskServiceListTasksResult) + resp, err := l.impl.ListTasks(ctx, arg.Req) + if err != nil { + return err + } + result.SetSuccess(resp) + return nil + }) + + arg := &task.TaskServiceListTasksArgs{Req: req} + result := &task.TaskServiceListTasksResult{} + ctx = l.injectRPCInfo(ctx, "ListTasks") + if err := chain(ctx, arg, result); err != nil { + return nil, err + } + return result.GetSuccess(), nil +} + +func (l *LocalTaskService) GetTask(ctx context.Context, req *task.GetTaskRequest, callOptions 
...callopt.Option) (*task.GetTaskResponse, error) { + chain := l.mds(func(ctx context.Context, in, out interface{}) error { + arg := in.(*task.TaskServiceGetTaskArgs) + result := out.(*task.TaskServiceGetTaskResult) + resp, err := l.impl.GetTask(ctx, arg.Req) + if err != nil { + return err + } + result.SetSuccess(resp) + return nil + }) + + arg := &task.TaskServiceGetTaskArgs{Req: req} + result := &task.TaskServiceGetTaskResult{} + ctx = l.injectRPCInfo(ctx, "GetTask") + if err := chain(ctx, arg, result); err != nil { + return nil, err + } + return result.GetSuccess(), nil +} + +func (l *LocalTaskService) injectRPCInfo(ctx context.Context, method string) context.Context { + rpcStats := rpcinfo.AsMutableRPCStats(rpcinfo.NewRPCStats()) + ri := rpcinfo.NewRPCInfo( + rpcinfo.NewEndpointInfo("TaskService", method, nil, nil), + rpcinfo.NewEndpointInfo("TaskService", method, nil, nil), + rpcinfo.NewServerInvocation(), + nil, + rpcStats.ImmutableView(), + ) + return rpcinfo.NewCtxWithRPCInfo(ctx, ri) +} diff --git a/backend/loop_gen/coze/loop/observability/lotrace/local_traceservice.go b/backend/loop_gen/coze/loop/observability/lotrace/local_traceservice.go index 9f26b1ecc..4a8f153e4 100644 --- a/backend/loop_gen/coze/loop/observability/lotrace/local_traceservice.go +++ b/backend/loop_gen/coze/loop/observability/lotrace/local_traceservice.go @@ -337,6 +337,69 @@ func (l *LocalTraceService) PreviewExportTracesToDataset(ctx context.Context, Re return result.GetSuccess(), nil } +func (l *LocalTraceService) ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest, callOptions ...callopt.Option) (*trace.ChangeEvaluatorScoreResponse, error) { + chain := l.mds(func(ctx context.Context, in, out interface{}) error { + arg := in.(*trace.TraceServiceChangeEvaluatorScoreArgs) + result := out.(*trace.TraceServiceChangeEvaluatorScoreResult) + resp, err := l.impl.ChangeEvaluatorScore(ctx, arg.Req) + if err != nil { + return err + } + result.SetSuccess(resp) + return nil + }) + + arg := &trace.TraceServiceChangeEvaluatorScoreArgs{Req: req} + result := &trace.TraceServiceChangeEvaluatorScoreResult{} + ctx = l.injectRPCInfo(ctx, "ChangeEvaluatorScore") + if err := chain(ctx, arg, result); err != nil { + return nil, err + } + return result.GetSuccess(), nil +} + +func (l *LocalTraceService) ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest, callOptions ...callopt.Option) (*trace.ListAnnotationEvaluatorsResponse, error) { + chain := l.mds(func(ctx context.Context, in, out interface{}) error { + arg := in.(*trace.TraceServiceListAnnotationEvaluatorsArgs) + result := out.(*trace.TraceServiceListAnnotationEvaluatorsResult) + resp, err := l.impl.ListAnnotationEvaluators(ctx, arg.Req) + if err != nil { + return err + } + result.SetSuccess(resp) + return nil + }) + + arg := &trace.TraceServiceListAnnotationEvaluatorsArgs{Req: req} + result := &trace.TraceServiceListAnnotationEvaluatorsResult{} + ctx = l.injectRPCInfo(ctx, "ListAnnotationEvaluators") + if err := chain(ctx, arg, result); err != nil { + return nil, err + } + return result.GetSuccess(), nil +} + +func (l *LocalTraceService) ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest, callOptions ...callopt.Option) (*trace.ExtractSpanInfoResponse, error) { + chain := l.mds(func(ctx context.Context, in, out interface{}) error { + arg := in.(*trace.TraceServiceExtractSpanInfoArgs) + result := out.(*trace.TraceServiceExtractSpanInfoResult) + resp, err := l.impl.ExtractSpanInfo(ctx, arg.Req) 
+ if err != nil { + return err + } + result.SetSuccess(resp) + return nil + }) + + arg := &trace.TraceServiceExtractSpanInfoArgs{Req: req} + result := &trace.TraceServiceExtractSpanInfoResult{} + ctx = l.injectRPCInfo(ctx, "ExtractSpanInfo") + if err := chain(ctx, arg, result); err != nil { + return nil, err + } + return result.GetSuccess(), nil +} + func (l *LocalTraceService) injectRPCInfo(ctx context.Context, method string) context.Context { rpcStats := rpcinfo.AsMutableRPCStats(rpcinfo.NewRPCStats()) ri := rpcinfo.NewRPCInfo( diff --git a/backend/modules/data/domain/component/conf/conf.go b/backend/modules/data/domain/component/conf/conf.go index ceaec66d3..11d43bc42 100644 --- a/backend/modules/data/domain/component/conf/conf.go +++ b/backend/modules/data/domain/component/conf/conf.go @@ -69,6 +69,7 @@ type ConsumerConfig struct { ConsumeGoroutineNums int `mapstructure:"consume_goroutine_nums"` // Timeout for consumer one message ConsumeTimeout time.Duration `mapstructure:"consume_timeout"` + EnablePPE *bool `mapstructure:"enable_ppe"` } type TagSpec struct { diff --git a/backend/modules/data/infra/mq/consumer/dataset_job_comsumer.go b/backend/modules/data/infra/mq/consumer/dataset_job_comsumer.go index d5f603404..f770b2892 100644 --- a/backend/modules/data/infra/mq/consumer/dataset_job_comsumer.go +++ b/backend/modules/data/infra/mq/consumer/dataset_job_comsumer.go @@ -7,7 +7,6 @@ import ( "context" json "github.com/bytedance/sonic" - "github.com/coze-dev/coze-loop/backend/infra/mq" "github.com/coze-dev/coze-loop/backend/modules/data/application" dataset_conf "github.com/coze-dev/coze-loop/backend/modules/data/domain/component/conf" @@ -45,6 +44,7 @@ func (e *DatasetJobConsumer) ConsumerCfg(ctx context.Context) (*mq.ConsumerConfi ConsumeTimeout: cfg.ConsumeTimeout, TagExpression: cfg.TagExpression, ConsumeGoroutineNums: cfg.ConsumeGoroutineNums, + EnablePPE: cfg.EnablePPE, } return res, nil } diff --git a/backend/modules/evaluation/application/convertor/evaluation_set/evaluation_set_item.go b/backend/modules/evaluation/application/convertor/evaluation_set/evaluation_set_item.go index 5c59e4519..115ec9a6b 100644 --- a/backend/modules/evaluation/application/convertor/evaluation_set/evaluation_set_item.go +++ b/backend/modules/evaluation/application/convertor/evaluation_set/evaluation_set_item.go @@ -165,6 +165,28 @@ func ItemErrorGroupDO2DTOs(dos []*entity.ItemErrorGroup) []*dataset.ItemErrorGro return result } +func CreateDatasetItemOutputDO2DTOs(dos []*entity.CreateDatasetItemOutput) []*dataset.CreateDatasetItemOutput { + if dos == nil { + return nil + } + result := make([]*dataset.CreateDatasetItemOutput, 0) + for _, do := range dos { + result = append(result, CreateDatasetItemOutputDO2DTO(do)) + } + return result +} +func CreateDatasetItemOutputDO2DTO(do *entity.CreateDatasetItemOutput) *dataset.CreateDatasetItemOutput { + if do == nil { + return nil + } + return &dataset.CreateDatasetItemOutput{ + ItemIndex: do.ItemIndex, + ItemKey: do.ItemKey, + ItemID: do.ItemID, + IsNewItem: do.IsNewItem, + } +} + func ItemErrorGroupDO2DTO(do *entity.ItemErrorGroup) *dataset.ItemErrorGroup { if do == nil { return nil diff --git a/backend/modules/evaluation/application/evaluation_set_app.go b/backend/modules/evaluation/application/evaluation_set_app.go index 6dd473041..c42f4a52f 100644 --- a/backend/modules/evaluation/application/evaluation_set_app.go +++ b/backend/modules/evaluation/application/evaluation_set_app.go @@ -77,14 +77,16 @@ func (e *EvaluationSetApplicationImpl) 
CreateEvaluationSet(ctx context.Context, if req.EvaluationSetSchema == nil { return nil, errorx.NewByCode(errno.CommonInvalidParamCode, errorx.WithExtraMsg("schema is nil")) } - // 鉴权 - err = e.auth.Authorization(ctx, &rpc.AuthorizationParam{ - ObjectID: strconv.FormatInt(req.WorkspaceID, 10), - SpaceID: req.WorkspaceID, - ActionObjects: []*rpc.ActionObject{{Action: gptr.Of("createLoopEvaluationSet"), EntityType: gptr.Of(rpc.AuthEntityType_Space)}}, - }) - if err != nil { - return nil, err + if req.Session == nil { + // 鉴权 + err = e.auth.Authorization(ctx, &rpc.AuthorizationParam{ + ObjectID: strconv.FormatInt(req.WorkspaceID, 10), + SpaceID: req.WorkspaceID, + ActionObjects: []*rpc.ActionObject{{Action: gptr.Of("createLoopEvaluationSet"), EntityType: gptr.Of(rpc.AuthEntityType_Space)}}, + }) + if err != nil { + return nil, err + } } // domain调用 var session *entity.Session @@ -292,7 +294,7 @@ func (e *EvaluationSetApplicationImpl) BatchCreateEvaluationSetItems(ctx context return nil, err } // domain调用 - idMap, errors, err := e.evaluationSetItemService.BatchCreateEvaluationSetItems(ctx, &entity.BatchCreateEvaluationSetItemsParam{ + idMap, errors, itemOutputs, err := e.evaluationSetItemService.BatchCreateEvaluationSetItems(ctx, &entity.BatchCreateEvaluationSetItemsParam{ SpaceID: req.WorkspaceID, EvaluationSetID: req.EvaluationSetID, Items: evaluation_set.ItemDTO2DOs(req.Items), @@ -304,8 +306,9 @@ func (e *EvaluationSetApplicationImpl) BatchCreateEvaluationSetItems(ctx context } // 返回结果构建、错误处理 return &eval_set.BatchCreateEvaluationSetItemsResponse{ - AddedItems: idMap, - Errors: evaluation_set.ItemErrorGroupDO2DTOs(errors), + AddedItems: idMap, + Errors: evaluation_set.ItemErrorGroupDO2DTOs(errors), + ItemOutputs: evaluation_set.CreateDatasetItemOutputDO2DTOs(itemOutputs), }, nil } diff --git a/backend/modules/evaluation/application/experiment_app.go b/backend/modules/evaluation/application/experiment_app.go index dd8d10f19..a399b44ff 100644 --- a/backend/modules/evaluation/application/experiment_app.go +++ b/backend/modules/evaluation/application/experiment_app.go @@ -706,7 +706,7 @@ func (e *experimentApplication) InvokeExperiment(ctx context.Context, req *expt. return nil, errorx.NewByCode(errno.CommonInvalidParamCode, errorx.WithExtraMsg("expt status not allow to invoke")) } itemDOS := evaluation_set.ItemDTO2DOs(req.Items) - idMap, evalSetErrors, err := e.evaluationSetItemService.BatchCreateEvaluationSetItems(ctx, &entity.BatchCreateEvaluationSetItemsParam{ + idMap, evalSetErrors, itemOutputs, err := e.evaluationSetItemService.BatchCreateEvaluationSetItems(ctx, &entity.BatchCreateEvaluationSetItemsParam{ SpaceID: req.GetWorkspaceID(), EvaluationSetID: req.GetEvaluationSetID(), Items: itemDOS, @@ -740,9 +740,10 @@ func (e *experimentApplication) InvokeExperiment(ctx context.Context, req *expt. 
} return &expt.InvokeExperimentResponse{ - AddedItems: idMap, - Errors: evaluation_set.ItemErrorGroupDO2DTOs(evalSetErrors), - BaseResp: base.NewBaseResp(), + AddedItems: idMap, + Errors: evaluation_set.ItemErrorGroupDO2DTOs(evalSetErrors), + ItemOutputs: evaluation_set.CreateDatasetItemOutputDO2DTOs(itemOutputs), + BaseResp: base.NewBaseResp(), }, nil } diff --git a/backend/modules/evaluation/domain/component/rpc/dataset.go b/backend/modules/evaluation/domain/component/rpc/dataset.go index 8f3fe3034..33a93c3cb 100644 --- a/backend/modules/evaluation/domain/component/rpc/dataset.go +++ b/backend/modules/evaluation/domain/component/rpc/dataset.go @@ -25,7 +25,7 @@ type IDatasetRPCAdapter interface { UpdateDatasetSchema(ctx context.Context, spaceID, evaluationSetID int64, schemas []*entity.FieldSchema) (err error) - BatchCreateDatasetItems(ctx context.Context, param *BatchCreateDatasetItemsParam) (idMap map[int64]int64, errorGroup []*entity.ItemErrorGroup, err error) + BatchCreateDatasetItems(ctx context.Context, param *BatchCreateDatasetItemsParam) (idMap map[int64]int64, errorGroup []*entity.ItemErrorGroup, itemOutputs []*entity.CreateDatasetItemOutput, err error) UpdateDatasetItem(ctx context.Context, spaceID, evaluationSetID, itemID int64, turns []*entity.Turn) (err error) BatchDeleteDatasetItems(ctx context.Context, spaceID, evaluationSetID int64, itemIDs []int64) (err error) ListDatasetItems(ctx context.Context, param *ListDatasetItemsParam) (items []*entity.EvaluationSetItem, total *int64, nextPageToken *string, err error) diff --git a/backend/modules/evaluation/domain/component/rpc/mocks/data_provider.go b/backend/modules/evaluation/domain/component/rpc/mocks/data_provider.go index 709382d8c..5606fed70 100644 --- a/backend/modules/evaluation/domain/component/rpc/mocks/data_provider.go +++ b/backend/modules/evaluation/domain/component/rpc/mocks/data_provider.go @@ -43,13 +43,14 @@ func (m *MockIDatasetRPCAdapter) EXPECT() *MockIDatasetRPCAdapterMockRecorder { } // BatchCreateDatasetItems mocks base method. -func (m *MockIDatasetRPCAdapter) BatchCreateDatasetItems(ctx context.Context, param *rpc.BatchCreateDatasetItemsParam) (map[int64]int64, []*entity.ItemErrorGroup, error) { +func (m *MockIDatasetRPCAdapter) BatchCreateDatasetItems(ctx context.Context, param *rpc.BatchCreateDatasetItemsParam) (map[int64]int64, []*entity.ItemErrorGroup, []*entity.CreateDatasetItemOutput, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BatchCreateDatasetItems", ctx, param) ret0, _ := ret[0].(map[int64]int64) ret1, _ := ret[1].([]*entity.ItemErrorGroup) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret2, _ := ret[2].([]*entity.CreateDatasetItemOutput) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 } // BatchCreateDatasetItems indicates an expected call of BatchCreateDatasetItems. 
diff --git a/backend/modules/evaluation/domain/entity/evaluation.go b/backend/modules/evaluation/domain/entity/evaluation.go new file mode 100644 index 000000000..8edaf6d23 --- /dev/null +++ b/backend/modules/evaluation/domain/entity/evaluation.go @@ -0,0 +1,4 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package entity diff --git a/backend/modules/evaluation/domain/entity/evaluation_set_item.go b/backend/modules/evaluation/domain/entity/evaluation_set_item.go index 9bb344164..1cc0886c1 100644 --- a/backend/modules/evaluation/domain/entity/evaluation_set_item.go +++ b/backend/modules/evaluation/domain/entity/evaluation_set_item.go @@ -163,3 +163,12 @@ type ItemSnapshotFieldMapping struct { // tag_array时,无值 MappingSubKey string `json:"mapping_subKey"` } + +type CreateDatasetItemOutput struct { + // Index of this item within BatchCreateDatasetItemsReq.items + ItemIndex *int32 + ItemKey *string + ItemID *int64 + // Whether this is a new item. When an itemKey is provided and the dataset already contains data for that itemKey, the item does not count as new and this field is false. + IsNewItem *bool +} diff --git a/backend/modules/evaluation/domain/service/evaluation_set_item.go b/backend/modules/evaluation/domain/service/evaluation_set_item.go index 7e10d5cde..912ae3d17 100644 --- a/backend/modules/evaluation/domain/service/evaluation_set_item.go +++ b/backend/modules/evaluation/domain/service/evaluation_set_item.go @@ -11,7 +11,7 @@ import ( //go:generate mockgen -destination=mocks/evaluation_set_item.go -package=mocks . EvaluationSetItemService type EvaluationSetItemService interface { - BatchCreateEvaluationSetItems(ctx context.Context, param *entity.BatchCreateEvaluationSetItemsParam) (idMap map[int64]int64, errors []*entity.ItemErrorGroup, err error) + BatchCreateEvaluationSetItems(ctx context.Context, param *entity.BatchCreateEvaluationSetItemsParam) (idMap map[int64]int64, errors []*entity.ItemErrorGroup, itemOutputs []*entity.CreateDatasetItemOutput, err error) UpdateEvaluationSetItem(ctx context.Context, spaceID, evaluationSetID, itemID int64, turns []*entity.Turn) (err error) BatchDeleteEvaluationSetItems(ctx context.Context, spaceID, evaluationSetID int64, itemIDs []int64) (err error) ListEvaluationSetItems(ctx context.Context, param *entity.ListEvaluationSetItemsParam) (items []*entity.EvaluationSetItem, total *int64, nextPageToken *string, err error) diff --git a/backend/modules/evaluation/domain/service/evaluation_set_item_impl.go b/backend/modules/evaluation/domain/service/evaluation_set_item_impl.go index ae3a6739b..793464d8c 100644 --- a/backend/modules/evaluation/domain/service/evaluation_set_item_impl.go +++ b/backend/modules/evaluation/domain/service/evaluation_set_item_impl.go @@ -31,9 +31,9 @@ func NewEvaluationSetItemServiceImpl(datasetRPCAdapter rpc.IDatasetRPCAdapter) E return evaluationSetItemServiceImpl } -func (d *EvaluationSetItemServiceImpl) BatchCreateEvaluationSetItems(ctx context.Context, param *entity.BatchCreateEvaluationSetItemsParam) (idMap map[int64]int64, errors []*entity.ItemErrorGroup, err error) { +func (d *EvaluationSetItemServiceImpl) BatchCreateEvaluationSetItems(ctx context.Context, param *entity.BatchCreateEvaluationSetItemsParam) (idMap map[int64]int64, errors []*entity.ItemErrorGroup, itemOutputs []*entity.CreateDatasetItemOutput, err error) { if param == nil { - return nil, nil, errorx.NewByCode(errno.CommonInternalErrorCode) + return nil, nil, nil, errorx.NewByCode(errno.CommonInternalErrorCode) } return d.datasetRPCAdapter.BatchCreateDatasetItems(ctx, &rpc.BatchCreateDatasetItemsParam{ SpaceID: param.SpaceID,
diff --git a/backend/modules/evaluation/domain/service/expt_run_item_impl.go b/backend/modules/evaluation/domain/service/expt_run_item_impl.go index f78e41304..b5852e423 100644 --- a/backend/modules/evaluation/domain/service/expt_run_item_impl.go +++ b/backend/modules/evaluation/domain/service/expt_run_item_impl.go @@ -200,6 +200,12 @@ func (e *ExptItemEvalCtxExecutor) buildExptTurnEvalCtx(ctx context.Context, turn if fieldData.Name == "span_id" { etec.Ext["span_id"] = fieldData.Content.GetText() } + if fieldData.Name == "run_id" { + etec.Ext["run_id"] = fieldData.Content.GetText() + } + if fieldData.Name == "trace_id" { + etec.Ext["trace_id"] = fieldData.Content.GetText() + } } etec.Ext["task_id"] = eiec.Expt.SourceID etec.Ext["workspace_id"] = strconv.FormatInt(eiec.Expt.SpaceID, 10) diff --git a/backend/modules/evaluation/domain/service/expt_run_scheduler_event_impl.go b/backend/modules/evaluation/domain/service/expt_run_scheduler_event_impl.go index cc4274619..ef6d4b2c1 100644 --- a/backend/modules/evaluation/domain/service/expt_run_scheduler_event_impl.go +++ b/backend/modules/evaluation/domain/service/expt_run_scheduler_event_impl.go @@ -283,6 +283,7 @@ func (e *ExptSchedulerImpl) recordEvalItemRunLogs(ctx context.Context, event *en return err } time.Sleep(time.Millisecond * 50) + logs.CtxInfo(ctx, "[ExptEval] recordEvalItemRunLogs publish result, expt_id: %v, event: %v, item_id: %v, turn_evaluator_refs: %v", event.ExptID, event, item.ItemID, json.Jsonify(turnEvaluatorRefs)) err := mode.PublishResult(ctx, turnEvaluatorRefs, event) if err != nil { logs.CtxError(ctx, "publish online result fail, err: %v", err) diff --git a/backend/modules/evaluation/domain/service/expt_run_scheduler_mode_impl.go b/backend/modules/evaluation/domain/service/expt_run_scheduler_mode_impl.go index c4cd319a3..504ec7db6 100644 --- a/backend/modules/evaluation/domain/service/expt_run_scheduler_mode_impl.go +++ b/backend/modules/evaluation/domain/service/expt_run_scheduler_mode_impl.go @@ -532,6 +532,7 @@ func (e *ExptFailRetryExec) NextTick(ctx context.Context, event *entity.ExptSche func (e *ExptFailRetryExec) PublishResult(ctx context.Context, turnEvaluatorRefs []*entity.ExptTurnEvaluatorResultRef, event *entity.ExptScheduleEvent) error { if event.ExptType != entity.ExptType_Offline { // 不等于offline用于兼容历史数据,不带type的都先放行 + logs.CtxInfo(ctx, "[ExptEval] ExptFailRetryExec publishResult, expt_id: %v, event: %v", event.ExptID, event) return newExptBaseExec(e.manager, e.idem, e.configer, e.exptItemResultRepo, e.publisher, e.evaluatorRecordService).publishResult(ctx, turnEvaluatorRefs, event) } return nil @@ -660,6 +661,7 @@ func (e *ExptAppendExec) NextTick(ctx context.Context, event *entity.ExptSchedul } func (e *ExptAppendExec) PublishResult(ctx context.Context, turnEvaluatorRefs []*entity.ExptTurnEvaluatorResultRef, event *entity.ExptScheduleEvent) error { + logs.CtxInfo(ctx, "[ExptEval] ExptAppendExec publishResult, expt_id: %v, event: %v", event.ExptID, event) return newExptBaseExec(e.manager, e.idem, e.configer, e.exptItemResultRepo, e.publisher, e.evaluatorRecordService).publishResult(ctx, turnEvaluatorRefs, event) } @@ -772,6 +774,7 @@ func (e *exptBaseExec) exptEnd(ctx context.Context, event *entity.ExptScheduleEv } func (e *exptBaseExec) publishResult(ctx context.Context, turnEvaluatorRefs []*entity.ExptTurnEvaluatorResultRef, event *entity.ExptScheduleEvent) error { + logs.CtxInfo(ctx, "[ExptEval] publishResult, expt_id: %v, event: %v", event.ExptID, event) if len(turnEvaluatorRefs) == 0 { return nil } 
diff --git a/backend/modules/evaluation/domain/service/mocks/evaluation_set_item.go b/backend/modules/evaluation/domain/service/mocks/evaluation_set_item.go index 770eb694e..c79b809ff 100644 --- a/backend/modules/evaluation/domain/service/mocks/evaluation_set_item.go +++ b/backend/modules/evaluation/domain/service/mocks/evaluation_set_item.go @@ -42,13 +42,14 @@ func (m *MockEvaluationSetItemService) EXPECT() *MockEvaluationSetItemServiceMoc } // BatchCreateEvaluationSetItems mocks base method. -func (m *MockEvaluationSetItemService) BatchCreateEvaluationSetItems(ctx context.Context, param *entity.BatchCreateEvaluationSetItemsParam) (map[int64]int64, []*entity.ItemErrorGroup, error) { +func (m *MockEvaluationSetItemService) BatchCreateEvaluationSetItems(ctx context.Context, param *entity.BatchCreateEvaluationSetItemsParam) (map[int64]int64, []*entity.ItemErrorGroup, []*entity.CreateDatasetItemOutput, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BatchCreateEvaluationSetItems", ctx, param) ret0, _ := ret[0].(map[int64]int64) ret1, _ := ret[1].([]*entity.ItemErrorGroup) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 + ret2, _ := ret[2].([]*entity.CreateDatasetItemOutput) + ret3, _ := ret[3].(error) + return ret0, ret1, ret2, ret3 } // BatchCreateEvaluationSetItems indicates an expected call of BatchCreateEvaluationSetItems. diff --git a/backend/modules/evaluation/infra/mq/rocket/conf.go b/backend/modules/evaluation/infra/mq/rocket/conf.go index 282d6c753..b174f470f 100644 --- a/backend/modules/evaluation/infra/mq/rocket/conf.go +++ b/backend/modules/evaluation/infra/mq/rocket/conf.go @@ -33,6 +33,7 @@ type RMQConf struct { ConsumerGroup string `mapstructure:"consumer_group"` WorkerNum int `mapstructure:"worker_num"` ConsumeTimeout time.Duration `mapstructure:"consume_timeout"` + EnablePPE *bool `mapstructure:"enable_ppe"` } func (c *RMQConf) Valid() bool { @@ -57,5 +58,6 @@ func (c *RMQConf) ToConsumerCfg() mq.ConsumerConfig { ConsumerGroup: c.ConsumerGroup, ConsumeGoroutineNums: c.WorkerNum, ConsumeTimeout: c.ConsumeTimeout, + EnablePPE: c.EnablePPE, } } diff --git a/backend/modules/evaluation/infra/rpc/data/dataset.go b/backend/modules/evaluation/infra/rpc/data/dataset.go index fb8cd6263..78f9855fd 100644 --- a/backend/modules/evaluation/infra/rpc/data/dataset.go +++ b/backend/modules/evaluation/infra/rpc/data/dataset.go @@ -276,10 +276,10 @@ func (a *DatasetRPCAdapter) UpdateDatasetSchema(ctx context.Context, spaceID, ev return nil } -func (a *DatasetRPCAdapter) BatchCreateDatasetItems(ctx context.Context, param *rpc.BatchCreateDatasetItemsParam) (idMap map[int64]int64, errorGroup []*entity.ItemErrorGroup, err error) { +func (a *DatasetRPCAdapter) BatchCreateDatasetItems(ctx context.Context, param *rpc.BatchCreateDatasetItemsParam) (idMap map[int64]int64, errorGroup []*entity.ItemErrorGroup, itemOutputs []*entity.CreateDatasetItemOutput, err error) { datasetItems, err := convert2DatasetItems(ctx, param.Items) if err != nil { - return nil, nil, err + return nil, nil, nil, err } resp, err := a.client.BatchCreateDatasetItems(ctx, &dataset.BatchCreateDatasetItemsRequest{ WorkspaceID: ¶m.SpaceID, @@ -289,16 +289,16 @@ func (a *DatasetRPCAdapter) BatchCreateDatasetItems(ctx context.Context, param * AllowPartialAdd: param.AllowPartialAdd, }) if err != nil { - return nil, nil, err + return nil, nil, nil, err } if resp == nil { - return nil, nil, errorx.NewByCode(errno.CommonRPCErrorCode) + return nil, nil, nil, errorx.NewByCode(errno.CommonRPCErrorCode) } if resp.BaseResp != nil && 
resp.BaseResp.StatusCode != 0 { logs.CtxInfo(ctx, "BatchCreateDatasetItems resp: %v", json.Jsonify(resp)) - return nil, nil, errorx.NewByCode(resp.BaseResp.StatusCode, errorx.WithExtraMsg(resp.BaseResp.StatusMessage)) + return nil, nil, nil, errorx.NewByCode(resp.BaseResp.StatusCode, errorx.WithExtraMsg(resp.BaseResp.StatusMessage)) } - return resp.GetAddedItems(), convert2EvaluationSetErrorGroups(ctx, resp.GetErrors()), nil + return resp.GetAddedItems(), convert2EvaluationSetErrorGroups(ctx, resp.GetErrors()), nil, nil } func (a *DatasetRPCAdapter) UpdateDatasetItem(ctx context.Context, spaceID, evaluationSetID, itemID int64, turns []*entity.Turn) (err error) { diff --git a/backend/modules/observability/application/convertor/filter.go b/backend/modules/observability/application/convertor/filter.go new file mode 100644 index 000000000..a23b9ee53 --- /dev/null +++ b/backend/modules/observability/application/convertor/filter.go @@ -0,0 +1,90 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package convertor + +import ( + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" +) + +func FilterFieldsDTO2DO(f *filter.FilterFields) *loop_span.FilterFields { + if f == nil { + return nil + } + ret := &loop_span.FilterFields{} + if f.QueryAndOr != nil { + ret.QueryAndOr = ptr.Of(loop_span.QueryAndOrEnum(*f.QueryAndOr)) + } + ret.FilterFields = make([]*loop_span.FilterField, 0) + for _, field := range f.GetFilterFields() { + if field == nil { + continue + } + fieldName := "" + if field.FieldName != nil { + fieldName = *field.FieldName + } + fField := &loop_span.FilterField{ + FieldName: fieldName, + Values: field.Values, + FieldType: fieldTypeDTO2DO(field.FieldType), + } + if field.QueryAndOr != nil { + fField.QueryAndOr = ptr.Of(loop_span.QueryAndOrEnum(*field.QueryAndOr)) + } + if field.QueryType != nil { + fField.QueryType = ptr.Of(loop_span.QueryTypeEnum(*field.QueryType)) + } + if field.SubFilter != nil { + fField.SubFilter = FilterFieldsDTO2DO(field.SubFilter) + } + ret.FilterFields = append(ret.FilterFields, fField) + } + return ret +} + +func fieldTypeDTO2DO(fieldType *filter.FieldType) loop_span.FieldType { + if fieldType == nil { + return loop_span.FieldTypeString + } + return loop_span.FieldType(*fieldType) +} + +func FilterFieldsDO2DTO(f *loop_span.FilterFields) *filter.FilterFields { + if f == nil { + return nil + } + ret := &filter.FilterFields{} + if f.QueryAndOr != nil { + ret.QueryAndOr = ptr.Of(filter.QueryRelation(*f.QueryAndOr)) + } + ret.FilterFields = make([]*filter.FilterField, 0) + for _, field := range f.FilterFields { + if field == nil { + continue + } + fField := &filter.FilterField{ + FieldName: ptr.Of(field.FieldName), + Values: field.Values, + FieldType: fieldTypeDO2DTO(field.FieldType), + } + if field.QueryAndOr != nil { + fField.QueryAndOr = ptr.Of(filter.QueryRelation(*field.QueryAndOr)) + } + if field.QueryType != nil { + fField.QueryType = ptr.Of(filter.QueryType(*field.QueryType)) + } + if field.SubFilter != nil { + fField.SubFilter = FilterFieldsDO2DTO(field.SubFilter) + } + ret.FilterFields = append(ret.FilterFields, fField) + } + return ret +} + +func fieldTypeDO2DTO(fieldType loop_span.FieldType) *filter.FieldType { + result := filter.FieldType(fieldType) + return &result +} \ No newline at end of file diff --git 
a/backend/modules/observability/application/convertor/task/task.go b/backend/modules/observability/application/convertor/task/task.go new file mode 100644 index 000000000..2c03ca4ff --- /dev/null +++ b/backend/modules/observability/application/convertor/task/task.go @@ -0,0 +1,200 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package task + +//func TaskPOs2DOs(ctx context.Context, taskPOs []*entity.ObservabilityTask, userInfos map[string]*entity_common.UserInfo) []*task.Task { +// var taskList []*task.Task +// if len(taskPOs) == 0 { +// return taskList +// } +// for _, v := range taskPOs { +// taskDO := TaskPO2DTO(ctx, v, userInfos) +// taskList = append(taskList, taskDO) +// } +// return taskList +//} +//func TaskPO2DTO(ctx context.Context, v *entity.ObservabilityTask, userMap map[string]*entity_common.UserInfo) *task.Task { +// if v == nil { +// return nil +// } +// var taskDetail *task.RunDetail +// var totalCount, successCount, failedCount int64 +// for _, tr := range v.TaskRuns { +// trDO := TaskRunPO2DTO(ctx, tr, nil) +// if trDO.RunDetail != nil { +// totalCount += *trDO.RunDetail.TotalCount +// successCount += *trDO.RunDetail.SuccessCount +// failedCount += *trDO.RunDetail.FailedCount +// } +// } +// taskDetail = &task.RunDetail{ +// TotalCount: gptr.Of(totalCount), +// SuccessCount: gptr.Of(successCount), +// FailedCount: gptr.Of(failedCount), +// } +// taskInfo := &task.Task{ +// ID: ptr.Of(v.ID), +// Name: v.Name, +// Description: v.Description, +// WorkspaceID: ptr.Of(v.WorkspaceID), +// TaskType: v.TaskType, +// TaskStatus: ptr.Of(v.TaskStatus), +// Rule: RulePO2DO(ctx, v.SpanFilter, v.EffectiveTime, v.Sampler, v.BackfillEffectiveTime), +// TaskConfig: TaskConfigPO2DO(ctx, v.TaskConfig), +// TaskDetail: taskDetail, +// BaseInfo: &common.BaseInfo{ +// CreatedAt: gptr.Of(v.CreatedAt.UnixMilli()), +// UpdatedAt: gptr.Of(v.UpdatedAt.UnixMilli()), +// CreatedBy: UserInfoPO2DO(userMap[v.CreatedBy], v.CreatedBy), +// UpdatedBy: UserInfoPO2DO(userMap[v.UpdatedBy], v.UpdatedBy), +// }, +// } +// return taskInfo +//} + +//func RulePO2DO(ctx context.Context, spanFilter, effectiveTime, sampler, backFillEffectiveTime *string) *task.Rule { +// var spanFilterDO *filter.SpanFilterFields +// if spanFilter != nil { +// spanFilterDO = SpanFilterPO2DO(ctx, spanFilter) +// } +// rule := &task.Rule{ +// SpanFilters: spanFilterDO, +// EffectiveTime: EffectiveTimePO2DO(ctx, effectiveTime), +// Sampler: SamplerPO2DO(ctx, sampler), +// BackfillEffectiveTime: EffectiveTimePO2DO(ctx, backFillEffectiveTime), +// } +// return rule +//} +//func SamplerPO2DO(ctx context.Context, sampler *string) *task.Sampler { +// if sampler == nil { +// return nil +// } +// var samplerDO task.Sampler +// if err := sonic.Unmarshal([]byte(*sampler), &samplerDO); err != nil { +// logs.CtxError(ctx, "SamplerPO2DO sonic.Unmarshal err:%v", err) +// return nil +// } +// return &samplerDO +//} + +//func TaskConfigPO2DO(ctx context.Context, taskConfig *string) *task.TaskConfig { +// if taskConfig == nil { +// return nil +// } +// var taskConfigDO task.TaskConfig +// if err := sonic.Unmarshal([]byte(*taskConfig), &taskConfigDO); err != nil { +// logs.CtxError(ctx, "TaskConfigPO2DO sonic.Unmarshal err:%v", err) +// return nil +// } +// return &taskConfigDO +//} + +//func BatchTaskPO2DTO(ctx context.Context, Tasks []*entity.ObservabilityTask) []*task.Task { +// ret := make([]*task.Task, len(Tasks)) +// for i, v := range Tasks { +// ret[i] = TaskPO2DTO(ctx, v, nil) +// } +// return ret +//} 
+//func EffectiveTimePO2DO(ctx context.Context, effectiveTime *string) *task.EffectiveTime { +// if effectiveTime == nil { +// return nil +// } +// var effectiveTimeDO task.EffectiveTime +// if err := sonic.Unmarshal([]byte(*effectiveTime), &effectiveTimeDO); err != nil { +// logs.CtxError(ctx, "EffectiveTimePO2DO sonic.Unmarshal err:%v", err) +// return nil +// } +// return &effectiveTimeDO +//} + +//func TaskDTO2PO(ctx context.Context, taskDO *task.Task, userID string, spanFilters *filter.SpanFilterFields) *entity.ObservabilityTask { +// if taskDO == nil { +// return nil +// } +// var createdBy, updatedBy string +// if taskDO.GetBaseInfo().GetCreatedBy() != nil { +// createdBy = taskDO.GetBaseInfo().GetCreatedBy().GetUserID() +// } +// if taskDO.GetBaseInfo().GetUpdatedBy() != nil { +// updatedBy = taskDO.GetBaseInfo().GetUpdatedBy().GetUserID() +// } +// if userID != "" { +// createdBy = userID +// updatedBy = userID +// } else { +// if taskDO.GetBaseInfo().GetCreatedBy() != nil { +// createdBy = taskDO.GetBaseInfo().GetCreatedBy().GetUserID() +// } +// if taskDO.GetBaseInfo().GetUpdatedBy() != nil { +// updatedBy = taskDO.GetBaseInfo().GetUpdatedBy().GetUserID() +// } +// } +// var spanFilterDO *filter.SpanFilterFields +// if spanFilters != nil { +// spanFilterDO = spanFilters +// } else { +// spanFilterDO = taskDO.GetRule().GetSpanFilters() +// } +// +// return &entity.ObservabilityTask{ +// ID: taskDO.GetID(), +// WorkspaceID: taskDO.GetWorkspaceID(), +// Name: taskDO.GetName(), +// Description: ptr.Of(taskDO.GetDescription()), +// TaskType: taskDO.GetTaskType(), +// TaskStatus: taskDO.GetTaskStatus(), +// TaskDetail: ptr.Of(ToJSONString(ctx, taskDO.GetTaskDetail())), +// SpanFilter: SpanFilterDTO2PO(ctx, spanFilterDO), +// EffectiveTime: ptr.Of(ToJSONString(ctx, taskDO.GetRule().GetEffectiveTime())), +// Sampler: ptr.Of(ToJSONString(ctx, taskDO.GetRule().GetSampler())), +// TaskConfig: TaskConfigDTO2PO(ctx, taskDO.GetTaskConfig()), +// CreatedAt: time.Now(), +// UpdatedAt: time.Now(), +// CreatedBy: createdBy, +// UpdatedBy: updatedBy, +// BackfillEffectiveTime: ptr.Of(ToJSONString(ctx, taskDO.GetRule().GetBackfillEffectiveTime())), +// } +//} +//func SpanFilterDTO2PO(ctx context.Context, filters *filter.SpanFilterFields) *string { +// var filtersDO *loop_span.FilterFields +// if filters.GetFilters() != nil { +// filtersDO = convertor.FilterFieldsDTO2DO(filters.GetFilters()) +// } +// filterDO := entity.SpanFilter{ +// PlatformType: filters.GetPlatformType(), +// SpanListType: filters.GetSpanListType(), +// } +// if filtersDO != nil { +// filterDO.Filters = *filtersDO +// } +// +// return ptr.Of(ToJSONString(ctx, filterDO)) +//} +// +//func TaskConfigDTO2PO(ctx context.Context, taskConfig *task.TaskConfig) *string { +// if taskConfig == nil { +// return nil +// } +// var evalSetNames []string +// jspnPathMapping := make(map[string]string) +// for _, autoEvaluateConfig := range taskConfig.GetAutoEvaluateConfigs() { +// for _, mapping := range autoEvaluateConfig.GetFieldMappings() { +// jspnPath := fmt.Sprintf("%s.%s", mapping.TraceFieldKey, mapping.TraceFieldJsonpath) +// if _, exits := jspnPathMapping[jspnPath]; exits { +// mapping.EvalSetName = gptr.Of(jspnPathMapping[jspnPath]) +// continue +// } +// evalSetName := getLastPartAfterDot(jspnPath) +// for exists := slices.Contains(evalSetNames, evalSetName); exists; exists = slices.Contains(evalSetNames, evalSetName) { +// evalSetName += "_" +// } +// mapping.EvalSetName = gptr.Of(evalSetName) +// evalSetNames = append(evalSetNames, 
evalSetName) +// jspnPathMapping[jspnPath] = evalSetName +// } +// } +// +// return gptr.Of(ToJSONString(ctx, taskConfig)) +//} diff --git a/backend/modules/observability/application/convertor/task/task_copy.go b/backend/modules/observability/application/convertor/task/task_copy.go new file mode 100644 index 000000000..ea644484e --- /dev/null +++ b/backend/modules/observability/application/convertor/task/task_copy.go @@ -0,0 +1,627 @@ +package task + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/bytedance/gg/gptr" + "github.com/bytedance/sonic" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/dataset" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + entity_common "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/common" + obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" + "github.com/coze-dev/coze-loop/backend/pkg/lang/slices" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +func TaskDOs2DTOs(ctx context.Context, taskPOs []*entity.ObservabilityTask, userInfos map[string]*entity_common.UserInfo) []*task.Task { + var taskList []*task.Task + if len(taskPOs) == 0 { + return taskList + } + for _, v := range taskPOs { + taskDO := TaskDO2DTO(ctx, v, userInfos) + taskList = append(taskList, taskDO) + } + return taskList +} +func TaskDO2DTO(ctx context.Context, v *entity.ObservabilityTask, userMap map[string]*entity_common.UserInfo) *task.Task { + if v == nil { + return nil + } + var taskDetail *task.RunDetail + var totalCount, successCount, failedCount int64 + for _, tr := range v.TaskRuns { + trDO := TaskRunDO2DTO(ctx, tr, nil) + if trDO.RunDetail != nil { + totalCount += *trDO.RunDetail.TotalCount + successCount += *trDO.RunDetail.SuccessCount + failedCount += *trDO.RunDetail.FailedCount + } + } + taskDetail = &task.RunDetail{ + TotalCount: gptr.Of(totalCount), + SuccessCount: gptr.Of(successCount), + FailedCount: gptr.Of(failedCount), + } + taskInfo := &task.Task{ + ID: ptr.Of(v.ID), + Name: v.Name, + Description: v.Description, + WorkspaceID: ptr.Of(v.WorkspaceID), + TaskType: v.TaskType, + TaskStatus: ptr.Of(v.TaskStatus), + Rule: RuleDO2DTO(v.SpanFilter, v.EffectiveTime, v.Sampler, v.BackfillEffectiveTime), + TaskConfig: TaskConfigDO2DTO(v.TaskConfig), + TaskDetail: taskDetail, + BaseInfo: &common.BaseInfo{ + CreatedAt: gptr.Of(v.CreatedAt.UnixMilli()), + UpdatedAt: gptr.Of(v.UpdatedAt.UnixMilli()), + CreatedBy: UserInfoPO2DO(userMap[v.CreatedBy], v.CreatedBy), + UpdatedBy: UserInfoPO2DO(userMap[v.UpdatedBy], v.UpdatedBy), + }, + } + return taskInfo +} + +func TaskRunDO2DTO(ctx context.Context, v *entity.TaskRun, userMap map[string]*entity_common.UserInfo) *task.TaskRun { + if v == nil { + return nil + } + taskRunInfo := &task.TaskRun{ + ID: v.ID, + WorkspaceID: v.WorkspaceID, + TaskID: v.TaskID, + TaskType: v.TaskType, + RunStatus: v.RunStatus, + RunDetail: RunDetailDO2DTO(v.RunDetail), + BackfillRunDetail: BackfillRunDetailDO2DTO(v.BackfillDetail), + RunStartAt: v.RunStartAt.UnixMilli(), + RunEndAt: v.RunEndAt.UnixMilli(), + TaskRunConfig: 
TaskRunConfigDO2DTO(v.TaskRunConfig), + BaseInfo: buildTaskRunBaseInfo(v, userMap), + } + return taskRunInfo +} + +func TaskConfigDO2DTO(v *entity.TaskConfig) *task.TaskConfig { + if v == nil { + return nil + } + var autoEvaluateConfigs []*task.AutoEvaluateConfig + if len(v.AutoEvaluateConfigs) > 0 { + for _, config := range v.AutoEvaluateConfigs { + autoEvaluateConfigs = append(autoEvaluateConfigs, AutoEvaluateConfigDO2DTO(config)) + } + } + var dataReflowConfigs []*task.DataReflowConfig + if len(v.DataReflowConfig) > 0 { + for _, config := range v.DataReflowConfig { + dataReflowConfigs = append(dataReflowConfigs, DataReflowConfigDO2DTO(config)) + } + } + return &task.TaskConfig{ + AutoEvaluateConfigs: autoEvaluateConfigs, + DataReflowConfig: dataReflowConfigs, + } +} + +func AutoEvaluateConfigDO2DTO(v *entity.AutoEvaluateConfig) *task.AutoEvaluateConfig { + if v == nil { + return nil + } + var fieldMappings []*task.EvaluateFieldMapping + if len(v.FieldMappings) > 0 { + for _, config := range v.FieldMappings { + fieldMappings = append(fieldMappings, &task.EvaluateFieldMapping{ + FieldSchema: config.FieldSchema, + TraceFieldKey: config.TraceFieldKey, + TraceFieldJsonpath: config.TraceFieldJsonpath, + EvalSetName: config.EvalSetName, + }) + } + } + return &task.AutoEvaluateConfig{ + EvaluatorVersionID: v.EvaluatorVersionID, + EvaluatorID: v.EvaluatorID, + FieldMappings: fieldMappings, + } +} +func DataReflowConfigDO2DTO(v *entity.DataReflowConfig) *task.DataReflowConfig { + if v == nil { + return nil + } + var fieldMappings []*dataset.FieldMapping + if len(v.FieldMappings) > 0 { + for _, config := range v.FieldMappings { + fieldMappings = append(fieldMappings, ptr.Of(config)) + } + } + return &task.DataReflowConfig{ + DatasetID: v.DatasetID, + DatasetName: v.DatasetName, + DatasetSchema: ptr.Of(v.DatasetSchema), + FieldMappings: fieldMappings, + } +} + +func RuleDO2DTO(spanFilter *filter.SpanFilterFields, effectiveTime *entity.EffectiveTime, sampler *entity.Sampler, backfillEffectiveTime *entity.EffectiveTime) *task.Rule { + if spanFilter == nil { + return nil + } + return &task.Rule{ + SpanFilters: spanFilter, + Sampler: SamplerDO2DTO(sampler), + EffectiveTime: EffectiveTimeDO2DTO(effectiveTime), + BackfillEffectiveTime: EffectiveTimeDO2DTO(backfillEffectiveTime), + } +} +func SpanFilterPO2DO(ctx context.Context, spanFilter *string) *filter.SpanFilterFields { + if spanFilter == nil { + return nil + } + var spanFilterDO filter.SpanFilterFields + if err := sonic.Unmarshal([]byte(*spanFilter), &spanFilterDO); err != nil { + logs.CtxError(ctx, "SpanFilterPO2DO sonic.Unmarshal err:%v", err) + return nil + } + return &spanFilterDO +} + +func SamplerDO2DTO(sampler *entity.Sampler) *task.Sampler { + if sampler == nil { + return nil + } + return &task.Sampler{ + SampleRate: ptr.Of(sampler.SampleRate), + SampleSize: ptr.Of(sampler.SampleSize), + IsCycle: ptr.Of(sampler.IsCycle), + CycleCount: ptr.Of(sampler.CycleCount), + CycleInterval: ptr.Of(sampler.CycleInterval), + CycleTimeUnit: ptr.Of(sampler.CycleTimeUnit), + } +} + +func EffectiveTimeDO2DTO(effectiveTime *entity.EffectiveTime) *task.EffectiveTime { + if effectiveTime == nil { + return &task.EffectiveTime{ + StartAt: ptr.Of(int64(0)), + EndAt: ptr.Of(int64(0)), + } + } + return &task.EffectiveTime{ + StartAt: ptr.Of(effectiveTime.StartAt), + EndAt: ptr.Of(effectiveTime.EndAt), + } +} + +// RunDetailDO2DTO 将JSON字符串转换为RunDetail结构体 +func RunDetailDO2DTO(runDetail *entity.RunDetail) *task.RunDetail { + if runDetail == nil { + return nil + } + 
return &task.RunDetail{ + SuccessCount: ptr.Of(runDetail.SuccessCount), + FailedCount: ptr.Of(runDetail.FailedCount), + TotalCount: ptr.Of(runDetail.TotalCount), + } +} + +func BackfillRunDetailDO2DTO(backfillDetail *entity.BackfillDetail) *task.BackfillDetail { + if backfillDetail == nil { + return nil + } + return &task.BackfillDetail{ + SuccessCount: backfillDetail.SuccessCount, + FailedCount: backfillDetail.FailedCount, + TotalCount: backfillDetail.TotalCount, + BackfillStatus: backfillDetail.BackfillStatus, + LastSpanPageToken: backfillDetail.LastSpanPageToken, + } +} + +func TaskRunConfigDO2DTO(v *entity.TaskRunConfig) *task.TaskRunConfig { + if v == nil { + return nil + } + return &task.TaskRunConfig{ + AutoEvaluateRunConfig: AutoEvaluateRunConfigDO2DTO(v.AutoEvaluateRunConfig), + DataReflowRunConfig: DataReflowRunConfigDO2DTO(v.DataReflowRunConfig), + } +} + +func AutoEvaluateRunConfigDO2DTO(v *entity.AutoEvaluateRunConfig) *task.AutoEvaluateRunConfig { + if v == nil { + return nil + } + return &task.AutoEvaluateRunConfig{ + ExptID: v.ExptID, + ExptRunID: v.ExptRunID, + EvalID: v.EvalID, + SchemaID: v.SchemaID, + Schema: v.Schema, + EndAt: v.EndAt, + CycleStartAt: v.CycleStartAt, + CycleEndAt: v.CycleEndAt, + Status: v.Status, + } +} +func DataReflowRunConfigDO2DTO(v *entity.DataReflowRunConfig) *task.DataReflowRunConfig { + if v == nil { + return nil + } + return &task.DataReflowRunConfig{ + DatasetID: v.DatasetID, + DatasetRunID: v.DatasetRunID, + EndAt: v.EndAt, + CycleStartAt: v.CycleStartAt, + CycleEndAt: v.CycleEndAt, + Status: v.Status, + } +} + +func UserInfoPO2DO(userInfo *entity_common.UserInfo, userID string) *common.UserInfo { + if userInfo == nil { + return &common.UserInfo{ + UserID: gptr.Of(userID), + } + } + return &common.UserInfo{ + Name: ptr.Of(userInfo.Name), + EnName: ptr.Of(userInfo.EnName), + AvatarURL: ptr.Of(userInfo.AvatarURL), + AvatarThumb: ptr.Of(userInfo.AvatarThumb), + OpenID: ptr.Of(userInfo.OpenID), + UnionID: ptr.Of(userInfo.UnionID), + UserID: ptr.Of(userInfo.UserID), + Email: ptr.Of(userInfo.Email), + } +} + +func TaskDTO2DO(taskDTO *task.Task, userID string, spanFilters *filter.SpanFilterFields) *entity.ObservabilityTask { + if taskDTO == nil { + return nil + } + var createdBy, updatedBy string + if taskDTO.GetBaseInfo().GetCreatedBy() != nil { + createdBy = taskDTO.GetBaseInfo().GetCreatedBy().GetUserID() + } + if taskDTO.GetBaseInfo().GetUpdatedBy() != nil { + updatedBy = taskDTO.GetBaseInfo().GetUpdatedBy().GetUserID() + } + if userID != "" { + createdBy = userID + updatedBy = userID + } else { + if taskDTO.GetBaseInfo().GetCreatedBy() != nil { + createdBy = taskDTO.GetBaseInfo().GetCreatedBy().GetUserID() + } + if taskDTO.GetBaseInfo().GetUpdatedBy() != nil { + updatedBy = taskDTO.GetBaseInfo().GetUpdatedBy().GetUserID() + } + } + var spanFilterDO *filter.SpanFilterFields + if spanFilters != nil { + spanFilterDO = spanFilters + } else { + spanFilterDO = taskDTO.GetRule().GetSpanFilters() + } + + return &entity.ObservabilityTask{ + ID: taskDTO.GetID(), + WorkspaceID: taskDTO.GetWorkspaceID(), + Name: taskDTO.GetName(), + Description: ptr.Of(taskDTO.GetDescription()), + TaskType: taskDTO.GetTaskType(), + TaskStatus: taskDTO.GetTaskStatus(), + TaskDetail: RunDetailDTO2DO(taskDTO.GetTaskDetail()), + SpanFilter: spanFilterDO, + EffectiveTime: EffectiveTimeDTO2DO(taskDTO.GetRule().GetEffectiveTime()), + Sampler: SamplerDTO2DO(taskDTO.GetRule().GetSampler()), + TaskConfig: TaskConfigDTO2DO(taskDTO.GetTaskConfig()), + CreatedAt: time.Now(), + 
UpdatedAt: time.Now(), + CreatedBy: createdBy, + UpdatedBy: updatedBy, + BackfillEffectiveTime: EffectiveTimeDTO2DO(taskDTO.GetRule().GetBackfillEffectiveTime()), + } +} + +func RunDetailDTO2DO(runDetail *task.RunDetail) *entity.RunDetail { + if runDetail == nil { + return nil + } + return &entity.RunDetail{ + SuccessCount: *runDetail.SuccessCount, + FailedCount: *runDetail.FailedCount, + TotalCount: *runDetail.TotalCount, + } +} + +func EffectiveTimeDTO2DO(effectiveTime *task.EffectiveTime) *entity.EffectiveTime { + if effectiveTime == nil { + return nil + } + return &entity.EffectiveTime{ + StartAt: *effectiveTime.StartAt, + EndAt: *effectiveTime.EndAt, + } +} +func SamplerDTO2DO(sampler *task.Sampler) *entity.Sampler { + if sampler == nil { + return nil + } + return &entity.Sampler{ + SampleRate: sampler.GetSampleRate(), + SampleSize: sampler.GetSampleSize(), + IsCycle: sampler.GetIsCycle(), + CycleCount: sampler.GetCycleCount(), + CycleInterval: sampler.GetCycleInterval(), + CycleTimeUnit: sampler.GetCycleTimeUnit(), + } +} +func TaskConfigDTO2DO(taskConfig *task.TaskConfig) *entity.TaskConfig { + if taskConfig == nil { + return nil + } + autoEvaluateConfigs := make([]*entity.AutoEvaluateConfig, 0, len(taskConfig.AutoEvaluateConfigs)) + for _, autoEvaluateConfig := range taskConfig.AutoEvaluateConfigs { + var fieldMappings []*entity.EvaluateFieldMapping + if len(autoEvaluateConfig.FieldMappings) > 0 { + var evalSetNames []string + jspnPathMapping := make(map[string]string) + for _, config := range autoEvaluateConfig.FieldMappings { + var evalSetName string + jspnPath := fmt.Sprintf("%s.%s", config.TraceFieldKey, config.TraceFieldJsonpath) + if _, exits := jspnPathMapping[jspnPath]; exits { + evalSetName = jspnPathMapping[jspnPath] + } else { + evalSetName = getLastPartAfterDot(jspnPath) + for exists := slices.Contains(evalSetNames, evalSetName); exists; exists = slices.Contains(evalSetNames, evalSetName) { + evalSetName += "_" + } + } + evalSetNames = append(evalSetNames, evalSetName) + jspnPathMapping[jspnPath] = evalSetName + fieldMappings = append(fieldMappings, &entity.EvaluateFieldMapping{ + FieldSchema: config.FieldSchema, + TraceFieldKey: config.TraceFieldKey, + TraceFieldJsonpath: config.TraceFieldJsonpath, + EvalSetName: ptr.Of(evalSetName), + }) + } + + } + autoEvaluateConfigs = append(autoEvaluateConfigs, &entity.AutoEvaluateConfig{ + EvaluatorVersionID: autoEvaluateConfig.EvaluatorVersionID, + EvaluatorID: autoEvaluateConfig.EvaluatorID, + FieldMappings: fieldMappings, + }) + } + dataReflowConfigs := make([]*entity.DataReflowConfig, 0, len(taskConfig.DataReflowConfig)) + for _, dataReflowConfig := range taskConfig.DataReflowConfig { + var fieldMappings []dataset.FieldMapping + if len(dataReflowConfig.FieldMappings) > 0 { + for _, config := range dataReflowConfig.FieldMappings { + fieldMappings = append(fieldMappings, dataset.FieldMapping{ + FieldSchema: config.FieldSchema, + TraceFieldKey: config.TraceFieldKey, + TraceFieldJsonpath: config.TraceFieldJsonpath, + }) + } + } + dataReflowConfigs = append(dataReflowConfigs, &entity.DataReflowConfig{ + DatasetID: dataReflowConfig.DatasetID, + DatasetName: dataReflowConfig.DatasetName, + DatasetSchema: *dataReflowConfig.DatasetSchema, + FieldMappings: fieldMappings, + }) + } + return &entity.TaskConfig{ + AutoEvaluateConfigs: autoEvaluateConfigs, + DataReflowConfig: dataReflowConfigs, + } +} +func TaskRunDTO2DO(taskRun *task.TaskRun) *entity.TaskRun { + if taskRun == nil { + return nil + } + return &entity.TaskRun{ + ID: 
taskRun.ID, + TaskID: taskRun.TaskID, + WorkspaceID: taskRun.WorkspaceID, + TaskType: taskRun.TaskType, + RunStatus: taskRun.RunStatus, + RunDetail: RunDetailDTO2DO(taskRun.RunDetail), + BackfillDetail: BackfillRunDetailDTO2DO(taskRun.BackfillRunDetail), + RunStartAt: time.UnixMilli(taskRun.RunStartAt), + RunEndAt: time.UnixMilli(taskRun.RunEndAt), + TaskRunConfig: TaskRunConfigDTO2DO(taskRun.TaskRunConfig), + CreatedAt: time.UnixMilli(taskRun.GetBaseInfo().GetCreatedAt()), + UpdatedAt: time.UnixMilli(taskRun.GetBaseInfo().GetUpdatedAt()), + } + +} + +func TaskRunConfigDTO2DO(v *task.TaskRunConfig) *entity.TaskRunConfig { + if v == nil { + return nil + } + var autoEvaluateRunConfig *entity.AutoEvaluateRunConfig + if v.GetAutoEvaluateRunConfig() != nil { + autoEvaluateRunConfig = &entity.AutoEvaluateRunConfig{ + ExptID: v.GetAutoEvaluateRunConfig().GetExptID(), + ExptRunID: v.GetAutoEvaluateRunConfig().GetExptRunID(), + EvalID: v.GetAutoEvaluateRunConfig().GetEvalID(), + SchemaID: v.GetAutoEvaluateRunConfig().GetSchemaID(), + Schema: v.GetAutoEvaluateRunConfig().Schema, + EndAt: v.GetAutoEvaluateRunConfig().GetEndAt(), + CycleStartAt: v.GetAutoEvaluateRunConfig().GetCycleStartAt(), + CycleEndAt: v.GetAutoEvaluateRunConfig().GetCycleEndAt(), + Status: v.GetAutoEvaluateRunConfig().GetStatus(), + } + } + var dataReflowRunConfig *entity.DataReflowRunConfig + if v.GetDataReflowRunConfig() != nil { + dataReflowRunConfig = &entity.DataReflowRunConfig{ + DatasetID: v.GetDataReflowRunConfig().GetDatasetID(), + DatasetRunID: v.GetDataReflowRunConfig().GetDatasetRunID(), + EndAt: v.GetDataReflowRunConfig().GetEndAt(), + CycleStartAt: v.GetDataReflowRunConfig().GetCycleStartAt(), + CycleEndAt: v.GetDataReflowRunConfig().GetCycleEndAt(), + Status: v.GetDataReflowRunConfig().GetStatus(), + } + } + return &entity.TaskRunConfig{ + AutoEvaluateRunConfig: autoEvaluateRunConfig, + DataReflowRunConfig: dataReflowRunConfig, + } +} + +func BackfillRunDetailDTO2DO(v *task.BackfillDetail) *entity.BackfillDetail { + if v == nil { + return nil + } + return &entity.BackfillDetail{ + SuccessCount: v.SuccessCount, + FailedCount: v.FailedCount, + TotalCount: v.TotalCount, + BackfillStatus: v.BackfillStatus, + LastSpanPageToken: v.LastSpanPageToken, + } +} + +func CheckEffectiveTime(ctx context.Context, effectiveTime *task.EffectiveTime, taskStatus task.TaskStatus, effectiveTimeDO *entity.EffectiveTime) (*entity.EffectiveTime, error) { + if effectiveTimeDO == nil { + logs.CtxError(ctx, "EffectiveTimePO2DO error") + return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("effective time is nil")) + } + var validEffectiveTime entity.EffectiveTime + // The start time must not be later than the end time + if effectiveTime.GetStartAt() >= effectiveTime.GetEndAt() { + logs.CtxError(ctx, "Start time must be less than end time") + return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("start time must be less than end time")) + } + // Neither the start time nor the end time may be earlier than the current time + if effectiveTimeDO.StartAt != effectiveTime.GetStartAt() && effectiveTime.GetStartAt() < time.Now().UnixMilli() { + logs.CtxError(ctx, "updated start time must be greater than current time") + return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("start time must be greater than current time")) + } + if effectiveTimeDO.EndAt != effectiveTime.GetEndAt() && effectiveTime.GetEndAt() < time.Now().UnixMilli() { + logs.CtxError(ctx, "updated end time must be greater than current time") + return nil,
errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("end time must be greater than current time")) + } + validEffectiveTime.StartAt = effectiveTimeDO.StartAt + validEffectiveTime.EndAt = effectiveTimeDO.EndAt + switch taskStatus { + case task.TaskStatusUnstarted: + if validEffectiveTime.StartAt != 0 { + validEffectiveTime.StartAt = *effectiveTime.StartAt + } + if validEffectiveTime.EndAt != 0 { + validEffectiveTime.EndAt = *effectiveTime.EndAt + } + case task.TaskStatusRunning, task.TaskStatusPending: + if validEffectiveTime.EndAt != 0 { + validEffectiveTime.EndAt = *effectiveTime.EndAt + } + default: + logs.CtxError(ctx, "Invalid task status:%s", taskStatus) + return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid task status")) + } + return &validEffectiveTime, nil +} + +func CheckTaskStatus(ctx context.Context, taskStatus task.TaskStatus, currentTaskStatus task.TaskStatus) (task.TaskStatus, error) { + var validTaskStatus task.TaskStatus + // [0530] TODO: validate the task status transition + switch taskStatus { + case task.TaskStatusUnstarted: + if currentTaskStatus == task.TaskStatusUnstarted { + validTaskStatus = taskStatus + } else { + logs.CtxError(ctx, "Invalid task status:%s", taskStatus) + return "", errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid task status")) + } + case task.TaskStatusRunning: + if currentTaskStatus == task.TaskStatusUnstarted || currentTaskStatus == task.TaskStatusPending { + validTaskStatus = taskStatus + } else { + logs.CtxError(ctx, "Invalid task status:%s,currentTaskStatus:%s", taskStatus, currentTaskStatus) + return "", errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid task status")) + } + case task.TaskStatusPending: + if currentTaskStatus == task.TaskStatusRunning { + validTaskStatus = task.TaskStatusPending + } + case task.TaskStatusDisabled: + if currentTaskStatus == task.TaskStatusUnstarted || currentTaskStatus == task.TaskStatusPending { + validTaskStatus = task.TaskStatusDisabled + } + case task.TaskStatusSuccess: + if currentTaskStatus != task.TaskStatusSuccess { + validTaskStatus = task.TaskStatusSuccess + } + } + + return validTaskStatus, nil +} + +func getLastPartAfterDot(s string) string { + s = strings.TrimRight(s, ".") + lastDotIndex := strings.LastIndex(s, ".") + if lastDotIndex == -1 { + lastPart := s + return processBracket(lastPart) + } + lastPart := s[lastDotIndex+1:] + return processBracket(lastPart) +} + +// processBracket rewrites a bracketed index such as "a[0]" into the underscore-joined form "a_0" +func processBracket(s string) string { + openBracketIndex := strings.Index(s, "[") + if openBracketIndex == -1 { + return s + } + closeBracketIndex := strings.Index(s, "]") + if closeBracketIndex == -1 { + return s + } + base := s[:openBracketIndex] + index := s[openBracketIndex+1 : closeBracketIndex] + return base + "_" + index +} + +// ToJSONString marshals an object into a JSON string; it returns an empty string on error +func ToJSONString(ctx context.Context, obj interface{}) string { + if obj == nil { + return "" + } + jsonData, err := sonic.Marshal(obj) + if err != nil { + logs.CtxError(ctx, "JSON marshal error: %v", err) + return "" + } + jsonStr := string(jsonData) + return jsonStr +} + +// buildTaskRunBaseInfo builds the BaseInfo for a task run +func buildTaskRunBaseInfo(v *entity.TaskRun, userMap map[string]*entity_common.UserInfo) *common.BaseInfo { + // Note: the TaskRun entity has no CreatedBy or UpdatedBy fields, + // so empty strings are used as defaults + return &common.BaseInfo{ + CreatedAt: gptr.Of(v.CreatedAt.UnixMilli()), + UpdatedAt:
gptr.Of(v.UpdatedAt.UnixMilli()), + CreatedBy: &common.UserInfo{UserID: gptr.Of("")}, + UpdatedBy: &common.UserInfo{UserID: gptr.Of("")}, + } +} diff --git a/backend/modules/observability/application/convertor/task/task_run.go b/backend/modules/observability/application/convertor/task/task_run.go new file mode 100644 index 000000000..bfc7f40e9 --- /dev/null +++ b/backend/modules/observability/application/convertor/task/task_run.go @@ -0,0 +1,135 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package task + +// func TaskRunPOs2DOs(ctx context.Context, taskRunPOs []*entity.TaskRun, userInfos map[string]*entity_common.UserInfo) []*task.TaskRun { +// var taskRunList []*task.TaskRun +// if len(taskRunPOs) == 0 { +// return taskRunList +// } +// for _, v := range taskRunPOs { +// taskRunDO := TaskRunPO2DTO(ctx, v, userInfos) +// taskRunList = append(taskRunList, taskRunDO) +// } +// return taskRunList +// } +// +// func TaskRunPO2DTO(ctx context.Context, v *entity.TaskRun, userMap map[string]*entity_common.UserInfo) *task.TaskRun { +// if v == nil { +// return nil +// } +// taskRunInfo := &task.TaskRun{ +// ID: v.ID, +// WorkspaceID: v.WorkspaceID, +// TaskID: v.TaskID, +// TaskType: v.TaskType, +// RunStatus: v.RunStatus, +// RunDetail: RunDetailPO2DTO(ctx, v.RunDetail), +// BackfillRunDetail: BackfillRunDetailPO2DTO(ctx, v.BackfillDetail), +// RunStartAt: v.RunStartAt.UnixMilli(), +// RunEndAt: v.RunEndAt.UnixMilli(), +// TaskRunConfig: TaskRunConfigPO2DTO(ctx, v.RunConfig), +// BaseInfo: buildTaskRunBaseInfo(v, userMap), +// } +// return taskRunInfo +// } +//func TaskRunDO2PO(ctx context.Context, v *task.TaskRun, userMap map[string]*entity_common.UserInfo) *entity.TaskRun { +// if v == nil { +// return nil +// } +// taskRunPO := &entity.TaskRun{ +// ID: v.ID, +// WorkspaceID: v.WorkspaceID, +// TaskID: v.TaskID, +// TaskType: v.TaskType, +// RunStatus: v.RunStatus, +// RunDetail: RunDetailDTO2PO(ctx, v.RunDetail), +// BackfillDetail: BackfillRunDetailDTO2PO(ctx, v.BackfillRunDetail), +// RunStartAt: time.UnixMilli(v.RunStartAt), +// RunEndAt: time.UnixMilli(v.RunEndAt), +// RunConfig: TaskRunConfigDTO2PO(ctx, v.TaskRunConfig), +// } +// return taskRunPO +//} +// +//func RunDetailDTO2PO(ctx context.Context, v *task.RunDetail) *string { +// if v == nil { +// return nil +// } +// runDetailJSON, err := sonic.MarshalString(v) +// if err != nil { +// logs.CtxError(ctx, "RunDetailDTO2PO sonic.MarshalString err:%v", err) +// return nil +// } +// return gptr.Of(runDetailJSON) +//} +// +//func BackfillRunDetailDTO2PO(ctx context.Context, v *task.BackfillDetail) *string { +// if v == nil { +// return nil +// } +// backfillDetailJSON, err := sonic.MarshalString(v) +// if err != nil { +// logs.CtxError(ctx, "BackfillRunDetailDTO2PO sonic.MarshalString err:%v", err) +// return nil +// } +// return gptr.Of(backfillDetailJSON) +//} +// +//func TaskRunConfigDTO2PO(ctx context.Context, v *task.TaskRunConfig) *string { +// if v == nil { +// return nil +// } +// taskRunConfigJSON, err := sonic.MarshalString(v) +// if err != nil { +// logs.CtxError(ctx, "TaskRunConfigDTO2PO sonic.MarshalString err:%v", err) +// return nil +// } +// return gptr.Of(taskRunConfigJSON) +//} +// +//// RunDetailPO2DTO 将JSON字符串转换为RunDetail结构体 +//func RunDetailPO2DTO(ctx context.Context, runDetail *string) *task.RunDetail { +// if runDetail == nil || *runDetail == "" { +// return nil +// } +// +// var runDetailDTO task.RunDetail +// if err := sonic.Unmarshal([]byte(*runDetail), &runDetailDTO); err 
!= nil { +// logs.CtxError(ctx, "RunDetailPO2DTO sonic.Unmarshal err:%v", err) +// return nil +// } +// +// return &runDetailDTO +//} +// +//// RunDetailPO2DTO 将JSON字符串转换为RunDetail结构体 +//func BackfillRunDetailPO2DTO(ctx context.Context, runDetail *string) *task.BackfillDetail { +// if runDetail == nil || *runDetail == "" { +// return nil +// } +// +// var runDetailDTO task.BackfillDetail +// if err := sonic.Unmarshal([]byte(*runDetail), &runDetailDTO); err != nil { +// logs.CtxError(ctx, "RunDetailPO2DTO sonic.Unmarshal err:%v", err) +// return nil +// } +// +// return &runDetailDTO +//} +// +//// TaskRunConfigPO2DTO 将JSON字符串转换为TaskRunConfig结构体 +//func TaskRunConfigPO2DTO(ctx context.Context, runConfig *string) *task.TaskRunConfig { +// if runConfig == nil || *runConfig == "" { +// return nil +// } +// +// var runConfigDTO task.TaskRunConfig +// if err := sonic.Unmarshal([]byte(*runConfig), &runConfigDTO); err != nil { +// logs.CtxError(ctx, "TaskRunConfigPO2DTO sonic.Unmarshal err:%v", err) +// return nil +// } +// +// return &runConfigDTO +//} diff --git a/backend/modules/observability/application/convertor/trace/trace_export.go b/backend/modules/observability/application/convertor/trace/trace_export.go index 4253f415d..f2f42f597 100755 --- a/backend/modules/observability/application/convertor/trace/trace_export.go +++ b/backend/modules/observability/application/convertor/trace/trace_export.go @@ -49,7 +49,7 @@ func ExportRequestDTO2DO(req *trace.ExportTracesToDatasetRequest) *service.Expor // 转换字段映射 if req.IsSetFieldMappings() { - result.FieldMappings = convertFieldMappingsDTO2DO(req.GetFieldMappings()) + result.FieldMappings = ConvertFieldMappingsDTO2DO(req.GetFieldMappings()) } return result @@ -107,7 +107,7 @@ func PreviewRequestDTO2DO(req *trace.PreviewExportTracesToDatasetRequest) *servi // 转换字段映射 if req.IsSetFieldMappings() { - result.FieldMappings = convertFieldMappingsDTO2DO(req.GetFieldMappings()) + result.FieldMappings = ConvertFieldMappingsDTO2DO(req.GetFieldMappings()) } return result @@ -186,8 +186,8 @@ func convertDatasetSchemaDTO2DO(schema *dataset0.DatasetSchema) entity.DatasetSc return result } -// convertFieldMappingsDTO2DO 转换字段映射 -func convertFieldMappingsDTO2DO(mappings []*dataset0.FieldMapping) []entity.FieldMapping { +// ConvertFieldMappingsDTO2DO 转换字段映射 +func ConvertFieldMappingsDTO2DO(mappings []*dataset0.FieldMapping) []entity.FieldMapping { if len(mappings) == 0 { return nil } diff --git a/backend/modules/observability/application/convertor/trace/trace_export_test.go b/backend/modules/observability/application/convertor/trace/trace_export_test.go index b72503610..72aa80ca4 100755 --- a/backend/modules/observability/application/convertor/trace/trace_export_test.go +++ b/backend/modules/observability/application/convertor/trace/trace_export_test.go @@ -589,7 +589,7 @@ func TestConvertFieldMappingsDTO2DO(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := convertFieldMappingsDTO2DO(tt.mappings) + got := ConvertFieldMappingsDTO2DO(tt.mappings) assert.Equal(t, tt.want, got) }) } diff --git a/backend/modules/observability/application/task.go b/backend/modules/observability/application/task.go new file mode 100644 index 000000000..4d87c837f --- /dev/null +++ b/backend/modules/observability/application/task.go @@ -0,0 +1,335 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package application + +import ( + "context" + "strconv" + "time" + + 
"github.com/coze-dev/coze-loop/backend/infra/middleware/session" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" + domain_task "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor" + tconv "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service" + task_processor "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/processor" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/tracehub" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" + trace_Svc "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/service" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/service/trace/span_filter" + obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" +) + +type ITaskQueueConsumer interface { + SpanTrigger(ctx context.Context, event *entity.RawSpan) error + CallBack(ctx context.Context, event *entity.AutoEvalEvent) error + Correction(ctx context.Context, event *entity.CorrectionEvent) error + BackFill(ctx context.Context, event *entity.BackFillEvent) error +} +type ITaskApplication interface { + task.TaskService + ITaskQueueConsumer +} + +func NewTaskApplication( + taskService service.ITaskService, + authService rpc.IAuthProvider, + evalService rpc.IEvaluatorRPCAdapter, + evaluationService rpc.IEvaluationRPCAdapter, + userService rpc.IUserProvider, + tracehubSvc tracehub.ITraceHubService, + taskProcessor task_processor.TaskProcessor, + buildHelper trace_Svc.TraceFilterProcessorBuilder, +) (ITaskApplication, error) { + return &TaskApplication{ + taskSvc: taskService, + authSvc: authService, + evalSvc: evalService, + evaluationSvc: evaluationService, + userSvc: userService, + tracehubSvc: tracehubSvc, + taskProcessor: taskProcessor, + buildHelper: buildHelper, + }, nil +} + +type TaskApplication struct { + taskSvc service.ITaskService + authSvc rpc.IAuthProvider + evalSvc rpc.IEvaluatorRPCAdapter + evaluationSvc rpc.IEvaluationRPCAdapter + userSvc rpc.IUserProvider + tracehubSvc tracehub.ITraceHubService + taskProcessor task_processor.TaskProcessor + buildHelper trace_Svc.TraceFilterProcessorBuilder +} + +func (t *TaskApplication) CheckTaskName(ctx context.Context, req *task.CheckTaskNameRequest) (*task.CheckTaskNameResponse, error) { + resp := task.NewCheckTaskNameResponse() + if req == nil { + return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("no request provided")) + } else if req.GetWorkspaceID() <= 0 { + return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid workspace_id")) + } + appID := session.AppIDInCtxOrEmpty(ctx) + var action string + if appID == 717152 { + action = rpc.AuthActionTraceTaskList + } 
else { + action = rpc.AuthActionTaskReadable + } + if err := t.authSvc.CheckWorkspacePermission(ctx, + action, + strconv.FormatInt(req.GetWorkspaceID(), 10), + false); err != nil { + return nil, err + } + sResp, err := t.taskSvc.CheckTaskName(ctx, &service.CheckTaskNameReq{ + WorkspaceID: req.GetWorkspaceID(), + Name: req.GetName(), + }) + if err != nil { + return resp, err + } + + return &task.CheckTaskNameResponse{ + Pass: sResp.Pass, + }, nil +} +func (t *TaskApplication) CreateTask(ctx context.Context, req *task.CreateTaskRequest) (*task.CreateTaskResponse, error) { + resp := task.NewCreateTaskResponse() + if err := t.validateCreateTaskReq(ctx, req); err != nil { + return resp, err + } + appID := session.AppIDInCtxOrEmpty(ctx) + var action string + if appID == 717152 { + action = rpc.AuthActionTraceTaskCreate + } else { + action = rpc.AuthActionTaskWritable + } + if err := t.authSvc.CheckWorkspacePermission(ctx, + action, + strconv.FormatInt(req.GetTask().GetWorkspaceID(), 10), + false); err != nil { + return resp, err + } + + userID := session.UserIDInCtxOrEmpty(ctx) + if userID == "" { + return nil, errorx.NewByCode(obErrorx.UserParseFailedCode) + } + // 创建task + req.Task.TaskStatus = ptr.Of(domain_task.TaskStatusUnstarted) + spanFilers, err := t.buildSpanFilters(ctx, req.Task.GetRule().GetSpanFilters(), req.GetTask().GetWorkspaceID()) + if err != nil { + return nil, err + } + sResp, err := t.taskSvc.CreateTask(ctx, &service.CreateTaskReq{Task: tconv.TaskDTO2DO(req.GetTask(), userID, spanFilers)}) + if err != nil { + return resp, err + } + + return &task.CreateTaskResponse{TaskID: sResp.TaskID}, nil +} +func (t *TaskApplication) buildSpanFilters(ctx context.Context, spanFilterFields *filter.SpanFilterFields, workspaceID int64) (*filter.SpanFilterFields, error) { + + switch spanFilterFields.GetPlatformType() { + case common.PlatformTypeCozeBot, common.PlatformTypeProject, common.PlatformTypeWorkflow, common.PlatformTypeInnerCozeBot: + platformFilter, err := t.buildHelper.BuildPlatformRelatedFilter(ctx, loop_span.PlatformType(spanFilterFields.GetPlatformType())) + if err != nil { + return nil, err + } + env := &span_filter.SpanEnv{ + WorkspaceID: workspaceID, + } + basicFilter, forceQuery, err := platformFilter.BuildBasicSpanFilter(ctx, env) + if err != nil { + return nil, err + } else if len(basicFilter) == 0 && !forceQuery { // if it's null, no need to query from ck + return nil, nil + } + basicFilterFields := &loop_span.FilterFields{ + QueryAndOr: ptr.Of(loop_span.QueryAndOrEnumAnd), + FilterFields: basicFilter, + } + filters := combineFilters(convertor.FilterFieldsDO2DTO(basicFilterFields), spanFilterFields.Filters) + return &filter.SpanFilterFields{ + Filters: filters, + PlatformType: spanFilterFields.PlatformType, + SpanListType: spanFilterFields.SpanListType, + }, nil + default: + return spanFilterFields, nil + } +} + +func combineFilters(filters ...*filter.FilterFields) *filter.FilterFields { + filterAggr := &filter.FilterFields{ + QueryAndOr: ptr.Of(filter.QueryRelationAnd), + } + for _, f := range filters { + if f == nil { + continue + } + filterAggr.FilterFields = append(filterAggr.FilterFields, &filter.FilterField{ + QueryAndOr: ptr.Of(filter.QueryRelationAnd), + SubFilter: f, + }) + } + return filterAggr +} + +func (t *TaskApplication) validateCreateTaskReq(ctx context.Context, req *task.CreateTaskRequest) error { + // 参数验证 + if req == nil || req.GetTask() == nil { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("no request 
provided")) + } else if req.GetTask().GetWorkspaceID() <= 0 { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid workspace_id")) + } else if req.GetTask().GetName() == "" { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid task_name")) + } + if req.GetTask().GetRule() != nil && req.GetTask().GetRule().GetEffectiveTime() != nil { + startAt := req.GetTask().GetRule().GetEffectiveTime().GetStartAt() + endAt := req.GetTask().GetRule().GetEffectiveTime().GetEndAt() + if startAt <= time.Now().Add(-10*time.Minute).UnixMilli() { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("The start time must be no earlier than 10 minutes ago.")) + } + if startAt >= endAt { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("The start time must be earlier than the end time.")) + } + } + + return nil +} +func (t *TaskApplication) UpdateTask(ctx context.Context, req *task.UpdateTaskRequest) (*task.UpdateTaskResponse, error) { + resp := task.NewUpdateTaskResponse() + if req == nil { + return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("no request provided")) + } else if req.GetWorkspaceID() <= 0 { + return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid workspace_id")) + } + appID := session.AppIDInCtxOrEmpty(ctx) + var action string + if appID == 717152 { + action = rpc.AuthActionTraceTaskEdit + } else { + action = rpc.AuthActionTaskWritable + } + if err := t.authSvc.CheckTaskPermission(ctx, + action, + strconv.FormatInt(req.GetWorkspaceID(), 10), + strconv.FormatInt(req.GetTaskID(), 10)); err != nil { + return nil, err + } + err := t.taskSvc.UpdateTask(ctx, &service.UpdateTaskReq{ + TaskID: req.GetTaskID(), + WorkspaceID: req.GetWorkspaceID(), + TaskStatus: req.TaskStatus, + Description: req.Description, + EffectiveTime: req.EffectiveTime, + SampleRate: req.SampleRate, + }) + if err != nil { + return resp, err + } + + return resp, nil +} +func (t *TaskApplication) ListTasks(ctx context.Context, req *task.ListTasksRequest) (*task.ListTasksResponse, error) { + resp := task.NewListTasksResponse() + if req == nil { + return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("no request provided")) + } else if req.GetWorkspaceID() <= 0 { + return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid workspace_id")) + } + appID := session.AppIDInCtxOrEmpty(ctx) + var action string + if appID == 717152 { + action = rpc.AuthActionTraceTaskList + } else { + action = rpc.AuthActionTaskReadable + } + if err := t.authSvc.CheckWorkspacePermission(ctx, + action, + strconv.FormatInt(req.GetWorkspaceID(), 10), + false); err != nil { + return resp, err + } + sResp, err := t.taskSvc.ListTasks(ctx, &service.ListTasksReq{ + WorkspaceID: req.GetWorkspaceID(), + TaskFilters: req.GetTaskFilters(), + Limit: req.GetLimit(), + Offset: req.GetOffset(), + OrderBy: req.GetOrderBy(), + }) + if err != nil { + return resp, err + } + if sResp == nil { + return resp, nil + } + return &task.ListTasksResponse{ + Tasks: sResp.Tasks, + Total: sResp.Total, + }, nil +} +func (t *TaskApplication) GetTask(ctx context.Context, req *task.GetTaskRequest) (*task.GetTaskResponse, error) { + resp := task.NewGetTaskResponse() + if req == nil { + return resp, 
errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("no request provided")) + } else if req.GetWorkspaceID() <= 0 { + return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid workspace_id")) + } + appID := session.AppIDInCtxOrEmpty(ctx) + var action string + if appID == 717152 { + action = rpc.AuthActionTraceTaskList + } else { + action = rpc.AuthActionTaskReadable + } + if err := t.authSvc.CheckWorkspacePermission(ctx, + action, + strconv.FormatInt(req.GetWorkspaceID(), 10), + false); err != nil { + return resp, err + } + sResp, err := t.taskSvc.GetTask(ctx, &service.GetTaskReq{ + TaskID: req.GetTaskID(), + WorkspaceID: req.GetWorkspaceID(), + }) + if err != nil { + return resp, err + } + if sResp == nil { + return resp, nil + } + + return &task.GetTaskResponse{ + Task: sResp.Task, + }, nil +} + +func (t *TaskApplication) SpanTrigger(ctx context.Context, event *entity.RawSpan) error { + return t.tracehubSvc.SpanTrigger(ctx, event) +} + +func (t *TaskApplication) CallBack(ctx context.Context, event *entity.AutoEvalEvent) error { + return t.tracehubSvc.CallBack(ctx, event) +} + +func (t *TaskApplication) Correction(ctx context.Context, event *entity.CorrectionEvent) error { + return t.tracehubSvc.Correction(ctx, event) +} + +func (t *TaskApplication) BackFill(ctx context.Context, event *entity.BackFillEvent) error { + return t.tracehubSvc.BackFill(ctx, event) +} diff --git a/backend/modules/observability/application/task_test.go b/backend/modules/observability/application/task_test.go new file mode 100755 index 000000000..0f1b51851 --- /dev/null +++ b/backend/modules/observability/application/task_test.go @@ -0,0 +1,818 @@ +package application + +import ( + "context" + "errors" + "strconv" + "testing" + "time" + + "github.com/bytedance/gg/gptr" + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + + "github.com/coze-dev/coze-loop/backend/infra/middleware/session" + taskdto "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + taskapi "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc" + rpcmock "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc/mocks" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + svc "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service" + svcmock "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/mocks" + tracehubmock "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/tracehub/mocks" + obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" +) + +func ctxWithAppID(appID int32) context.Context { + return session.WithCtxUser(context.Background(), &session.User{ID: "uid", AppID: appID}) +} + +func assertErrorCode(t *testing.T, err error, code int32) { + t.Helper() + statusErr, ok := errorx.FromStatusError(err) + if !assert.True(t, ok, "error should be StatusError") { + return + } + assert.Equal(t, code, statusErr.Code()) +} + +func TestTaskApplication_CheckTaskName(t *testing.T) { + t.Parallel() + tests := []struct { + name string + fieldsBuilder func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) + ctx context.Context + req *taskapi.CheckTaskNameRequest + expectResp 
*taskapi.CheckTaskNameResponse + expectErr error + expectErrCode int32 + }{ + { + name: "nil request", + ctx: context.Background(), + req: nil, + expectResp: taskapi.NewCheckTaskNameResponse(), + expectErrCode: obErrorx.CommercialCommonInvalidParamCodeCode, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + return nil, nil + }, + }, + { + name: "invalid workspace", + ctx: context.Background(), + req: &taskapi.CheckTaskNameRequest{ + WorkspaceID: 0, + }, + expectResp: taskapi.NewCheckTaskNameResponse(), + expectErrCode: obErrorx.CommercialCommonInvalidParamCodeCode, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + return nil, nil + }, + }, + { + name: "auth error with trace app id", + ctx: ctxWithAppID(717152), + req: &taskapi.CheckTaskNameRequest{ + WorkspaceID: 101, + Name: "task", + }, + expectErr: errors.New("auth error"), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTraceTaskList, strconv.FormatInt(101, 10)).Return(errors.New("auth error")) + return nil, auth + }, + }, + { + name: "service error", + ctx: context.Background(), + req: &taskapi.CheckTaskNameRequest{ + WorkspaceID: 201, + Name: "dup", + }, + expectResp: taskapi.NewCheckTaskNameResponse(), + expectErr: errors.New("service error"), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTaskReadable, strconv.FormatInt(201, 10)).Return(nil) + s := svcmock.NewMockITaskService(ctrl) + s.EXPECT().CheckTaskName(gomock.Any(), &svc.CheckTaskNameReq{WorkspaceID: 201, Name: "dup"}).Return(nil, errors.New("service error")) + return s, auth + }, + }, + { + name: "pass true", + ctx: context.Background(), + req: &taskapi.CheckTaskNameRequest{ + WorkspaceID: 301, + Name: "ok", + }, + expectResp: &taskapi.CheckTaskNameResponse{Pass: gptr.Of(true)}, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTaskReadable, strconv.FormatInt(301, 10)).Return(nil) + s := svcmock.NewMockITaskService(ctrl) + s.EXPECT().CheckTaskName(gomock.Any(), &svc.CheckTaskNameReq{WorkspaceID: 301, Name: "ok"}).Return(&svc.CheckTaskNameResp{Pass: gptr.Of(true)}, nil) + return s, auth + }, + }, + { + name: "pass false with trace app id", + ctx: ctxWithAppID(717152), + req: &taskapi.CheckTaskNameRequest{ + WorkspaceID: 401, + Name: "dup", + }, + expectResp: &taskapi.CheckTaskNameResponse{Pass: gptr.Of(false)}, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTraceTaskList, strconv.FormatInt(401, 10)).Return(nil) + s := svcmock.NewMockITaskService(ctrl) + s.EXPECT().CheckTaskName(gomock.Any(), &svc.CheckTaskNameReq{WorkspaceID: 401, Name: "dup"}).Return(&svc.CheckTaskNameResp{Pass: gptr.Of(false)}, nil) + return s, auth + }, + }, + } + + for _, tt := range tests { + caseItem := tt + t.Run(caseItem.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskSvc, authSvc := caseItem.fieldsBuilder(ctrl) + app := &TaskApplication{ + taskSvc: taskSvc, + authSvc: 
authSvc, + } + resp, err := app.CheckTaskName(caseItem.ctx, caseItem.req) + + if caseItem.expectErr != nil { + assert.EqualError(t, err, caseItem.expectErr.Error()) + } else if caseItem.expectErrCode != 0 { + assert.Error(t, err) + assertErrorCode(t, err, caseItem.expectErrCode) + } else { + assert.NoError(t, err) + } + + if caseItem.expectResp != nil { + assert.Equal(t, caseItem.expectResp, resp) + } else { + assert.Nil(t, resp) + } + }) + } +} + +func TestTaskApplication_CreateTask(t *testing.T) { + t.Parallel() + + newValidTask := func() *taskdto.Task { + return &taskdto.Task{ + Name: "task", + WorkspaceID: gptr.Of(int64(123)), + TaskType: taskdto.TaskTypeAutoEval, + Rule: &taskdto.Rule{ + EffectiveTime: &taskdto.EffectiveTime{ + StartAt: gptr.Of(time.Now().Add(time.Hour).UnixMilli()), + EndAt: gptr.Of(time.Now().Add(2 * time.Hour).UnixMilli()), + }, + }, + } + } + + taskForAuth := newValidTask() + taskForSvcErr := newValidTask() + taskForSuccess := newValidTask() + + tests := []struct { + name string + ctx context.Context + req *taskapi.CreateTaskRequest + fieldsBuilder func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) + expectResp *taskapi.CreateTaskResponse + expectErr error + expectErrCode int32 + }{ + { + name: "nil request", + ctx: context.Background(), + req: nil, + expectResp: taskapi.NewCreateTaskResponse(), + expectErrCode: obErrorx.CommercialCommonInvalidParamCodeCode, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + return nil, nil + }, + }, + { + name: "task nil", + ctx: context.Background(), + req: &taskapi.CreateTaskRequest{ + Task: nil, + }, + expectResp: taskapi.NewCreateTaskResponse(), + expectErrCode: obErrorx.CommercialCommonInvalidParamCodeCode, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + return nil, nil + }, + }, + { + name: "auth error", + ctx: ctxWithAppID(1), + req: &taskapi.CreateTaskRequest{Task: taskForAuth}, + expectResp: taskapi.NewCreateTaskResponse(), + expectErr: errors.New("auth error"), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTaskWritable, strconv.FormatInt(123, 10)).Return(errors.New("auth error")) + return nil, auth + }, + }, + { + name: "service error", + ctx: ctxWithAppID(1), + req: &taskapi.CreateTaskRequest{Task: taskForSvcErr}, + expectResp: taskapi.NewCreateTaskResponse(), + expectErr: errors.New("svc error"), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTaskWritable, strconv.FormatInt(123, 10)).Return(nil) + svcMock := svcmock.NewMockITaskService(ctrl) + svcMock.EXPECT().CreateTask(gomock.Any(), &svc.CreateTaskReq{Task: taskForSvcErr}).Return(nil, errors.New("svc error")) + return svcMock, auth + }, + }, + { + name: "success with trace app", + ctx: ctxWithAppID(717152), + req: &taskapi.CreateTaskRequest{Task: taskForSuccess}, + expectResp: &taskapi.CreateTaskResponse{TaskID: gptr.Of(int64(1000))}, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTraceTaskCreate, strconv.FormatInt(123, 10)).Return(nil) + svcMock := svcmock.NewMockITaskService(ctrl) + 
svcMock.EXPECT().CreateTask(gomock.Any(), &svc.CreateTaskReq{Task: taskForSuccess}).Return(&svc.CreateTaskResp{TaskID: gptr.Of(int64(1000))}, nil) + return svcMock, auth + }, + }, + } + + for _, tt := range tests { + caseItem := tt + t.Run(caseItem.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskSvc, authSvc := caseItem.fieldsBuilder(ctrl) + app := &TaskApplication{ + taskSvc: taskSvc, + authSvc: authSvc, + } + + resp, err := app.CreateTask(caseItem.ctx, caseItem.req) + + if caseItem.expectErr != nil { + assert.EqualError(t, err, caseItem.expectErr.Error()) + } else if caseItem.expectErrCode != 0 { + assert.Error(t, err) + assertErrorCode(t, err, caseItem.expectErrCode) + } else { + assert.NoError(t, err) + } + + if caseItem.expectResp != nil { + assert.Equal(t, caseItem.expectResp, resp) + } else { + assert.Nil(t, resp) + } + }) + } +} + +func TestTaskApplication_UpdateTask(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + ctx context.Context + req *taskapi.UpdateTaskRequest + fieldsBuilder func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) + expectResp *taskapi.UpdateTaskResponse + expectErr error + expectErrCode int32 + }{ + { + name: "nil request", + ctx: context.Background(), + req: nil, + expectErrCode: obErrorx.CommercialCommonInvalidParamCodeCode, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + return nil, nil + }, + }, + { + name: "invalid workspace", + ctx: context.Background(), + req: &taskapi.UpdateTaskRequest{ + TaskID: 1, + WorkspaceID: 0, + }, + expectErrCode: obErrorx.CommercialCommonInvalidParamCodeCode, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + return nil, nil + }, + }, + { + name: "auth error", + ctx: ctxWithAppID(717152), + req: &taskapi.UpdateTaskRequest{TaskID: 11, WorkspaceID: 22}, + expectErr: errors.New("auth error"), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckTaskPermission(gomock.Any(), rpc.AuthActionTraceTaskEdit, strconv.FormatInt(22, 10), strconv.FormatInt(11, 10)).Return(errors.New("auth error")) + return nil, auth + }, + }, + { + name: "service error", + ctx: context.Background(), + req: &taskapi.UpdateTaskRequest{TaskID: 33, WorkspaceID: 44}, + expectResp: taskapi.NewUpdateTaskResponse(), + expectErr: errors.New("svc error"), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckTaskPermission(gomock.Any(), rpc.AuthActionTaskWritable, strconv.FormatInt(44, 10), strconv.FormatInt(33, 10)).Return(nil) + s := svcmock.NewMockITaskService(ctrl) + s.EXPECT().UpdateTask(gomock.Any(), &svc.UpdateTaskReq{ + TaskID: 33, + WorkspaceID: 44, + TaskStatus: nil, + Description: nil, + }).Return(errors.New("svc error")) + return s, auth + }, + }, + { + name: "success", + ctx: context.Background(), + req: &taskapi.UpdateTaskRequest{TaskID: 55, WorkspaceID: 66}, + expectResp: taskapi.NewUpdateTaskResponse(), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckTaskPermission(gomock.Any(), rpc.AuthActionTaskWritable, strconv.FormatInt(66, 10), strconv.FormatInt(55, 10)).Return(nil) + s := svcmock.NewMockITaskService(ctrl) + s.EXPECT().UpdateTask(gomock.Any(), &svc.UpdateTaskReq{ + TaskID: 55, 
+ WorkspaceID: 66, + TaskStatus: nil, + Description: nil, + }).Return(nil) + return s, auth + }, + }, + } + + for _, tt := range tests { + caseItem := tt + t.Run(caseItem.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskSvc, authSvc := caseItem.fieldsBuilder(ctrl) + app := &TaskApplication{ + taskSvc: taskSvc, + authSvc: authSvc, + } + resp, err := app.UpdateTask(caseItem.ctx, caseItem.req) + + if caseItem.expectErr != nil { + assert.EqualError(t, err, caseItem.expectErr.Error()) + } else if caseItem.expectErrCode != 0 { + assert.Error(t, err) + assertErrorCode(t, err, caseItem.expectErrCode) + } else { + assert.NoError(t, err) + } + + if caseItem.expectResp != nil { + assert.Equal(t, caseItem.expectResp, resp) + } else { + assert.Nil(t, resp) + } + }) + } +} + +func TestTaskApplication_ListTasks(t *testing.T) { + t.Parallel() + + taskListResp := &svc.ListTasksResp{ + Tasks: []*taskdto.Task{{Name: "task1"}}, + Total: gptr.Of(int64(1)), + } + tests := []struct { + name string + ctx context.Context + req *taskapi.ListTasksRequest + fieldsBuilder func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) + expectResp *taskapi.ListTasksResponse + expectErr error + expectErrCode int32 + }{ + { + name: "nil request", + ctx: context.Background(), + req: nil, + expectResp: taskapi.NewListTasksResponse(), + expectErrCode: obErrorx.CommercialCommonInvalidParamCodeCode, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + return nil, nil + }, + }, + { + name: "invalid workspace", + ctx: context.Background(), + req: &taskapi.ListTasksRequest{WorkspaceID: 0}, + expectResp: taskapi.NewListTasksResponse(), + expectErrCode: obErrorx.CommercialCommonInvalidParamCodeCode, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + return nil, nil + }, + }, + { + name: "auth error", + ctx: ctxWithAppID(717152), + req: &taskapi.ListTasksRequest{WorkspaceID: 123}, + expectResp: taskapi.NewListTasksResponse(), + expectErr: errors.New("auth error"), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTraceTaskList, strconv.FormatInt(123, 10)).Return(errors.New("auth error")) + return nil, auth + }, + }, + { + name: "service error", + ctx: context.Background(), + req: &taskapi.ListTasksRequest{WorkspaceID: 456}, + expectResp: taskapi.NewListTasksResponse(), + expectErr: errors.New("svc error"), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTaskReadable, strconv.FormatInt(456, 10)).Return(nil) + s := svcmock.NewMockITaskService(ctrl) + s.EXPECT().ListTasks(gomock.Any(), &svc.ListTasksReq{ + WorkspaceID: 456, + }).Return(nil, errors.New("svc error")) + return s, auth + }, + }, + { + name: "success", + ctx: context.Background(), + req: &taskapi.ListTasksRequest{WorkspaceID: 789}, + expectResp: &taskapi.ListTasksResponse{Tasks: taskListResp.Tasks, Total: taskListResp.Total}, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTaskReadable, strconv.FormatInt(789, 10)).Return(nil) + s := svcmock.NewMockITaskService(ctrl) + 
s.EXPECT().ListTasks(gomock.Any(), &svc.ListTasksReq{ + WorkspaceID: 789, + }).Return(taskListResp, nil) + return s, auth + }, + }, + } + + for _, tt := range tests { + caseItem := tt + t.Run(caseItem.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskSvc, authSvc := caseItem.fieldsBuilder(ctrl) + app := &TaskApplication{ + taskSvc: taskSvc, + authSvc: authSvc, + } + resp, err := app.ListTasks(caseItem.ctx, caseItem.req) + + if caseItem.expectErr != nil { + assert.EqualError(t, err, caseItem.expectErr.Error()) + } else if caseItem.expectErrCode != 0 { + assert.Error(t, err) + assertErrorCode(t, err, caseItem.expectErrCode) + } else { + assert.NoError(t, err) + } + + if caseItem.expectResp != nil { + assert.Equal(t, caseItem.expectResp, resp) + } else { + assert.Nil(t, resp) + } + }) + } +} + +func TestTaskApplication_GetTask(t *testing.T) { + t.Parallel() + + taskResp := &svc.GetTaskResp{Task: &taskdto.Task{Name: "task"}} + + tests := []struct { + name string + ctx context.Context + req *taskapi.GetTaskRequest + fieldsBuilder func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) + expectResp *taskapi.GetTaskResponse + expectErr error + expectErrCode int32 + }{ + { + name: "nil request", + ctx: context.Background(), + req: nil, + expectResp: taskapi.NewGetTaskResponse(), + expectErrCode: obErrorx.CommercialCommonInvalidParamCodeCode, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + return nil, nil + }, + }, + { + name: "invalid workspace", + ctx: context.Background(), + req: &taskapi.GetTaskRequest{WorkspaceID: 0}, + expectResp: taskapi.NewGetTaskResponse(), + expectErrCode: obErrorx.CommercialCommonInvalidParamCodeCode, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + return nil, nil + }, + }, + { + name: "auth error", + ctx: ctxWithAppID(717152), + req: &taskapi.GetTaskRequest{WorkspaceID: 100, TaskID: 1}, + expectResp: taskapi.NewGetTaskResponse(), + expectErr: errors.New("auth error"), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTraceTaskList, strconv.FormatInt(100, 10)).Return(errors.New("auth error")) + return nil, auth + }, + }, + { + name: "service error", + ctx: context.Background(), + req: &taskapi.GetTaskRequest{WorkspaceID: 101, TaskID: 2}, + expectResp: taskapi.NewGetTaskResponse(), + expectErr: errors.New("svc error"), + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTaskReadable, strconv.FormatInt(101, 10)).Return(nil) + s := svcmock.NewMockITaskService(ctrl) + s.EXPECT().GetTask(gomock.Any(), &svc.GetTaskReq{WorkspaceID: 101, TaskID: 2}).Return(nil, errors.New("svc error")) + return s, auth + }, + }, + { + name: "success", + ctx: context.Background(), + req: &taskapi.GetTaskRequest{WorkspaceID: 202, TaskID: 3}, + expectResp: &taskapi.GetTaskResponse{Task: taskResp.Task}, + fieldsBuilder: func(ctrl *gomock.Controller) (svc.ITaskService, rpc.IAuthProvider) { + auth := rpcmock.NewMockIAuthProvider(ctrl) + auth.EXPECT().CheckWorkspacePermission(gomock.Any(), rpc.AuthActionTaskReadable, strconv.FormatInt(202, 10)).Return(nil) + s := svcmock.NewMockITaskService(ctrl) + s.EXPECT().GetTask(gomock.Any(), &svc.GetTaskReq{WorkspaceID: 
202, TaskID: 3}).Return(taskResp, nil) + return s, auth + }, + }, + } + + for _, tt := range tests { + caseItem := tt + t.Run(caseItem.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + taskSvc, authSvc := caseItem.fieldsBuilder(ctrl) + app := &TaskApplication{ + taskSvc: taskSvc, + authSvc: authSvc, + } + resp, err := app.GetTask(caseItem.ctx, caseItem.req) + + if caseItem.expectErr != nil { + assert.EqualError(t, err, caseItem.expectErr.Error()) + } else if caseItem.expectErrCode != 0 { + assert.Error(t, err) + assertErrorCode(t, err, caseItem.expectErrCode) + } else { + assert.NoError(t, err) + } + + if caseItem.expectResp != nil { + assert.Equal(t, caseItem.expectResp, resp) + } else { + assert.Nil(t, resp) + } + }) + } +} + +func TestTaskApplication_SpanTrigger(t *testing.T) { + t.Parallel() + + event := &entity.RawSpan{} + + tests := []struct { + name string + mockSvc func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService + expectErr bool + }{ + { + name: "trace hub error", + mockSvc: func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService { + svc := tracehubmock.NewMockITraceHubService(ctrl) + svc.EXPECT().SpanTrigger(gomock.Any(), event).Return(errors.New("hub error")) + return svc + }, + expectErr: true, + }, + { + name: "success", + mockSvc: func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService { + svc := tracehubmock.NewMockITraceHubService(ctrl) + svc.EXPECT().SpanTrigger(gomock.Any(), event).Return(nil) + return svc + }, + }, + } + + for _, tt := range tests { + caseItem := tt + t.Run(caseItem.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + traceSvc := caseItem.mockSvc(ctrl) + app := &TaskApplication{tracehubSvc: traceSvc} + err := app.SpanTrigger(context.Background(), event) + if caseItem.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestTaskApplication_CallBack(t *testing.T) { + t.Parallel() + + event := &entity.AutoEvalEvent{} + tests := []struct { + name string + mockSvc func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService + expectErr bool + }{ + { + name: "trace hub error", + mockSvc: func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService { + svc := tracehubmock.NewMockITraceHubService(ctrl) + svc.EXPECT().CallBack(gomock.Any(), event).Return(errors.New("hub error")) + return svc + }, + expectErr: true, + }, + { + name: "success", + mockSvc: func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService { + svc := tracehubmock.NewMockITraceHubService(ctrl) + svc.EXPECT().CallBack(gomock.Any(), event).Return(nil) + return svc + }, + }, + } + + for _, tt := range tests { + caseItem := tt + t.Run(caseItem.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + traceSvc := caseItem.mockSvc(ctrl) + app := &TaskApplication{tracehubSvc: traceSvc} + err := app.CallBack(context.Background(), event) + if caseItem.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestTaskApplication_Correction(t *testing.T) { + t.Parallel() + + event := &entity.CorrectionEvent{} + tests := []struct { + name string + mockSvc func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService + expectErr bool + }{ + { + name: "trace hub error", + mockSvc: func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService { + svc := tracehubmock.NewMockITraceHubService(ctrl) + svc.EXPECT().Correction(gomock.Any(), 
event).Return(errors.New("hub error")) + return svc + }, + expectErr: true, + }, + { + name: "success", + mockSvc: func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService { + svc := tracehubmock.NewMockITraceHubService(ctrl) + svc.EXPECT().Correction(gomock.Any(), event).Return(nil) + return svc + }, + }, + } + + for _, tt := range tests { + caseItem := tt + t.Run(caseItem.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + traceSvc := caseItem.mockSvc(ctrl) + app := &TaskApplication{tracehubSvc: traceSvc} + err := app.Correction(context.Background(), event) + if caseItem.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestTaskApplication_BackFill(t *testing.T) { + t.Parallel() + + event := &entity.BackFillEvent{} + tests := []struct { + name string + mockSvc func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService + expectErr bool + }{ + { + name: "trace hub error", + mockSvc: func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService { + svc := tracehubmock.NewMockITraceHubService(ctrl) + svc.EXPECT().BackFill(gomock.Any(), event).Return(errors.New("hub error")) + return svc + }, + expectErr: true, + }, + { + name: "success", + mockSvc: func(ctrl *gomock.Controller) *tracehubmock.MockITraceHubService { + svc := tracehubmock.NewMockITraceHubService(ctrl) + svc.EXPECT().BackFill(gomock.Any(), event).Return(nil) + return svc + }, + }, + } + + for _, tt := range tests { + caseItem := tt + t.Run(caseItem.name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + traceSvc := caseItem.mockSvc(ctrl) + app := &TaskApplication{tracehubSvc: traceSvc} + err := app.BackFill(context.Background(), event) + if caseItem.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/backend/modules/observability/application/trace.go b/backend/modules/observability/application/trace.go index 54486517c..b63a1df3c 100644 --- a/backend/modules/observability/application/trace.go +++ b/backend/modules/observability/application/trace.go @@ -8,6 +8,7 @@ import ( "strconv" "time" + "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/tenant" "github.com/samber/lo" @@ -171,7 +172,7 @@ func (t *TraceApplication) buildListSpansSvcReq(req *trace.ListSpansRequest) (*s ret.SpanListType = loop_span.SpanListTypeRootSpan } if req.Filters != nil { - ret.Filters = tconv.FilterFieldsDTO2DO(req.Filters) + ret.Filters = convertor.FilterFieldsDTO2DO(req.Filters) if err := ret.Filters.Validate(); err != nil { return nil, err } @@ -827,3 +828,114 @@ func (t *TraceApplication) PreviewExportTracesToDataset(ctx context.Context, req // 转换响应 return tconv.PreviewResponseDO2DTO(serviceResp), nil } +func (t *TraceApplication) ChangeEvaluatorScore(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest) (*trace.ChangeEvaluatorScoreResponse, error) { + if err := t.validateChangeEvaluatorScoreReq(ctx, req); err != nil { + return nil, err + } + if err := t.authSvc.CheckWorkspacePermission(ctx, + rpc.AuthActionTraceTaskCreate, + strconv.FormatInt(req.GetWorkspaceID(), 10), + false); err != nil { + return nil, err + } + + sResp, err := t.traceService.ChangeEvaluatorScore(ctx, &service.ChangeEvaluatorScoreRequest{ + WorkspaceID: req.WorkspaceID, + SpanID: req.SpanID, + StartTime: req.StartTime, + Correction: req.Correction, + 
PlatformType: loop_span.PlatformType(req.GetPlatformType()), + AnnotationID: req.AnnotationID, + }) + if err != nil { + return nil, err + } + + return &trace.ChangeEvaluatorScoreResponse{ + Annotation: sResp.Annotation, + }, nil +} + +func (t *TraceApplication) validateChangeEvaluatorScoreReq(ctx context.Context, req *trace.ChangeEvaluatorScoreRequest) error { + if req == nil { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("no request provided")) + } else if req.GetWorkspaceID() <= 0 { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid workspace_id")) + } else if len(req.GetAnnotationID()) <= 0 { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid annotation_id")) + } else if req.GetStartTime() <= 0 { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid start_time")) + } else if req.GetCorrection() == nil { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid correction")) + } + return nil +} +func (t *TraceApplication) ListAnnotationEvaluators(ctx context.Context, req *trace.ListAnnotationEvaluatorsRequest) (*trace.ListAnnotationEvaluatorsResponse, error) { + var resp *trace.ListAnnotationEvaluatorsResponse + if req == nil { + return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("no request provided")) + } else if req.GetWorkspaceID() <= 0 { + return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid workspace_id")) + } + if err := t.authSvc.CheckWorkspacePermission(ctx, + rpc.AuthActionTraceTaskList, + strconv.FormatInt(req.GetWorkspaceID(), 10), + false); err != nil { + return resp, err + } + sResp, err := t.traceService.ListAnnotationEvaluators(ctx, &service.ListAnnotationEvaluatorsRequest{ + WorkspaceID: req.WorkspaceID, + Name: req.Name, + }) + if err != nil { + return resp, err + } + return &trace.ListAnnotationEvaluatorsResponse{Evaluators: sResp.Evaluators}, nil +} +func (t *TraceApplication) ExtractSpanInfo(ctx context.Context, req *trace.ExtractSpanInfoRequest) (*trace.ExtractSpanInfoResponse, error) { + var resp *trace.ExtractSpanInfoResponse + if err := t.validateExtractSpanInfoReq(ctx, req); err != nil { + return resp, err + } + if err := t.authSvc.CheckWorkspacePermission(ctx, + rpc.AuthActionTraceRead, + strconv.FormatInt(req.GetWorkspaceID(), 10), + false); err != nil { + return resp, err + } + sResp, err := t.traceService.ExtractSpanInfo(ctx, &service.ExtractSpanInfoRequest{ + WorkspaceID: req.WorkspaceID, + TraceID: req.TraceID, + SpanIds: req.SpanIds, + StartTime: req.GetStartTime(), + EndTime: req.GetEndTime(), + PlatformType: loop_span.PlatformType(req.GetPlatformType()), + FieldMappings: tconv.ConvertFieldMappingsDTO2DO(req.GetFieldMappings()), + }) + if err != nil { + return resp, err + } + return &trace.ExtractSpanInfoResponse{SpanInfos: sResp.SpanInfos}, nil +} +func (t *TraceApplication) validateExtractSpanInfoReq(ctx context.Context, req *trace.ExtractSpanInfoRequest) error { + if req == nil { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("no request provided")) + } else if req.GetWorkspaceID() <= 0 { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid workspace_id")) + } else if len(req.SpanIds) > MaxSpanLength { + return
errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("span_ids length exceeds the limit")) + } + v := utils.DateValidator{ + Start: req.GetStartTime(), + End: req.GetEndTime(), + EarliestDays: t.traceConfig.GetTraceDataMaxDurationDay(ctx, req.PlatformType), + } + + if newStartTime, newEndTime, err := v.CorrectDate(); err != nil { + return err + } else { + req.SetStartTime(lo.ToPtr(newStartTime - time.Minute.Milliseconds())) + req.SetEndTime(lo.ToPtr(newEndTime + time.Minute.Milliseconds())) + } + return nil +} diff --git a/backend/modules/observability/application/wire.go b/backend/modules/observability/application/wire.go index 8b73fed41..96a2c4096 100644 --- a/backend/modules/observability/application/wire.go +++ b/backend/modules/observability/application/wire.go @@ -12,17 +12,25 @@ import ( "github.com/coze-dev/coze-loop/backend/infra/external/benefit" "github.com/coze-dev/coze-loop/backend/infra/idgen" "github.com/coze-dev/coze-loop/backend/infra/limiter" + "github.com/coze-dev/coze-loop/backend/infra/lock" "github.com/coze-dev/coze-loop/backend/infra/metrics" "github.com/coze-dev/coze-loop/backend/infra/mq" + "github.com/coze-dev/coze-loop/backend/infra/redis" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/data/dataset/datasetservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/data/tag/tagservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/evaluationsetservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/evaluatorservice" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/experimentservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/auth/authservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/file/fileservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/user/userservice" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/config" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc" + trepo "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/repo" + taskSvc "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service" + task_processor "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/processor" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/tracehub" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/collector/exporter" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/collector/processor" @@ -40,8 +48,10 @@ import ( obrepo "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo" ckdao "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/ck" mysqldao "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql" + tredis "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/redis/dao" "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/auth" "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/dataset" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/evaluation" 
"github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/evaluationset" "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/evaluator" "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/file" @@ -54,6 +64,17 @@ import ( ) var ( + taskDomainSet = wire.NewSet( + NewInitTaskProcessor, + taskSvc.NewTaskServiceImpl, + obrepo.NewTaskRepoImpl, + //obrepo.NewTaskRunRepoImpl, + mysqldao.NewTaskDaoImpl, + tredis.NewTaskDAO, + tredis.NewTaskRunDAO, + mysqldao.NewTaskRunDaoImpl, + mq2.NewBackfillProducerImpl, + ) traceDomainSet = wire.NewSet( service.NewTraceServiceImpl, service.NewTraceExportServiceImpl, @@ -69,7 +90,9 @@ var ( obconfig.NewTraceConfigCenter, tenant.NewTenantProvider, workspace.NewWorkspaceProvider, + evaluator.NewEvaluatorRPCProvider, NewDatasetServiceAdapter, + taskDomainSet, ) traceSet = wire.NewSet( NewTraceApplication, @@ -78,7 +101,6 @@ var ( auth.NewAuthProvider, user.NewUserRPCProvider, tag.NewTagRPCProvider, - evaluator.NewEvaluatorRPCProvider, traceDomainSet, ) traceIngestionSet = wire.NewSet( @@ -96,8 +118,21 @@ var ( auth.NewAuthProvider, traceDomainSet, ) + taskSet = wire.NewSet( + tracehub.NewTraceHubImpl, + NewTaskApplication, + auth.NewAuthProvider, + user.NewUserRPCProvider, + evaluation.NewEvaluationRPCProvider, + NewTaskLocker, + traceDomainSet, + ) ) +func NewTaskLocker(cmdable redis.Cmdable) lock.ILocker { + return lock.NewRedisLockerWithHolder(cmdable, "observability") +} + func NewTraceProcessorBuilder( traceConfig config.ITraceConfig, fileProvider rpc.IFileProvider, @@ -168,9 +203,17 @@ func NewDatasetServiceAdapter(evalSetService evaluationsetservice.Client, datase return adapter } +func NewInitTaskProcessor(datasetServiceProvider *service.DatasetServiceAdaptor, evalService rpc.IEvaluatorRPCAdapter, + evaluationService rpc.IEvaluationRPCAdapter, taskRepo trepo.ITaskRepo) *task_processor.TaskProcessor { + taskProcessor := task_processor.NewTaskProcessor() + taskProcessor.Register(task.TaskTypeAutoEval, task_processor.NewAutoEvaluteProcessor(0, datasetServiceProvider, evalService, evaluationService, taskRepo)) + return taskProcessor +} + func InitTraceApplication( db db.Provider, ckDb ck.Provider, + redis redis.Cmdable, meter metrics.Meter, mqFactory mq.IFactory, configFactory conf.IConfigLoaderFactory, @@ -197,6 +240,10 @@ func InitOpenAPIApplication( limiterFactory limiter.IRateLimiterFactory, authClient authservice.Client, meter metrics.Meter, + db db.Provider, + redis redis.Cmdable, + idgen idgen.IIDGenerator, + evalService evaluatorservice.Client, ) (IObservabilityOpenAPIApplication, error) { wire.Build(openApiSet) return nil, nil @@ -209,3 +256,24 @@ func InitTraceIngestionApplication( wire.Build(traceIngestionSet) return nil, nil } + +func InitTaskApplication( + db db.Provider, + idgen idgen.IIDGenerator, + configFactory conf.IConfigLoaderFactory, + benefit benefit.IBenefitService, + ckDb ck.Provider, + redis redis.Cmdable, + mqFactory mq.IFactory, + userClient userservice.Client, + authClient authservice.Client, + evalService evaluatorservice.Client, + evalSetService evaluationsetservice.Client, + exptService experimentservice.Client, + datasetService datasetservice.Client, + fileClient fileservice.Client, + taskProcessor task_processor.TaskProcessor, + aid int32) (ITaskApplication, error) { + wire.Build(taskSet) + return nil, nil +} diff --git a/backend/modules/observability/application/wire_gen.go b/backend/modules/observability/application/wire_gen.go index 9f7dd8ac3..0f719704e 100644 --- 
a/backend/modules/observability/application/wire_gen.go +++ b/backend/modules/observability/application/wire_gen.go @@ -12,20 +12,28 @@ import ( "github.com/coze-dev/coze-loop/backend/infra/external/benefit" "github.com/coze-dev/coze-loop/backend/infra/idgen" "github.com/coze-dev/coze-loop/backend/infra/limiter" + "github.com/coze-dev/coze-loop/backend/infra/lock" "github.com/coze-dev/coze-loop/backend/infra/metrics" "github.com/coze-dev/coze-loop/backend/infra/mq" + "github.com/coze-dev/coze-loop/backend/infra/redis" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/data/dataset/datasetservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/data/tag/tagservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/evaluationsetservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/evaluatorservice" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/experimentservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/auth/authservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/file/fileservice" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/user/userservice" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" config2 "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/config" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc" + repo3 "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/repo" + service2 "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/processor" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/tracehub" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/collector/exporter" - "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/collector/processor" + processor2 "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/collector/processor" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/collector/receiver" repo2 "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/repo" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/service" @@ -40,8 +48,10 @@ import ( "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo" ck2 "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/ck" "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/redis/dao" "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/auth" "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/dataset" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/evaluation" "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/evaluationset" "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/evaluator" "github.com/coze-dev/coze-loop/backend/modules/observability/infra/rpc/file" @@ -55,7 +65,7 @@ import ( // Injectors from wire.go: -func InitTraceApplication(db2 db.Provider, ckDb ck.Provider, meter metrics.Meter, mqFactory mq.IFactory, configFactory 
conf.IConfigLoaderFactory, idgen2 idgen.IIDGenerator, fileClient fileservice.Client, benefit2 benefit.IBenefitService, authClient authservice.Client, userClient userservice.Client, evalService evaluatorservice.Client, evalSetService evaluationsetservice.Client, tagService tagservice.Client, datasetService datasetservice.Client) (ITraceApplication, error) { +func InitTraceApplication(db2 db.Provider, ckDb ck.Provider, redis2 redis.Cmdable, meter metrics.Meter, mqFactory mq.IFactory, configFactory conf.IConfigLoaderFactory, idgen2 idgen.IIDGenerator, fileClient fileservice.Client, benefit2 benefit.IBenefitService, authClient authservice.Client, userClient userservice.Client, evalService evaluatorservice.Client, evalSetService evaluationsetservice.Client, tagService tagservice.Client, datasetService datasetservice.Client) (ITraceApplication, error) { iSpansDao, err := ck2.NewSpansCkDaoImpl(ckDb) if err != nil { return nil, err @@ -85,7 +95,13 @@ func InitTraceApplication(db2 db.Provider, ckDb ck.Provider, meter metrics.Meter iFileProvider := file.NewFileRPCProvider(fileClient) traceFilterProcessorBuilder := NewTraceProcessorBuilder(iTraceConfig, iFileProvider, benefit2) iTenantProvider := tenant.NewTenantProvider(iTraceConfig) - iTraceService, err := service.NewTraceServiceImpl(iTraceRepo, iTraceConfig, iTraceProducer, iAnnotationProducer, iTraceMetrics, traceFilterProcessorBuilder, iTenantProvider) + iEvaluatorRPCAdapter := evaluator.NewEvaluatorRPCProvider(evalService) + iTaskDao := mysql.NewTaskDaoImpl(db2) + iTaskDAO := dao.NewTaskDAO(redis2) + iTaskRunDao := mysql.NewTaskRunDaoImpl(db2) + iTaskRunDAO := dao.NewTaskRunDAO(redis2) + iTaskRepo := repo.NewTaskRepoImpl(iTaskDao, idgen2, iTaskDAO, iTaskRunDao, iTaskRunDAO) + iTraceService, err := service.NewTraceServiceImpl(iTraceRepo, iTraceConfig, iTraceProducer, iAnnotationProducer, iTraceMetrics, traceFilterProcessorBuilder, iTenantProvider, iEvaluatorRPCAdapter, iTaskRepo) if err != nil { return nil, err } @@ -97,7 +113,6 @@ func InitTraceApplication(db2 db.Provider, ckDb ck.Provider, meter metrics.Meter iViewDao := mysql.NewViewDaoImpl(db2) iViewRepo := repo.NewViewRepoImpl(iViewDao, idgen2) iAuthProvider := auth.NewAuthProvider(authClient) - iEvaluatorRPCAdapter := evaluator.NewEvaluatorRPCProvider(evalService) iUserProvider := user.NewUserRPCProvider(userClient) iTagRPCAdapter := tag.NewTagRPCProvider(tagService) iTraceApplication, err := NewTraceApplication(iTraceService, iTraceExportService, iViewRepo, benefit2, iTenantProvider, iTraceMetrics, iTraceConfig, iAuthProvider, iEvaluatorRPCAdapter, iUserProvider, iTagRPCAdapter) @@ -107,7 +122,7 @@ func InitTraceApplication(db2 db.Provider, ckDb ck.Provider, meter metrics.Meter return iTraceApplication, nil } -func InitOpenAPIApplication(mqFactory mq.IFactory, configFactory conf.IConfigLoaderFactory, fileClient fileservice.Client, ckDb ck.Provider, benefit2 benefit.IBenefitService, limiterFactory limiter.IRateLimiterFactory, authClient authservice.Client, meter metrics.Meter) (IObservabilityOpenAPIApplication, error) { +func InitOpenAPIApplication(mqFactory mq.IFactory, configFactory conf.IConfigLoaderFactory, fileClient fileservice.Client, ckDb ck.Provider, benefit2 benefit.IBenefitService, limiterFactory limiter.IRateLimiterFactory, authClient authservice.Client, meter metrics.Meter, db2 db.Provider, redis2 redis.Cmdable, idgen2 idgen.IIDGenerator, evalService evaluatorservice.Client) (IObservabilityOpenAPIApplication, error) { iSpansDao, err := ck2.NewSpansCkDaoImpl(ckDb) if err != nil 
{ return nil, err @@ -137,7 +152,13 @@ func InitOpenAPIApplication(mqFactory mq.IFactory, configFactory conf.IConfigLoa iFileProvider := file.NewFileRPCProvider(fileClient) traceFilterProcessorBuilder := NewTraceProcessorBuilder(iTraceConfig, iFileProvider, benefit2) iTenantProvider := tenant.NewTenantProvider(iTraceConfig) - iTraceService, err := service.NewTraceServiceImpl(iTraceRepo, iTraceConfig, iTraceProducer, iAnnotationProducer, iTraceMetrics, traceFilterProcessorBuilder, iTenantProvider) + iEvaluatorRPCAdapter := evaluator.NewEvaluatorRPCProvider(evalService) + iTaskDao := mysql.NewTaskDaoImpl(db2) + iTaskDAO := dao.NewTaskDAO(redis2) + iTaskRunDao := mysql.NewTaskRunDaoImpl(db2) + iTaskRunDAO := dao.NewTaskRunDAO(redis2) + iTaskRepo := repo.NewTaskRepoImpl(iTaskDao, idgen2, iTaskDAO, iTaskRunDao, iTaskRunDAO) + iTraceService, err := service.NewTraceServiceImpl(iTraceRepo, iTraceConfig, iTraceProducer, iAnnotationProducer, iTraceMetrics, traceFilterProcessorBuilder, iTenantProvider, iEvaluatorRPCAdapter, iTaskRepo) if err != nil { return nil, err } @@ -177,14 +198,70 @@ func InitTraceIngestionApplication(configFactory conf.IConfigLoaderFactory, ckDb return iTraceIngestionApplication, nil } +func InitTaskApplication(db2 db.Provider, idgen2 idgen.IIDGenerator, configFactory conf.IConfigLoaderFactory, benefit2 benefit.IBenefitService, ckDb ck.Provider, redis2 redis.Cmdable, mqFactory mq.IFactory, userClient userservice.Client, authClient authservice.Client, evalService evaluatorservice.Client, evalSetService evaluationsetservice.Client, exptService experimentservice.Client, datasetService datasetservice.Client, fileClient fileservice.Client, taskProcessor processor.TaskProcessor, aid int32) (ITaskApplication, error) { + iTaskDao := mysql.NewTaskDaoImpl(db2) + iTaskDAO := dao.NewTaskDAO(redis2) + iTaskRunDao := mysql.NewTaskRunDaoImpl(db2) + iTaskRunDAO := dao.NewTaskRunDAO(redis2) + iTaskRepo := repo.NewTaskRepoImpl(iTaskDao, idgen2, iTaskDAO, iTaskRunDao, iTaskRunDAO) + iUserProvider := user.NewUserRPCProvider(userClient) + iConfigLoader, err := NewTraceConfigLoader(configFactory) + if err != nil { + return nil, err + } + iTraceConfig := config.NewTraceConfigCenter(iConfigLoader) + iBackfillProducer, err := producer.NewBackfillProducerImpl(iTraceConfig, mqFactory) + if err != nil { + return nil, err + } + datasetServiceAdaptor := NewDatasetServiceAdapter(evalSetService, datasetService) + iEvaluatorRPCAdapter := evaluator.NewEvaluatorRPCProvider(evalService) + iEvaluationRPCAdapter := evaluation.NewEvaluationRPCProvider(exptService) + processorTaskProcessor := NewInitTaskProcessor(datasetServiceAdaptor, iEvaluatorRPCAdapter, iEvaluationRPCAdapter, iTaskRepo) + iTaskService, err := service2.NewTaskServiceImpl(iTaskRepo, iUserProvider, idgen2, iBackfillProducer, processorTaskProcessor) + if err != nil { + return nil, err + } + iAuthProvider := auth.NewAuthProvider(authClient) + iSpansDao, err := ck2.NewSpansCkDaoImpl(ckDb) + if err != nil { + return nil, err + } + iAnnotationDao, err := ck2.NewAnnotationCkDaoImpl(ckDb) + if err != nil { + return nil, err + } + iTraceRepo, err := repo.NewTraceCKRepoImpl(iSpansDao, iAnnotationDao, iTraceConfig) + if err != nil { + return nil, err + } + iTenantProvider := tenant.NewTenantProvider(iTraceConfig) + iFileProvider := file.NewFileRPCProvider(fileClient) + traceFilterProcessorBuilder := NewTraceProcessorBuilder(iTraceConfig, iFileProvider, benefit2) + iLocker := NewTaskLocker(redis2) + iTraceHubService, err := tracehub.NewTraceHubImpl(iTaskRepo, 
iTraceRepo, iTenantProvider, traceFilterProcessorBuilder, processorTaskProcessor, benefit2, aid, iBackfillProducer, iLocker) + if err != nil { + return nil, err + } + iTaskApplication, err := NewTaskApplication(iTaskService, iAuthProvider, iEvaluatorRPCAdapter, iEvaluationRPCAdapter, iUserProvider, iTraceHubService, taskProcessor, traceFilterProcessorBuilder) + if err != nil { + return nil, err + } + return iTaskApplication, nil +} + // wire.go: var ( + taskDomainSet = wire.NewSet( + NewInitTaskProcessor, service2.NewTaskServiceImpl, repo.NewTaskRepoImpl, mysql.NewTaskDaoImpl, dao.NewTaskDAO, dao.NewTaskRunDAO, mysql.NewTaskRunDaoImpl, producer.NewBackfillProducerImpl, + ) traceDomainSet = wire.NewSet(service.NewTraceServiceImpl, service.NewTraceExportServiceImpl, repo.NewTraceCKRepoImpl, ck2.NewSpansCkDaoImpl, ck2.NewAnnotationCkDaoImpl, metrics2.NewTraceMetricsImpl, producer.NewTraceProducerImpl, producer.NewAnnotationProducerImpl, file.NewFileRPCProvider, NewTraceConfigLoader, - NewTraceProcessorBuilder, config.NewTraceConfigCenter, tenant.NewTenantProvider, workspace.NewWorkspaceProvider, NewDatasetServiceAdapter, + NewTraceProcessorBuilder, config.NewTraceConfigCenter, tenant.NewTenantProvider, workspace.NewWorkspaceProvider, evaluator.NewEvaluatorRPCProvider, NewDatasetServiceAdapter, + taskDomainSet, ) traceSet = wire.NewSet( - NewTraceApplication, repo.NewViewRepoImpl, mysql.NewViewDaoImpl, auth.NewAuthProvider, user.NewUserRPCProvider, tag.NewTagRPCProvider, evaluator.NewEvaluatorRPCProvider, traceDomainSet, + NewTraceApplication, repo.NewViewRepoImpl, mysql.NewViewDaoImpl, auth.NewAuthProvider, user.NewUserRPCProvider, tag.NewTagRPCProvider, traceDomainSet, ) traceIngestionSet = wire.NewSet( NewIngestionApplication, service.NewIngestionServiceImpl, repo.NewTraceCKRepoImpl, ck2.NewSpansCkDaoImpl, ck2.NewAnnotationCkDaoImpl, config.NewTraceConfigCenter, NewTraceConfigLoader, @@ -193,8 +270,15 @@ var ( openApiSet = wire.NewSet( NewOpenAPIApplication, auth.NewAuthProvider, traceDomainSet, ) + taskSet = wire.NewSet(tracehub.NewTraceHubImpl, NewTaskApplication, auth.NewAuthProvider, user.NewUserRPCProvider, evaluation.NewEvaluationRPCProvider, NewTaskLocker, + traceDomainSet, + ) ) +func NewTaskLocker(cmdable redis.Cmdable) lock.ILocker { + return lock.NewRedisLockerWithHolder(cmdable, "observability") +} + func NewTraceProcessorBuilder( traceConfig config2.ITraceConfig, fileProvider rpc.IFileProvider, @@ -217,7 +301,7 @@ func NewTraceProcessorBuilder( func NewIngestionCollectorFactory(mqFactory mq.IFactory, traceRepo repo2.ITraceRepo) service.IngestionCollectorFactory { return service.NewIngestionCollectorFactory( []receiver.Factory{rmqreceiver.NewFactory(mqFactory)}, - []processor.Factory{queueprocessor.NewFactory()}, + []processor2.Factory{queueprocessor.NewFactory()}, []exporter.Factory{clickhouseexporter.NewFactory(traceRepo)}, ) } @@ -232,3 +316,10 @@ func NewDatasetServiceAdapter(evalSetService evaluationsetservice.Client, datase adapter.Register(entity.DatasetCategory_Evaluation, evaluationset.NewEvaluationSetProvider(evalSetService, datasetProvider)) return adapter } + +func NewInitTaskProcessor(datasetServiceProvider *service.DatasetServiceAdaptor, evalService rpc.IEvaluatorRPCAdapter, + evaluationService rpc.IEvaluationRPCAdapter, taskRepo repo3.ITaskRepo) *processor.TaskProcessor { + taskProcessor := processor.NewTaskProcessor() + taskProcessor.Register(task.TaskTypeAutoEval, processor.NewAutoEvaluteProcessor(0, datasetServiceProvider, evalService, evaluationService, 
taskRepo)) + return taskProcessor +} diff --git a/backend/modules/observability/domain/component/config/config.go b/backend/modules/observability/domain/component/config/config.go index ea00b0efb..af6e01e5b 100644 --- a/backend/modules/observability/domain/component/config/config.go +++ b/backend/modules/observability/domain/component/config/config.go @@ -45,6 +45,7 @@ type MqConsumerCfg struct { Topic string `mapstructure:"topic" json:"topic"` ConsumerGroup string `mapstructure:"consumer_group" json:"consumer_group"` WorkerNum int `mapstructure:"worker_num" json:"worker_num"` + EnablePPE *bool `mapstructure:"enable_ppe" json:"enable_ppe"` } type TraceCKCfg struct { @@ -116,6 +117,7 @@ type ITraceConfig interface { GetDefaultTraceTenant(ctx context.Context) string GetAnnotationSourceCfg(ctx context.Context) (*AnnotationSourceConfig, error) GetQueryMaxQPS(ctx context.Context, key string) (int, error) + GetBackfillMqProducerCfg(ctx context.Context) (*MqProducerCfg, error) conf.IConfigLoader } diff --git a/backend/modules/observability/domain/component/config/mocks/config.go b/backend/modules/observability/domain/component/config/mocks/config.go index 3e2d45000..1aea3c576 100644 --- a/backend/modules/observability/domain/component/config/mocks/config.go +++ b/backend/modules/observability/domain/component/config/mocks/config.go @@ -22,7 +22,6 @@ import ( type MockITraceConfig struct { ctrl *gomock.Controller recorder *MockITraceConfigMockRecorder - isgomock struct{} } // MockITraceConfigMockRecorder is the mock recorder for MockITraceConfig. @@ -43,202 +42,217 @@ func (m *MockITraceConfig) EXPECT() *MockITraceConfigMockRecorder { } // Get mocks base method. -func (m *MockITraceConfig) Get(ctx context.Context, key string) any { +func (m *MockITraceConfig) Get(arg0 context.Context, arg1 string) any { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", ctx, key) + ret := m.ctrl.Call(m, "Get", arg0, arg1) ret0, _ := ret[0].(any) return ret0 } // Get indicates an expected call of Get. -func (mr *MockITraceConfigMockRecorder) Get(ctx, key any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) Get(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockITraceConfig)(nil).Get), ctx, key) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockITraceConfig)(nil).Get), arg0, arg1) } // GetAnnotationMqProducerCfg mocks base method. -func (m *MockITraceConfig) GetAnnotationMqProducerCfg(ctx context.Context) (*config.MqProducerCfg, error) { +func (m *MockITraceConfig) GetAnnotationMqProducerCfg(arg0 context.Context) (*config.MqProducerCfg, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAnnotationMqProducerCfg", ctx) + ret := m.ctrl.Call(m, "GetAnnotationMqProducerCfg", arg0) ret0, _ := ret[0].(*config.MqProducerCfg) ret1, _ := ret[1].(error) return ret0, ret1 } // GetAnnotationMqProducerCfg indicates an expected call of GetAnnotationMqProducerCfg. 
-func (mr *MockITraceConfigMockRecorder) GetAnnotationMqProducerCfg(ctx any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetAnnotationMqProducerCfg(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAnnotationMqProducerCfg", reflect.TypeOf((*MockITraceConfig)(nil).GetAnnotationMqProducerCfg), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAnnotationMqProducerCfg", reflect.TypeOf((*MockITraceConfig)(nil).GetAnnotationMqProducerCfg), arg0) } // GetAnnotationSourceCfg mocks base method. -func (m *MockITraceConfig) GetAnnotationSourceCfg(ctx context.Context) (*config.AnnotationSourceConfig, error) { +func (m *MockITraceConfig) GetAnnotationSourceCfg(arg0 context.Context) (*config.AnnotationSourceConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAnnotationSourceCfg", ctx) + ret := m.ctrl.Call(m, "GetAnnotationSourceCfg", arg0) ret0, _ := ret[0].(*config.AnnotationSourceConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // GetAnnotationSourceCfg indicates an expected call of GetAnnotationSourceCfg. -func (mr *MockITraceConfigMockRecorder) GetAnnotationSourceCfg(ctx any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetAnnotationSourceCfg(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAnnotationSourceCfg", reflect.TypeOf((*MockITraceConfig)(nil).GetAnnotationSourceCfg), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAnnotationSourceCfg", reflect.TypeOf((*MockITraceConfig)(nil).GetAnnotationSourceCfg), arg0) +} + +// GetBackfillMqProducerCfg mocks base method. +func (m *MockITraceConfig) GetBackfillMqProducerCfg(arg0 context.Context) (*config.MqProducerCfg, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBackfillMqProducerCfg", arg0) + ret0, _ := ret[0].(*config.MqProducerCfg) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBackfillMqProducerCfg indicates an expected call of GetBackfillMqProducerCfg. +func (mr *MockITraceConfigMockRecorder) GetBackfillMqProducerCfg(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackfillMqProducerCfg", reflect.TypeOf((*MockITraceConfig)(nil).GetBackfillMqProducerCfg), arg0) } // GetDefaultTraceTenant mocks base method. -func (m *MockITraceConfig) GetDefaultTraceTenant(ctx context.Context) string { +func (m *MockITraceConfig) GetDefaultTraceTenant(arg0 context.Context) string { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDefaultTraceTenant", ctx) + ret := m.ctrl.Call(m, "GetDefaultTraceTenant", arg0) ret0, _ := ret[0].(string) return ret0 } // GetDefaultTraceTenant indicates an expected call of GetDefaultTraceTenant. -func (mr *MockITraceConfigMockRecorder) GetDefaultTraceTenant(ctx any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetDefaultTraceTenant(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDefaultTraceTenant", reflect.TypeOf((*MockITraceConfig)(nil).GetDefaultTraceTenant), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDefaultTraceTenant", reflect.TypeOf((*MockITraceConfig)(nil).GetDefaultTraceTenant), arg0) } // GetPlatformSpansTrans mocks base method. 
-func (m *MockITraceConfig) GetPlatformSpansTrans(ctx context.Context) (*config.SpanTransHandlerConfig, error) { +func (m *MockITraceConfig) GetPlatformSpansTrans(arg0 context.Context) (*config.SpanTransHandlerConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPlatformSpansTrans", ctx) + ret := m.ctrl.Call(m, "GetPlatformSpansTrans", arg0) ret0, _ := ret[0].(*config.SpanTransHandlerConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // GetPlatformSpansTrans indicates an expected call of GetPlatformSpansTrans. -func (mr *MockITraceConfigMockRecorder) GetPlatformSpansTrans(ctx any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetPlatformSpansTrans(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPlatformSpansTrans", reflect.TypeOf((*MockITraceConfig)(nil).GetPlatformSpansTrans), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPlatformSpansTrans", reflect.TypeOf((*MockITraceConfig)(nil).GetPlatformSpansTrans), arg0) } // GetPlatformTenants mocks base method. -func (m *MockITraceConfig) GetPlatformTenants(ctx context.Context) (*config.PlatformTenantsCfg, error) { +func (m *MockITraceConfig) GetPlatformTenants(arg0 context.Context) (*config.PlatformTenantsCfg, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPlatformTenants", ctx) + ret := m.ctrl.Call(m, "GetPlatformTenants", arg0) ret0, _ := ret[0].(*config.PlatformTenantsCfg) ret1, _ := ret[1].(error) return ret0, ret1 } // GetPlatformTenants indicates an expected call of GetPlatformTenants. -func (mr *MockITraceConfigMockRecorder) GetPlatformTenants(ctx any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetPlatformTenants(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPlatformTenants", reflect.TypeOf((*MockITraceConfig)(nil).GetPlatformTenants), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPlatformTenants", reflect.TypeOf((*MockITraceConfig)(nil).GetPlatformTenants), arg0) } // GetQueryMaxQPS mocks base method. -func (m *MockITraceConfig) GetQueryMaxQPS(ctx context.Context, key string) (int, error) { +func (m *MockITraceConfig) GetQueryMaxQPS(arg0 context.Context, arg1 string) (int, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetQueryMaxQPS", ctx, key) + ret := m.ctrl.Call(m, "GetQueryMaxQPS", arg0, arg1) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // GetQueryMaxQPS indicates an expected call of GetQueryMaxQPS. -func (mr *MockITraceConfigMockRecorder) GetQueryMaxQPS(ctx, key any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetQueryMaxQPS(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueryMaxQPS", reflect.TypeOf((*MockITraceConfig)(nil).GetQueryMaxQPS), ctx, key) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueryMaxQPS", reflect.TypeOf((*MockITraceConfig)(nil).GetQueryMaxQPS), arg0, arg1) } // GetSystemViews mocks base method. -func (m *MockITraceConfig) GetSystemViews(ctx context.Context) ([]*config.SystemView, error) { +func (m *MockITraceConfig) GetSystemViews(arg0 context.Context) ([]*config.SystemView, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSystemViews", ctx) + ret := m.ctrl.Call(m, "GetSystemViews", arg0) ret0, _ := ret[0].([]*config.SystemView) ret1, _ := ret[1].(error) return ret0, ret1 } // GetSystemViews indicates an expected call of GetSystemViews. 
-func (mr *MockITraceConfigMockRecorder) GetSystemViews(ctx any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetSystemViews(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSystemViews", reflect.TypeOf((*MockITraceConfig)(nil).GetSystemViews), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSystemViews", reflect.TypeOf((*MockITraceConfig)(nil).GetSystemViews), arg0) } // GetTenantConfig mocks base method. -func (m *MockITraceConfig) GetTenantConfig(ctx context.Context) (*config.TenantCfg, error) { +func (m *MockITraceConfig) GetTenantConfig(arg0 context.Context) (*config.TenantCfg, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTenantConfig", ctx) + ret := m.ctrl.Call(m, "GetTenantConfig", arg0) ret0, _ := ret[0].(*config.TenantCfg) ret1, _ := ret[1].(error) return ret0, ret1 } // GetTenantConfig indicates an expected call of GetTenantConfig. -func (mr *MockITraceConfigMockRecorder) GetTenantConfig(ctx any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetTenantConfig(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTenantConfig", reflect.TypeOf((*MockITraceConfig)(nil).GetTenantConfig), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTenantConfig", reflect.TypeOf((*MockITraceConfig)(nil).GetTenantConfig), arg0) } // GetTraceCkCfg mocks base method. -func (m *MockITraceConfig) GetTraceCkCfg(ctx context.Context) (*config.TraceCKCfg, error) { +func (m *MockITraceConfig) GetTraceCkCfg(arg0 context.Context) (*config.TraceCKCfg, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTraceCkCfg", ctx) + ret := m.ctrl.Call(m, "GetTraceCkCfg", arg0) ret0, _ := ret[0].(*config.TraceCKCfg) ret1, _ := ret[1].(error) return ret0, ret1 } // GetTraceCkCfg indicates an expected call of GetTraceCkCfg. -func (mr *MockITraceConfigMockRecorder) GetTraceCkCfg(ctx any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetTraceCkCfg(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTraceCkCfg", reflect.TypeOf((*MockITraceConfig)(nil).GetTraceCkCfg), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTraceCkCfg", reflect.TypeOf((*MockITraceConfig)(nil).GetTraceCkCfg), arg0) } // GetTraceDataMaxDurationDay mocks base method. -func (m *MockITraceConfig) GetTraceDataMaxDurationDay(ctx context.Context, platformType *string) int64 { +func (m *MockITraceConfig) GetTraceDataMaxDurationDay(arg0 context.Context, arg1 *string) int64 { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTraceDataMaxDurationDay", ctx, platformType) + ret := m.ctrl.Call(m, "GetTraceDataMaxDurationDay", arg0, arg1) ret0, _ := ret[0].(int64) return ret0 } // GetTraceDataMaxDurationDay indicates an expected call of GetTraceDataMaxDurationDay. -func (mr *MockITraceConfigMockRecorder) GetTraceDataMaxDurationDay(ctx, platformType any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetTraceDataMaxDurationDay(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTraceDataMaxDurationDay", reflect.TypeOf((*MockITraceConfig)(nil).GetTraceDataMaxDurationDay), ctx, platformType) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTraceDataMaxDurationDay", reflect.TypeOf((*MockITraceConfig)(nil).GetTraceDataMaxDurationDay), arg0, arg1) } // GetTraceFieldMetaInfo mocks base method. 
-func (m *MockITraceConfig) GetTraceFieldMetaInfo(ctx context.Context) (*config.TraceFieldMetaInfoCfg, error) { +func (m *MockITraceConfig) GetTraceFieldMetaInfo(arg0 context.Context) (*config.TraceFieldMetaInfoCfg, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTraceFieldMetaInfo", ctx) + ret := m.ctrl.Call(m, "GetTraceFieldMetaInfo", arg0) ret0, _ := ret[0].(*config.TraceFieldMetaInfoCfg) ret1, _ := ret[1].(error) return ret0, ret1 } // GetTraceFieldMetaInfo indicates an expected call of GetTraceFieldMetaInfo. -func (mr *MockITraceConfigMockRecorder) GetTraceFieldMetaInfo(ctx any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetTraceFieldMetaInfo(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTraceFieldMetaInfo", reflect.TypeOf((*MockITraceConfig)(nil).GetTraceFieldMetaInfo), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTraceFieldMetaInfo", reflect.TypeOf((*MockITraceConfig)(nil).GetTraceFieldMetaInfo), arg0) } // GetTraceIngestTenantProducerCfg mocks base method. -func (m *MockITraceConfig) GetTraceIngestTenantProducerCfg(ctx context.Context) (map[string]*config.IngestConfig, error) { +func (m *MockITraceConfig) GetTraceIngestTenantProducerCfg(arg0 context.Context) (map[string]*config.IngestConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTraceIngestTenantProducerCfg", ctx) + ret := m.ctrl.Call(m, "GetTraceIngestTenantProducerCfg", arg0) ret0, _ := ret[0].(map[string]*config.IngestConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // GetTraceIngestTenantProducerCfg indicates an expected call of GetTraceIngestTenantProducerCfg. -func (mr *MockITraceConfigMockRecorder) GetTraceIngestTenantProducerCfg(ctx any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) GetTraceIngestTenantProducerCfg(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTraceIngestTenantProducerCfg", reflect.TypeOf((*MockITraceConfig)(nil).GetTraceIngestTenantProducerCfg), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTraceIngestTenantProducerCfg", reflect.TypeOf((*MockITraceConfig)(nil).GetTraceIngestTenantProducerCfg), arg0) } // Unmarshal mocks base method. -func (m *MockITraceConfig) Unmarshal(ctx context.Context, value any, opts ...conf.DecodeOptionFn) error { +func (m *MockITraceConfig) Unmarshal(arg0 context.Context, arg1 any, arg2 ...conf.DecodeOptionFn) error { m.ctrl.T.Helper() - varargs := []any{ctx, value} - for _, a := range opts { + varargs := []any{arg0, arg1} + for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Unmarshal", varargs...) @@ -247,17 +261,17 @@ func (m *MockITraceConfig) Unmarshal(ctx context.Context, value any, opts ...con } // Unmarshal indicates an expected call of Unmarshal. -func (mr *MockITraceConfigMockRecorder) Unmarshal(ctx, value any, opts ...any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) Unmarshal(arg0, arg1 any, arg2 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]any{ctx, value}, opts...) + varargs := append([]any{arg0, arg1}, arg2...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unmarshal", reflect.TypeOf((*MockITraceConfig)(nil).Unmarshal), varargs...) } // UnmarshalKey mocks base method. 
-func (m *MockITraceConfig) UnmarshalKey(ctx context.Context, key string, value any, opts ...conf.DecodeOptionFn) error { +func (m *MockITraceConfig) UnmarshalKey(arg0 context.Context, arg1 string, arg2 any, arg3 ...conf.DecodeOptionFn) error { m.ctrl.T.Helper() - varargs := []any{ctx, key, value} - for _, a := range opts { + varargs := []any{arg0, arg1, arg2} + for _, a := range arg3 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "UnmarshalKey", varargs...) @@ -266,8 +280,8 @@ func (m *MockITraceConfig) UnmarshalKey(ctx context.Context, key string, value a } // UnmarshalKey indicates an expected call of UnmarshalKey. -func (mr *MockITraceConfigMockRecorder) UnmarshalKey(ctx, key, value any, opts ...any) *gomock.Call { +func (mr *MockITraceConfigMockRecorder) UnmarshalKey(arg0, arg1, arg2 any, arg3 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]any{ctx, key, value}, opts...) + varargs := append([]any{arg0, arg1, arg2}, arg3...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnmarshalKey", reflect.TypeOf((*MockITraceConfig)(nil).UnmarshalKey), varargs...) } diff --git a/backend/modules/observability/domain/component/mq/backfill_producer.go b/backend/modules/observability/domain/component/mq/backfill_producer.go new file mode 100644 index 000000000..d9a1b187b --- /dev/null +++ b/backend/modules/observability/domain/component/mq/backfill_producer.go @@ -0,0 +1,14 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package mq + +import ( + "context" + + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" +) + +type IBackfillProducer interface { + SendBackfill(ctx context.Context, message *entity.BackFillEvent) error +} diff --git a/backend/modules/observability/domain/component/rpc/auth.go b/backend/modules/observability/domain/component/rpc/auth.go index d52e5bc29..201a1949c 100644 --- a/backend/modules/observability/domain/component/rpc/auth.go +++ b/backend/modules/observability/domain/component/rpc/auth.go @@ -15,6 +15,11 @@ const ( AuthActionAnnotationCreate = "createLoopTraceAnnotation" AuthActionTraceExport = "exportLoopTrace" AuthActionTracePreviewExport = "previewExportLoopTrace" + AuthActionTraceTaskCreate = "createLoopTask" + AuthActionTraceTaskList = "listLoopTask" + AuthActionTraceTaskEdit = "edit" + AuthActionTaskWritable = "task_writable" + AuthActionTaskReadable = "task_readable" ) //go:generate mockgen -destination=mocks/auth_provider.go -package=mocks . 
IAuthProvider @@ -23,4 +28,5 @@ type IAuthProvider interface { CheckViewPermission(ctx context.Context, action, workspaceId, viewId string) error CheckIngestPermission(ctx context.Context, workspaceId string) error CheckQueryPermission(ctx context.Context, workspaceId, platformType string) error + CheckTaskPermission(ctx context.Context, action, workspaceId, taskId string) error } diff --git a/backend/modules/observability/domain/component/rpc/dataset.go b/backend/modules/observability/domain/component/rpc/dataset.go index b1602bd09..616a8673e 100644 --- a/backend/modules/observability/domain/component/rpc/dataset.go +++ b/backend/modules/observability/domain/component/rpc/dataset.go @@ -17,6 +17,7 @@ type IDatasetProvider interface { UpdateDatasetSchema(ctx context.Context, dataset *entity.Dataset) error GetDataset(ctx context.Context, workspaceID, datasetID int64, category entity.DatasetCategory) (*entity.Dataset, error) ClearDatasetItems(ctx context.Context, workspaceID, datasetID int64, category entity.DatasetCategory) error + SearchDatasets(ctx context.Context, workspaceID int64, datasetID int64, category entity.DatasetCategory, name string) ([]*entity.Dataset, error) AddDatasetItems(ctx context.Context, datasetID int64, category entity.DatasetCategory, items []*entity.DatasetItem) ([]*entity.DatasetItem, []entity.ItemErrorGroup, error) ValidateDatasetItems(ctx context.Context, dataset *entity.Dataset, items []*entity.DatasetItem, ignoreCurrentCount *bool) ([]*entity.DatasetItem, []entity.ItemErrorGroup, error) } @@ -54,3 +55,7 @@ func (d *noopDatasetProvider) AddDatasetItems(ctx context.Context, datasetID int func (d *noopDatasetProvider) ValidateDatasetItems(ctx context.Context, dataset *entity.Dataset, items []*entity.DatasetItem, ignoreCurrentCount *bool) ([]*entity.DatasetItem, []entity.ItemErrorGroup, error) { return nil, nil, errorx.NewByCode(errno.CommonInvalidParamCode, errorx.WithExtraMsg("dataset category is invalid")) } + +func (d *noopDatasetProvider) SearchDatasets(ctx context.Context, workspaceID int64, datasetID int64, category entity.DatasetCategory, name string) ([]*entity.Dataset, error) { + return nil, errorx.NewByCode(errno.CommonInvalidParamCode, errorx.WithExtraMsg("dataset category is invalid")) +} diff --git a/backend/modules/observability/domain/component/rpc/evaluation.go b/backend/modules/observability/domain/component/rpc/evaluation.go new file mode 100644 index 000000000..80c0070c8 --- /dev/null +++ b/backend/modules/observability/domain/component/rpc/evaluation.go @@ -0,0 +1,67 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package rpc + +import ( + "context" + + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/eval_set" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/expt" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/eval_target" + "github.com/coze-dev/coze-loop/backend/modules/evaluation/domain/entity" +) + +type GetEvaluationSetReq struct { + WorkspaceID int64 + EvaluationSetID int64 +} +type CreateEvaluationSetReq struct { + EvaluationSet *entity.EvaluationSet + Session *common.Session +} +type SubmitExperimentReq struct { + WorkspaceID int64 + EvalSetVersionID *int64 + TargetVersionID *int64 + EvaluatorVersionIds []int64 + Name *string + Desc *string + EvalSetID *int64 + TargetID *int64 + TargetFieldMapping *expt.TargetFieldMapping + 
EvaluatorFieldMapping []*expt.EvaluatorFieldMapping + ItemConcurNum *int32 + EvaluatorsConcurNum *int32 + CreateEvalTargetParam *eval_target.CreateEvalTargetParam + ExptType *expt.ExptType + MaxAliveTime *int64 + SourceType *expt.SourceType + SourceID *string + Session *common.Session +} +type InvokeExperimentReq struct { + WorkspaceID int64 + EvaluationSetID int64 + Items []*eval_set.EvaluationSetItem + // If any of the items are invalid, nothing is written by default; set skipInvalidItems=true to skip the invalid items and write the valid ones + SkipInvalidItems *bool + // If a batch of items exceeds the dataset capacity limit, nothing is written by default; set partialAdd=true to write only the first N items that fit within the limit + AllowPartialAdd *bool + ExperimentID *int64 + ExperimentRunID *int64 + Ext map[string]string + Session *common.Session +} +type FinishExperimentReq struct { + WorkspaceID int64 + ExperimentID int64 + ExperimentRunID int64 + Session *common.Session +} +type IEvaluationRPCAdapter interface { + SubmitExperiment(ctx context.Context, param *SubmitExperimentReq) (exptID, exptRunID int64, err error) + InvokeExperiment(ctx context.Context, param *InvokeExperimentReq) (addedItems int64, err error) + FinishExperiment(ctx context.Context, param *FinishExperimentReq) (err error) +} diff --git a/backend/modules/observability/domain/component/rpc/evaluator.go b/backend/modules/observability/domain/component/rpc/evaluator.go index df2fbec9b..38cfe66dd 100644 --- a/backend/modules/observability/domain/component/rpc/evaluator.go +++ b/backend/modules/observability/domain/component/rpc/evaluator.go @@ -3,7 +3,9 @@ package rpc -import "context" +import ( + "context" +) type Evaluator struct { EvaluatorVersionID int64 @@ -15,8 +17,21 @@ type BatchGetEvaluatorVersionsParam struct { WorkspaceID int64 EvaluatorVersionIds []int64 } +type UpdateEvaluatorRecordParam struct { + WorkspaceID string + EvaluatorRecordID int64 + Score float64 + Reasoning string + UpdatedBy string +} +type ListEvaluatorsParam struct { + WorkspaceID int64 + Name *string +} //go:generate mockgen -destination=mocks/evaluator.go -package=mocks . IEvaluatorRPCAdapter type IEvaluatorRPCAdapter interface { BatchGetEvaluatorVersions(ctx context.Context, param *BatchGetEvaluatorVersionsParam) ([]*Evaluator, map[int64]*Evaluator, error) + UpdateEvaluatorRecord(ctx context.Context, param *UpdateEvaluatorRecordParam) error + ListEvaluators(ctx context.Context, param *ListEvaluatorsParam) ([]*Evaluator, error) } diff --git a/backend/modules/observability/domain/component/rpc/mocks/auth_provider.go b/backend/modules/observability/domain/component/rpc/mocks/auth_provider.go index b2e767f1b..78ec78448 100644 --- a/backend/modules/observability/domain/component/rpc/mocks/auth_provider.go +++ b/backend/modules/observability/domain/component/rpc/mocks/auth_provider.go @@ -68,6 +68,20 @@ func (mr *MockIAuthProviderMockRecorder) CheckQueryPermission(ctx, workspaceId, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckQueryPermission", reflect.TypeOf((*MockIAuthProvider)(nil).CheckQueryPermission), ctx, workspaceId, platformType) } +// CheckTaskPermission mocks base method. +func (m *MockIAuthProvider) CheckTaskPermission(arg0 context.Context, arg1, arg2, arg3 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CheckTaskPermission", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// CheckTaskPermission indicates an expected call of CheckTaskPermission. 
+func (mr *MockIAuthProviderMockRecorder) CheckTaskPermission(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckTaskPermission", reflect.TypeOf((*MockIAuthProvider)(nil).CheckTaskPermission), arg0, arg1, arg2, arg3) +} + // CheckViewPermission mocks base method. func (m *MockIAuthProvider) CheckViewPermission(ctx context.Context, action, workspaceId, viewId string) error { m.ctrl.T.Helper() diff --git a/backend/modules/observability/domain/component/rpc/mocks/dataset_provider_mock.go b/backend/modules/observability/domain/component/rpc/mocks/dataset_provider_mock.go index 7781966b8..efebc6c00 100644 --- a/backend/modules/observability/domain/component/rpc/mocks/dataset_provider_mock.go +++ b/backend/modules/observability/domain/component/rpc/mocks/dataset_provider_mock.go @@ -100,6 +100,21 @@ func (mr *MockIDatasetProviderMockRecorder) GetDataset(arg0, arg1, arg2, arg3 an return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataset", reflect.TypeOf((*MockIDatasetProvider)(nil).GetDataset), arg0, arg1, arg2, arg3) } +// SearchDatasets mocks base method. +func (m *MockIDatasetProvider) SearchDatasets(arg0 context.Context, arg1, arg2 int64, arg3 entity.DatasetCategory, arg4 string) ([]*entity.Dataset, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SearchDatasets", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].([]*entity.Dataset) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SearchDatasets indicates an expected call of SearchDatasets. +func (mr *MockIDatasetProviderMockRecorder) SearchDatasets(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchDatasets", reflect.TypeOf((*MockIDatasetProvider)(nil).SearchDatasets), arg0, arg1, arg2, arg3, arg4) +} + // UpdateDatasetSchema mocks base method. func (m *MockIDatasetProvider) UpdateDatasetSchema(arg0 context.Context, arg1 *entity.Dataset) error { m.ctrl.T.Helper() diff --git a/backend/modules/observability/domain/component/rpc/mocks/evaluator.go b/backend/modules/observability/domain/component/rpc/mocks/evaluator.go index 464f5f1ba..57bd0a127 100644 --- a/backend/modules/observability/domain/component/rpc/mocks/evaluator.go +++ b/backend/modules/observability/domain/component/rpc/mocks/evaluator.go @@ -21,7 +21,6 @@ import ( type MockIEvaluatorRPCAdapter struct { ctrl *gomock.Controller recorder *MockIEvaluatorRPCAdapterMockRecorder - isgomock struct{} } // MockIEvaluatorRPCAdapterMockRecorder is the mock recorder for MockIEvaluatorRPCAdapter. @@ -42,9 +41,9 @@ func (m *MockIEvaluatorRPCAdapter) EXPECT() *MockIEvaluatorRPCAdapterMockRecorde } // BatchGetEvaluatorVersions mocks base method. 
-func (m *MockIEvaluatorRPCAdapter) BatchGetEvaluatorVersions(ctx context.Context, param *rpc.BatchGetEvaluatorVersionsParam) ([]*rpc.Evaluator, map[int64]*rpc.Evaluator, error) { +func (m *MockIEvaluatorRPCAdapter) BatchGetEvaluatorVersions(arg0 context.Context, arg1 *rpc.BatchGetEvaluatorVersionsParam) ([]*rpc.Evaluator, map[int64]*rpc.Evaluator, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchGetEvaluatorVersions", ctx, param) + ret := m.ctrl.Call(m, "BatchGetEvaluatorVersions", arg0, arg1) ret0, _ := ret[0].([]*rpc.Evaluator) ret1, _ := ret[1].(map[int64]*rpc.Evaluator) ret2, _ := ret[2].(error) @@ -52,7 +51,36 @@ func (m *MockIEvaluatorRPCAdapter) BatchGetEvaluatorVersions(ctx context.Context } // BatchGetEvaluatorVersions indicates an expected call of BatchGetEvaluatorVersions. -func (mr *MockIEvaluatorRPCAdapterMockRecorder) BatchGetEvaluatorVersions(ctx, param any) *gomock.Call { +func (mr *MockIEvaluatorRPCAdapterMockRecorder) BatchGetEvaluatorVersions(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetEvaluatorVersions", reflect.TypeOf((*MockIEvaluatorRPCAdapter)(nil).BatchGetEvaluatorVersions), ctx, param) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetEvaluatorVersions", reflect.TypeOf((*MockIEvaluatorRPCAdapter)(nil).BatchGetEvaluatorVersions), arg0, arg1) +} + +// ListEvaluators mocks base method. +func (m *MockIEvaluatorRPCAdapter) ListEvaluators(arg0 context.Context, arg1 *rpc.ListEvaluatorsParam) ([]*rpc.Evaluator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListEvaluators", arg0, arg1) + ret0, _ := ret[0].([]*rpc.Evaluator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListEvaluators indicates an expected call of ListEvaluators. +func (mr *MockIEvaluatorRPCAdapterMockRecorder) ListEvaluators(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEvaluators", reflect.TypeOf((*MockIEvaluatorRPCAdapter)(nil).ListEvaluators), arg0, arg1) +} + +// UpdateEvaluatorRecord mocks base method. +func (m *MockIEvaluatorRPCAdapter) UpdateEvaluatorRecord(arg0 context.Context, arg1 *rpc.UpdateEvaluatorRecordParam) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateEvaluatorRecord", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateEvaluatorRecord indicates an expected call of UpdateEvaluatorRecord. +func (mr *MockIEvaluatorRPCAdapterMockRecorder) UpdateEvaluatorRecord(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEvaluatorRecord", reflect.TypeOf((*MockIEvaluatorRPCAdapter)(nil).UpdateEvaluatorRecord), arg0, arg1) } diff --git a/backend/modules/observability/domain/component/rpc/mocks/file.go b/backend/modules/observability/domain/component/rpc/mocks/file.go index 2ea9cda65..ed18922bc 100644 --- a/backend/modules/observability/domain/component/rpc/mocks/file.go +++ b/backend/modules/observability/domain/component/rpc/mocks/file.go @@ -20,7 +20,6 @@ import ( type MockIFileProvider struct { ctrl *gomock.Controller recorder *MockIFileProviderMockRecorder - isgomock struct{} } // MockIFileProviderMockRecorder is the mock recorder for MockIFileProvider. 
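The hunks above add UpdateEvaluatorRecord and ListEvaluators to IEvaluatorRPCAdapter and regenerate the corresponding gomock stubs. Below is a minimal usage sketch, not part of the diff; the test function, literal values, and expectations are hypothetical, and only the interface, the rpc.UpdateEvaluatorRecordParam struct, and the mocks package come from this change:

// Sketch only: drives the regenerated MockIEvaluatorRPCAdapter through the new
// UpdateEvaluatorRecord method using the standard go.uber.org/mock/gomock flow.
package rpc_test

import (
	"context"
	"testing"

	"go.uber.org/mock/gomock"

	"github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc"
	"github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc/mocks"
)

func TestUpdateEvaluatorRecordMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Regenerated mock from mocks/evaluator.go in this diff.
	adapter := mocks.NewMockIEvaluatorRPCAdapter(ctrl)
	// Expect exactly one correction call; matchers accept any context and param.
	adapter.EXPECT().
		UpdateEvaluatorRecord(gomock.Any(), gomock.Any()).
		Return(nil).
		Times(1)

	// Fields mirror rpc.UpdateEvaluatorRecordParam as declared above; values are illustrative.
	err := adapter.UpdateEvaluatorRecord(context.Background(), &rpc.UpdateEvaluatorRecordParam{
		WorkspaceID:       "123",
		EvaluatorRecordID: 456,
		Score:             0.8,
		Reasoning:         "manual correction",
		UpdatedBy:         "user-1",
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}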
diff --git a/backend/modules/observability/domain/component/rpc/mocks/tag.go b/backend/modules/observability/domain/component/rpc/mocks/tag.go index db5d94694..67b72557d 100644 --- a/backend/modules/observability/domain/component/rpc/mocks/tag.go +++ b/backend/modules/observability/domain/component/rpc/mocks/tag.go @@ -21,7 +21,6 @@ import ( type MockITagRPCAdapter struct { ctrl *gomock.Controller recorder *MockITagRPCAdapterMockRecorder - isgomock struct{} } // MockITagRPCAdapterMockRecorder is the mock recorder for MockITagRPCAdapter. diff --git a/backend/modules/observability/domain/component/rpc/mocks/user.go b/backend/modules/observability/domain/component/rpc/mocks/user.go index bb55cfe63..eb5db57ab 100644 --- a/backend/modules/observability/domain/component/rpc/mocks/user.go +++ b/backend/modules/observability/domain/component/rpc/mocks/user.go @@ -21,7 +21,6 @@ import ( type MockIUserProvider struct { ctrl *gomock.Controller recorder *MockIUserProviderMockRecorder - isgomock struct{} } // MockIUserProviderMockRecorder is the mock recorder for MockIUserProvider. @@ -42,9 +41,9 @@ func (m *MockIUserProvider) EXPECT() *MockIUserProviderMockRecorder { } // GetUserInfo mocks base method. -func (m *MockIUserProvider) GetUserInfo(ctx context.Context, userIDs []string) ([]*common.UserInfo, map[string]*common.UserInfo, error) { +func (m *MockIUserProvider) GetUserInfo(arg0 context.Context, arg1 []string) ([]*common.UserInfo, map[string]*common.UserInfo, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUserInfo", ctx, userIDs) + ret := m.ctrl.Call(m, "GetUserInfo", arg0, arg1) ret0, _ := ret[0].([]*common.UserInfo) ret1, _ := ret[1].(map[string]*common.UserInfo) ret2, _ := ret[2].(error) @@ -52,7 +51,7 @@ func (m *MockIUserProvider) GetUserInfo(ctx context.Context, userIDs []string) ( } // GetUserInfo indicates an expected call of GetUserInfo. 
-func (mr *MockIUserProviderMockRecorder) GetUserInfo(ctx, userIDs any) *gomock.Call { +func (mr *MockIUserProviderMockRecorder) GetUserInfo(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserInfo", reflect.TypeOf((*MockIUserProvider)(nil).GetUserInfo), ctx, userIDs) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUserInfo", reflect.TypeOf((*MockIUserProvider)(nil).GetUserInfo), arg0, arg1) } diff --git a/backend/modules/observability/domain/task/entity/event.go b/backend/modules/observability/domain/task/entity/event.go new file mode 100644 index 000000000..26642eff2 --- /dev/null +++ b/backend/modules/observability/domain/task/entity/event.go @@ -0,0 +1,331 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package entity + +import ( + "strconv" + + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" +) + +type RawSpan struct { + TraceID string `json:"_trace_id"` + LogID string `json:"__logid"` + Method string `json:"_method"` + SpanID string `json:"_span_id"` + ParentID string `json:"_parent_id"` + Events []*EventInRawSpan `json:"_events"` + DurationInUs int64 `json:"_duration"` // unit: microsecond + StartTimeInUs int64 `json:"_start_time"` // unix microsecond + StatusCode int32 `json:"_status_code"` + SpanName string `json:"_span_name"` + SpanType string `json:"_span_type"` + ServerEnv *ServerInRawSpan `json:"_server_env"` + Tags map[string]any `json:"_tags"` // value can be: [float64, int64, bool, string, []byte] + SystemTags map[string]any `json:"_system_tags"` // value can be: [float64, int64, bool, string, []byte] + Tenant string `json:"tenant"` + SensitiveTags *SensitiveTags `json:"sensitive_tags"` +} +type EventInRawSpan struct { + Type string `json:"_type,omitempty"` + Name string `json:"_name,omitempty"` + Tags []*RawSpanTag `json:"_tags,omitempty"` + StartTime int64 `json:"_start_time,omitempty"` + Data []byte `json:"_data,omitempty"` +} +type RawSpanTag struct { + Key string + Value any // value can be: [float64, int64, bool, string, []byte] +} +type SensitiveTags struct { + Input string `json:"input"` + Output string `json:"output"` + InputTokens int64 `json:"input_tokens"` + OutputTokens int64 `json:"output_tokens"` + Tokens int64 `json:"tokens"` +} + +type ServerInRawSpan struct { + PSM string `json:"psm,omitempty"` + Cluster string `json:"cluster,omitempty"` + DC string `json:"dc,omitempty"` + Env string `json:"env,omitempty"` + PodName string `json:"pod_name,omitempty"` + Stage string `json:"stage,omitempty"` + Region string `json:"_region,omitempty"` +} + +var MockRawSpan = &RawSpan{ + TraceID: "1", + LogID: "2", + Method: "3", + SpanID: "4", + ParentID: "0", + DurationInUs: 0, + StartTimeInUs: 0, + StatusCode: 0, + SpanName: "xun_test", + Tags: map[string]any{ + "span_type": "root", + "tokens": 3, + "input": "世界上最美的火山", + "output": "富士山", + }, + Tenant: "fornax_saas", +} + +func (s *RawSpan) GetSensitiveTags() *SensitiveTags { + if s == nil { + return nil + } + return s.SensitiveTags +} +func (s *RawSpan) GetServerEnv() *ServerInRawSpan { + if s == nil { + return nil + } + return s.ServerEnv +} + +func (s *RawSpan) RawSpanConvertToLoopSpan() *loop_span.Span { + if s == nil { + return nil + } + systemTagsString := make(map[string]string) + systemTagsLong := make(map[string]int64) + systemTagsDouble := make(map[string]float64) + tagsString := make(map[string]string) + tagsLong := make(map[string]int64) + tagsDouble := 
make(map[string]float64) + tagsBool := make(map[string]bool) + tagsByte := make(map[string]string) + for k, v := range s.Tags { + switch v.(type) { + case string: + tagsString[k] = v.(string) + case int64: + tagsLong[k] = v.(int64) + case float64: + tagsDouble[k] = v.(float64) + case bool: + tagsBool[k] = v.(bool) + case []byte: + tagsByte[k] = string(v.([]byte)) + default: + tagsString[k] = "" + } + } + for k, v := range s.SystemTags { + switch v.(type) { + case string: + systemTagsString[k] = v.(string) + case int64: + systemTagsLong[k] = v.(int64) + case float64: + systemTagsDouble[k] = v.(float64) + default: + systemTagsString[k] = "" + } + } + tagsLong["input_tokens"] = s.SensitiveTags.InputTokens + tagsLong["output_tokens"] = s.SensitiveTags.OutputTokens + tagsLong["tokens"] = s.SensitiveTags.Tokens + if s.Tags == nil { + s.Tags = make(map[string]any) + } + if s.Tags["call_type"] == nil { + s.Tags["call_type"] = "" + } + callType := s.Tags["call_type"].(string) + if s.Tags["fornax_space_id"] == nil { + s.Tags["fornax_space_id"] = "" + } + spaceID := s.Tags["fornax_space_id"].(string) + if s.Tags["span_type"] == nil { + s.Tags["span_type"] = "" + } + spanType := s.Tags["span_type"].(string) + + result := &loop_span.Span{ + StartTime: s.StartTimeInUs / 1000, + SpanID: s.SpanID, + ParentID: s.ParentID, + LogID: s.LogID, + TraceID: s.TraceID, + DurationMicros: s.DurationInUs / 1000, + PSM: s.ServerEnv.PSM, + CallType: callType, + WorkspaceID: spaceID, + SpanName: s.SpanName, + SpanType: spanType, + Method: s.Method, + StatusCode: s.StatusCode, + Input: s.SensitiveTags.Input, + Output: s.SensitiveTags.Output, + SystemTagsString: systemTagsString, + SystemTagsLong: systemTagsLong, + SystemTagsDouble: systemTagsDouble, + TagsString: tagsString, + TagsLong: tagsLong, + TagsDouble: tagsDouble, + TagsBool: tagsBool, + TagsByte: tagsByte, + } + + return result +} + +type AutoEvalEvent struct { + ExptID int64 `json:"expt_id"` + TurnEvalResults []*OnlineExptTurnEvalResult `json:"turn_eval_results"` +} +type OnlineExptTurnEvalResult struct { + EvaluatorVersionID int64 `json:"evaluator_version_id"` + EvaluatorRecordID int64 `json:"evaluator_record_id"` + Score float64 `json:"score"` + Reasoning string `json:"reasoning"` + Status EvaluatorRunStatus `json:"status"` + EvaluatorRunError *EvaluatorRunError `json:"evaluator_run_error"` + Ext map[string]string `json:"ext"` + BaseInfo *BaseInfo `json:"base_info"` +} +type BaseInfo struct { + UpdatedBy *UserInfo `json:"updated_by"` + UpdatedAt int64 `json:"updated_at"` + CreatedBy *UserInfo `json:"created_by"` + CreatedAt int64 `json:"created_at"` +} +type UserInfo struct { + UserID string `json:"user_id"` +} +type EvaluatorRunStatus int + +const ( + EvaluatorRunStatus_Unknown = 0 + EvaluatorRunStatus_Success = 1 + EvaluatorRunStatus_Fail = 2 +) + +func (s *OnlineExptTurnEvalResult) GetSpanIDFromExt() string { + if s == nil { + return "" + } + return s.Ext["span_id"] +} +func (s *OnlineExptTurnEvalResult) GetTraceIDFromExt() string { + if s == nil { + return "" + } + return s.Ext["trace_id"] +} +func (s *OnlineExptTurnEvalResult) GetStartTimeFromExt() int64 { + if s == nil { + return 0 + } + startTimeStr := s.Ext["start_time"] + startTime, err := strconv.ParseInt(startTimeStr, 10, 64) + if err != nil { + return 0 + } + return startTime +} +func (s *OnlineExptTurnEvalResult) GetTaskIDFromExt() int64 { + if s == nil { + return 0 + } + taskIDStr := s.Ext["task_id"] + taskID, err := strconv.ParseInt(taskIDStr, 10, 64) + if err != nil { + return 0 + } + return 
taskID +} +func (s *OnlineExptTurnEvalResult) GetWorkspaceIDFromExt() (string, int64) { + if s == nil { + return "", 0 + } + workspaceIDStr := s.Ext["workspace_id"] + workspaceID, err := strconv.ParseInt(workspaceIDStr, 10, 64) + if err != nil { + return "", 0 + } + return workspaceIDStr, workspaceID +} + +type EvaluatorRunError struct { + Code int32 `json:"code"` + Message string `json:"message"` +} + +type Correction struct { + Score float64 `json:"score"` + Explain string `json:"explain"` + UpdatedBy string `json:"updated_by"` +} + +type EvaluatorResult struct { + Score float64 `json:"score"` + Correction *Correction `json:"correction"` + Reasoning string `json:"reasoning"` +} + +type CorrectionEvent struct { + EvaluatorResult *EvaluatorResult `json:"evaluator_result"` + EvaluatorRecordID int64 `json:"evaluator_record_id"` + EvaluatorVersionID int64 `json:"evaluator_version_id"` + Ext map[string]string `json:"ext"` + CreatedAt int64 `json:"created_at"` + UpdatedAt int64 `json:"updated_at"` +} + +type BackFillEvent struct { + SpaceID int64 `json:"space_id"` + TaskID int64 `json:"task_id"` +} + +func (c *CorrectionEvent) GetSpanIDFromExt() string { + if c == nil { + return "" + } + return c.Ext["span_id"] +} +func (c *CorrectionEvent) GetTraceIDFromExt() string { + if c == nil { + return "" + } + return c.Ext["trace_id"] +} +func (c *CorrectionEvent) GetStartTimeFromExt() int64 { + if c == nil { + return 0 + } + startTimeStr := c.Ext["start_time"] + startTime, err := strconv.ParseInt(startTimeStr, 10, 64) + if err != nil { + return 0 + } + return startTime +} +func (c *CorrectionEvent) GetTaskIDFromExt() int64 { + if c == nil { + return 0 + } + taskIDStr := c.Ext["task_id"] + taskID, err := strconv.ParseInt(taskIDStr, 10, 64) + if err != nil { + return 0 + } + return taskID +} +func (c *CorrectionEvent) GetWorkspaceIDFromExt() (string, int64) { + if c == nil { + return "", 0 + } + workspaceIDStr := c.Ext["workspace_id"] + workspaceID, err := strconv.ParseInt(workspaceIDStr, 10, 64) + if err != nil { + return "", 0 + } + return workspaceIDStr, workspaceID +} diff --git a/backend/modules/observability/domain/task/entity/task.go b/backend/modules/observability/domain/task/entity/task.go new file mode 100644 index 000000000..b04cf0f34 --- /dev/null +++ b/backend/modules/observability/domain/task/entity/task.go @@ -0,0 +1,82 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package entity + +// +//import ( +// "time" +// +// "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" +// "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" +// "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" +//) +// +//// do +//type ObservabilityTaskDO struct { +// ID int64 // Task ID +// WorkspaceID int64 // Workspace ID +// Name string // Task name +// Description *string // Task description +// TaskType string // Task type +// TaskStatus string // Task status +// TaskDetail *string // Task run details +// SpanFilter *string // Span filter conditions +// EffectiveTime *string // Effective time window +// BackfillEffectiveTime *string // Backfill effective time window +// Sampler *string // Sampler +// TaskConfig *string // Task-specific configuration +// CreatedAt time.Time // Creation time +// UpdatedAt time.Time // Update time +// CreatedBy string // Creator +// UpdatedBy string // Updater +// +// TaskRuns []*TaskRun +//} +//type SpanFilter struct { +// Filters loop_span.FilterFields `json:"filters,omitempty"` +// PlatformType common.PlatformType `json:"platform_type,omitempty"` +// SpanListType 
common.SpanListType `json:"span_list_type,omitempty"` //} +// +//type TaskRunDO struct { +// ID int64 // Task Run ID +// TaskID int64 // Task ID +// WorkspaceID int64 // Workspace ID +// TaskType string // Task type +// RunStatus string // Task run status +// RunDetail *string // Task run details +// BackfillDetail *string // Backfill run details +// RunStartAt time.Time // Run start time +// RunEndAt time.Time // Run end time +// RunConfig *string // Task-specific configuration +// CreatedAt time.Time // Creation time +// UpdatedAt time.Time // Update time +//} +// +//func (t ObservabilityTask) IsFinished() bool { +// switch t.TaskStatus { +// case task.TaskStatusSuccess, task.TaskStatusDisabled, task.TaskStatusPending: +// return true +// default: +// return false +// } +//} +// +//func (t ObservabilityTask) GetBackfillTaskRun() *TaskRun { +// for _, taskRunPO := range t.TaskRuns { +// if taskRunPO.TaskType == task.TaskRunTypeBackFill { +// return taskRunPO +// } +// } +// return nil +//} +// +//func (t ObservabilityTask) GetCurrentTaskRun() *TaskRun { +// for _, taskRunPO := range t.TaskRuns { +// if taskRunPO.TaskType == task.TaskRunTypeNewData && taskRunPO.RunStatus == task.TaskStatusRunning { +// return taskRunPO +// } +// } +// return nil +//} diff --git a/backend/modules/observability/domain/task/entity/task_do.go b/backend/modules/observability/domain/task/entity/task_do.go new file mode 100644 index 000000000..7b219345c --- /dev/null +++ b/backend/modules/observability/domain/task/entity/task_do.go @@ -0,0 +1,162 @@ +package entity + +import ( + "time" + + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/dataset" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" +) + +// do +type ObservabilityTask struct { + ID int64 // Task ID + WorkspaceID int64 // Workspace ID + Name string // Task name + Description *string // Task description + TaskType string // Task type + TaskStatus string // Task status + TaskDetail *RunDetail // Task run details + SpanFilter *filter.SpanFilterFields // Span filter conditions + EffectiveTime *EffectiveTime // Effective time window + BackfillEffectiveTime *EffectiveTime // Backfill effective time window + Sampler *Sampler // Sampler + TaskConfig *TaskConfig // Task-specific configuration + CreatedAt time.Time // Creation time + UpdatedAt time.Time // Update time + CreatedBy string // Creator + UpdatedBy string // Updater + + TaskRuns []*TaskRun +} + +type RunDetail struct { + SuccessCount int64 `json:"success_count"` + FailedCount int64 `json:"failed_count"` + TotalCount int64 `json:"total_count"` +} +type SpanFilterFields struct { + Filters filter.SpanFilterFields `json:"filters"` + PlatformType common.PlatformType `json:"platform_type"` + SpanListType common.SpanListType `json:"span_list_type"` +} +type EffectiveTime struct { + // ms timestamp + StartAt int64 `json:"start_at"` + // ms timestamp + EndAt int64 `json:"end_at"` +} +type Sampler struct { + SampleRate float64 `json:"sample_rate"` + SampleSize int64 `json:"sample_size"` + IsCycle bool `json:"is_cycle"` + CycleCount int64 `json:"cycle_count"` + CycleInterval int64 `json:"cycle_interval"` + CycleTimeUnit string `json:"cycle_time_unit"` +} +type TaskConfig struct { + AutoEvaluateConfigs []*AutoEvaluateConfig `json:"auto_evaluate_configs"` + DataReflowConfig []*DataReflowConfig +} +type AutoEvaluateConfig struct { + EvaluatorVersionID int64 `json:"evaluator_version_id"` + EvaluatorID int64 `json:"evaluator_id"` + FieldMappings []*EvaluateFieldMapping 
`json:"field_mappings"` +} +type EvaluateFieldMapping struct { + // 数据集字段约束 + FieldSchema *dataset.FieldSchema `json:"field_schema"` + TraceFieldKey string `json:"trace_field_key"` + TraceFieldJsonpath string `json:"trace_field_jsonpath"` + EvalSetName *string `json:"eval_set_name"` +} +type DataReflowConfig struct { + DatasetID *int64 `json:"dataset_id"` + DatasetName *string `json:"dataset_name"` + DatasetSchema dataset.DatasetSchema `json:"dataset_schema"` + FieldMappings []dataset.FieldMapping `json:"field_mappings"` +} + +type TaskRun struct { + ID int64 // Task Run ID + TaskID int64 // Task ID + WorkspaceID int64 // 空间ID + TaskType string // 任务类型 + RunStatus string // Task Run状态 + RunDetail *RunDetail // Task Run运行详情 + BackfillDetail *BackfillDetail // 历史回溯运行详情 + RunStartAt time.Time // run 开始时间 + RunEndAt time.Time // run 结束时间 + TaskRunConfig *TaskRunConfig // 相关任务的配置信息 + CreatedAt time.Time // 创建时间 + UpdatedAt time.Time // 更新时间 +} +type BackfillDetail struct { + SuccessCount *int64 `json:"success_count"` + FailedCount *int64 `json:"failed_count"` + TotalCount *int64 `json:"total_count"` + BackfillStatus *string `json:"backfill_status"` + LastSpanPageToken *string `json:"last_span_page_token"` +} +type TaskRunConfig struct { + AutoEvaluateRunConfig *AutoEvaluateRunConfig `json:"auto_evaluate_run_config"` + DataReflowRunConfig *DataReflowRunConfig `json:"data_reflow_run_config"` +} +type AutoEvaluateRunConfig struct { + ExptID int64 `json:"expt_id"` + ExptRunID int64 `json:"expt_run_id"` + EvalID int64 `json:"eval_id"` + SchemaID int64 `json:"schema_id"` + Schema *string `json:"schema"` + EndAt int64 `json:"end_at"` + CycleStartAt int64 `json:"cycle_start_at"` + CycleEndAt int64 `json:"cycle_end_at"` + Status string `json:"status"` +} +type DataReflowRunConfig struct { + DatasetID int64 `json:"dataset_id"` + DatasetRunID int64 `json:"dataset_run_id"` + EndAt int64 `json:"end_at"` + CycleStartAt int64 `json:"cycle_start_at"` + CycleEndAt int64 `json:"cycle_end_at"` + Status string `json:"status"` +} + +func (t ObservabilityTask) IsFinished() bool { + switch t.TaskStatus { + case task.TaskStatusSuccess, task.TaskStatusDisabled, task.TaskStatusPending: + return true + default: + return false + } +} + +func (t ObservabilityTask) GetBackfillTaskRun() *TaskRun { + for _, taskRunPO := range t.TaskRuns { + if taskRunPO.TaskType == task.TaskRunTypeBackFill { + return taskRunPO + } + } + return nil +} + +func (t ObservabilityTask) GetCurrentTaskRun() *TaskRun { + for _, taskRunPO := range t.TaskRuns { + if taskRunPO.TaskType == task.TaskRunTypeNewData && taskRunPO.RunStatus == task.TaskStatusRunning { + return taskRunPO + } + } + return nil +} + +func (t ObservabilityTask) GetTaskttl() int64 { + var ttl int64 + if t.EffectiveTime != nil { + ttl = t.EffectiveTime.EndAt - t.EffectiveTime.StartAt + } + if t.BackfillEffectiveTime != nil { + ttl += t.BackfillEffectiveTime.EndAt - t.BackfillEffectiveTime.StartAt + } + return ttl +} diff --git a/backend/modules/observability/domain/task/repo/mocks/Task.go b/backend/modules/observability/domain/task/repo/mocks/Task.go new file mode 100644 index 000000000..659d9ca48 --- /dev/null +++ b/backend/modules/observability/domain/task/repo/mocks/Task.go @@ -0,0 +1,377 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/repo (interfaces: ITaskRepo) +// +// Generated by this command: +// +// mockgen -destination=mocks/Task.go -package=mocks . 
ITaskRepo +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + entity "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + mysql "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql" + gomock "go.uber.org/mock/gomock" +) + +// MockITaskRepo is a mock of ITaskRepo interface. +type MockITaskRepo struct { + ctrl *gomock.Controller + recorder *MockITaskRepoMockRecorder +} + +// MockITaskRepoMockRecorder is the mock recorder for MockITaskRepo. +type MockITaskRepoMockRecorder struct { + mock *MockITaskRepo +} + +// NewMockITaskRepo creates a new mock instance. +func NewMockITaskRepo(ctrl *gomock.Controller) *MockITaskRepo { + mock := &MockITaskRepo{ctrl: ctrl} + mock.recorder = &MockITaskRepoMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockITaskRepo) EXPECT() *MockITaskRepoMockRecorder { + return m.recorder +} + +// CreateTask mocks base method. +func (m *MockITaskRepo) CreateTask(arg0 context.Context, arg1 *entity.ObservabilityTask) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTask", arg0, arg1) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTask indicates an expected call of CreateTask. +func (mr *MockITaskRepoMockRecorder) CreateTask(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTask", reflect.TypeOf((*MockITaskRepo)(nil).CreateTask), arg0, arg1) +} + +// CreateTaskRun mocks base method. +func (m *MockITaskRepo) CreateTaskRun(arg0 context.Context, arg1 *entity.TaskRun) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTaskRun", arg0, arg1) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTaskRun indicates an expected call of CreateTaskRun. +func (mr *MockITaskRepoMockRecorder) CreateTaskRun(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTaskRun", reflect.TypeOf((*MockITaskRepo)(nil).CreateTaskRun), arg0, arg1) +} + +// DecrTaskCount mocks base method. +func (m *MockITaskRepo) DecrTaskCount(arg0 context.Context, arg1, arg2 int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DecrTaskCount", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DecrTaskCount indicates an expected call of DecrTaskCount. +func (mr *MockITaskRepoMockRecorder) DecrTaskCount(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecrTaskCount", reflect.TypeOf((*MockITaskRepo)(nil).DecrTaskCount), arg0, arg1, arg2) +} + +// DecrTaskRunCount mocks base method. +func (m *MockITaskRepo) DecrTaskRunCount(arg0 context.Context, arg1, arg2, arg3 int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DecrTaskRunCount", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// DecrTaskRunCount indicates an expected call of DecrTaskRunCount. +func (mr *MockITaskRepoMockRecorder) DecrTaskRunCount(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecrTaskRunCount", reflect.TypeOf((*MockITaskRepo)(nil).DecrTaskRunCount), arg0, arg1, arg2, arg3) +} + +// DecrTaskRunSuccessCount mocks base method. 
+func (m *MockITaskRepo) DecrTaskRunSuccessCount(arg0 context.Context, arg1, arg2 int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DecrTaskRunSuccessCount", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DecrTaskRunSuccessCount indicates an expected call of DecrTaskRunSuccessCount. +func (mr *MockITaskRepoMockRecorder) DecrTaskRunSuccessCount(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecrTaskRunSuccessCount", reflect.TypeOf((*MockITaskRepo)(nil).DecrTaskRunSuccessCount), arg0, arg1, arg2) +} + +// DeleteTask mocks base method. +func (m *MockITaskRepo) DeleteTask(arg0 context.Context, arg1 *entity.ObservabilityTask) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteTask", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteTask indicates an expected call of DeleteTask. +func (mr *MockITaskRepoMockRecorder) DeleteTask(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTask", reflect.TypeOf((*MockITaskRepo)(nil).DeleteTask), arg0, arg1) +} + +// GetBackfillTaskRun mocks base method. +func (m *MockITaskRepo) GetBackfillTaskRun(arg0 context.Context, arg1 *int64, arg2 int64) (*entity.TaskRun, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBackfillTaskRun", arg0, arg1, arg2) + ret0, _ := ret[0].(*entity.TaskRun) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBackfillTaskRun indicates an expected call of GetBackfillTaskRun. +func (mr *MockITaskRepoMockRecorder) GetBackfillTaskRun(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackfillTaskRun", reflect.TypeOf((*MockITaskRepo)(nil).GetBackfillTaskRun), arg0, arg1, arg2) +} + +// GetLatestNewDataTaskRun mocks base method. +func (m *MockITaskRepo) GetLatestNewDataTaskRun(arg0 context.Context, arg1 *int64, arg2 int64) (*entity.TaskRun, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLatestNewDataTaskRun", arg0, arg1, arg2) + ret0, _ := ret[0].(*entity.TaskRun) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLatestNewDataTaskRun indicates an expected call of GetLatestNewDataTaskRun. +func (mr *MockITaskRepoMockRecorder) GetLatestNewDataTaskRun(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestNewDataTaskRun", reflect.TypeOf((*MockITaskRepo)(nil).GetLatestNewDataTaskRun), arg0, arg1, arg2) +} + +// GetObjListWithTask mocks base method. +func (m *MockITaskRepo) GetObjListWithTask(arg0 context.Context) ([]string, []string, []*entity.ObservabilityTask) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjListWithTask", arg0) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].([]string) + ret2, _ := ret[2].([]*entity.ObservabilityTask) + return ret0, ret1, ret2 +} + +// GetObjListWithTask indicates an expected call of GetObjListWithTask. +func (mr *MockITaskRepoMockRecorder) GetObjListWithTask(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjListWithTask", reflect.TypeOf((*MockITaskRepo)(nil).GetObjListWithTask), arg0) +} + +// GetTask mocks base method. 
+func (m *MockITaskRepo) GetTask(arg0 context.Context, arg1 int64, arg2 *int64, arg3 *string) (*entity.ObservabilityTask, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTask", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*entity.ObservabilityTask) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTask indicates an expected call of GetTask. +func (mr *MockITaskRepoMockRecorder) GetTask(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTask", reflect.TypeOf((*MockITaskRepo)(nil).GetTask), arg0, arg1, arg2, arg3) +} + +// GetTaskCount mocks base method. +func (m *MockITaskRepo) GetTaskCount(arg0 context.Context, arg1 int64) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskCount", arg0, arg1) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTaskCount indicates an expected call of GetTaskCount. +func (mr *MockITaskRepoMockRecorder) GetTaskCount(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskCount", reflect.TypeOf((*MockITaskRepo)(nil).GetTaskCount), arg0, arg1) +} + +// GetTaskRunCount mocks base method. +func (m *MockITaskRepo) GetTaskRunCount(arg0 context.Context, arg1, arg2 int64) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskRunCount", arg0, arg1, arg2) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTaskRunCount indicates an expected call of GetTaskRunCount. +func (mr *MockITaskRepoMockRecorder) GetTaskRunCount(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskRunCount", reflect.TypeOf((*MockITaskRepo)(nil).GetTaskRunCount), arg0, arg1, arg2) +} + +// GetTaskRunFailCount mocks base method. +func (m *MockITaskRepo) GetTaskRunFailCount(arg0 context.Context, arg1, arg2 int64) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskRunFailCount", arg0, arg1, arg2) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTaskRunFailCount indicates an expected call of GetTaskRunFailCount. +func (mr *MockITaskRepoMockRecorder) GetTaskRunFailCount(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskRunFailCount", reflect.TypeOf((*MockITaskRepo)(nil).GetTaskRunFailCount), arg0, arg1, arg2) +} + +// GetTaskRunSuccessCount mocks base method. +func (m *MockITaskRepo) GetTaskRunSuccessCount(arg0 context.Context, arg1, arg2 int64) (int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskRunSuccessCount", arg0, arg1, arg2) + ret0, _ := ret[0].(int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTaskRunSuccessCount indicates an expected call of GetTaskRunSuccessCount. +func (mr *MockITaskRepoMockRecorder) GetTaskRunSuccessCount(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskRunSuccessCount", reflect.TypeOf((*MockITaskRepo)(nil).GetTaskRunSuccessCount), arg0, arg1, arg2) +} + +// IncrTaskCount mocks base method. +func (m *MockITaskRepo) IncrTaskCount(arg0 context.Context, arg1, arg2 int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IncrTaskCount", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// IncrTaskCount indicates an expected call of IncrTaskCount. 
+func (mr *MockITaskRepoMockRecorder) IncrTaskCount(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrTaskCount", reflect.TypeOf((*MockITaskRepo)(nil).IncrTaskCount), arg0, arg1, arg2) +} + +// IncrTaskRunCount mocks base method. +func (m *MockITaskRepo) IncrTaskRunCount(arg0 context.Context, arg1, arg2, arg3 int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IncrTaskRunCount", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// IncrTaskRunCount indicates an expected call of IncrTaskRunCount. +func (mr *MockITaskRepoMockRecorder) IncrTaskRunCount(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrTaskRunCount", reflect.TypeOf((*MockITaskRepo)(nil).IncrTaskRunCount), arg0, arg1, arg2, arg3) +} + +// IncrTaskRunFailCount mocks base method. +func (m *MockITaskRepo) IncrTaskRunFailCount(arg0 context.Context, arg1, arg2 int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IncrTaskRunFailCount", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// IncrTaskRunFailCount indicates an expected call of IncrTaskRunFailCount. +func (mr *MockITaskRepoMockRecorder) IncrTaskRunFailCount(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrTaskRunFailCount", reflect.TypeOf((*MockITaskRepo)(nil).IncrTaskRunFailCount), arg0, arg1, arg2) +} + +// IncrTaskRunSuccessCount mocks base method. +func (m *MockITaskRepo) IncrTaskRunSuccessCount(arg0 context.Context, arg1, arg2 int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IncrTaskRunSuccessCount", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// IncrTaskRunSuccessCount indicates an expected call of IncrTaskRunSuccessCount. +func (mr *MockITaskRepoMockRecorder) IncrTaskRunSuccessCount(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrTaskRunSuccessCount", reflect.TypeOf((*MockITaskRepo)(nil).IncrTaskRunSuccessCount), arg0, arg1, arg2) +} + +// ListTasks mocks base method. +func (m *MockITaskRepo) ListTasks(arg0 context.Context, arg1 mysql.ListTaskParam) ([]*entity.ObservabilityTask, int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTasks", arg0, arg1) + ret0, _ := ret[0].([]*entity.ObservabilityTask) + ret1, _ := ret[1].(int64) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ListTasks indicates an expected call of ListTasks. +func (mr *MockITaskRepoMockRecorder) ListTasks(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTasks", reflect.TypeOf((*MockITaskRepo)(nil).ListTasks), arg0, arg1) +} + +// UpdateTask mocks base method. +func (m *MockITaskRepo) UpdateTask(arg0 context.Context, arg1 *entity.ObservabilityTask) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateTask", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateTask indicates an expected call of UpdateTask. +func (mr *MockITaskRepoMockRecorder) UpdateTask(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTask", reflect.TypeOf((*MockITaskRepo)(nil).UpdateTask), arg0, arg1) +} + +// UpdateTaskRun mocks base method. 
+func (m *MockITaskRepo) UpdateTaskRun(arg0 context.Context, arg1 *entity.TaskRun) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateTaskRun", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateTaskRun indicates an expected call of UpdateTaskRun. +func (mr *MockITaskRepoMockRecorder) UpdateTaskRun(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskRun", reflect.TypeOf((*MockITaskRepo)(nil).UpdateTaskRun), arg0, arg1) +} + +// UpdateTaskRunWithOCC mocks base method. +func (m *MockITaskRepo) UpdateTaskRunWithOCC(arg0 context.Context, arg1, arg2 int64, arg3 map[string]any) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateTaskRunWithOCC", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateTaskRunWithOCC indicates an expected call of UpdateTaskRunWithOCC. +func (mr *MockITaskRepoMockRecorder) UpdateTaskRunWithOCC(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskRunWithOCC", reflect.TypeOf((*MockITaskRepo)(nil).UpdateTaskRunWithOCC), arg0, arg1, arg2, arg3) +} + +// UpdateTaskWithOCC mocks base method. +func (m *MockITaskRepo) UpdateTaskWithOCC(arg0 context.Context, arg1, arg2 int64, arg3 map[string]any) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateTaskWithOCC", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateTaskWithOCC indicates an expected call of UpdateTaskWithOCC. +func (mr *MockITaskRepoMockRecorder) UpdateTaskWithOCC(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskWithOCC", reflect.TypeOf((*MockITaskRepo)(nil).UpdateTaskWithOCC), arg0, arg1, arg2, arg3) +} diff --git a/backend/modules/observability/domain/task/repo/task.go b/backend/modules/observability/domain/task/repo/task.go new file mode 100644 index 000000000..c2a4988d9 --- /dev/null +++ b/backend/modules/observability/domain/task/repo/task.go @@ -0,0 +1,48 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package repo + +import ( + "context" + + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql" +) + +//go:generate mockgen -destination=mocks/Task.go -package=mocks . 
ITaskRepo +type ITaskRepo interface { + // task + CreateTask(ctx context.Context, do *entity.ObservabilityTask) (int64, error) + UpdateTask(ctx context.Context, do *entity.ObservabilityTask) error + UpdateTaskWithOCC(ctx context.Context, id int64, workspaceID int64, updateMap map[string]interface{}) error + GetTask(ctx context.Context, id int64, workspaceID *int64, userID *string) (*entity.ObservabilityTask, error) + ListTasks(ctx context.Context, param mysql.ListTaskParam) ([]*entity.ObservabilityTask, int64, error) + DeleteTask(ctx context.Context, do *entity.ObservabilityTask) error + + // task run + CreateTaskRun(ctx context.Context, do *entity.TaskRun) (int64, error) + UpdateTaskRun(ctx context.Context, do *entity.TaskRun) error + UpdateTaskRunWithOCC(ctx context.Context, id int64, workspaceID int64, updateMap map[string]interface{}) error + GetBackfillTaskRun(ctx context.Context, workspaceID *int64, taskID int64) (*entity.TaskRun, error) + GetLatestNewDataTaskRun(ctx context.Context, workspaceID *int64, taskID int64) (*entity.TaskRun, error) + + // task count + GetTaskCount(ctx context.Context, taskID int64) (int64, error) + IncrTaskCount(ctx context.Context, taskID, ttl int64) error + DecrTaskCount(ctx context.Context, taskID, ttl int64) error + + // task run count + GetTaskRunCount(ctx context.Context, taskID, taskRunID int64) (int64, error) + IncrTaskRunCount(ctx context.Context, taskID, taskRunID int64, ttl int64) error + DecrTaskRunCount(ctx context.Context, taskID, taskRunID int64, ttl int64) error + + // task run success/fail count + GetTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) (int64, error) + IncrTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) error + DecrTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) error + IncrTaskRunFailCount(ctx context.Context, taskID, taskRunID int64) error + GetTaskRunFailCount(ctx context.Context, taskID, taskRunID int64) (int64, error) + + GetObjListWithTask(ctx context.Context) ([]string, []string, []*entity.ObservabilityTask) +} diff --git a/backend/modules/observability/domain/task/service/mocks/task_service.go b/backend/modules/observability/domain/task/service/mocks/task_service.go new file mode 100644 index 000000000..992300806 --- /dev/null +++ b/backend/modules/observability/domain/task/service/mocks/task_service.go @@ -0,0 +1,115 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service (interfaces: ITaskService) +// +// Generated by this command: +// +// mockgen -destination=mocks/task_service.go -package=mocks . ITaskService +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + service "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service" + gomock "go.uber.org/mock/gomock" +) + +// MockITaskService is a mock of ITaskService interface. +type MockITaskService struct { + ctrl *gomock.Controller + recorder *MockITaskServiceMockRecorder +} + +// MockITaskServiceMockRecorder is the mock recorder for MockITaskService. +type MockITaskServiceMockRecorder struct { + mock *MockITaskService +} + +// NewMockITaskService creates a new mock instance. +func NewMockITaskService(ctrl *gomock.Controller) *MockITaskService { + mock := &MockITaskService{ctrl: ctrl} + mock.recorder = &MockITaskServiceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockITaskService) EXPECT() *MockITaskServiceMockRecorder { + return m.recorder +} + +// CheckTaskName mocks base method. +func (m *MockITaskService) CheckTaskName(arg0 context.Context, arg1 *service.CheckTaskNameReq) (*service.CheckTaskNameResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CheckTaskName", arg0, arg1) + ret0, _ := ret[0].(*service.CheckTaskNameResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CheckTaskName indicates an expected call of CheckTaskName. +func (mr *MockITaskServiceMockRecorder) CheckTaskName(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckTaskName", reflect.TypeOf((*MockITaskService)(nil).CheckTaskName), arg0, arg1) +} + +// CreateTask mocks base method. +func (m *MockITaskService) CreateTask(arg0 context.Context, arg1 *service.CreateTaskReq) (*service.CreateTaskResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTask", arg0, arg1) + ret0, _ := ret[0].(*service.CreateTaskResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTask indicates an expected call of CreateTask. +func (mr *MockITaskServiceMockRecorder) CreateTask(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTask", reflect.TypeOf((*MockITaskService)(nil).CreateTask), arg0, arg1) +} + +// GetTask mocks base method. +func (m *MockITaskService) GetTask(arg0 context.Context, arg1 *service.GetTaskReq) (*service.GetTaskResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTask", arg0, arg1) + ret0, _ := ret[0].(*service.GetTaskResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTask indicates an expected call of GetTask. +func (mr *MockITaskServiceMockRecorder) GetTask(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTask", reflect.TypeOf((*MockITaskService)(nil).GetTask), arg0, arg1) +} + +// ListTasks mocks base method. +func (m *MockITaskService) ListTasks(arg0 context.Context, arg1 *service.ListTasksReq) (*service.ListTasksResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTasks", arg0, arg1) + ret0, _ := ret[0].(*service.ListTasksResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTasks indicates an expected call of ListTasks. +func (mr *MockITaskServiceMockRecorder) ListTasks(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTasks", reflect.TypeOf((*MockITaskService)(nil).ListTasks), arg0, arg1) +} + +// UpdateTask mocks base method. +func (m *MockITaskService) UpdateTask(arg0 context.Context, arg1 *service.UpdateTaskReq) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateTask", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateTask indicates an expected call of UpdateTask. 
+func (mr *MockITaskServiceMockRecorder) UpdateTask(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTask", reflect.TypeOf((*MockITaskService)(nil).UpdateTask), arg0, arg1) +} diff --git a/backend/modules/observability/domain/task/service/task_service.go b/backend/modules/observability/domain/task/service/task_service.go new file mode 100644 index 000000000..a5e0b0b25 --- /dev/null +++ b/backend/modules/observability/domain/task/service/task_service.go @@ -0,0 +1,322 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package service + +import ( + "context" + "fmt" + "time" + + "github.com/bytedance/gg/gptr" + "github.com/coze-dev/coze-loop/backend/infra/idgen" + "github.com/coze-dev/coze-loop/backend/infra/middleware/session" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + tconv "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/mq" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/repo" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/processor" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql" + obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +type CreateTaskReq struct { + Task *entity.ObservabilityTask +} +type CreateTaskResp struct { + TaskID *int64 +} +type UpdateTaskReq struct { + TaskID int64 + WorkspaceID int64 + TaskStatus *task.TaskStatus + Description *string + EffectiveTime *task.EffectiveTime + SampleRate *float64 +} +type ListTasksReq struct { + WorkspaceID int64 + TaskFilters *filter.TaskFilterFields + Limit int32 + Offset int32 + OrderBy *common.OrderBy +} +type ListTasksResp struct { + Tasks []*task.Task + Total *int64 +} +type GetTaskReq struct { + TaskID int64 + WorkspaceID int64 +} +type GetTaskResp struct { + Task *task.Task +} +type CheckTaskNameReq struct { + WorkspaceID int64 + Name string +} +type CheckTaskNameResp struct { + Pass *bool +} + +//go:generate mockgen -destination=mocks/task_service.go -package=mocks . 
ITaskService
+type ITaskService interface {
+	CreateTask(ctx context.Context, req *CreateTaskReq) (resp *CreateTaskResp, err error)
+	UpdateTask(ctx context.Context, req *UpdateTaskReq) (err error)
+	ListTasks(ctx context.Context, req *ListTasksReq) (resp *ListTasksResp, err error)
+	GetTask(ctx context.Context, req *GetTaskReq) (resp *GetTaskResp, err error)
+	CheckTaskName(ctx context.Context, req *CheckTaskNameReq) (resp *CheckTaskNameResp, err error)
+}
+
+func NewTaskServiceImpl(
+	tRepo repo.ITaskRepo,
+	userProvider rpc.IUserProvider,
+	idGenerator idgen.IIDGenerator,
+	backfillProducer mq.IBackfillProducer,
+	taskProcessor *processor.TaskProcessor,
+) (ITaskService, error) {
+	return &TaskServiceImpl{
+		TaskRepo:         tRepo,
+		userProvider:     userProvider,
+		idGenerator:      idGenerator,
+		backfillProducer: backfillProducer,
+		taskProcessor:    *taskProcessor,
+	}, nil
+}
+
+type TaskServiceImpl struct {
+	TaskRepo         repo.ITaskRepo
+	userProvider     rpc.IUserProvider
+	idGenerator      idgen.IIDGenerator
+	backfillProducer mq.IBackfillProducer
+	taskProcessor    processor.TaskProcessor
+}
+
+func (t *TaskServiceImpl) CreateTask(ctx context.Context, req *CreateTaskReq) (resp *CreateTaskResp, err error) {
+	// Check whether the task name already exists
+	checkResp, err := t.CheckTaskName(ctx, &CheckTaskNameReq{
+		WorkspaceID: req.Task.WorkspaceID,
+		Name:        req.Task.Name,
+	})
+	if err != nil {
+		logs.CtxError(ctx, "CheckTaskName err:%v", err)
+		return nil, err
+	}
+	if !*checkResp.Pass {
+		logs.CtxError(ctx, "task name already exists")
+		return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("task name already exists"))
+	}
+	proc := t.taskProcessor.GetTaskProcessor(req.Task.TaskType)
+	// Validate the task configuration
+	if err = proc.ValidateConfig(ctx, req.Task); err != nil {
+		logs.CtxError(ctx, "ValidateConfig err:%v", err)
+		return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg(fmt.Sprintf("config invalid:%v", err)))
+	}
+	id, err := t.TaskRepo.CreateTask(ctx, req.Task)
+	if err != nil {
+		return nil, err
+	}
+	// Prepare the data the task needs:
+	// - data reflow task: create/update the output dataset
+	// - auto-evaluation backfill: create an empty placeholder
+	req.Task.ID = id
+	if err = proc.OnCreateTaskChange(ctx, req.Task); err != nil {
+		logs.CtxError(ctx, "create initial task run failed, task_id=%d, err=%v", id, err)
+
+		if err1 := t.TaskRepo.DeleteTask(ctx, req.Task); err1 != nil {
+			logs.CtxError(ctx, "delete task failed, task_id=%d, err=%v", id, err1)
+		}
+		return nil, err
+	}
+
+	// Send the backfill (historical data) event to MQ
+	if t.shouldTriggerBackfill(req.Task) {
+		backfillEvent := &entity.BackFillEvent{
+			SpaceID: req.Task.WorkspaceID,
+			TaskID:  id,
+		}
+
+		// Send the MQ message asynchronously so task creation is not blocked
+		go func() {
+			if err := t.sendBackfillMessage(context.Background(), backfillEvent); err != nil {
+				logs.CtxWarn(ctx, "send backfill message failed, task_id=%d, err=%v", id, err)
+			}
+		}()
+	}
+
+	return &CreateTaskResp{TaskID: &id}, nil
+}
+
+func (t *TaskServiceImpl) UpdateTask(ctx context.Context, req *UpdateTaskReq) (err error) {
+	taskDO, err := t.TaskRepo.GetTask(ctx, req.TaskID, &req.WorkspaceID, nil)
+	if err != nil {
+		return err
+	}
+	if taskDO == nil {
+		logs.CtxError(ctx, "task not found")
+		return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("task not found"))
+	}
+	userID := session.UserIDInCtxOrEmpty(ctx)
+	if userID == "" {
+		return errorx.NewByCode(obErrorx.UserParseFailedCode)
+	}
+	// Validate the update parameters
+	if req.Description != nil {
+		taskDO.Description = req.Description
+	}
+	if req.EffectiveTime != nil {
+		validEffectiveTime, err := tconv.CheckEffectiveTime(ctx, req.EffectiveTime, taskDO.TaskStatus, taskDO.EffectiveTime)
+		if err != nil {
+			return err
+		}
+		taskDO.EffectiveTime = validEffectiveTime
+	}
+	if req.SampleRate != nil {
+		taskDO.Sampler.SampleRate = *req.SampleRate
+	}
+	if req.TaskStatus != nil {
+		validTaskStatus, err := tconv.CheckTaskStatus(ctx, *req.TaskStatus, taskDO.TaskStatus)
+		if err != nil {
+			return err
+		}
+		if validTaskStatus != "" {
+			if validTaskStatus == task.TaskStatusDisabled {
+				// Handle the disable operation: finish the currently running task run
+				proc := t.taskProcessor.GetTaskProcessor(taskDO.TaskType)
+				var taskRun *entity.TaskRun
+				for _, tr := range taskDO.TaskRuns {
+					if tr.RunStatus == task.RunStatusRunning {
+						taskRun = tr
+						break
+					}
+				}
+				if err = proc.OnFinishTaskRunChange(ctx, taskexe.OnFinishTaskRunChangeReq{
+					Task:    taskDO,
+					TaskRun: taskRun,
+				}); err != nil {
+					logs.CtxError(ctx, "proc Finish err:%v", err)
+					return err
+				}
+			}
+			taskDO.TaskStatus = *req.TaskStatus
+		}
+	}
+	taskDO.UpdatedBy = userID
+	taskDO.UpdatedAt = time.Now()
+	if err = t.TaskRepo.UpdateTask(ctx, taskDO); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *TaskServiceImpl) ListTasks(ctx context.Context, req *ListTasksReq) (resp *ListTasksResp, err error) {
+	taskDOs, total, err := t.TaskRepo.ListTasks(ctx, mysql.ListTaskParam{
+		WorkspaceIDs: []int64{req.WorkspaceID},
+		TaskFilters:  req.TaskFilters,
+		ReqLimit:     req.Limit,
+		ReqOffset:    req.Offset,
+		OrderBy:      req.OrderBy,
+	})
+	if err != nil {
+		logs.CtxError(ctx, "ListTasks err:%v", err)
+		return nil, err
+	}
+	if len(taskDOs) == 0 {
+		logs.CtxInfo(ctx, "ListTasks: no tasks found")
+		return resp, nil
+	}
+	userMap := make(map[string]bool)
+	users := make([]string, 0)
+	for _, tp := range taskDOs {
+		userMap[tp.CreatedBy] = true
+		userMap[tp.UpdatedBy] = true
+	}
+	for u := range userMap {
+		users = append(users, u)
+	}
+	_, userInfoMap, err := t.userProvider.GetUserInfo(ctx, users)
+	if err != nil {
+		logs.CtxError(ctx, "MGetUserInfo err:%v", err)
+	}
+	return &ListTasksResp{
+		Tasks: tconv.TaskDOs2DTOs(ctx, taskDOs, userInfoMap),
+		Total: ptr.Of(total),
+	}, nil
+}
+
+func (t *TaskServiceImpl) GetTask(ctx context.Context, req *GetTaskReq) (resp *GetTaskResp, err error) {
+	taskPO, err := t.TaskRepo.GetTask(ctx, req.TaskID, &req.WorkspaceID, nil)
+	if err != nil {
+		logs.CtxError(ctx, "GetTask err:%v", err)
+		return resp, err
+	}
+	if taskPO == nil {
+		logs.CtxError(ctx, "GetTask: task is nil")
+		return resp, nil
+	}
+	_, userInfoMap, err := t.userProvider.GetUserInfo(ctx, []string{taskPO.CreatedBy, taskPO.UpdatedBy})
+	if err != nil {
+		logs.CtxError(ctx, "MGetUserInfo err:%v", err)
+	}
+	return &GetTaskResp{Task: tconv.TaskDO2DTO(ctx, taskPO, userInfoMap)}, nil
+}
+
+func (t *TaskServiceImpl) CheckTaskName(ctx context.Context, req *CheckTaskNameReq) (resp *CheckTaskNameResp, err error) {
+	taskPOs, _, err := t.TaskRepo.ListTasks(ctx, mysql.ListTaskParam{
+		WorkspaceIDs: []int64{req.WorkspaceID},
+		TaskFilters: &filter.TaskFilterFields{
+			FilterFields: []*filter.TaskFilterField{
+				{
+					FieldName: gptr.Of(filter.TaskFieldNameTaskName),
+					FieldType: gptr.Of(filter.FieldTypeString),
+					Values:    []string{req.Name},
+					QueryType: gptr.Of(filter.QueryTypeMatch),
+				},
+			},
+		},
+		ReqLimit:  10,
+		ReqOffset: 0,
+	})
+	if err != nil {
+		logs.CtxError(ctx, "ListTasks err:%v", err)
+		return nil, err
+	}
+	pass := len(taskPOs) == 0
+	return &CheckTaskNameResp{Pass: gptr.Of(pass)}, nil
+}
+
+// shouldTriggerBackfill reports whether a historical backfill MQ event should be sent.
+func (t *TaskServiceImpl) shouldTriggerBackfill(taskDO *entity.ObservabilityTask) bool {
+	// Check the task type
+	taskType := taskDO.TaskType
+	if taskType != task.TaskTypeAutoEval && taskType != task.TaskTypeAutoDataReflow {
+		return false
+	}
+
+	// Check the backfill time window configuration
+	if taskDO.BackfillEffectiveTime == nil {
+		return false
+	}
+
+	return taskDO.BackfillEffectiveTime.StartAt > 0 &&
+		taskDO.BackfillEffectiveTime.EndAt > 0 &&
+		taskDO.BackfillEffectiveTime.StartAt < taskDO.BackfillEffectiveTime.EndAt
+}
+
+// sendBackfillMessage sends the backfill event to MQ.
+func (t *TaskServiceImpl) sendBackfillMessage(ctx context.Context, event *entity.BackFillEvent) error {
+	if t.backfillProducer == nil {
+		return errorx.NewByCode(obErrorx.CommonInternalErrorCode, errorx.WithExtraMsg("backfill producer not initialized"))
+	}
+
+	return t.backfillProducer.SendBackfill(ctx, event)
+}
diff --git a/backend/modules/observability/domain/task/service/taskexe/processor/auto_evaluate.go b/backend/modules/observability/domain/task/service/taskexe/processor/auto_evaluate.go
new file mode 100644
index 000000000..bed58bb6d
--- /dev/null
+++ b/backend/modules/observability/domain/task/service/taskexe/processor/auto_evaluate.go
@@ -0,0 +1,448 @@
+// Copyright (c) 2025 coze-dev Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package processor
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/apaxa-go/helper/strconvh"
+	"github.com/bytedance/gg/gptr"
+	"github.com/coze-dev/coze-loop/backend/infra/middleware/session"
+	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/common"
+	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/eval_set"
+	eval_target_d "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/eval_target"
+	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/expt"
+	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/eval_target"
+	dataset0 "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/dataset"
+	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task"
+	tconv "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor/task"
+	"github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc"
+	task_entity "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity"
+	"github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/repo"
+	"github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe"
+	"github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity"
+	"github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span"
+	"github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/service"
+	obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno"
+	"github.com/coze-dev/coze-loop/backend/pkg/errorx"
+	"github.com/coze-dev/coze-loop/backend/pkg/lang/ptr"
+	"github.com/coze-dev/coze-loop/backend/pkg/lang/slices"
+	"github.com/coze-dev/coze-loop/backend/pkg/logs"
+)
+
+var _ taskexe.Processor = (*AutoEvaluteProcessor)(nil)
+
+type AutoEvaluteProcessor struct {
+	evalSvc               rpc.IEvaluatorRPCAdapter
+	evaluationSvc         rpc.IEvaluationRPCAdapter
+	datasetServiceAdaptor *service.DatasetServiceAdaptor
+	taskRepo              repo.ITaskRepo
+	aid                   int32
+}
+
+func NewAutoEvaluteProcessor(
+	aid int32,
+	datasetServiceProvider *service.DatasetServiceAdaptor,
+	evalService rpc.IEvaluatorRPCAdapter,
+	evaluationService rpc.IEvaluationRPCAdapter,
+	taskRepo repo.ITaskRepo) *AutoEvaluteProcessor {
+	return &AutoEvaluteProcessor{
+		datasetServiceAdaptor: 
datasetServiceProvider, + evalSvc: evalService, + evaluationSvc: evaluationService, + taskRepo: taskRepo, + aid: aid, + } +} + +func (p *AutoEvaluteProcessor) ValidateConfig(ctx context.Context, config any) error { + cfg, ok := config.(*task_entity.ObservabilityTask) + if !ok { + return taskexe.ErrInvalidConfig + } + if cfg.EffectiveTime != nil { + startAt := cfg.EffectiveTime.StartAt + endAt := cfg.EffectiveTime.EndAt + if startAt <= time.Now().Add(-10*time.Minute).UnixMilli() { + return errorx.NewByCode(obErrorx.CommonInvalidParamCode) + } + if startAt >= endAt { + return errorx.NewByCode(obErrorx.CommonInvalidParamCode) + } + } + var evaluatorVersionIDs []int64 + for _, autoEvaluateConfig := range cfg.TaskConfig.AutoEvaluateConfigs { + evaluatorVersionIDs = append(evaluatorVersionIDs, autoEvaluateConfig.EvaluatorVersionID) + } + if len(evaluatorVersionIDs) == 0 { + return errorx.NewByCode(obErrorx.CommonInvalidParamCode) + } + // Verify evaluator version validity + evaluators, _, err := p.evalSvc.BatchGetEvaluatorVersions(ctx, &rpc.BatchGetEvaluatorVersionsParam{ + WorkspaceID: cfg.WorkspaceID, + EvaluatorVersionIds: evaluatorVersionIDs, + }) + if err != nil { + return errorx.NewByCode(obErrorx.CommonInvalidParamCode) + } + if len(evaluators) != len(evaluatorVersionIDs) { + return errorx.NewByCode(obErrorx.CommonInvalidParamCode) + } + return nil +} + +func (p *AutoEvaluteProcessor) Invoke(ctx context.Context, trigger *taskexe.Trigger) error { + taskRun := tconv.TaskRunDO2DTO(ctx, trigger.TaskRun, nil) + workspaceID := trigger.Task.WorkspaceID + session := p.getSession(ctx, trigger.Task) + var mapping []*task_entity.EvaluateFieldMapping + for _, autoEvaluateConfig := range trigger.Task.TaskConfig.AutoEvaluateConfigs { + mapping = append(mapping, autoEvaluateConfig.FieldMappings...) 
+ } + turns := buildItems(ctx, []*loop_span.Span{trigger.Span}, mapping, taskRun.GetTaskRunConfig().GetAutoEvaluateRunConfig().GetSchema(), strconv.FormatInt(taskRun.ID, 10)) + if len(turns) == 0 { + logs.CtxInfo(ctx, "[task-debug] AutoEvaluteProcessor Invoke, turns is empty") + return nil + } + taskTTL := trigger.Task.GetTaskttl() + p.taskRepo.IncrTaskCount(ctx, trigger.Task.ID, taskTTL) + p.taskRepo.IncrTaskRunCount(ctx, trigger.Task.ID, taskRun.ID, taskTTL) + taskCount, _ := p.taskRepo.GetTaskCount(ctx, trigger.Task.ID) + taskRunCount, _ := p.taskRepo.GetTaskRunCount(ctx, trigger.Task.ID, taskRun.ID) + if (trigger.Task.Sampler.CycleCount != 0 && taskRunCount > trigger.Task.Sampler.CycleCount) || + (taskCount > trigger.Task.Sampler.SampleSize) { + logs.CtxInfo(ctx, "[task-debug] AutoEvaluteProcessor Invoke, subCount:%v,taskCount:%v", taskRunCount, taskCount) + p.taskRepo.DecrTaskCount(ctx, trigger.Task.ID, taskTTL) + p.taskRepo.DecrTaskRunCount(ctx, trigger.Task.ID, taskRun.ID, taskTTL) + return nil + } + _, err := p.evaluationSvc.InvokeExperiment(ctx, &rpc.InvokeExperimentReq{ + WorkspaceID: workspaceID, + EvaluationSetID: taskRun.GetTaskRunConfig().GetAutoEvaluateRunConfig().GetEvalID(), + Items: []*eval_set.EvaluationSetItem{ + { + WorkspaceID: gptr.Of(workspaceID), + EvaluationSetID: gptr.Of(taskRun.GetTaskRunConfig().GetAutoEvaluateRunConfig().GetEvalID()), + SchemaID: gptr.Of(taskRun.GetTaskRunConfig().GetAutoEvaluateRunConfig().GetSchemaID()), + Turns: turns, + ItemKey: gptr.Of(trigger.Span.SpanID), + }, + }, + SkipInvalidItems: gptr.Of(true), + AllowPartialAdd: gptr.Of(true), + ExperimentID: gptr.Of(taskRun.GetTaskRunConfig().GetAutoEvaluateRunConfig().GetExptID()), + ExperimentRunID: gptr.Of(taskRun.GetTaskRunConfig().GetAutoEvaluateRunConfig().GetExptRunID()), + Session: session, + Ext: map[string]string{"workspace_id": strconv.FormatInt(trigger.Task.WorkspaceID, 10), + "span_id": trigger.Span.SpanID, + "task_id": strconvh.FormatInt64(trigger.Task.ID), + "task_run_id": strconvh.FormatInt64(taskRun.ID)}, + }) + + if err != nil { + p.taskRepo.DecrTaskCount(ctx, trigger.Task.ID, taskTTL) + p.taskRepo.DecrTaskRunCount(ctx, trigger.Task.ID, taskRun.ID, taskTTL) + return err + } + return nil +} + +func (p *AutoEvaluteProcessor) OnCreateTaskChange(ctx context.Context, currentTask *task_entity.ObservabilityTask) error { + taskRuns, err := p.taskRepo.GetBackfillTaskRun(ctx, nil, currentTask.ID) + if err != nil { + logs.CtxError(ctx, "GetBackfillTaskRun failed, taskID:%d, err:%v", currentTask.ID, err) + return err + } + if ShouldTriggerBackfill(currentTask) && taskRuns == nil { + err = p.OnCreateTaskRunChange(ctx, taskexe.OnCreateTaskRunChangeReq{ + CurrentTask: currentTask, + RunType: task.TaskRunTypeBackFill, + RunStartAt: time.Now().UnixMilli(), + RunEndAt: time.Now().UnixMilli() + (currentTask.BackfillEffectiveTime.EndAt - currentTask.BackfillEffectiveTime.StartAt), + }) + if err != nil { + logs.CtxError(ctx, "OnCreateTaskChange failed, taskID:%d, err:%v", currentTask.ID, err) + return err + } + err = p.OnUpdateTaskChange(ctx, currentTask, task.TaskStatusRunning) + if err != nil { + logs.CtxError(ctx, "OnCreateTaskChange failed, taskID:%d, err:%v", currentTask.ID, err) + return err + } + } + if ShouldTriggerNewData(ctx, currentTask) { + var runStartAt, runEndAt int64 + runStartAt = currentTask.EffectiveTime.StartAt + if !currentTask.Sampler.IsCycle { + runEndAt = currentTask.EffectiveTime.EndAt + } else { + switch currentTask.Sampler.CycleTimeUnit { + case task.TimeUnitDay: + 
runEndAt = runStartAt + (currentTask.Sampler.CycleInterval)*24*time.Hour.Milliseconds() + case task.TimeUnitWeek: + runEndAt = runStartAt + (currentTask.Sampler.CycleInterval)*7*24*time.Hour.Milliseconds() + default: + runEndAt = runStartAt + (currentTask.Sampler.CycleInterval)*10*time.Minute.Milliseconds() + } + } + err = p.OnCreateTaskRunChange(ctx, taskexe.OnCreateTaskRunChangeReq{ + CurrentTask: currentTask, + RunType: task.TaskRunTypeNewData, + RunStartAt: runStartAt, + RunEndAt: runEndAt, + }) + err = p.OnUpdateTaskChange(ctx, currentTask, task.TaskStatusRunning) + if err != nil { + logs.CtxError(ctx, "OnCreateTaskChange failed, taskID:%d, err:%v", currentTask.ID, err) + return err + } + } + return nil +} + +func (p *AutoEvaluteProcessor) OnUpdateTaskChange(ctx context.Context, currentTask *task_entity.ObservabilityTask, taskOp task.TaskStatus) error { + switch taskOp { + case task.TaskStatusSuccess: + if currentTask.TaskStatus != task.TaskStatusDisabled { + currentTask.TaskStatus = task.TaskStatusSuccess + } + case task.TaskStatusRunning: + if currentTask.TaskStatus != task.TaskStatusDisabled && currentTask.TaskStatus != task.TaskStatusSuccess { + currentTask.TaskStatus = task.TaskStatusRunning + } + case task.TaskStatusDisabled: + if currentTask.TaskStatus != task.TaskStatusDisabled { + currentTask.TaskStatus = task.TaskStatusDisabled + } + case task.TaskStatusPending: + if currentTask.TaskStatus == task.TaskStatusPending || currentTask.TaskStatus == task.TaskStatusUnstarted { + currentTask.TaskStatus = task.TaskStatusPending + } + default: + return fmt.Errorf("OnUpdateChangeProcessor, valid taskOp:%s", taskOp) + } + // Step 2: update task + err := p.taskRepo.UpdateTask(ctx, currentTask) + if err != nil { + logs.CtxError(ctx, "[auto_task] OnUpdateChangeProcessor, UpdateTask err, taskID:%d, err:%v", currentTask.ID, err) + return err + } + return nil +} + +func (p *AutoEvaluteProcessor) OnFinishTaskChange(ctx context.Context, param taskexe.OnFinishTaskChangeReq) error { + err := p.OnFinishTaskRunChange(ctx, taskexe.OnFinishTaskRunChangeReq{ + Task: param.Task, + TaskRun: param.TaskRun, + }) + if err != nil { + logs.CtxError(ctx, "OnFinishTaskRunChange failed, taskRun:%+v, err:%v", param.TaskRun, err) + return err + } + if param.IsFinish { + logs.CtxWarn(ctx, "OnFinishTaskChange, taskID:%d, taskRun:%+v, isFinish:%v", param.Task.ID, param.TaskRun, param.IsFinish) + if err := p.OnUpdateTaskChange(ctx, param.Task, task.TaskStatusSuccess); err != nil { + logs.CtxError(ctx, "OnUpdateChangeProcessor failed, taskID:%d, err:%v", param.Task.ID, err) + return err + } + } + return nil +} + +const ( + AutoEvaluateCN = "自动化任务实验" + AutoEvaluateI18N = "AutoEvaluate" + BackFillCN = "历史回溯" + BackFillI18N = "BackFill" +) + +func (p *AutoEvaluteProcessor) OnCreateTaskRunChange(ctx context.Context, param taskexe.OnCreateTaskRunChangeReq) error { + currentTask := param.CurrentTask + ctx = session.WithCtxUser(ctx, &session.User{ID: currentTask.CreatedBy}) + sessionInfo := p.getSession(ctx, currentTask) + var evaluationSetColumns []string + var evaluatorVersionIds []int64 + var evaluatorFieldMappings []*expt.EvaluatorFieldMapping + evaluationSetColumns = append(evaluationSetColumns, "span_id", "trace_id", "run_id") + autoEvaluateConfigs := currentTask.TaskConfig.AutoEvaluateConfigs + evaluationSetSchema, fromEvalSet := getBasicEvaluationSetSchema(evaluationSetColumns) + for _, autoEvaluateConfig := range autoEvaluateConfigs { + evaluatorVersionIds = append(evaluatorVersionIds, 
autoEvaluateConfig.EvaluatorVersionID) + filedMappings := autoEvaluateConfig.FieldMappings + for _, fieldMapping := range filedMappings { + if fieldMapping.FieldSchema == nil { + continue + } + fromEvalSet = append(fromEvalSet, &expt.FieldMapping{ + FieldName: fieldMapping.FieldSchema.Name, + FromFieldName: fieldMapping.EvalSetName, + }) + if slices.Contains(evaluationSetColumns, *fieldMapping.EvalSetName) { + continue + } + // historical data compatibility, convert plain_text to text, data needs to be refreshed + evaluationSetSchema.FieldSchemas = append(evaluationSetSchema.FieldSchemas, &dataset0.FieldSchema{ + Key: gptr.Of(*fieldMapping.EvalSetName), + Name: gptr.Of(*fieldMapping.EvalSetName), + Description: gptr.Of(fieldMapping.TraceFieldJsonpath), + ContentType: fieldMapping.FieldSchema.ContentType, + //DefaultDisplayFormat: gptr.Of(dataset.FieldDisplayFormat_PlainText), + TextSchema: fieldMapping.FieldSchema.TextSchema, + //Hidden: gptr.Of(false), + }) + evaluationSetColumns = append(evaluationSetColumns, *fieldMapping.EvalSetName) + } + + evaluatorFieldMappings = append(evaluatorFieldMappings, &expt.EvaluatorFieldMapping{ + EvaluatorVersionID: autoEvaluateConfig.EvaluatorVersionID, + FromEvalSet: fromEvalSet, + }) + } + category := getCategory(currentTask.TaskType) + schema := convertDatasetSchemaDTO2DO(evaluationSetSchema) + logs.CtxInfo(ctx, "[auto_task] CreateDataset,category:%s", category) + var datasetName, exptName string + if param.RunType == task.TaskRunTypeBackFill { + datasetName = fmt.Sprintf("%s_%s_%s_%d.%d.%d.%d", AutoEvaluateCN, BackFillCN, currentTask.Name, time.Now().Year(), time.Now().Month(), time.Now().Day(), time.Now().Unix()) + exptName = fmt.Sprintf("%s_%s_%s_%d.%d.%d.%d", AutoEvaluateCN, BackFillCN, currentTask.Name, time.Now().Year(), time.Now().Month(), time.Now().Day(), time.Now().Unix()) + } else { + datasetName = fmt.Sprintf("%s_%s_%d.%d.%d.%d", AutoEvaluateCN, currentTask.Name, time.Now().Year(), time.Now().Month(), time.Now().Day(), time.Now().Unix()) + exptName = fmt.Sprintf("%s_%s_%d.%d.%d.%d", AutoEvaluateCN, currentTask.Name, time.Now().Year(), time.Now().Month(), time.Now().Day(), time.Now().Unix()) + } + // Step 1: create evaluation dataset + datasetID, err := p.datasetServiceAdaptor.GetDatasetProvider(category).CreateDataset(ctx, entity.NewDataset( + 0, + currentTask.WorkspaceID, + datasetName, + category, + schema, + sessionInfo, + )) + if err != nil { + logs.CtxError(ctx, "CreateDataset failed, workspace_id=%d, err=%#v", currentTask.WorkspaceID, err) + return err + } + logs.CtxInfo(ctx, "[auto_task] AutoEvaluteProcessor OnChangeProcessor, datasetID:%d", datasetID) + // Step 2: create experiment + maxAliveTime := param.RunEndAt - param.RunStartAt + submitExperimentReq := rpc.SubmitExperimentReq{ + WorkspaceID: currentTask.WorkspaceID, + EvalSetVersionID: gptr.Of(datasetID), + EvaluatorVersionIds: evaluatorVersionIds, + Name: ptr.Of(exptName), + Desc: gptr.Of("Auto Task Experiment"), + EvalSetID: gptr.Of(datasetID), + EvaluatorFieldMapping: evaluatorFieldMappings, + TargetFieldMapping: &expt.TargetFieldMapping{ + FromEvalSet: []*expt.FieldMapping{}, + }, + CreateEvalTargetParam: &eval_target.CreateEvalTargetParam{ + SourceTargetID: gptr.Of(strconvh.FormatInt64(currentTask.ID)), + EvalTargetType: gptr.Of(eval_target_d.EvalTargetType_Trace), + }, + ExptType: gptr.Of(expt.ExptType_Online), + MaxAliveTime: gptr.Of(maxAliveTime), + SourceType: gptr.Of(expt.SourceType_AutoTask), + SourceID: gptr.Of(strconvh.FormatInt64(currentTask.ID)), + Session: 
sessionInfo, + } + logs.CtxInfo(ctx, "[auto_task] SubmitExperiment:%+v", submitExperimentReq) + exptID, exptRunID, err := p.evaluationSvc.SubmitExperiment(ctx, &submitExperimentReq) + if err != nil { + logs.CtxError(ctx, "SubmitExperiment failed, workspace_id=%d, err=%#v", currentTask.WorkspaceID, err) + return err + } + logs.CtxInfo(ctx, "[auto_task] AutoEvaluteProcessor OnChangeProcessor, exptID:%d, exptRunID:%d", exptID, exptRunID) + + evaluationSetConfig, err := p.datasetServiceAdaptor.GetDatasetProvider(category).GetDataset(ctx, currentTask.WorkspaceID, datasetID, category) + if err != nil { + logs.CtxError(ctx, "[task-debug] GetEvaluationSet err:%v", err) + return err + } + + // Step 5: create task run + taskRunConfig := &task.TaskRunConfig{ + AutoEvaluateRunConfig: &task.AutoEvaluateRunConfig{ + ExptID: exptID, + ExptRunID: exptRunID, + EvalID: datasetID, + SchemaID: evaluationSetConfig.DatasetVersion.DatasetSchema.ID, + Schema: ptr.Of(ToJSONString(ctx, evaluationSetConfig.DatasetVersion.DatasetSchema.FieldSchemas)), + EndAt: param.RunEndAt, + CycleStartAt: param.RunStartAt, + CycleEndAt: param.RunEndAt, + Status: task.TaskStatusRunning, + }, + } + taskRun := &task_entity.TaskRun{ + TaskID: currentTask.ID, + WorkspaceID: currentTask.WorkspaceID, + TaskType: param.RunType, + RunStatus: task.RunStatusRunning, + RunStartAt: time.UnixMilli(param.RunStartAt), + RunEndAt: time.UnixMilli(param.RunEndAt), + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + TaskRunConfig: tconv.TaskRunConfigDTO2DO(taskRunConfig), + } + _, err = p.taskRepo.CreateTaskRun(ctx, taskRun) + if err != nil { + logs.CtxError(ctx, "[auto_task] OnCreateTaskRunProcessor, CreateTaskRun err, taskRun:%+v, err:%v", taskRun, err) + return err + } + //taskRun.ID = id + //taskConfig, err := p.taskRepo.GetTask(ctx, currentTask.GetID(), nil, nil) + //if err != nil { + // return err + //} + //taskConfig.TaskRuns = append(taskConfig.TaskRuns, taskRun) + //err = p.taskRepo.UpdateTask(ctx, taskConfig) + //if err != nil { + // return err + //} + return nil +} + +func (p *AutoEvaluteProcessor) OnFinishTaskRunChange(ctx context.Context, param taskexe.OnFinishTaskRunChangeReq) error { + if param.TaskRun == nil { + return nil + } + session := p.getSession(ctx, param.Task) + taskRun := param.TaskRun + if err := p.evaluationSvc.FinishExperiment(ctx, &rpc.FinishExperimentReq{ + WorkspaceID: param.Task.WorkspaceID, + ExperimentID: taskRun.TaskRunConfig.AutoEvaluateRunConfig.ExptID, + ExperimentRunID: taskRun.TaskRunConfig.AutoEvaluateRunConfig.ExptRunID, + Session: session, + }); err != nil { + return err + } + // Set task run status to completed + taskRun.RunStatus = task.RunStatusDone + // Update task run + err := p.taskRepo.UpdateTaskRun(ctx, taskRun) + if err != nil { + logs.CtxError(ctx, "[auto_task] OnFinishTaskRunProcessor, UpdateTaskRun err, taskRunID:%d, err:%v", taskRun.ID, err) + return err + } + return nil +} + +func (p *AutoEvaluteProcessor) getSession(ctx context.Context, task *task_entity.ObservabilityTask) *common.Session { + userIDStr := session.UserIDInCtxOrEmpty(ctx) + if userIDStr == "" { + userIDStr = task.CreatedBy + } + userID, err := strconv.ParseInt(userIDStr, 10, 64) + if err != nil { + logs.CtxError(ctx, "[task-debug] AutoEvaluteProcessor OnChangeProcessor, ParseInt err:%v", err) + } + return &common.Session{ + UserID: gptr.Of(userID), + AppID: gptr.Of(p.aid), + } +} diff --git a/backend/modules/observability/domain/task/service/taskexe/processor/factory.go 
b/backend/modules/observability/domain/task/service/taskexe/processor/factory.go new file mode 100644 index 000000000..131437f7f --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/processor/factory.go @@ -0,0 +1,32 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package processor + +import ( + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe" +) + +type TaskProcessor struct { + taskProcessorMap map[task.TaskType]taskexe.Processor +} + +func NewTaskProcessor() *TaskProcessor { + return &TaskProcessor{} +} + +func (t *TaskProcessor) Register(taskType task.TaskType, taskProcessor taskexe.Processor) { + if t.taskProcessorMap == nil { + t.taskProcessorMap = make(map[task.TaskType]taskexe.Processor) + } + t.taskProcessorMap[taskType] = taskProcessor +} + +func (t *TaskProcessor) GetTaskProcessor(taskType task.TaskType) taskexe.Processor { + datasetProvider, ok := t.taskProcessorMap[taskType] + if !ok { + return NewNoopTaskProcessor() + } + return datasetProvider +} diff --git a/backend/modules/observability/domain/task/service/taskexe/processor/noop.go b/backend/modules/observability/domain/task/service/taskexe/processor/noop.go new file mode 100644 index 000000000..768e6c7fe --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/processor/noop.go @@ -0,0 +1,49 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package processor + +import ( + "context" + + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + task_entity "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe" +) + +var _ taskexe.Processor = (*NoopTaskProcessor)(nil) + +type NoopTaskProcessor struct { +} + +func NewNoopTaskProcessor() *NoopTaskProcessor { + return &NoopTaskProcessor{} +} + +func (p *NoopTaskProcessor) ValidateConfig(ctx context.Context, config any) error { + return nil +} + +func (p *NoopTaskProcessor) Invoke(ctx context.Context, trigger *taskexe.Trigger) error { + return nil +} + +func (p *NoopTaskProcessor) OnCreateTaskChange(ctx context.Context, currentTask *task_entity.ObservabilityTask) error { + return nil +} + +func (p *NoopTaskProcessor) OnUpdateTaskChange(ctx context.Context, currentTask *task_entity.ObservabilityTask, taskOp task.TaskStatus) error { + return nil +} + +func (p *NoopTaskProcessor) OnFinishTaskChange(ctx context.Context, param taskexe.OnFinishTaskChangeReq) error { + return nil +} + +func (p *NoopTaskProcessor) OnCreateTaskRunChange(ctx context.Context, param taskexe.OnCreateTaskRunChangeReq) error { + return nil +} + +func (p *NoopTaskProcessor) OnFinishTaskRunChange(ctx context.Context, param taskexe.OnFinishTaskRunChangeReq) error { + return nil +} diff --git a/backend/modules/observability/domain/task/service/taskexe/processor/utils.go b/backend/modules/observability/domain/task/service/taskexe/processor/utils.go new file mode 100644 index 000000000..31bd59c13 --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/processor/utils.go @@ -0,0 +1,288 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package processor + +import ( + "context" + "time" + + "github.com/bytedance/gg/gptr" + "github.com/bytedance/sonic" + 
"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/eval_set" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/expt" + dataset0 "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/dataset" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + task_entity "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" + "github.com/coze-dev/coze-loop/backend/pkg/json" + "github.com/coze-dev/coze-loop/backend/pkg/logs" + "github.com/coze-dev/cozeloop-go/spec/tracespec" +) + +func getCategory(taskType task.TaskType) entity.DatasetCategory { + switch taskType { + case task.TaskTypeAutoEval: + return entity.DatasetCategory_Evaluation + default: + return entity.DatasetCategory_General + } +} + +// shouldTriggerBackfill 判断是否需要发送历史回溯MQ +func ShouldTriggerBackfill(taskDO *task_entity.ObservabilityTask) bool { + // 检查任务类型 + taskType := taskDO.TaskType + if taskType != task.TaskTypeAutoEval && taskType != task.TaskTypeAutoDataReflow { + return false + } + + // 检查回填时间配置 + + if taskDO.BackfillEffectiveTime == nil { + return false + } + + return taskDO.BackfillEffectiveTime.StartAt > 0 && + taskDO.BackfillEffectiveTime.EndAt > 0 && + taskDO.BackfillEffectiveTime.StartAt < taskDO.BackfillEffectiveTime.EndAt +} + +func ShouldTriggerNewData(ctx context.Context, taskDO *task_entity.ObservabilityTask) bool { + // 检查任务类型 + taskType := taskDO.TaskType + if taskType != task.TaskTypeAutoEval && taskType != task.TaskTypeAutoDataReflow { + return false + } + + if taskDO.EffectiveTime == nil { + return false + } + logs.CtxInfo(ctx, "[auto_task] ShouldTriggerNewData, endAt:%d, startAt:%d", taskDO.EffectiveTime.EndAt, taskDO.EffectiveTime.StartAt) + + return taskDO.EffectiveTime.EndAt > 0 && + taskDO.EffectiveTime.StartAt > 0 && + taskDO.EffectiveTime.StartAt < taskDO.EffectiveTime.EndAt && + time.Now().After(time.UnixMilli(taskDO.EffectiveTime.StartAt)) +} + +func ToJSONString(ctx context.Context, obj interface{}) string { + if obj == nil { + return "" + } + jsonData, err := sonic.Marshal(obj) + if err != nil { + logs.CtxError(ctx, "JSON marshal error: %v", err) + return "" + } + jsonStr := string(jsonData) + return jsonStr +} + +func getBasicEvaluationSetSchema(basicColumns []string) (*dataset0.DatasetSchema, []*expt.FieldMapping) { + evaluationSetSchema := dataset0.NewDatasetSchema() + var fromEvalSet []*expt.FieldMapping + for _, column := range basicColumns { + evaluationSetSchema.FieldSchemas = append(evaluationSetSchema.FieldSchemas, &dataset0.FieldSchema{ + Key: gptr.Of(column), + Name: gptr.Of(column), + Description: gptr.Of(column), + ContentType: gptr.Of(common.ContentTypeText), + TextSchema: gptr.Of("{\"type\": \"string\"}"), + }) + fromEvalSet = append(fromEvalSet, &expt.FieldMapping{ + FieldName: gptr.Of(column), + FromFieldName: gptr.Of(column), + }) + } + return evaluationSetSchema, fromEvalSet +} + +// todo:[xun]和手动回流的代码逻辑一样,需要抽取公共代码 +// convertDatasetSchemaDTO2DO 转换数据集模式 +func convertDatasetSchemaDTO2DO(schema *dataset0.DatasetSchema) entity.DatasetSchema { + if schema == nil { + return entity.DatasetSchema{} + } + + result := entity.DatasetSchema{} + + if schema.IsSetFieldSchemas() { + fieldSchemas := 
+		result.FieldSchemas = make([]entity.FieldSchema, len(fieldSchemas))
+		for i, fs := range fieldSchemas {
+			key := fs.GetKey()
+			if key == "" {
+				key = fs.GetName()
+			}
+			name := fs.GetName()
+			description := fs.GetDescription()
+			textSchema := fs.GetTextSchema()
+			result.FieldSchemas[i] = entity.FieldSchema{
+				Key:         &key,
+				Name:        name,
+				Description: description,
+				ContentType: convertContentTypeDTO2DO(fs.GetContentType()),
+				TextSchema:  textSchema,
+			}
+		}
+	}
+
+	return result
+}
+
+// TODO(xun): same logic as the manual data reflow path; extract shared code.
+// convertContentTypeDTO2DO converts the content type DTO into the domain enum.
+func convertContentTypeDTO2DO(contentType common.ContentType) entity.ContentType {
+	switch contentType {
+	case common.ContentTypeText:
+		return entity.ContentType_Text
+	case common.ContentTypeImage:
+		return entity.ContentType_Image
+	case common.ContentTypeAudio:
+		return entity.ContentType_Audio
+	case common.ContentTypeMultiPart:
+		return entity.ContentType_MultiPart
+	default:
+		return entity.ContentType_Text
+	}
+}
+
+// TODO(xun): same logic as the manual data reflow path; extract shared code.
+func buildItems(ctx context.Context, spans []*loop_span.Span, fieldMappings []*task_entity.EvaluateFieldMapping,
+	evaluationSetSchema string, taskRunID string) (turns []*eval_set.Turn) {
+	turns = make([]*eval_set.Turn, 0, len(spans))
+	for _, span := range spans {
+		fieldData := buildItem(ctx, span, fieldMappings, evaluationSetSchema, taskRunID)
+		if len(fieldData) == 0 {
+			continue
+		}
+		turns = append(turns, &eval_set.Turn{
+			FieldDataList: fieldData,
+		})
+	}
+	return turns
+}
+
+// TODO(xun): same logic as the manual data reflow path; extract shared code.
+func buildItem(ctx context.Context, span *loop_span.Span, fieldMappings []*task_entity.EvaluateFieldMapping,
+	evaluationSetSchema string, taskRunID string) []*eval_set.FieldData {
+	var fieldDatas []*eval_set.FieldData
+	fieldDatas = append(fieldDatas, &eval_set.FieldData{
+		Key:  gptr.Of("trace_id"),
+		Name: gptr.Of("trace_id"),
+		Content: &common.Content{
+			ContentType: gptr.Of(common.ContentTypeText),
+			Text:        gptr.Of(span.TraceID),
+		},
+	})
+	fieldDatas = append(fieldDatas, &eval_set.FieldData{
+		Key:  gptr.Of("span_id"),
+		Name: gptr.Of("span_id"),
+		Content: &common.Content{
+			ContentType: gptr.Of(common.ContentTypeText),
+			Text:        gptr.Of(span.SpanID),
+		},
+	})
+	fieldDatas = append(fieldDatas, &eval_set.FieldData{
+		Key:  gptr.Of("run_id"),
+		Name: gptr.Of("run_id"),
+		Content: &common.Content{
+			ContentType: gptr.Of(common.ContentTypeText),
+			Text:        gptr.Of(taskRunID),
+		},
+	})
+	for _, mapping := range fieldMappings {
+		// The frontend passes the field Name while the evaluation set expects the key, so map between them.
+		if mapping.EvalSetName == nil {
+			logs.CtxInfo(ctx, "Evaluator field name is nil")
+			continue
+		}
+		var evaluationSetSchemas []*eval_set.FieldSchema
+		if evaluationSetSchema == "" {
+			logs.CtxInfo(ctx, "Evaluation set schema is nil")
+			continue
+		}
+		err := json.Unmarshal([]byte(evaluationSetSchema), &evaluationSetSchemas)
+		if err != nil {
+			logs.CtxInfo(ctx, "Unmarshal evaluation set schema failed, err:%v", err)
+			continue
+		}
+		for _, fieldSchema := range evaluationSetSchemas {
+			if fieldSchema.GetKey() == *mapping.EvalSetName {
+				key := fieldSchema.GetKey()
+				if key == "" {
+					logs.CtxInfo(ctx, "Evaluator field key is empty, name:%v", *mapping.FieldSchema.Name)
+					continue
+				}
+				value, err := span.ExtractByJsonpath(ctx, mapping.TraceFieldKey, mapping.TraceFieldJsonpath)
+				if err != nil {
+					logs.CtxInfo(ctx, "Extract field failed, err:%v", err)
+					continue
+				}
+				content, err := GetContentInfo(ctx, fieldSchema.GetContentType(), value)
+				if err != nil {
+					logs.CtxInfo(ctx, "GetContentInfo failed, err:%v", err)
"GetContentInfo failed, err:%v", err) + return nil + } + fieldDatas = append(fieldDatas, &eval_set.FieldData{ + Key: gptr.Of(key), + Name: gptr.Of(fieldSchema.GetName()), + Content: content, + }) + } + } + } + return fieldDatas +} + +// todo:[xun]和手动回流的代码逻辑一样,需要抽取公共代码 +func GetContentInfo(ctx context.Context, contentType common.ContentType, value string) (*common.Content, error) { + var content *common.Content + switch contentType { + case common.ContentTypeMultiPart: + var parts []tracespec.ModelMessagePart + err := json.Unmarshal([]byte(value), &parts) + if err != nil { + logs.CtxInfo(ctx, "Unmarshal multi part failed, err:%v", err) + return nil, err + } + var multiPart []*common.Content + for _, part := range parts { + // 本期仅支持回流图片的多模态数据,非ImageURL信息的,打包放进text + switch part.Type { + case tracespec.ModelMessagePartTypeImage: + if part.ImageURL == nil { + continue + } + multiPart = append(multiPart, &common.Content{ + ContentType: gptr.Of(common.ContentTypeImage), + Image: &common.Image{ + Name: gptr.Of(part.ImageURL.Name), + URL: gptr.Of(part.ImageURL.URL), + }, + }) + case tracespec.ModelMessagePartTypeText, tracespec.ModelMessagePartTypeFile: + multiPart = append(multiPart, &common.Content{ + ContentType: gptr.Of(common.ContentTypeText), + Text: gptr.Of(part.Text), + }) + default: + logs.CtxWarn(ctx, "Unsupported part type: %s", part.Type) + return nil, err + } + } + content = &common.Content{ + ContentType: gptr.Of(common.ContentTypeMultiPart), + MultiPart: multiPart, + } + default: + content = &common.Content{ + ContentType: gptr.Of(common.ContentTypeText), + Text: gptr.Of(value), + } + } + return content, nil +} diff --git a/backend/modules/observability/domain/task/service/taskexe/tracehub/backfill.go b/backend/modules/observability/domain/task/service/taskexe/tracehub/backfill.go new file mode 100644 index 000000000..e889e686e --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/tracehub/backfill.go @@ -0,0 +1,508 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracehub + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/coze-dev/coze-loop/backend/infra/middleware/session" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor" + tconv "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/repo" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/service/trace/span_filter" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/service/trace/span_processor" + obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +const ( + pageSize = 500 + backfillLockKeyTemplate = "observability:tracehub:backfill:%d" + backfillLockMaxHold = 24 * time.Hour +) + +// 定时任务+锁 +func (h *TraceHubServiceImpl) BackFill(ctx context.Context, event *entity.BackFillEvent) error { + // 1. 
+	ctx = h.fillCtx(ctx)
+	logs.CtxInfo(ctx, "BackFill msg %+v", event)
+
+	var (
+		lockKey    string
+		lockCancel func()
+	)
+	if h.locker != nil && event != nil {
+		lockKey = fmt.Sprintf(backfillLockKeyTemplate, event.TaskID)
+		locked, lockCtx, cancel, lockErr := h.locker.LockWithRenew(ctx, lockKey, transformTaskStatusLockTTL, backfillLockMaxHold)
+		if lockErr != nil {
+			logs.CtxError(ctx, "backfill acquire lock failed, task_id=%d, err=%v", event.TaskID, lockErr)
+			return lockErr
+		}
+		if !locked {
+			logs.CtxInfo(ctx, "backfill lock held by others, skip execution, task_id=%d", event.TaskID)
+			return nil
+		}
+		lockCancel = cancel
+		ctx = lockCtx
+		defer func(cancel func()) {
+			if cancel != nil {
+				cancel()
+			} else if lockKey != "" {
+				if _, err := h.locker.Unlock(lockKey); err != nil {
+					logs.CtxWarn(ctx, "backfill release lock failed, task_id=%d, err=%v", event.TaskID, err)
+				}
+			}
+		}(lockCancel)
+	}
+
+	sub, err := h.setBackfillTask(ctx, event)
+	if err != nil {
+		return err
+	}
+
+	if sub != nil && sub.t != nil && sub.t.GetBaseInfo() != nil && sub.t.GetBaseInfo().GetCreatedBy() != nil {
+		ctx = session.WithCtxUser(ctx, &session.User{ID: sub.t.GetBaseInfo().GetCreatedBy().GetUserID()})
+	}
+
+	// 2. Determine whether the backfill task is completed to avoid repeated execution
+	isDone, err := h.isBackfillDone(ctx, sub)
+	if err != nil {
+		logs.CtxError(ctx, "check backfill task done failed, task_id=%d, err=%v", sub.t.GetID(), err)
+		return err
+	}
+	if isDone {
+		logs.CtxInfo(ctx, "backfill already completed, task_id=%d", sub.t.GetID())
+		return nil
+	}
+
+	// Reset the flush error collector for this sequential run
+	h.flushErrLock.Lock()
+	h.flushErr = nil
+	h.flushErrLock.Unlock()
+
+	// 5. Retrieve span data from the observability service
+	listErr := h.listSpans(ctx, sub)
+	if listErr != nil {
+		logs.CtxError(ctx, "list spans failed, task_id=%d, err=%v", sub.t.GetID(), listErr)
+		// continue on error without interrupting the flow
+	}
+
+	// 6. 
Synchronously wait for completion to ensure all data is processed + return h.onHandleDone(ctx, listErr, sub) +} + +// setBackfillTask sets the context for the current backfill task +func (h *TraceHubServiceImpl) setBackfillTask(ctx context.Context, event *entity.BackFillEvent) (*spanSubscriber, error) { + taskConfig, err := h.taskRepo.GetTask(ctx, event.TaskID, nil, nil) + if err != nil { + logs.CtxError(ctx, "get task config failed, task_id=%d, err=%v", event.TaskID, err) + return nil, err + } + if taskConfig == nil { + return nil, errors.New("task config not found") + } + taskConfigDO := tconv.TaskDO2DTO(ctx, taskConfig, nil) + taskRun, err := h.taskRepo.GetBackfillTaskRun(ctx, ptr.Of(taskConfigDO.GetWorkspaceID()), taskConfigDO.GetID()) + if err != nil { + logs.CtxError(ctx, "get backfill task run failed, task_id=%d, err=%v", taskConfigDO.GetID(), err) + return nil, err + } + taskRunDTO := tconv.TaskRunDO2DTO(ctx, taskRun, nil) + proc := h.taskProcessor.GetTaskProcessor(taskConfig.TaskType) + sub := &spanSubscriber{ + taskID: taskConfigDO.GetID(), + t: taskConfigDO, + tr: taskRunDTO, + processor: proc, + bufCap: 0, + maxFlushInterval: time.Second * 5, + taskRepo: h.taskRepo, + runType: task.TaskRunTypeBackFill, + } + + return sub, nil +} + +// isBackfillDone checks whether the backfill task has been completed +func (h *TraceHubServiceImpl) isBackfillDone(ctx context.Context, sub *spanSubscriber) (bool, error) { + if sub.tr == nil { + logs.CtxError(ctx, "get backfill task run failed, task_id=%d, err=%v", sub.t.GetID(), nil) + return true, nil + } + + return sub.tr.RunStatus == task.RunStatusDone, nil +} + +func (h *TraceHubServiceImpl) listSpans(ctx context.Context, sub *spanSubscriber) error { + backfillTime := sub.t.GetRule().GetBackfillEffectiveTime() + tenants, err := h.getTenants(ctx, loop_span.PlatformType(sub.t.GetRule().GetSpanFilters().GetPlatformType())) + if err != nil { + logs.CtxError(ctx, "get tenants failed, task_id=%d, err=%v", sub.t.GetID(), err) + return err + } + + // Build query parameters + listParam := &repo.ListSpansParam{ + Tenants: tenants, + Filters: h.buildSpanFilters(ctx, sub.t), + StartAt: backfillTime.GetStartAt(), + EndAt: backfillTime.GetEndAt(), + Limit: pageSize, // Page size + DescByStartTime: true, + NotQueryAnnotation: true, // No annotation query required during backfill + } + + if sub.tr.BackfillRunDetail != nil && sub.tr.BackfillRunDetail.LastSpanPageToken != nil { + listParam.PageToken = *sub.tr.BackfillRunDetail.LastSpanPageToken + } + // Paginate query and send data + return h.fetchAndSendSpans(ctx, listParam, sub) +} + +type ListSpansReq struct { + WorkspaceID int64 + ThirdPartyWorkspaceID string + StartTime int64 // ms + EndTime int64 // ms + Filters *loop_span.FilterFields + Limit int32 + DescByStartTime bool + PageToken string + PlatformType loop_span.PlatformType + SpanListType loop_span.SpanListType +} + +// buildSpanFilters constructs span filter conditions +func (h *TraceHubServiceImpl) buildSpanFilters(ctx context.Context, taskConfig *task.Task) *loop_span.FilterFields { + // More complex filters can be built based on the task configuration + // Simplified here: return nil to indicate no additional filters + + platformFilter, err := h.buildHelper.BuildPlatformRelatedFilter(ctx, loop_span.PlatformType(taskConfig.GetRule().GetSpanFilters().GetPlatformType())) + if err != nil { + return nil + } + builtinFilter, err := h.buildBuiltinFilters(ctx, platformFilter, &ListSpansReq{ + WorkspaceID: taskConfig.GetWorkspaceID(), + SpanListType: 
loop_span.SpanListType(taskConfig.GetRule().GetSpanFilters().GetSpanListType()), + }) + if err != nil { + return nil + } + filters := h.combineFilters(builtinFilter, convertor.FilterFieldsDTO2DO(taskConfig.GetRule().GetSpanFilters().GetFilters())) + + return filters +} + +func (h *TraceHubServiceImpl) buildBuiltinFilters(ctx context.Context, f span_filter.Filter, req *ListSpansReq) (*loop_span.FilterFields, error) { + filters := make([]*loop_span.FilterField, 0) + env := &span_filter.SpanEnv{ + WorkspaceID: req.WorkspaceID, + ThirdPartyWorkspaceID: req.ThirdPartyWorkspaceID, + } + basicFilter, forceQuery, err := f.BuildBasicSpanFilter(ctx, env) + if err != nil { + return nil, err + } else if len(basicFilter) == 0 && !forceQuery { // if it's null, no need to query from ck + return nil, nil + } + filters = append(filters, basicFilter...) + switch req.SpanListType { + case loop_span.SpanListTypeRootSpan: + subFilter, err := f.BuildRootSpanFilter(ctx, env) + if err != nil { + return nil, err + } + filters = append(filters, subFilter...) + case loop_span.SpanListTypeLLMSpan: + subFilter, err := f.BuildLLMSpanFilter(ctx, env) + if err != nil { + return nil, err + } + filters = append(filters, subFilter...) + case loop_span.SpanListTypeAllSpan: + subFilter, err := f.BuildALLSpanFilter(ctx, env) + if err != nil { + return nil, err + } + filters = append(filters, subFilter...) + default: + return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid span list type: %s")) + } + filterAggr := &loop_span.FilterFields{ + QueryAndOr: ptr.Of(loop_span.QueryAndOrEnumAnd), + FilterFields: filters, + } + return filterAggr, nil +} + +func (h *TraceHubServiceImpl) combineFilters(filters ...*loop_span.FilterFields) *loop_span.FilterFields { + filterAggr := &loop_span.FilterFields{ + QueryAndOr: ptr.Of(loop_span.QueryAndOrEnumAnd), + } + for _, f := range filters { + if f == nil { + continue + } + filterAggr.FilterFields = append(filterAggr.FilterFields, &loop_span.FilterField{ + QueryAndOr: ptr.Of(loop_span.QueryAndOrEnumAnd), + SubFilter: f, + }) + } + return filterAggr +} + +// fetchAndSendSpans paginates and sends span data +func (h *TraceHubServiceImpl) fetchAndSendSpans(ctx context.Context, listParam *repo.ListSpansParam, sub *spanSubscriber) error { + totalCount := int64(0) + pageToken := listParam.PageToken + for { + logs.CtxInfo(ctx, "ListSpansParam:%v", listParam) + result, err := h.traceRepo.ListSpans(ctx, listParam) + if err != nil { + logs.CtxError(ctx, "list spans failed, task_id=%d, page_token=%s, err=%v", sub.t.GetID(), pageToken, err) + return err + } + spans := result.Spans + processors, err := h.buildHelper.BuildGetTraceProcessors(ctx, span_processor.Settings{ + WorkspaceId: sub.t.GetWorkspaceID(), + PlatformType: loop_span.PlatformType(sub.t.GetRule().GetSpanFilters().GetPlatformType()), + QueryStartTime: listParam.StartAt, + QueryEndTime: listParam.EndAt, + }) + if err != nil { + return errorx.WrapByCode(err, obErrorx.CommercialCommonInternalErrorCodeCode) + } + for _, p := range processors { + spans, err = p.Transform(ctx, spans) + if err != nil { + return errorx.WrapByCode(err, obErrorx.CommercialCommonInternalErrorCodeCode) + } + } + + if len(spans) > 0 { + flush := &flushReq{ + retrievedSpanCount: int64(len(spans)), + pageToken: result.PageToken, + spans: spans, + noMore: !result.HasMore, + } + + if err = h.flushSpans(ctx, flush, sub); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return err 
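+				// Only context cancellation/deadline aborts the paging loop here; any other
+				// flush error is recorded in h.flushErr by flushSpans and surfaced later by onHandleDone.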
+ } + } + + totalCount += int64(len(spans)) + logs.CtxInfo(ctx, "processed %d spans, total=%d, task_id=%d", len(spans), totalCount, sub.t.GetID()) + } + + if !result.HasMore { + logs.CtxInfo(ctx, "completed listing spans, total_count=%d, task_id=%d", totalCount, sub.t.GetID()) + break + } + + pageToken = result.PageToken + } + + return nil +} + +func (h *TraceHubServiceImpl) flushSpans(ctx context.Context, fr *flushReq, sub *spanSubscriber) error { + if ctx.Err() != nil { + return ctx.Err() + } + + _, _, err := h.doFlush(ctx, fr, sub) + if err != nil { + logs.CtxError(ctx, "flush spans failed, task_id=%d, err=%v", sub.t.GetID(), err) + h.flushErrLock.Lock() + h.flushErr = append(h.flushErr, err) + h.flushErrLock.Unlock() + } + + return nil +} + +func (h *TraceHubServiceImpl) doFlush(ctx context.Context, fr *flushReq, sub *spanSubscriber) (flushed, sampled int, _ error) { + if fr == nil || len(fr.spans) == 0 { + return 0, 0, nil + } + + logs.CtxInfo(ctx, "processing %d spans for backfill, task_id=%d", len(fr.spans), sub.t.GetID()) + + // Apply sampling logic + sampledSpans := h.applySampling(fr.spans, sub) + if len(sampledSpans) == 0 { + logs.CtxInfo(ctx, "no spans after sampling, task_id=%d", sub.t.GetID()) + return len(fr.spans), 0, nil + } + + // Execute specific business logic + err := h.processSpansForBackfill(ctx, sampledSpans, sub) + if err != nil { + logs.CtxError(ctx, "process spans failed, task_id=%d, err=%v", sub.t.GetID(), err) + return len(fr.spans), len(sampledSpans), err + } + + sub.tr.BackfillRunDetail = &task.BackfillDetail{ + LastSpanPageToken: ptr.Of(fr.pageToken), + } + err = h.taskRepo.UpdateTaskRunWithOCC(ctx, sub.tr.ID, sub.tr.WorkspaceID, map[string]interface{}{ + "backfill_detail": ToJSONString(ctx, sub.tr.BackfillRunDetail), + }) + if err != nil { + logs.CtxError(ctx, "update task run failed, task_id=%d, err=%v", sub.t.GetID(), err) + return len(fr.spans), len(sampledSpans), err + } + if fr.noMore { + logs.CtxInfo(ctx, "no more spans to process, task_id=%d", sub.t.GetID()) + if err = sub.processor.OnFinishTaskChange(ctx, taskexe.OnFinishTaskChangeReq{ + Task: tconv.TaskDTO2DO(sub.t, "", nil), + TaskRun: tconv.TaskRunDTO2DO(sub.tr), + IsFinish: false, + }); err != nil { + return len(fr.spans), len(sampledSpans), err + } + } + + logs.CtxInfo(ctx, "successfully processed %d spans (sampled from %d), task_id=%d", + len(sampledSpans), len(fr.spans), sub.t.GetID()) + return len(fr.spans), len(sampledSpans), nil +} + +// applySampling applies sampling logic +func (h *TraceHubServiceImpl) applySampling(spans []*loop_span.Span, sub *spanSubscriber) []*loop_span.Span { + if sub.t == nil || sub.t.Rule == nil { + return spans + } + + sampler := sub.t.GetRule().GetSampler() + if sampler == nil { + return spans + } + + sampleRate := sampler.GetSampleRate() + if sampleRate >= 1.0 { + return spans // 100% sampling + } + + if sampleRate <= 0.0 { + return nil // 0% sampling + } + + // Calculate sampling size + sampleSize := int(float64(len(spans)) * sampleRate) + if sampleSize == 0 && len(spans) > 0 { + sampleSize = 1 // Sample at least one + } + + if sampleSize >= len(spans) { + return spans + } + + return spans[:sampleSize] +} + +// processSpansForBackfill handles spans for backfill +func (h *TraceHubServiceImpl) processSpansForBackfill(ctx context.Context, spans []*loop_span.Span, sub *spanSubscriber) error { + // Batch processing spans for efficiency + const batchSize = 100 + + for i := 0; i < len(spans); i += batchSize { + end := i + batchSize + if end > len(spans) { + end = 
len(spans) + } + + batch := spans[i:end] + if err := h.processBatchSpans(ctx, batch, sub); err != nil { + logs.CtxError(ctx, "process batch spans failed, task_id=%d, batch_start=%d, err=%v", + sub.t.GetID(), i, err) + // Continue with the next batch without stopping due to a single failure + continue + } + } + + return nil +} + +// processBatchSpans processes a batch of span data +func (h *TraceHubServiceImpl) processBatchSpans(ctx context.Context, spans []*loop_span.Span, sub *spanSubscriber) error { + for _, span := range spans { + // Execute processing logic according to the task type + logs.CtxInfo(ctx, "processing span for backfill, span_id=%s, trace_id=%s, task_id=%d", + span.SpanID, span.TraceID, sub.t.GetID()) + taskCount, _ := h.taskRepo.GetTaskCount(ctx, sub.taskID) + taskRunCount, _ := h.taskRepo.GetTaskRunCount(ctx, sub.taskID, sub.tr.GetID()) + sampler := sub.t.GetRule().GetSampler() + if taskCount+1 > sampler.GetSampleSize() { + logs.CtxWarn(ctx, "taskCount+1 > sampler.GetSampleSize(), task_id=%d,SampleSize=%d", sub.taskID, sampler.GetSampleSize()) + if err := sub.processor.OnFinishTaskChange(ctx, taskexe.OnFinishTaskChangeReq{ + Task: tconv.TaskDTO2DO(sub.t, "", nil), + TaskRun: tconv.TaskRunDTO2DO(sub.tr), + IsFinish: true, + }); err != nil { + return err + } + break + } + logs.CtxInfo(ctx, "preDispatch, task_id=%d, taskCount=%d, taskRunCount=%d", sub.taskID, taskCount, taskRunCount) + if err := h.dispatch(ctx, span, []*spanSubscriber{sub}); err != nil { + return err + } + } + + return nil +} + +// onHandleDone handles completion callback +func (h *TraceHubServiceImpl) onHandleDone(ctx context.Context, listErr error, sub *spanSubscriber) error { + // Collect all errors + h.flushErrLock.Lock() + allErrors := append([]error{}, h.flushErr...) 
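+	// Combine the flush errors collected while paging with the final listing error;
+	// any error below triggers an asynchronous retry message for the backfill.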
+ if listErr != nil { + allErrors = append(allErrors, listErr) + } + h.flushErrLock.Unlock() + + if len(allErrors) > 0 { + backfillEvent := &entity.BackFillEvent{ + SpaceID: sub.t.GetWorkspaceID(), + TaskID: sub.t.GetID(), + } + + // Send MQ message asynchronously without blocking task creation flow + go func() { + if err := h.sendBackfillMessage(context.Background(), backfillEvent); err != nil { + logs.CtxWarn(ctx, "send backfill message failed, task_id=%d, err=%v", sub.t.GetID(), err) + } + }() + logs.CtxWarn(ctx, "backfill completed with %d errors, task_id=%d", len(allErrors), sub.t.GetID()) + // Return the first error as a representative + return allErrors[0] + + } + + logs.CtxInfo(ctx, "backfill completed successfully, task_id=%d", sub.t.GetID()) + return nil +} + +// sendBackfillMessage sends an MQ message +func (h *TraceHubServiceImpl) sendBackfillMessage(ctx context.Context, event *entity.BackFillEvent) error { + if h.backfillProducer == nil { + return errorx.NewByCode(obErrorx.CommonInternalErrorCode, errorx.WithExtraMsg("backfill producer not initialized")) + } + + return h.backfillProducer.SendBackfill(ctx, event) +} diff --git a/backend/modules/observability/domain/task/service/taskexe/tracehub/callback.go b/backend/modules/observability/domain/task/service/taskexe/tracehub/callback.go new file mode 100644 index 000000000..cc1305020 --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/tracehub/callback.go @@ -0,0 +1,165 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracehub + +import ( + "context" + "fmt" + "time" + + "github.com/coze-dev/coze-loop/backend/infra/external/benefit" + "github.com/coze-dev/coze-loop/backend/infra/middleware/session" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/repo" + "github.com/coze-dev/coze-loop/backend/pkg/logs" + "github.com/samber/lo" +) + +func (h *TraceHubServiceImpl) CallBack(ctx context.Context, event *entity.AutoEvalEvent) error { + logs.CtxInfo(ctx, "CallBack msg %+v", event) + for _, turn := range event.TurnEvalResults { + workspaceIDStr, workspaceID := turn.GetWorkspaceIDFromExt() + tenants, err := h.getTenants(ctx, loop_span.PlatformType("loop_all")) + if err != nil { + return err + } + var storageDuration int64 = 1 + //缓存做了吗? 
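+		// TODO: cache the CheckTraceBenefit result (per connector UID + workspace) so it is
+		// not re-queried for every turn in the event.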
+ res, err := h.benefitSvc.CheckTraceBenefit(ctx, &benefit.CheckTraceBenefitParams{ + ConnectorUID: session.UserIDInCtxOrEmpty(ctx), + SpaceID: workspaceID, + }) + if err != nil { + logs.CtxWarn(ctx, "fail to check trace benefit, %v", err) + } else if res == nil { + logs.CtxWarn(ctx, "fail to get trace benefit, got nil response") + } else if res != nil { + storageDuration = res.StorageDuration + } + + spans, err := h.getSpan(ctx, + tenants, + []string{turn.GetSpanIDFromExt()}, + turn.GetTraceIDFromExt(), + workspaceIDStr, + turn.GetStartTimeFromExt()/1000-(24*time.Duration(storageDuration)*time.Hour).Milliseconds(), + turn.GetStartTimeFromExt()/1000+10*time.Minute.Milliseconds(), + ) + if len(spans) == 0 { + return fmt.Errorf("span not found, span_id: %s", turn.GetSpanIDFromExt()) + } + span := spans[0] + + // Newly added: write Redis counters based on the Status + err = h.updateTaskRunDetailsCount(ctx, turn.GetTaskIDFromExt(), turn) + if err != nil { + logs.CtxWarn(ctx, "更新TaskRun状态计数失败: taskID=%d, status=%d, err=%v", + turn.GetTaskIDFromExt(), turn.Status, err) + // Continue processing without interrupting the flow + } + + annotation := &loop_span.Annotation{ + SpanID: turn.GetSpanIDFromExt(), + TraceID: span.TraceID, + WorkspaceID: workspaceIDStr, + AnnotationType: loop_span.AnnotationTypeAutoEvaluate, + StartTime: time.UnixMicro(span.StartTime), + Key: fmt.Sprintf("%d:%d", turn.GetTaskIDFromExt(), turn.EvaluatorVersionID), + Value: loop_span.AnnotationValue{ + ValueType: loop_span.AnnotationValueTypeDouble, + FloatValue: turn.Score, + }, + Reasoning: turn.Reasoning, + Status: loop_span.AnnotationStatusNormal, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } + if err = annotation.GenID(); err != nil { + return err + } + + err = h.traceRepo.InsertAnnotations(ctx, &repo.InsertAnnotationParam{ + Tenant: span.GetTenant(), + TTL: span.GetTTL(ctx), + Annotations: []*loop_span.Annotation{annotation}, + }) + if err != nil { + return err + } + + } + return nil +} + +func (h *TraceHubServiceImpl) Correction(ctx context.Context, event *entity.CorrectionEvent) error { + workspaceIDStr, workspaceID := event.GetWorkspaceIDFromExt() + if workspaceID == 0 { + return fmt.Errorf("workspace_id is empty") + } + tenants, err := h.getTenants(ctx, loop_span.PlatformType("loop_all")) + if err != nil { + return err + } + spans, err := h.getSpan(ctx, + tenants, + []string{event.GetSpanIDFromExt()}, + event.GetTraceIDFromExt(), + workspaceIDStr, + event.GetStartTimeFromExt()/1000-time.Second.Milliseconds(), + event.GetStartTimeFromExt()/1000+time.Second.Milliseconds(), + ) + if err != nil { + return err + } + if event.EvaluatorResult.Correction == nil || event.EvaluatorResult == nil { + return err + } + if len(spans) == 0 { + return fmt.Errorf("span not found, span_id: %s", event.GetSpanIDFromExt()) + } + span := spans[0] + annotations, err := h.traceRepo.ListAnnotations(ctx, &repo.ListAnnotationsParam{ + Tenants: tenants, + SpanID: event.GetSpanIDFromExt(), + TraceID: event.GetTraceIDFromExt(), + WorkspaceId: workspaceID, + StartAt: event.GetStartTimeFromExt() - 5*time.Second.Milliseconds(), + EndAt: event.GetStartTimeFromExt() + 5*time.Second.Milliseconds(), + }) + if err != nil { + return err + } + var annotation *loop_span.Annotation + for _, a := range annotations { + meta := a.GetAutoEvaluateMetadata() + if meta != nil && meta.EvaluatorRecordID == event.EvaluatorRecordID { + annotation = a + break + } + } + + updateBy := session.UserIDInCtxOrEmpty(ctx) + if updateBy == "" { + return err + } + 
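+	// annotation can still be nil when no listed annotation matched event.EvaluatorRecordID;
+	// a guard like the following avoids dereferencing it in the correction/upsert below.
+	if annotation == nil {
+		return fmt.Errorf("auto-evaluate annotation not found, evaluator_record_id: %v", event.EvaluatorRecordID)
+	}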
annotation.CorrectAutoEvaluateScore(event.EvaluatorResult.Correction.Score, event.EvaluatorResult.Correction.Explain, updateBy) + + // Then synchronize the observability data + param := &repo.UpsertAnnotationParam{ + Tenant: span.GetTenant(), + TTL: span.GetTTL(ctx), + Annotations: []*loop_span.Annotation{annotation}, + IsSync: true, + } + if err = h.traceRepo.UpsertAnnotation(ctx, param); err != nil { + recordID := lo.Ternary(annotation.GetAutoEvaluateMetadata() != nil, annotation.GetAutoEvaluateMetadata().EvaluatorRecordID, 0) + // If the synchronous update fails, compensate asynchronously + // TODO: asynchronous processing has issues and may duplicate + logs.CtxWarn(ctx, "Sync upsert annotation failed, try async upsert. span_id=[%v], recored_id=[%v], err:%v", + annotation.SpanID, recordID, err) + return nil + } + return nil +} diff --git a/backend/modules/observability/domain/task/service/taskexe/tracehub/mocks/trace_hub_service.go b/backend/modules/observability/domain/task/service/taskexe/tracehub/mocks/trace_hub_service.go new file mode 100644 index 000000000..a46b07d6b --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/tracehub/mocks/trace_hub_service.go @@ -0,0 +1,97 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/tracehub (interfaces: ITraceHubService) +// +// Generated by this command: +// +// mockgen -destination=mocks/trace_hub_service.go -package=mocks . ITraceHubService +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + entity "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + gomock "go.uber.org/mock/gomock" +) + +// MockITraceHubService is a mock of ITraceHubService interface. +type MockITraceHubService struct { + ctrl *gomock.Controller + recorder *MockITraceHubServiceMockRecorder +} + +// MockITraceHubServiceMockRecorder is the mock recorder for MockITraceHubService. +type MockITraceHubServiceMockRecorder struct { + mock *MockITraceHubService +} + +// NewMockITraceHubService creates a new mock instance. +func NewMockITraceHubService(ctrl *gomock.Controller) *MockITraceHubService { + mock := &MockITraceHubService{ctrl: ctrl} + mock.recorder = &MockITraceHubServiceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockITraceHubService) EXPECT() *MockITraceHubServiceMockRecorder { + return m.recorder +} + +// BackFill mocks base method. +func (m *MockITraceHubService) BackFill(arg0 context.Context, arg1 *entity.BackFillEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BackFill", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// BackFill indicates an expected call of BackFill. +func (mr *MockITraceHubServiceMockRecorder) BackFill(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackFill", reflect.TypeOf((*MockITraceHubService)(nil).BackFill), arg0, arg1) +} + +// CallBack mocks base method. +func (m *MockITraceHubService) CallBack(arg0 context.Context, arg1 *entity.AutoEvalEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CallBack", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// CallBack indicates an expected call of CallBack. 
+func (mr *MockITraceHubServiceMockRecorder) CallBack(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CallBack", reflect.TypeOf((*MockITraceHubService)(nil).CallBack), arg0, arg1) +} + +// Correction mocks base method. +func (m *MockITraceHubService) Correction(arg0 context.Context, arg1 *entity.CorrectionEvent) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Correction", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Correction indicates an expected call of Correction. +func (mr *MockITraceHubServiceMockRecorder) Correction(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Correction", reflect.TypeOf((*MockITraceHubService)(nil).Correction), arg0, arg1) +} + +// SpanTrigger mocks base method. +func (m *MockITraceHubService) SpanTrigger(arg0 context.Context, arg1 *entity.RawSpan) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SpanTrigger", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SpanTrigger indicates an expected call of SpanTrigger. +func (mr *MockITraceHubServiceMockRecorder) SpanTrigger(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SpanTrigger", reflect.TypeOf((*MockITraceHubService)(nil).SpanTrigger), arg0, arg1) +} diff --git a/backend/modules/observability/domain/task/service/taskexe/tracehub/scheduled_task.go b/backend/modules/observability/domain/task/service/taskexe/tracehub/scheduled_task.go new file mode 100755 index 000000000..02fcbf640 --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/tracehub/scheduled_task.go @@ -0,0 +1,514 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracehub + +import ( + "context" + "fmt" + "time" + + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" + "github.com/coze-dev/coze-loop/backend/pkg/logs" + "github.com/pkg/errors" +) + +// TaskRunCountInfo represents the TaskRunCount information structure +type TaskRunCountInfo struct { + TaskID int64 + TaskRunID int64 + TaskRunCount int64 + TaskRunSuccCount int64 + TaskRunFailCount int64 +} + +// TaskCacheInfo represents task cache information +type TaskCacheInfo struct { + WorkspaceIDs []string + BotIDs []string + Tasks []*entity.ObservabilityTask + UpdateTime time.Time +} + +const ( + transformTaskStatusLockKey = "observability:tracehub:transform_task_status" + transformTaskStatusLockTTL = 3 * time.Minute + syncTaskRunCountsLockKey = "observability:tracehub:sync_task_run_counts" +) + +// startScheduledTask launches the scheduled task goroutine +func (h *TraceHubServiceImpl) startScheduledTask() { + go func() { + for { + select { + case <-h.scheduledTaskTicker.C: + // Execute scheduled task + h.transformTaskStatus() + case <-h.stopChan: + // Stop scheduled task + h.scheduledTaskTicker.Stop() + return + } + } + }() + go func() { + for { + select { + case <-h.syncTaskTicker.C: + // Execute scheduled task + h.syncTaskRunCounts() + h.syncTaskCache() + case 
<-h.stopChan: + // Stop scheduled task + h.syncTaskTicker.Stop() + return + } + } + }() +} + +func (h *TraceHubServiceImpl) transformTaskStatus() { + ctx := context.Background() + ctx = h.fillCtx(ctx) + + if h.locker != nil { + locked, lockErr := h.locker.Lock(ctx, transformTaskStatusLockKey, transformTaskStatusLockTTL) + if lockErr != nil { + logs.CtxError(ctx, "transformTaskStatus acquire lock failed", "err", lockErr) + return + } + if !locked { + logs.CtxInfo(ctx, "transformTaskStatus lock held by others, skip execution") + return + } + defer func() { + if _, err := h.locker.Unlock(transformTaskStatusLockKey); err != nil { + logs.CtxWarn(ctx, "transformTaskStatus release lock failed", "err", err) + } + }() + } + logs.CtxInfo(ctx, "Scheduled task started...") + + // Read all non-final (success/disabled) tasks + taskPOs, err := h.listNonFinalTask(ctx) + if err != nil { + logs.CtxError(ctx, "Failed to get non-final task list", "err", err) + return + } + logs.CtxInfo(ctx, "Scheduled task retrieved number of tasks:%d", len(taskPOs)) + for _, taskPO := range taskPOs { + var taskRun, backfillTaskRun *entity.TaskRun + backfillTaskRun = taskPO.GetBackfillTaskRun() + taskRun = taskPO.GetCurrentTaskRun() + var startTime, endTime time.Time + //taskInfo := tconv.TaskDO2DTO(ctx, taskPO, nil) + + if taskPO.EffectiveTime != nil { + endTime = time.UnixMilli(taskPO.EffectiveTime.EndAt) + startTime = time.UnixMilli(taskPO.EffectiveTime.StartAt) + } + proc := h.taskProcessor.GetTaskProcessor(taskPO.TaskType) + // Task time horizon reached + // End when the task end time is reached + logs.CtxInfo(ctx, "[auto_task]taskID:%d, endTime:%v, startTime:%v", taskPO.ID, endTime, startTime) + if taskPO.BackfillEffectiveTime != nil && taskPO.EffectiveTime != nil && backfillTaskRun != nil { + if time.Now().After(endTime) && backfillTaskRun.RunStatus == task.RunStatusDone { + logs.CtxInfo(ctx, "[OnFinishTaskChange]taskID:%d, time.Now().After(endTime) && backfillTaskRun.RunStatus == task.RunStatusDone", taskPO.ID) + err = proc.OnFinishTaskChange(ctx, taskexe.OnFinishTaskChangeReq{ + Task: taskPO, + TaskRun: backfillTaskRun, + IsFinish: true, + }) + if err != nil { + logs.CtxError(ctx, "OnFinishTaskChange err:%v", err) + continue + } + } + if backfillTaskRun.RunStatus != task.RunStatusDone { + lockKey := fmt.Sprintf(backfillLockKeyTemplate, taskPO.ID) + locked, _, cancel, lockErr := h.locker.LockWithRenew(ctx, lockKey, transformTaskStatusLockTTL, backfillLockMaxHold) + if lockErr != nil || !locked { + h.sendBackfillMessage(ctx, &entity.BackFillEvent{ + TaskID: taskPO.ID, + SpaceID: taskPO.WorkspaceID, + }) + } + defer cancel() + } + } else if taskPO.BackfillEffectiveTime != nil && backfillTaskRun != nil { + if backfillTaskRun.RunStatus == task.RunStatusDone { + logs.CtxInfo(ctx, "[OnFinishTaskChange]taskID:%d, backfillTaskRun.RunStatus == task.RunStatusDone", taskPO.ID) + err = proc.OnFinishTaskChange(ctx, taskexe.OnFinishTaskChangeReq{ + Task: taskPO, + TaskRun: backfillTaskRun, + IsFinish: true, + }) + if err != nil { + logs.CtxError(ctx, "OnFinishTaskChange err:%v", err) + continue + } + } + if backfillTaskRun.RunStatus != task.RunStatusDone { + lockKey := fmt.Sprintf(backfillLockKeyTemplate, taskPO.ID) + locked, _, cancel, lockErr := h.locker.LockWithRenew(ctx, lockKey, transformTaskStatusLockTTL, backfillLockMaxHold) + if lockErr != nil || !locked { + h.sendBackfillMessage(ctx, &entity.BackFillEvent{ + TaskID: taskPO.ID, + SpaceID: taskPO.WorkspaceID, + }) + } + defer cancel() + } + } else if taskPO.EffectiveTime != 
nil { + if time.Now().After(endTime) { + logs.CtxInfo(ctx, "[OnFinishTaskChange]taskID:%d, time.Now().After(endTime)", taskPO.ID) + err = proc.OnFinishTaskChange(ctx, taskexe.OnFinishTaskChangeReq{ + Task: taskPO, + TaskRun: taskRun, + IsFinish: true, + }) + if err != nil { + logs.CtxError(ctx, "OnFinishTaskChange err:%v", err) + continue + } + } + } + // If the task status is unstarted, create it once the task start time is reached + if taskPO.TaskStatus == task.TaskStatusUnstarted && time.Now().After(startTime) { + if !taskPO.Sampler.IsCycle { + err = proc.OnCreateTaskRunChange(ctx, taskexe.OnCreateTaskRunChangeReq{ + CurrentTask: taskPO, + RunType: task.TaskRunTypeNewData, + RunStartAt: taskPO.EffectiveTime.StartAt, + RunEndAt: taskPO.EffectiveTime.EndAt, + }) + err = proc.OnUpdateTaskChange(ctx, taskPO, task.TaskStatusRunning) + if err != nil { + logs.CtxError(ctx, "OnUpdateTaskChange err:%v", err) + continue + } + } else { + err = proc.OnCreateTaskRunChange(ctx, taskexe.OnCreateTaskRunChangeReq{ + CurrentTask: taskPO, + RunType: task.TaskRunTypeNewData, + RunStartAt: taskRun.RunEndAt.UnixMilli(), + RunEndAt: taskRun.RunEndAt.UnixMilli() + (taskRun.RunEndAt.UnixMilli() - taskRun.RunStartAt.UnixMilli()), + }) + if err != nil { + logs.CtxError(ctx, "OnCreateTaskRunChange err:%v", err) + continue + } + } + } + // Handle taskRun + if taskPO.TaskStatus == task.TaskStatusRunning && taskPO.TaskStatus == task.TaskStatusPending { + logs.CtxInfo(ctx, "taskID:%d, taskRun.RunEndAt:%v", taskPO.ID, taskRun.RunEndAt) + // Handling repeated tasks: single task time horizon reached + if time.Now().After(taskRun.RunEndAt) { + logs.CtxInfo(ctx, "[OnFinishTaskChange]taskID:%d, time.Now().After(cycleEndTime)", taskPO.ID) + err = proc.OnFinishTaskChange(ctx, taskexe.OnFinishTaskChangeReq{ + Task: taskPO, + TaskRun: taskRun, + IsFinish: false, + }) + if err != nil { + logs.CtxError(ctx, "OnFinishTaskChange err:%v", err) + continue + } + if taskPO.Sampler.IsCycle { + err = proc.OnCreateTaskRunChange(ctx, taskexe.OnCreateTaskRunChangeReq{ + CurrentTask: taskPO, + RunType: task.TaskRunTypeNewData, + RunStartAt: taskRun.RunEndAt.UnixMilli(), + RunEndAt: taskRun.RunEndAt.UnixMilli() + (taskRun.RunEndAt.UnixMilli() - taskRun.RunStartAt.UnixMilli()), + }) + if err != nil { + logs.CtxError(ctx, "OnCreateTaskRunChange err:%v", err) + continue + } + } + } + } + } + +} + +// syncTaskRunCounts synchronizes TaskRunCount data to the database +func (h *TraceHubServiceImpl) syncTaskRunCounts() { + ctx := context.Background() + ctx = h.fillCtx(ctx) + + if h.locker != nil { + locked, lockErr := h.locker.Lock(ctx, syncTaskRunCountsLockKey, transformTaskStatusLockTTL) + if lockErr != nil { + logs.CtxError(ctx, "syncTaskRunCounts acquire lock failed", "err", lockErr) + return + } + if !locked { + logs.CtxInfo(ctx, "syncTaskRunCounts lock held by others, skip execution") + return + } + defer func() { + if _, err := h.locker.Unlock(syncTaskRunCountsLockKey); err != nil { + logs.CtxWarn(ctx, "syncTaskRunCounts release lock failed", "err", err) + } + }() + } + logs.CtxInfo(ctx, "Start syncing TaskRunCounts to database...") + // 1. Retrieve non-final task list + taskDOs, err := h.listSyncTaskRunTask(ctx) + if err != nil { + logs.CtxError(ctx, "Failed to get non-final task list", "err", err) + return + } + if len(taskDOs) == 0 { + logs.CtxInfo(ctx, "No non-final tasks need syncing") + return + } + + // 2. 
Collect all TaskRun information that needs syncing + var taskRunInfos []*TaskRunCountInfo + for _, taskPO := range taskDOs { + if len(taskPO.TaskRuns) == 0 { + continue + } + + for _, taskRun := range taskPO.TaskRuns { + taskRunInfos = append(taskRunInfos, &TaskRunCountInfo{ + TaskID: taskPO.ID, + TaskRunID: taskRun.ID, + }) + } + } + + if len(taskRunInfos) == 0 { + logs.CtxInfo(ctx, "No TaskRun requires syncing") + return + } + + logs.CtxInfo(ctx, "Number of TaskRun entries requiring syncing:%d", len(taskRunInfos)) + + // 3. Process TaskRun entries in batches of 50 + batchSize := 50 + for i := 0; i < len(taskRunInfos); i += batchSize { + end := i + batchSize + if end > len(taskRunInfos) { + end = len(taskRunInfos) + } + + batch := taskRunInfos[i:end] + h.processBatch(ctx, batch) + } +} + +func (h *TraceHubServiceImpl) syncTaskCache() { + ctx := context.Background() + ctx = h.fillCtx(ctx) + + logs.CtxInfo(ctx, "Start syncing task cache...") + + // 1. Retrieve spaceID, botID, and task information for all non-final tasks from the database + spaceIDs, botIDs, tasks := h.taskRepo.GetObjListWithTask(ctx) + logs.CtxInfo(ctx, "Retrieved task information, taskCount:%d, spaceCount:%d, botCount:%d", len(tasks), len(spaceIDs), len(botIDs)) + + // 2. Build a new cache map + var newCache = TaskCacheInfo{ + WorkspaceIDs: spaceIDs, + BotIDs: botIDs, + Tasks: tasks, + UpdateTime: time.Now(), // Set the current time as the update time + } + + // 3. Clear old cache and update with new cache + h.taskCacheLock.Lock() + defer h.taskCacheLock.Unlock() + + // Clear old cache + h.taskCache.Delete("ObjListWithTask") + + // 4. Write new cache into local cache + h.taskCache.Store("ObjListWithTask", &newCache) + + logs.CtxInfo(ctx, "Task cache sync completed, taskCount:%d, updateTime:%s", len(tasks), newCache.UpdateTime.Format(time.RFC3339)) +} + +// processBatch synchronizes TaskRun counts in batches +func (h *TraceHubServiceImpl) processBatch(ctx context.Context, batch []*TaskRunCountInfo) { + logs.CtxInfo(ctx, "Start processing batch, batchSize:%d", len(batch)) + + // 1. Read Redis count data in batch + for _, info := range batch { + // Read taskruncount + count, err := h.taskRepo.GetTaskRunCount(ctx, info.TaskID, info.TaskRunID) + if err != nil || count == -1 { + logs.CtxWarn(ctx, "Failed to get TaskRunCount, taskID:%d, taskRunID:%d, err:%v", info.TaskID, info.TaskRunID, err) + } else { + info.TaskRunCount = count + } + + // Read taskrun success count + successCount, err := h.taskRepo.GetTaskRunSuccessCount(ctx, info.TaskID, info.TaskRunID) + if err != nil || successCount == -1 { + logs.CtxWarn(ctx, "Failed to get TaskRunSuccessCount, taskID:%d, taskRunID:%d, err:%v", info.TaskID, info.TaskRunID, err) + successCount = 0 + } else { + info.TaskRunSuccCount = successCount + } + + // Read taskrun fail count + failCount, err := h.taskRepo.GetTaskRunFailCount(ctx, info.TaskID, info.TaskRunID) + if err != nil || failCount == -1 { + logs.CtxWarn(ctx, "Failed to get TaskRunFailCount, taskID:%d, taskRunID:%d, err:%v", info.TaskID, info.TaskRunID, err) + failCount = 0 + } else { + info.TaskRunFailCount = failCount + } + + logs.CtxDebug(ctx, "Read count data", + "taskID", info.TaskID, + "taskRunID", info.TaskRunID, + "runCount", info.TaskRunCount, + "successCount", info.TaskRunSuccCount, + "failCount", info.TaskRunFailCount) + } + logs.CtxInfo(ctx, "Start updating TaskRun detail in batch, batchSize:%d, batch:%v", len(batch), batch) + // 2. 
Update database in batch + for _, info := range batch { + err := h.updateTaskRunDetail(ctx, info) + if err != nil { + logs.CtxError(ctx, "Failed to update TaskRun detail", + "taskID", info.TaskID, + "taskRunID", info.TaskRunID, + "err", err) + } else { + logs.CtxDebug(ctx, "Succeeded in updating TaskRun detail", + "taskID", info.TaskID, + "taskRunID", info.TaskRunID) + } + } + + logs.CtxInfo(ctx, "Batch processing completed, batchSize:%d", len(batch)) +} + +// updateTaskRunDetail updates the run_detail field of TaskRun +func (h *TraceHubServiceImpl) updateTaskRunDetail(ctx context.Context, info *TaskRunCountInfo) error { + // Build run_detail JSON data + runDetail := map[string]interface{}{ + "total_count": info.TaskRunCount, + "success_count": info.TaskRunSuccCount, + "failed_count": info.TaskRunFailCount, + } + + // Update using optimistic locking + err := h.taskRepo.UpdateTaskRunWithOCC(ctx, info.TaskRunID, 0, map[string]interface{}{ + "run_detail": ToJSONString(ctx, runDetail), + }) + if err != nil { + return errors.Wrap(err, "Failed to update TaskRun") + } + + return nil +} + +func (h *TraceHubServiceImpl) listNonFinalTask(ctx context.Context) ([]*entity.ObservabilityTask, error) { + var taskPOs []*entity.ObservabilityTask + var offset int32 = 0 + const limit int32 = 1000 + // Paginate through all tasks + for { + tasklist, _, err := h.taskRepo.ListTasks(ctx, mysql.ListTaskParam{ + ReqLimit: limit, + ReqOffset: offset, + TaskFilters: &filter.TaskFilterFields{ + FilterFields: []*filter.TaskFilterField{ + { + FieldName: ptr.Of(filter.TaskFieldNameTaskStatus), + Values: []string{ + string(task.TaskStatusUnstarted), + string(task.TaskStatusRunning), + string(task.TaskStatusPending), + }, + QueryType: ptr.Of(filter.QueryTypeIn), + FieldType: ptr.Of(filter.FieldTypeString), + }, + }, + }, + }) + if err != nil { + logs.CtxError(ctx, "Failed to get non-final task list", "err", err) + return nil, err + } + + // Add tasks from the current page to the full list + taskPOs = append(taskPOs, tasklist...) + + // If fewer tasks than limit are returned, this is the last page + if len(tasklist) < int(limit) { + break + } + + // Move to the next page, increasing offset by 1000 + offset += limit + } + return taskPOs, nil +} + +func (h *TraceHubServiceImpl) listSyncTaskRunTask(ctx context.Context) ([]*entity.ObservabilityTask, error) { + var taskDOs []*entity.ObservabilityTask + taskDOs, err := h.listNonFinalTask(ctx) + if err != nil { + logs.CtxError(ctx, "Failed to get non-final task list", "err", err) + return nil, err + } + var offset int32 = 0 + const limit int32 = 1000 + // Paginate through all tasks + for { + tasklist, _, err := h.taskRepo.ListTasks(ctx, mysql.ListTaskParam{ + ReqLimit: limit, + ReqOffset: offset, + TaskFilters: &filter.TaskFilterFields{ + FilterFields: []*filter.TaskFilterField{ + { + FieldName: ptr.Of(filter.TaskFieldNameTaskStatus), + Values: []string{ + string(task.TaskStatusSuccess), + string(task.TaskStatusDisabled), + }, + QueryType: ptr.Of(filter.QueryTypeIn), + FieldType: ptr.Of(filter.FieldTypeString), + }, + { + FieldName: ptr.Of("updated_at"), + Values: []string{ + fmt.Sprintf("%d", time.Now().Add(-24*time.Hour).UnixMilli()), + }, + QueryType: ptr.Of(filter.QueryTypeGt), + FieldType: ptr.Of(filter.FieldTypeLong), + }, + }, + }, + }) + if err != nil { + logs.CtxError(ctx, "Failed to get non-final task list", "err", err) + break + } + + // Add tasks from the current page to the full list + taskDOs = append(taskDOs, tasklist...) 
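+		// Tasks that recently reached a final state (success/disabled, updated within the last
+		// 24h) stay in the sync list so their final run counts are flushed to the database once more.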
+ + // If fewer tasks than limit are returned, this is the last page + if len(tasklist) < int(limit) { + break + } + + // Move to the next page, increasing offset by 1000 + offset += limit + } + return taskDOs, nil +} diff --git a/backend/modules/observability/domain/task/service/taskexe/tracehub/span_trigger.go b/backend/modules/observability/domain/task/service/taskexe/tracehub/span_trigger.go new file mode 100644 index 000000000..5fae32cd0 --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/tracehub/span_trigger.go @@ -0,0 +1,288 @@ +package tracehub + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/bytedance/gg/gslice" + "github.com/coze-dev/coze-loop/backend/infra/metrics" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + tconv "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" + "github.com/coze-dev/coze-loop/backend/pkg/logs" + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" +) + +func (h *TraceHubServiceImpl) SpanTrigger(ctx context.Context, rawSpan *entity.RawSpan) error { + ctx = h.fillCtx(ctx) + logSuffix := fmt.Sprintf("log_id=%s, trace_id=%s, span_id=%s", rawSpan.LogID, rawSpan.TraceID, rawSpan.SpanID) + logs.CtxInfo(ctx, "auto_task start, log_suffix=%s", logSuffix) + var tags []metrics.T + // 1、Convert to standard span and perform initial filtering based on space_id + span := rawSpan.RawSpanConvertToLoopSpan() + // 1.1 Filter out spans that do not belong to any space or bot + spaceIDs, botIDs, _ := h.getObjListWithTaskFromCache(ctx) + logs.CtxInfo(ctx, "space list: %v, bot list: %v, log_suffix=%s", spaceIDs, botIDs, logSuffix) + if !gslice.Contains(spaceIDs, span.WorkspaceID) && !gslice.Contains(botIDs, span.TagsString["bot_id"]) { + tags = append(tags, metrics.T{Name: TagKeyResult, Value: "no_space_or_bot"}) + logs.CtxInfo(ctx, "no space or bot found for span, space_id=%s,bot_id=%s, log_suffix=%s", span.WorkspaceID, span.TagsString["bot_id"], logSuffix) + return nil + } + // 1.2 Filter out spans of type Evaluator + if gslice.Contains([]string{"Evaluator"}, span.CallType) { + return nil + } + // 2、Match spans against task rules + subs, err := h.getSubscriberOfSpan(ctx, span) + if err != nil { + logs.CtxWarn(ctx, "get subscriber of flow span failed, %s, err: %v", logSuffix, err) + } + + logs.CtxInfo(ctx, "%d subscriber of flow span found, %s", len(subs), logSuffix) + if len(subs) == 0 { + tags = append(tags, metrics.T{Name: TagKeyResult, Value: "no_subscriber"}) + return nil + } + // 3、Sample + subs = gslice.Filter(subs, func(sub *spanSubscriber) bool { return sub.Sampled() }) + logs.CtxInfo(ctx, "%d subscriber of flow span sampled, %s", len(subs), logSuffix) + if len(subs) == 0 { + tags = append(tags, metrics.T{Name: TagKeyResult, Value: "sampler_not_hit"}) + return nil + } + // 3. 
PreDispatch + err = h.preDispatch(ctx, span, subs) + if err != nil { + tags = append(tags, metrics.T{Name: TagKeyResult, Value: "preDispatch_failed"}) + logs.CtxWarn(ctx, "preDispatch flow span failed, %s, err: %v", logSuffix, err) + } + logs.CtxInfo(ctx, "%d preDispatch success, %v", len(subs), subs) + // 4、Dispatch + if err = h.dispatch(ctx, span, subs); err != nil { + tags = append(tags, metrics.T{Name: TagKeyResult, Value: "dispatch_failed"}) + logs.CtxWarn(ctx, "dispatch flow span failed, %s, err: %v", logSuffix, err) + return err + } + tags = append(tags, metrics.T{Name: TagKeyResult, Value: "dispatched"}) + return nil +} + +func (h *TraceHubServiceImpl) getSubscriberOfSpan(ctx context.Context, span *loop_span.Span) ([]*spanSubscriber, error) { + logs.CtxInfo(ctx, "getSubscriberOfSpan start") + var subscribers []*spanSubscriber + taskPOs, err := h.listNonFinalTask(ctx) + if err != nil { + logs.CtxError(ctx, "Failed to get non-final task list", "err", err) + return nil, err + } + taskList := tconv.TaskDOs2DTOs(ctx, taskPOs, nil) + for _, taskDO := range taskList { + proc := h.taskProcessor.GetTaskProcessor(taskDO.TaskType) + subscribers = append(subscribers, &spanSubscriber{ + taskID: taskDO.GetID(), + RWMutex: sync.RWMutex{}, + t: taskDO, + processor: proc, + bufCap: 0, + flushWait: sync.WaitGroup{}, + maxFlushInterval: time.Second * 5, + taskRepo: h.taskRepo, + runType: task.TaskRunTypeNewData, + buildHelper: h.buildHelper, + }) + } + + var ( + merr = &multierror.Error{} + keep int + ) + // Match data according to detailed filter rules + for _, s := range subscribers { + ok, err := s.Match(ctx, span) + logs.CtxInfo(ctx, "Match span, task_id=%d, trace_id=%s, span_id=%s, ok=%v, err=%v", s.taskID, span.TraceID, span.SpanID, ok, err) + if err != nil { + merr = multierror.Append(merr, errors.WithMessagef(err, "match span,task_id=%d, trace_id=%s, span_id=%s", s.taskID, span.TraceID, span.SpanID)) + continue + } + if ok { + subscribers[keep] = s + keep++ + } + } + return subscribers[:keep], merr.ErrorOrNil() +} + +func (h *TraceHubServiceImpl) preDispatch(ctx context.Context, span *loop_span.Span, subs []*spanSubscriber) error { + merr := &multierror.Error{} + var needDispatchSubs []*spanSubscriber + for _, sub := range subs { + if span.StartTime < sub.t.GetRule().GetEffectiveTime().GetStartAt() { + logs.CtxWarn(ctx, "span start time is before task cycle start time, trace_id=%s, span_id=%s", span.TraceID, span.SpanID) + continue + } + // First step: lock for task status change + // Task run status + var runStartAt, runEndAt int64 + if sub.t.GetTaskStatus() == task.TaskStatusUnstarted { + logs.CtxWarn(ctx, "task is unstarted, need sub.Creative") + runStartAt = sub.t.GetRule().GetEffectiveTime().GetStartAt() + if !sub.t.GetRule().GetSampler().GetIsCycle() { + runEndAt = sub.t.GetRule().GetEffectiveTime().GetEndAt() + } else { + switch *sub.t.GetRule().GetSampler().CycleTimeUnit { + case task.TimeUnitDay: + runEndAt = runStartAt + (*sub.t.GetRule().GetSampler().CycleInterval)*24*time.Hour.Milliseconds() + case task.TimeUnitWeek: + runEndAt = runStartAt + (*sub.t.GetRule().GetSampler().CycleInterval)*7*24*time.Hour.Milliseconds() + default: + runEndAt = runStartAt + (*sub.t.GetRule().GetSampler().CycleInterval)*10*time.Minute.Milliseconds() + } + } + if err := sub.Creative(ctx, runStartAt, runEndAt); err != nil { + merr = multierror.Append(merr, errors.WithMessagef(err, "task is unstarted, need sub.Creative,creative processor, task_id=%d", sub.taskID)) + needDispatchSubs = 
append(needDispatchSubs, sub) + continue + } + if err := sub.processor.OnUpdateTaskChange(ctx, tconv.TaskDTO2DO(sub.t, "", nil), task.TaskStatusRunning); err != nil { + logs.CtxWarn(ctx, "OnUpdateTaskChange, task_id=%d, err=%v", sub.taskID, err) + continue + } + } + // Fetch the corresponding task config + taskRunConfig, err := h.taskRepo.GetLatestNewDataTaskRun(ctx, sub.t.WorkspaceID, sub.taskID) + if err != nil { + logs.CtxWarn(ctx, "GetLatestNewDataTaskRun, task_id=%d, err=%v", sub.taskID, err) + continue + } + if taskRunConfig == nil { + logs.CtxWarn(ctx, "task run config not found, task_id=%d", sub.taskID) + runStartAt = sub.t.GetRule().GetEffectiveTime().GetStartAt() + if !sub.t.GetRule().GetSampler().GetIsCycle() { + runEndAt = sub.t.GetRule().GetEffectiveTime().GetEndAt() + } else { + switch *sub.t.GetRule().GetSampler().CycleTimeUnit { + case task.TimeUnitDay: + runEndAt = runStartAt + (*sub.t.GetRule().GetSampler().CycleInterval)*24*time.Hour.Milliseconds() + case task.TimeUnitWeek: + runEndAt = runStartAt + (*sub.t.GetRule().GetSampler().CycleInterval)*7*24*time.Hour.Milliseconds() + default: + runEndAt = runStartAt + (*sub.t.GetRule().GetSampler().CycleInterval)*10*time.Minute.Milliseconds() + } + } + if err = sub.Creative(ctx, runStartAt, runEndAt); err != nil { + merr = multierror.Append(merr, errors.WithMessagef(err, "task run config not found,creative processor, task_id=%d", sub.taskID)) + needDispatchSubs = append(needDispatchSubs, sub) + continue + } + } + sampler := sub.t.GetRule().GetSampler() + // Fetch the corresponding task count and subtask count + taskCount, _ := h.taskRepo.GetTaskCount(ctx, sub.taskID) + taskRunCount, _ := h.taskRepo.GetTaskRunCount(ctx, sub.taskID, taskRunConfig.ID) + logs.CtxInfo(ctx, "preDispatch, task_id=%d, taskCount=%d, taskRunCount=%d", sub.taskID, taskCount, taskRunCount) + endTime := time.UnixMilli(sub.t.GetRule().GetEffectiveTime().GetEndAt()) + // Reached task time limit + if time.Now().After(endTime) { + logs.CtxWarn(ctx, "[OnFinishTaskChange]time.Now().After(endTime) Finish processor, task_id=%d, endTime=%v, now=%v", sub.taskID, endTime, time.Now()) + if err := sub.processor.OnFinishTaskChange(ctx, taskexe.OnFinishTaskChangeReq{ + Task: tconv.TaskDTO2DO(sub.t, "", nil), + TaskRun: taskRunConfig, + IsFinish: true, + }); err != nil { + logs.CtxWarn(ctx, "time.Now().After(endTime) Finish processor, task_id=%d", sub.taskID) + merr = multierror.Append(merr, errors.WithMessagef(err, "time.Now().After(endTime) Finish processor, task_id=%d", sub.taskID)) + continue + } + } + // Reached task limit + if taskCount+1 > sampler.GetSampleSize() { + logs.CtxWarn(ctx, "[OnFinishTaskChange]taskCount+1 > sampler.GetSampleSize() Finish processor, task_id=%d", sub.taskID) + if err := sub.processor.OnFinishTaskChange(ctx, taskexe.OnFinishTaskChangeReq{ + Task: tconv.TaskDTO2DO(sub.t, "", nil), + TaskRun: taskRunConfig, + IsFinish: true, + }); err != nil { + merr = multierror.Append(merr, errors.WithMessagef(err, "time.Now().After(endTime) Finish processor, task_id=%d", sub.taskID)) + continue + } + } + if sampler.GetIsCycle() { + cycleEndTime := time.Unix(0, taskRunConfig.RunEndAt.UnixMilli()*1e6) + // Reached single cycle task time limit + if time.Now().After(cycleEndTime) { + logs.CtxInfo(ctx, "[OnFinishTaskChange]time.Now().After(cycleEndTime) Finish processor, task_id=%d", sub.taskID) + if err := sub.processor.OnFinishTaskChange(ctx, taskexe.OnFinishTaskChangeReq{ + Task: tconv.TaskDTO2DO(sub.t, "", nil), + TaskRun: taskRunConfig, + IsFinish: false, + 
}); err != nil { + merr = multierror.Append(merr, errors.WithMessagef(err, "time.Now().After(endTime) Finish processor, task_id=%d", sub.taskID)) + continue + } + runStartAt = taskRunConfig.RunEndAt.UnixMilli() + runEndAt = taskRunConfig.RunEndAt.UnixMilli() + (taskRunConfig.RunEndAt.UnixMilli() - taskRunConfig.RunStartAt.UnixMilli()) + if err := sub.Creative(ctx, runStartAt, runEndAt); err != nil { + merr = multierror.Append(merr, errors.WithMessagef(err, "time.Now().After(cycleEndTime) creative processor, task_id=%d", sub.taskID)) + needDispatchSubs = append(needDispatchSubs, sub) + continue + } + } + // Reached single cycle task limit + if taskRunCount+1 > sampler.GetCycleCount() { + logs.CtxWarn(ctx, "[OnFinishTaskChange]taskRunCount+1 > sampler.GetCycleCount(), task_id=%d", sub.taskID) + if err := sub.processor.OnFinishTaskChange(ctx, taskexe.OnFinishTaskChangeReq{ + Task: tconv.TaskDTO2DO(sub.t, "", nil), + TaskRun: taskRunConfig, + IsFinish: false, + }); err != nil { + merr = multierror.Append(merr, errors.WithMessagef(err, "time.Now().After(endTime) Finish processor, task_id=%d", sub.taskID)) + continue + } + } + } + } + subs = needDispatchSubs + return merr.ErrorOrNil() +} + +func (h *TraceHubServiceImpl) dispatch(ctx context.Context, span *loop_span.Span, subs []*spanSubscriber) error { + merr := &multierror.Error{} + for _, sub := range subs { + if sub.t.GetTaskStatus() != task.TaskStatusRunning { + continue + } + logs.CtxInfo(ctx, " sub.AddSpan: %v", sub) + if err := sub.AddSpan(ctx, span); err != nil { + merr = multierror.Append(merr, errors.WithMessagef(err, "add span to subscriber, task_id=%d", sub.taskID)) + continue + } + logs.CtxInfo(ctx, "add span to subscriber, task_id=%d, log_id=%s, trace_id=%s, span_id=%s", sub.taskID, + span.LogID, span.TraceID, span.SpanID) + } + return merr.ErrorOrNil() +} + +// getObjListWithTaskFromCache retrieves the task list from cache, falling back to the database if cache is empty +func (h *TraceHubServiceImpl) getObjListWithTaskFromCache(ctx context.Context) ([]string, []string, []*entity.ObservabilityTask) { + // First, try to retrieve tasks from cache + objListWithTask, ok := h.taskCache.Load("ObjListWithTask") + if !ok { + // Cache is empty, fallback to the database + logs.CtxInfo(ctx, "Cache is empty, retrieving task list from database") + return h.taskRepo.GetObjListWithTask(ctx) + } + + cacheInfo, ok := objListWithTask.(*TaskCacheInfo) + if !ok { + logs.CtxError(ctx, "Cache data type mismatch") + return h.taskRepo.GetObjListWithTask(ctx) + } + + logs.CtxInfo(ctx, "Retrieve task list from cache, taskCount=%d, spaceCount=%d, botCount=%d", len(cacheInfo.Tasks), len(cacheInfo.WorkspaceIDs), len(cacheInfo.BotIDs)) + return cacheInfo.WorkspaceIDs, cacheInfo.BotIDs, cacheInfo.Tasks +} diff --git a/backend/modules/observability/domain/task/service/taskexe/tracehub/subscriber.go b/backend/modules/observability/domain/task/service/taskexe/tracehub/subscriber.go new file mode 100644 index 000000000..5f198ba9f --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/tracehub/subscriber.go @@ -0,0 +1,203 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracehub + +import ( + "context" + "math/rand" + "sync" + "time" + + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor" + tconv 
"github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/repo" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/service" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/service/trace/span_filter" + obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +type spanSubscriber struct { + taskID int64 + sync.RWMutex // protect t, buf + t *task.Task + tr *task.TaskRun + processor taskexe.Processor + buf []*loop_span.Span + bufCap int // max buffer size + + flushWait sync.WaitGroup + maxFlushInterval time.Duration + taskRepo repo.ITaskRepo + runType task.TaskRunType + buildHelper service.TraceFilterProcessorBuilder +} + +// Sampled determines whether a span is sampled based on the sampling rate; the sample size will be validated during flush. +func (s *spanSubscriber) Sampled() bool { + t := s.getTask() + if t == nil || t.Rule == nil || t.Rule.Sampler == nil { + return false + } + + const base = 10000 + threshold := int64(float64(base) * t.GetRule().GetSampler().GetSampleRate()) + r := rand.Int63n(base) // todo: rand seed + return r <= threshold +} +func (s *spanSubscriber) getTask() *task.Task { + s.RLock() + defer s.RUnlock() + return s.t +} +func combineFilters(filters ...*loop_span.FilterFields) *loop_span.FilterFields { + filterAggr := &loop_span.FilterFields{ + QueryAndOr: ptr.Of(loop_span.QueryAndOrEnumAnd), + } + for _, f := range filters { + if f == nil { + continue + } + filterAggr.FilterFields = append(filterAggr.FilterFields, &loop_span.FilterField{ + QueryAndOr: ptr.Of(loop_span.QueryAndOrEnumAnd), + SubFilter: f, + }) + } + return filterAggr +} + +// Match checks whether the span matches the task filter. +func (s *spanSubscriber) Match(ctx context.Context, span *loop_span.Span) (bool, error) { + task := s.t + if task == nil || task.Rule == nil { + return false, nil + } + + filters := s.buildSpanFilters(ctx, task) + logs.CtxInfo(ctx, "spanSubscriber Match, taskID: %d, span: %v, filters: %v", s.taskID, span, filters) + if !filters.Satisfied(span) { + return false, nil + } + + return true, nil +} +func (s *spanSubscriber) buildSpanFilters(ctx context.Context, taskConfig *task.Task) *loop_span.FilterFields { + // Additional filters can be constructed based on task configuration if needed. + // Simplified handling here: returning nil means no extra filters are applied. 
+ filters := &loop_span.FilterFields{} + platformFilter, err := s.buildHelper.BuildPlatformRelatedFilter(ctx, loop_span.PlatformType(taskConfig.GetRule().GetSpanFilters().GetPlatformType())) + if err != nil { + return filters + } + builtinFilter, err := buildBuiltinFilters(ctx, platformFilter, &ListSpansReq{ + WorkspaceID: taskConfig.GetWorkspaceID(), + SpanListType: loop_span.SpanListType(taskConfig.GetRule().GetSpanFilters().GetSpanListType()), + }) + if err != nil { + return filters + } + filters = combineFilters(builtinFilter, convertor.FilterFieldsDTO2DO(taskConfig.GetRule().GetSpanFilters().GetFilters())) + + return filters +} +func buildBuiltinFilters(ctx context.Context, f span_filter.Filter, req *ListSpansReq) (*loop_span.FilterFields, error) { + filters := make([]*loop_span.FilterField, 0) + env := &span_filter.SpanEnv{ + WorkspaceID: req.WorkspaceID, + ThirdPartyWorkspaceID: req.ThirdPartyWorkspaceID, + } + basicFilter, forceQuery, err := f.BuildBasicSpanFilter(ctx, env) + if err != nil { + return nil, err + } else if len(basicFilter) == 0 && !forceQuery { // if it's null, no need to query from ck + return nil, nil + } + filters = append(filters, basicFilter...) + switch req.SpanListType { + case loop_span.SpanListTypeRootSpan: + subFilter, err := f.BuildRootSpanFilter(ctx, env) + if err != nil { + return nil, err + } + filters = append(filters, subFilter...) + case loop_span.SpanListTypeLLMSpan: + subFilter, err := f.BuildLLMSpanFilter(ctx, env) + if err != nil { + return nil, err + } + filters = append(filters, subFilter...) + case loop_span.SpanListTypeAllSpan: + subFilter, err := f.BuildALLSpanFilter(ctx, env) + if err != nil { + return nil, err + } + filters = append(filters, subFilter...) + default: + return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid span list type: %s")) + } + filterAggr := &loop_span.FilterFields{ + QueryAndOr: ptr.Of(loop_span.QueryAndOrEnumAnd), + FilterFields: filters, + } + return filterAggr, nil +} + +func (s *spanSubscriber) Creative(ctx context.Context, runStartAt, runEndAt int64) error { + err := s.processor.OnCreateTaskRunChange(ctx, taskexe.OnCreateTaskRunChangeReq{ + CurrentTask: tconv.TaskDTO2DO(s.t, "", nil), + RunType: s.runType, + RunStartAt: runStartAt, + RunEndAt: runEndAt, + }) + if err != nil { + return err + } + return nil +} + +func (s *spanSubscriber) AddSpan(ctx context.Context, span *loop_span.Span) error { + var taskRunConfig *entity.TaskRun + var err error + if s.runType == task.TaskRunTypeNewData { + taskRunConfig, err = s.taskRepo.GetLatestNewDataTaskRun(ctx, nil, s.t.GetID()) + if err != nil { + logs.CtxWarn(ctx, "get latest new data task run failed, task_id=%d, err: %v", s.t.GetID(), err) + return err + } + } else { + taskRunConfig, err = s.taskRepo.GetBackfillTaskRun(ctx, nil, s.t.GetID()) + if err != nil { + logs.CtxWarn(ctx, "get backfill task run failed, task_id=%d, err: %v", s.t.GetID(), err) + return err + } + } + + if taskRunConfig == nil { + logs.CtxWarn(ctx, "no taskRunConfig:%v", taskRunConfig) + return nil + } + + if taskRunConfig.RunEndAt.UnixMilli() < time.Now().UnixMilli() || taskRunConfig.RunStartAt.UnixMilli() > time.Now().UnixMilli() { + return nil + } + if span.StartTime < taskRunConfig.RunStartAt.UnixMilli() { + logs.CtxWarn(ctx, "span start time is before task cycle start time, trace_id=%s, span_id=%s", span.TraceID, span.SpanID) + return nil + } + trigger := &taskexe.Trigger{Task: tconv.TaskDTO2DO(s.t, "", nil), Span: span, TaskRun: taskRunConfig} + 
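+	// Hand the matched span to the task-type-specific processor together with its task and
+	// the currently active run window.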
logs.CtxInfo(ctx, "invoke processor, trigger: %v", trigger) + err = s.processor.Invoke(ctx, trigger) + if err != nil { + logs.CtxWarn(ctx, "invoke processor failed, trace_id=%s, span_id=%s, err: %v", span.TraceID, span.SpanID, err) + return err + } + + return nil +} diff --git a/backend/modules/observability/domain/task/service/taskexe/tracehub/trace_hub.go b/backend/modules/observability/domain/task/service/taskexe/tracehub/trace_hub.go new file mode 100644 index 000000000..d5b03ce04 --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/tracehub/trace_hub.go @@ -0,0 +1,102 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracehub + +import ( + "context" + "sync" + "time" + + "github.com/coze-dev/coze-loop/backend/infra/external/benefit" + "github.com/coze-dev/coze-loop/backend/infra/lock" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/mq" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/tenant" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/repo" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/service/taskexe/processor" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" + trace_repo "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/repo" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/service" +) + +//go:generate mockgen -destination=mocks/trace_hub_service.go -package=mocks . ITraceHubService + +type ITraceHubService interface { + SpanTrigger(ctx context.Context, event *entity.RawSpan) error + CallBack(ctx context.Context, event *entity.AutoEvalEvent) error + Correction(ctx context.Context, event *entity.CorrectionEvent) error + BackFill(ctx context.Context, event *entity.BackFillEvent) error +} + +func NewTraceHubImpl( + tRepo repo.ITaskRepo, + traceRepo trace_repo.ITraceRepo, + tenantProvider tenant.ITenantProvider, + buildHelper service.TraceFilterProcessorBuilder, + taskProcessor *processor.TaskProcessor, + benefitSvc benefit.IBenefitService, + aid int32, + backfillProducer mq.IBackfillProducer, + locker lock.ILocker, +) (ITraceHubService, error) { + // Create two independent timers with different intervals + scheduledTaskTicker := time.NewTicker(5 * time.Minute) // Task status lifecycle management - 5-minute interval + syncTaskTicker := time.NewTicker(2 * time.Minute) // Data synchronization - 1-minute interval + impl := &TraceHubServiceImpl{ + taskRepo: tRepo, + scheduledTaskTicker: scheduledTaskTicker, + syncTaskTicker: syncTaskTicker, + stopChan: make(chan struct{}), + traceRepo: traceRepo, + tenantProvider: tenantProvider, + buildHelper: buildHelper, + taskProcessor: taskProcessor, + benefitSvc: benefitSvc, + aid: aid, + backfillProducer: backfillProducer, + locker: locker, + } + + // Start the scheduled tasks immediately + impl.startScheduledTask() + + // default+lane?+新集群?——定时任务和任务处理分开——内场 + return impl, nil +} + +type TraceHubServiceImpl struct { + scheduledTaskTicker *time.Ticker // Task status lifecycle management timer - 5-minute interval + syncTaskTicker *time.Ticker // Data synchronization timer - 1-minute interval + stopChan chan struct{} + taskRepo repo.ITaskRepo + traceRepo trace_repo.ITraceRepo + tenantProvider tenant.ITenantProvider + taskProcessor *processor.TaskProcessor + buildHelper 
service.TraceFilterProcessorBuilder + benefitSvc benefit.IBenefitService + backfillProducer mq.IBackfillProducer + locker lock.ILocker + + flushErrLock sync.Mutex + flushErr []error + + // Local cache - caching non-terminal task information + taskCache sync.Map + taskCacheLock sync.RWMutex + + aid int32 +} + +type flushReq struct { + retrievedSpanCount int64 + pageToken string + spans []*loop_span.Span + noMore bool +} + +const TagKeyResult = "tag_key" + +func (h *TraceHubServiceImpl) Close() { + close(h.stopChan) +} diff --git a/backend/modules/observability/domain/task/service/taskexe/tracehub/utils.go b/backend/modules/observability/domain/task/service/taskexe/tracehub/utils.go new file mode 100644 index 000000000..ee8f5a56f --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/tracehub/utils.go @@ -0,0 +1,130 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracehub + +import ( + "context" + "fmt" + "os" + "strconv" + + "github.com/bytedance/gopkg/cloud/metainfo" + "github.com/bytedance/sonic" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/repo" + obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +const ( + CtxKeyEnv = "K_ENV" + XttEnv = "x_tt_env" +) + +func ToJSONString(ctx context.Context, obj interface{}) string { + if obj == nil { + return "" + } + jsonData, err := sonic.Marshal(obj) + if err != nil { + logs.CtxError(ctx, "JSON marshal error: %v", err) + return "" + } + jsonStr := string(jsonData) + return jsonStr +} + +func (h *TraceHubServiceImpl) fillCtx(ctx context.Context) context.Context { + + logID := logs.NewLogID() + ctx = logs.SetLogID(ctx, logID) + + //todo:是否需要?——eval + ctx = metainfo.WithPersistentValue(ctx, "LANE_C_FORNAX_APPID", strconv.FormatInt(int64(h.aid), 10)) + if os.Getenv("TCE_HOST_ENV") == "boe" { + ctx = context.WithValue(ctx, CtxKeyEnv, "boe_auto_task") + } else { + ctx = context.WithValue(ctx, CtxKeyEnv, "ppe_auto_task") + } + if env := os.Getenv(XttEnv); env != "" { + ctx = context.WithValue(ctx, CtxKeyEnv, env) //nolint:staticcheck,SA1029 + } + return ctx +} + +func (h *TraceHubServiceImpl) getTenants(ctx context.Context, platform loop_span.PlatformType) ([]string, error) { + return h.tenantProvider.GetTenantsByPlatformType(ctx, platform) +} +func (h *TraceHubServiceImpl) getSpan(ctx context.Context, tenants []string, spanIds []string, traceId, workspaceId string, startAt, endAt int64) ([]*loop_span.Span, error) { + if len(spanIds) == 0 || workspaceId == "" { + return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode) + } + var filterFields []*loop_span.FilterField + filterFields = append(filterFields, &loop_span.FilterField{ + FieldName: loop_span.SpanFieldSpanId, + FieldType: loop_span.FieldTypeString, + Values: spanIds, + QueryType: ptr.Of(loop_span.QueryTypeEnumIn), + }) + filterFields = append(filterFields, &loop_span.FilterField{ + FieldName: loop_span.SpanFieldSpaceId, + FieldType: loop_span.FieldTypeString, + Values: []string{workspaceId}, + QueryType: ptr.Of(loop_span.QueryTypeEnumEq), + }) + if traceId != "" { + filterFields = append(filterFields, 
&loop_span.FilterField{ + FieldName: loop_span.SpanFieldTraceId, + FieldType: loop_span.FieldTypeString, + Values: []string{traceId}, + + QueryType: ptr.Of(loop_span.QueryTypeEnumEq), + }) + } + res, err := h.traceRepo.ListSpans(ctx, &repo.ListSpansParam{ + Tenants: tenants, + Filters: &loop_span.FilterFields{ + FilterFields: filterFields, + }, + StartAt: startAt, + EndAt: endAt, + NotQueryAnnotation: true, + Limit: 2, + }) + if err != nil { + logs.CtxError(ctx, "failed to list span, %v", err) + return nil, err + } else if len(res.Spans) == 0 { + return nil, nil + } + return res.Spans, nil +} + +// updateTaskRunDetailsCount updates the Redis count based on Status +func (h *TraceHubServiceImpl) updateTaskRunDetailsCount(ctx context.Context, taskID int64, turn *entity.OnlineExptTurnEvalResult) error { + // Retrieve taskRunID from Ext + taskRunIDStr := turn.Ext["run_id"] + if taskRunIDStr == "" { + return fmt.Errorf("task_run_id not found in ext") + } + + taskRunID, err := strconv.ParseInt(taskRunIDStr, 10, 64) + if err != nil { + return fmt.Errorf("invalid task_run_id: %s, err: %v", taskRunIDStr, err) + } + // Increase the corresponding counter based on Status + switch turn.Status { + case entity.EvaluatorRunStatus_Success: + return h.taskRepo.IncrTaskRunSuccessCount(ctx, taskID, taskRunID) + case entity.EvaluatorRunStatus_Fail: + return h.taskRepo.IncrTaskRunFailCount(ctx, taskID, taskRunID) + default: + logs.CtxDebug(ctx, "unknown evaluation status, skip counting: taskID=%d, taskRunID=%d, status=%d", + taskID, taskRunID, turn.Status) + return nil + } +} diff --git a/backend/modules/observability/domain/task/service/taskexe/types.go b/backend/modules/observability/domain/task/service/taskexe/types.go new file mode 100644 index 000000000..40a7dee5c --- /dev/null +++ b/backend/modules/observability/domain/task/service/taskexe/types.go @@ -0,0 +1,56 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package taskexe + +import ( + "context" + "errors" + + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task" + task_entity "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" +) + +type Trigger struct { + Task *task_entity.ObservabilityTask + Span *loop_span.Span + TaskRun *task_entity.TaskRun +} + +var ( + ErrInvalidConfig = errors.New("invalid config") + ErrInvalidTrigger = errors.New("invalid span trigger") +) + +type OnCreateTaskRunChangeReq struct { + CurrentTask *task_entity.ObservabilityTask + RunType task.TaskRunType + RunStartAt int64 + RunEndAt int64 +} +type OnFinishTaskRunChangeReq struct { + Task *task_entity.ObservabilityTask + TaskRun *task_entity.TaskRun +} +type OnFinishTaskChangeReq struct { + Task *task_entity.ObservabilityTask + TaskRun *task_entity.TaskRun + IsFinish bool +} + +type Processor interface { + ValidateConfig(ctx context.Context, config any) error + Invoke(ctx context.Context, trigger *Trigger) error + + OnCreateTaskChange(ctx context.Context, currentTask *task_entity.ObservabilityTask) error + OnUpdateTaskChange(ctx context.Context, currentTask *task_entity.ObservabilityTask, taskOp task.TaskStatus) error + OnFinishTaskChange(ctx context.Context, param OnFinishTaskChangeReq) error + + OnCreateTaskRunChange(ctx context.Context, param OnCreateTaskRunChangeReq) error + OnFinishTaskRunChange(ctx context.Context, param OnFinishTaskRunChangeReq) error +} + +type ProcessorUnion interface { + Processor +} diff 
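For orientation, the Processor contract introduced in types.go above can be satisfied by a stub along the following lines. This is an illustrative sketch only, not part of the patch; the type name is hypothetical and the signatures are copied from the interface definition:

	// noopProcessor is a hypothetical stub that satisfies taskexe.Processor.
	type noopProcessor struct{}

	func (noopProcessor) ValidateConfig(ctx context.Context, config any) error { return nil }
	func (noopProcessor) Invoke(ctx context.Context, trigger *taskexe.Trigger) error {
		// a real processor dispatches on trigger.Task, trigger.Span and trigger.TaskRun here
		return nil
	}
	func (noopProcessor) OnCreateTaskChange(ctx context.Context, t *task_entity.ObservabilityTask) error { return nil }
	func (noopProcessor) OnUpdateTaskChange(ctx context.Context, t *task_entity.ObservabilityTask, op task.TaskStatus) error { return nil }
	func (noopProcessor) OnFinishTaskChange(ctx context.Context, p taskexe.OnFinishTaskChangeReq) error { return nil }
	func (noopProcessor) OnCreateTaskRunChange(ctx context.Context, p taskexe.OnCreateTaskRunChangeReq) error { return nil }
	func (noopProcessor) OnFinishTaskRunChange(ctx context.Context, p taskexe.OnFinishTaskRunChangeReq) error { return nil }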
--git a/backend/modules/observability/domain/trace/entity/dataset.go b/backend/modules/observability/domain/trace/entity/dataset.go index 5b4cb199d..1e0761ac5 100644 --- a/backend/modules/observability/domain/trace/entity/dataset.go +++ b/backend/modules/observability/domain/trace/entity/dataset.go @@ -5,11 +5,13 @@ package entity import ( "context" + "strconv" "github.com/bytedance/gg/gptr" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/common" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" "github.com/coze-dev/coze-loop/backend/pkg/json" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" "github.com/coze-dev/coze-loop/backend/pkg/logs" "github.com/coze-dev/cozeloop-go/spec/tracespec" ) @@ -57,6 +59,8 @@ type Dataset struct { DatasetVersion DatasetVersion // 评测集属性 EvaluationBizCategory *EvaluationBizCategory + Seesion *common.Session + UserID *string } type DatasetVersion struct { @@ -95,7 +99,11 @@ type FieldSchema struct { DisplayFormat FieldDisplayFormat } -func NewDataset(id, spaceID int64, name string, category DatasetCategory, schema DatasetSchema) *Dataset { +func NewDataset(id, spaceID int64, name string, category DatasetCategory, schema DatasetSchema, session *common.Session) *Dataset { + var userID *string + if session != nil { + userID = ptr.Of(strconv.FormatInt(*session.UserID, 10)) + } dataset := &Dataset{ ID: id, WorkspaceID: spaceID, @@ -104,6 +112,8 @@ func NewDataset(id, spaceID int64, name string, category DatasetCategory, schema DatasetSchema: schema, }, DatasetCategory: category, + Seesion: session, + UserID: userID, } return dataset } @@ -128,6 +138,7 @@ type DatasetItem struct { Error []*ItemError SpanType string SpanName string + Source *ItemSource } type ItemError struct { @@ -142,6 +153,40 @@ type FieldData struct { Content *Content } +type ItemSource struct { + Type LineageSourceType + // 任务类型,根据该字段区分数据导入任务/数据回流任务/... 
+ JobType *TrackedJobType + // ID of the job this item is associated with; 0 means there is no corresponding job (for example, the row was produced by cloning another data row) + JobID *int64 + // When type = DataReflow, span information is taken from this field + Span *TrackedTraceSpan +} +type LineageSourceType int64 + +const ( + // Data reflow; use ItemSource.span.isManual to tell whether the reflow was manual. For automatic reflow, ItemSource.jobID carries the corresponding job ID + LineageSourceType_DataReflow LineageSourceType = 4 +) + +type TrackedJobType int64 + +const ( + // Data import job + TrackedJobType_DatasetIOJob TrackedJobType = 1 + // Data reflow job + TrackedJobType_DataReflow TrackedJobType = 2 +) + +type TrackedTraceSpan struct { + TraceID *string + SpanID *string + SpanName *string + SpanType *string + // Whether the data was reflowed manually + IsManual *bool +} + type Content struct { ContentType ContentType Text string @@ -201,7 +246,7 @@ func (c *Content) GetMultiPart() []*Content { return c.MultiPart } -func NewDatasetItem(workspaceID int64, datasetID int64, span *loop_span.Span) *DatasetItem { +func NewDatasetItem(workspaceID int64, datasetID int64, span *loop_span.Span, source *ItemSource) *DatasetItem { if span == nil { return nil } @@ -213,6 +258,7 @@ func NewDatasetItem(workspaceID int64, datasetID int64, span *loop_span.Span) *D FieldData: make([]*FieldData, 0), SpanType: span.SpanType, SpanName: span.SpanName, + Source: source, } } diff --git a/backend/modules/observability/domain/trace/entity/dataset_test.go b/backend/modules/observability/domain/trace/entity/dataset_test.go index 2d6cdbc61..5b9f2a957 100755 --- a/backend/modules/observability/domain/trace/entity/dataset_test.go +++ b/backend/modules/observability/domain/trace/entity/dataset_test.go @@ -95,7 +95,7 @@ func TestNewDataset(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := NewDataset(tt.args.id, tt.args.spaceID, tt.args.name, tt.args.category, tt.args.schema) + got := NewDataset(tt.args.id, tt.args.spaceID, tt.args.name, tt.args.category, tt.args.schema, nil) assert.Equal(t, tt.want, got) }) } @@ -224,7 +224,7 @@ func TestNewDatasetItem(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := NewDatasetItem(tt.args.workspaceID, tt.args.datasetID, tt.args.span) + got := NewDatasetItem(tt.args.workspaceID, tt.args.datasetID, tt.args.span, nil) assert.Equal(t, tt.want, got) }) } diff --git a/backend/modules/observability/domain/trace/entity/loop_span/annotation.go b/backend/modules/observability/domain/trace/entity/loop_span/annotation.go index 004f481fa..ca5de6f4c 100644 --- a/backend/modules/observability/domain/trace/entity/loop_span/annotation.go +++ b/backend/modules/observability/domain/trace/entity/loop_span/annotation.go @@ -4,11 +4,18 @@ package loop_span import ( + "context" "crypto/sha256" "encoding/hex" "fmt" + "strconv" "time" + "github.com/apaxa-go/helper/strconvh" + "github.com/bytedance/gg/gptr" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/annotation" + domain_common "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" + "github.com/coze-dev/coze-loop/backend/pkg/logs" "github.com/samber/lo" ) @@ -140,6 +147,97 @@ func (a *Annotation) GetDatasetMetadata() *ManualDatasetMetadata { return metadata } +func (a *Annotation) CorrectAutoEvaluateScore(score float64, reasoning string, updateBy string) { + if a.Corrections == nil { + // On the first correction, record the original LLM result first + a.Corrections = make([]AnnotationCorrection, 0) + a.Corrections = append(a.Corrections, AnnotationCorrection{ + Reasoning: a.Reasoning, + Value: a.Value, + Type: AnnotationCorrectionTypeLLM, + UpdateAt: a.UpdatedAt, + UpdatedBy: a.UpdatedBy, 
+ }) + } + // Append the manual correction record + a.Corrections = append(a.Corrections, AnnotationCorrection{ + Reasoning: reasoning, + Value: NewDoubleValue(score), + Type: AnnotationCorrectionTypeManual, + UpdateAt: time.Now(), + UpdatedBy: updateBy, + }) + // Update the current values + a.Reasoning = reasoning + a.Value = NewDoubleValue(score) + a.UpdatedBy = updateBy + a.UpdatedAt = time.Now() +} + +func (a *Annotation) ToFornaxAnnotation(ctx context.Context) (fa *annotation.Annotation) { + fa = &annotation.Annotation{} + fa.ID = lo.ToPtr(a.ID) + fa.Type = lo.ToPtr(string(a.AnnotationType)) + fa.Key = lo.ToPtr(a.Key) + + fa.Value = lo.ToPtr(a.Value.StringValue) + switch a.Value.ValueType { + case annotation.ValueTypeString: + fa.ValueType = lo.ToPtr(annotation.ValueTypeString) + fa.Value = lo.ToPtr(a.Value.StringValue) + case annotation.ValueTypeLong: + fa.ValueType = lo.ToPtr(annotation.ValueTypeLong) + fa.Value = lo.ToPtr(strconvh.FormatInt64(a.Value.LongValue)) + case annotation.ValueTypeDouble: + fa.ValueType = lo.ToPtr(annotation.ValueTypeDouble) + fa.Value = lo.ToPtr(strconvh.FormatFloat64(a.Value.FloatValue)) + case annotation.ValueTypeBool: + fa.ValueType = lo.ToPtr(annotation.ValueTypeBool) + fa.Value = lo.ToPtr(strconvh.FormatBool(a.Value.BoolValue)) + default: + logs.CtxWarn(ctx, "toFornaxAnnotation invalid ValueType", "ValueType", a.Value.ValueType) + } + switch a.AnnotationType { + case annotation.AnnotationTypeAutoEvaluate: + fa.AutoEvaluate = a.toAutoEvaluate() + default: + logs.CtxWarn(ctx, "toFornaxAnnotation invalid AnnotationType", "AnnotationType", a.AnnotationType) + } + fa.SetBaseInfo(&domain_common.BaseInfo{ + CreatedBy: &domain_common.UserInfo{UserID: gptr.Of(a.CreatedBy)}, + UpdatedBy: &domain_common.UserInfo{UserID: gptr.Of(a.UpdatedBy)}, + CreatedAt: gptr.Of(a.CreatedAt.UnixMilli()), + UpdatedAt: gptr.Of(a.UpdatedAt.UnixMilli()), + }) + return fa +} +func (a *Annotation) toAutoEvaluate() *annotation.AutoEvaluate { + metadata := a.GetAutoEvaluateMetadata() + if metadata == nil { + return nil + } + res := annotation.NewAutoEvaluate() + res.EvaluatorVersionID = metadata.EvaluatorVersionID + res.TaskID = strconv.FormatInt(metadata.TaskID, 10) + res.RecordID = metadata.EvaluatorRecordID + res.EvaluatorResult_ = annotation.NewEvaluatorResult_() + res.EvaluatorResult_.Score = lo.ToPtr(a.Value.FloatValue) + res.EvaluatorResult_.Reasoning = lo.ToPtr(a.Reasoning) + if len(a.Corrections) > 0 { + // Take the most recent manual correction + manualCorrections := lo.Filter(a.Corrections, func(item AnnotationCorrection, index int) bool { + return item.Type == AnnotationCorrectionTypeManual + }) + if len(manualCorrections) > 0 { + manualCorrection := manualCorrections[len(manualCorrections)-1] + res.EvaluatorResult_.Correction = annotation.NewCorrection() + res.EvaluatorResult_.Correction.Score = lo.ToPtr(manualCorrection.Value.FloatValue) + res.EvaluatorResult_.Correction.Explain = lo.ToPtr(manualCorrection.Reasoning) + } + } + return res +} + func (a AnnotationList) GetUserIDs() []string { if len(a) == 0 { return nil diff --git a/backend/modules/observability/domain/trace/entity/loop_span/filter.go b/backend/modules/observability/domain/trace/entity/loop_span/filter.go index 8ecde54d7..1860cc3b6 100644 --- a/backend/modules/observability/domain/trace/entity/loop_span/filter.go +++ b/backend/modules/observability/domain/trace/entity/loop_span/filter.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/coze-dev/coze-loop/backend/pkg/json" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" "github.com/coze-dev/coze-loop/backend/pkg/logs" ) @@ 
-476,3 +477,19 @@ func anyToFloat64(val any) (float64, error) { return 0, fmt.Errorf("invalid float") } } + +func CombineFilters(filters ...*FilterFields) *FilterFields { + filterAggr := &FilterFields{ + QueryAndOr: ptr.Of(QueryAndOrEnumAnd), + } + for _, f := range filters { + if f == nil { + continue + } + filterAggr.FilterFields = append(filterAggr.FilterFields, &FilterField{ + QueryAndOr: ptr.Of(QueryAndOrEnumAnd), + SubFilter: f, + }) + } + return filterAggr +} diff --git a/backend/modules/observability/domain/trace/entity/loop_span/span.go b/backend/modules/observability/domain/trace/entity/loop_span/span.go index e38c0802d..152b04511 100644 --- a/backend/modules/observability/domain/trace/entity/loop_span/span.go +++ b/backend/modules/observability/domain/trace/entity/loop_span/span.go @@ -217,6 +217,13 @@ func (s *Span) getTokens(ctx context.Context) (inputTokens, outputTokens int64, return inputToken, outputToken, nil } +func (s *Span) getStatus() string { + if s.StatusCode == 0 { + return SpanStatusSuccess + } + return SpanStatusError +} + // filter使用, 当前只支持特定参数,后续有需要可拓展到其他参数 func (s *Span) GetFieldValue(fieldName string, isSystem bool) any { switch fieldName { @@ -252,6 +259,8 @@ func (s *Span) GetFieldValue(fieldName string, isSystem bool) any { return s.ObjectStorage case SpanFieldMethod: return s.Method + case SpanFieldStatus: + return s.getStatus() } if isSystem { if val, ok := s.SystemTagsString[fieldName]; ok { diff --git a/backend/modules/observability/domain/trace/repo/mocks/trace.go b/backend/modules/observability/domain/trace/repo/mocks/trace.go index 8dc5c9cbe..20b331e76 100644 --- a/backend/modules/observability/domain/trace/repo/mocks/trace.go +++ b/backend/modules/observability/domain/trace/repo/mocks/trace.go @@ -3,7 +3,7 @@ // // Generated by this command: // -// mockgen -destination=modules/observability/domain/trace/repo/mocks/trace.go -package=mocks github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/repo ITraceRepo +// mockgen -destination=mocks/trace.go -package=mocks . ITraceRepo // // Package mocks is a generated GoMock package. @@ -128,3 +128,17 @@ func (mr *MockITraceRepoMockRecorder) ListSpans(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSpans", reflect.TypeOf((*MockITraceRepo)(nil).ListSpans), arg0, arg1) } + +// UpsertAnnotation mocks base method. +func (m *MockITraceRepo) UpsertAnnotation(arg0 context.Context, arg1 *repo.UpsertAnnotationParam) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertAnnotation", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpsertAnnotation indicates an expected call of UpsertAnnotation. +func (mr *MockITraceRepoMockRecorder) UpsertAnnotation(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertAnnotation", reflect.TypeOf((*MockITraceRepo)(nil).UpsertAnnotation), arg0, arg1) +} diff --git a/backend/modules/observability/domain/trace/repo/mocks/view.go b/backend/modules/observability/domain/trace/repo/mocks/view.go index 12aa19f8c..419109d6c 100644 --- a/backend/modules/observability/domain/trace/repo/mocks/view.go +++ b/backend/modules/observability/domain/trace/repo/mocks/view.go @@ -21,7 +21,6 @@ import ( type MockIViewRepo struct { ctrl *gomock.Controller recorder *MockIViewRepoMockRecorder - isgomock struct{} } // MockIViewRepoMockRecorder is the mock recorder for MockIViewRepo. 
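As a usage note for the CombineFilters helper added to filter.go above: it AND-combines any number of filter groups, skipping nil inputs, and wraps each non-nil group as a SubFilter entry. A minimal sketch (illustrative only; the variable names are hypothetical):

	builtin := &loop_span.FilterFields{FilterFields: []*loop_span.FilterField{ /* platform / basic span filters */ }}
	fromRule := &loop_span.FilterFields{FilterFields: []*loop_span.FilterField{ /* filters taken from the task rule */ }}
	combined := loop_span.CombineFilters(builtin, nil, fromRule) // the nil group is ignored
	// combined.QueryAndOr is AND; builtin and fromRule each appear as one SubFilter field.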
@@ -42,74 +41,74 @@ func (m *MockIViewRepo) EXPECT() *MockIViewRepoMockRecorder { } // CreateView mocks base method. -func (m *MockIViewRepo) CreateView(ctx context.Context, do *entity.ObservabilityView) (int64, error) { +func (m *MockIViewRepo) CreateView(arg0 context.Context, arg1 *entity.ObservabilityView) (int64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateView", ctx, do) + ret := m.ctrl.Call(m, "CreateView", arg0, arg1) ret0, _ := ret[0].(int64) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateView indicates an expected call of CreateView. -func (mr *MockIViewRepoMockRecorder) CreateView(ctx, do any) *gomock.Call { +func (mr *MockIViewRepoMockRecorder) CreateView(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateView", reflect.TypeOf((*MockIViewRepo)(nil).CreateView), ctx, do) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateView", reflect.TypeOf((*MockIViewRepo)(nil).CreateView), arg0, arg1) } // DeleteView mocks base method. -func (m *MockIViewRepo) DeleteView(ctx context.Context, id, workspaceID int64, userID string) error { +func (m *MockIViewRepo) DeleteView(arg0 context.Context, arg1, arg2 int64, arg3 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteView", ctx, id, workspaceID, userID) + ret := m.ctrl.Call(m, "DeleteView", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // DeleteView indicates an expected call of DeleteView. -func (mr *MockIViewRepoMockRecorder) DeleteView(ctx, id, workspaceID, userID any) *gomock.Call { +func (mr *MockIViewRepoMockRecorder) DeleteView(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteView", reflect.TypeOf((*MockIViewRepo)(nil).DeleteView), ctx, id, workspaceID, userID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteView", reflect.TypeOf((*MockIViewRepo)(nil).DeleteView), arg0, arg1, arg2, arg3) } // GetView mocks base method. -func (m *MockIViewRepo) GetView(ctx context.Context, id int64, workspaceID *int64, userID *string) (*entity.ObservabilityView, error) { +func (m *MockIViewRepo) GetView(arg0 context.Context, arg1 int64, arg2 *int64, arg3 *string) (*entity.ObservabilityView, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetView", ctx, id, workspaceID, userID) + ret := m.ctrl.Call(m, "GetView", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*entity.ObservabilityView) ret1, _ := ret[1].(error) return ret0, ret1 } // GetView indicates an expected call of GetView. -func (mr *MockIViewRepoMockRecorder) GetView(ctx, id, workspaceID, userID any) *gomock.Call { +func (mr *MockIViewRepoMockRecorder) GetView(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetView", reflect.TypeOf((*MockIViewRepo)(nil).GetView), ctx, id, workspaceID, userID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetView", reflect.TypeOf((*MockIViewRepo)(nil).GetView), arg0, arg1, arg2, arg3) } // ListViews mocks base method. 
-func (m *MockIViewRepo) ListViews(ctx context.Context, workspaceID int64, userID string) ([]*entity.ObservabilityView, error) { +func (m *MockIViewRepo) ListViews(arg0 context.Context, arg1 int64, arg2 string) ([]*entity.ObservabilityView, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListViews", ctx, workspaceID, userID) + ret := m.ctrl.Call(m, "ListViews", arg0, arg1, arg2) ret0, _ := ret[0].([]*entity.ObservabilityView) ret1, _ := ret[1].(error) return ret0, ret1 } // ListViews indicates an expected call of ListViews. -func (mr *MockIViewRepoMockRecorder) ListViews(ctx, workspaceID, userID any) *gomock.Call { +func (mr *MockIViewRepoMockRecorder) ListViews(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListViews", reflect.TypeOf((*MockIViewRepo)(nil).ListViews), ctx, workspaceID, userID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListViews", reflect.TypeOf((*MockIViewRepo)(nil).ListViews), arg0, arg1, arg2) } // UpdateView mocks base method. -func (m *MockIViewRepo) UpdateView(ctx context.Context, do *entity.ObservabilityView) error { +func (m *MockIViewRepo) UpdateView(arg0 context.Context, arg1 *entity.ObservabilityView) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateView", ctx, do) + ret := m.ctrl.Call(m, "UpdateView", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // UpdateView indicates an expected call of UpdateView. -func (mr *MockIViewRepoMockRecorder) UpdateView(ctx, do any) *gomock.Call { +func (mr *MockIViewRepoMockRecorder) UpdateView(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateView", reflect.TypeOf((*MockIViewRepo)(nil).UpdateView), ctx, do) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateView", reflect.TypeOf((*MockIViewRepo)(nil).UpdateView), arg0, arg1) } diff --git a/backend/modules/observability/domain/trace/repo/trace.go b/backend/modules/observability/domain/trace/repo/trace.go index 75d56aa44..c879d17fd 100644 --- a/backend/modules/observability/domain/trace/repo/trace.go +++ b/backend/modules/observability/domain/trace/repo/trace.go @@ -67,6 +67,13 @@ type InsertAnnotationParam struct { Annotations []*loop_span.Annotation } +type UpsertAnnotationParam struct { + Tenant string + TTL loop_span.TTL + Annotations []*loop_span.Annotation + IsSync bool +} + //go:generate mockgen -destination=mocks/trace.go -package=mocks . ITraceRepo type ITraceRepo interface { InsertSpans(context.Context, *InsertTraceParam) error @@ -75,4 +82,5 @@ type ITraceRepo interface { ListAnnotations(context.Context, *ListAnnotationsParam) (loop_span.AnnotationList, error) GetAnnotation(context.Context, *GetAnnotationParam) (*loop_span.Annotation, error) InsertAnnotations(context.Context, *InsertAnnotationParam) error + UpsertAnnotation(ctx context.Context, param *UpsertAnnotationParam) error } diff --git a/backend/modules/observability/domain/trace/service/mocks/ingestion.go b/backend/modules/observability/domain/trace/service/mocks/ingestion.go index 9010a34ad..397a9493c 100644 --- a/backend/modules/observability/domain/trace/service/mocks/ingestion.go +++ b/backend/modules/observability/domain/trace/service/mocks/ingestion.go @@ -20,7 +20,6 @@ import ( type MockIngestionService struct { ctrl *gomock.Controller recorder *MockIngestionServiceMockRecorder - isgomock struct{} } // MockIngestionServiceMockRecorder is the mock recorder for MockIngestionService. 
@@ -41,27 +40,27 @@ func (m *MockIngestionService) EXPECT() *MockIngestionServiceMockRecorder { } // RunAsync mocks base method. -func (m *MockIngestionService) RunAsync(ctx context.Context) { +func (m *MockIngestionService) RunAsync(arg0 context.Context) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RunAsync", ctx) + m.ctrl.Call(m, "RunAsync", arg0) } // RunAsync indicates an expected call of RunAsync. -func (mr *MockIngestionServiceMockRecorder) RunAsync(ctx any) *gomock.Call { +func (mr *MockIngestionServiceMockRecorder) RunAsync(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunAsync", reflect.TypeOf((*MockIngestionService)(nil).RunAsync), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunAsync", reflect.TypeOf((*MockIngestionService)(nil).RunAsync), arg0) } // RunSync mocks base method. -func (m *MockIngestionService) RunSync(ctx context.Context) error { +func (m *MockIngestionService) RunSync(arg0 context.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RunSync", ctx) + ret := m.ctrl.Call(m, "RunSync", arg0) ret0, _ := ret[0].(error) return ret0 } // RunSync indicates an expected call of RunSync. -func (mr *MockIngestionServiceMockRecorder) RunSync(ctx any) *gomock.Call { +func (mr *MockIngestionServiceMockRecorder) RunSync(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunSync", reflect.TypeOf((*MockIngestionService)(nil).RunSync), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunSync", reflect.TypeOf((*MockIngestionService)(nil).RunSync), arg0) } diff --git a/backend/modules/observability/domain/trace/service/mocks/span_processor.go b/backend/modules/observability/domain/trace/service/mocks/span_processor.go index 1241cd947..adcda3d67 100644 --- a/backend/modules/observability/domain/trace/service/mocks/span_processor.go +++ b/backend/modules/observability/domain/trace/service/mocks/span_processor.go @@ -23,7 +23,6 @@ import ( type MockTraceFilterProcessorBuilder struct { ctrl *gomock.Controller recorder *MockTraceFilterProcessorBuilderMockRecorder - isgomock struct{} } // MockTraceFilterProcessorBuilderMockRecorder is the mock recorder for MockTraceFilterProcessorBuilder. diff --git a/backend/modules/observability/domain/trace/service/mocks/trace_service.go b/backend/modules/observability/domain/trace/service/mocks/trace_service.go index 96f95e20f..fac7ba3c1 100644 --- a/backend/modules/observability/domain/trace/service/mocks/trace_service.go +++ b/backend/modules/observability/domain/trace/service/mocks/trace_service.go @@ -22,7 +22,6 @@ import ( type MockITraceService struct { ctrl *gomock.Controller recorder *MockITraceServiceMockRecorder - isgomock struct{} } // MockITraceServiceMockRecorder is the mock recorder for MockITraceService. @@ -42,206 +41,251 @@ func (m *MockITraceService) EXPECT() *MockITraceServiceMockRecorder { return m.recorder } +// ChangeEvaluatorScore mocks base method. +func (m *MockITraceService) ChangeEvaluatorScore(arg0 context.Context, arg1 *service.ChangeEvaluatorScoreRequest) (*service.ChangeEvaluatorScoreResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChangeEvaluatorScore", arg0, arg1) + ret0, _ := ret[0].(*service.ChangeEvaluatorScoreResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChangeEvaluatorScore indicates an expected call of ChangeEvaluatorScore. 
+func (mr *MockITraceServiceMockRecorder) ChangeEvaluatorScore(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangeEvaluatorScore", reflect.TypeOf((*MockITraceService)(nil).ChangeEvaluatorScore), arg0, arg1) +} + // CreateAnnotation mocks base method. -func (m *MockITraceService) CreateAnnotation(ctx context.Context, req *service.CreateAnnotationReq) error { +func (m *MockITraceService) CreateAnnotation(arg0 context.Context, arg1 *service.CreateAnnotationReq) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateAnnotation", ctx, req) + ret := m.ctrl.Call(m, "CreateAnnotation", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // CreateAnnotation indicates an expected call of CreateAnnotation. -func (mr *MockITraceServiceMockRecorder) CreateAnnotation(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) CreateAnnotation(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAnnotation", reflect.TypeOf((*MockITraceService)(nil).CreateAnnotation), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAnnotation", reflect.TypeOf((*MockITraceService)(nil).CreateAnnotation), arg0, arg1) } // CreateManualAnnotation mocks base method. -func (m *MockITraceService) CreateManualAnnotation(ctx context.Context, req *service.CreateManualAnnotationReq) (*service.CreateManualAnnotationResp, error) { +func (m *MockITraceService) CreateManualAnnotation(arg0 context.Context, arg1 *service.CreateManualAnnotationReq) (*service.CreateManualAnnotationResp, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateManualAnnotation", ctx, req) + ret := m.ctrl.Call(m, "CreateManualAnnotation", arg0, arg1) ret0, _ := ret[0].(*service.CreateManualAnnotationResp) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateManualAnnotation indicates an expected call of CreateManualAnnotation. -func (mr *MockITraceServiceMockRecorder) CreateManualAnnotation(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) CreateManualAnnotation(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateManualAnnotation", reflect.TypeOf((*MockITraceService)(nil).CreateManualAnnotation), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateManualAnnotation", reflect.TypeOf((*MockITraceService)(nil).CreateManualAnnotation), arg0, arg1) } // DeleteAnnotation mocks base method. -func (m *MockITraceService) DeleteAnnotation(ctx context.Context, req *service.DeleteAnnotationReq) error { +func (m *MockITraceService) DeleteAnnotation(arg0 context.Context, arg1 *service.DeleteAnnotationReq) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteAnnotation", ctx, req) + ret := m.ctrl.Call(m, "DeleteAnnotation", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // DeleteAnnotation indicates an expected call of DeleteAnnotation. -func (mr *MockITraceServiceMockRecorder) DeleteAnnotation(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) DeleteAnnotation(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAnnotation", reflect.TypeOf((*MockITraceService)(nil).DeleteAnnotation), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAnnotation", reflect.TypeOf((*MockITraceService)(nil).DeleteAnnotation), arg0, arg1) } // DeleteManualAnnotation mocks base method. 
-func (m *MockITraceService) DeleteManualAnnotation(ctx context.Context, req *service.DeleteManualAnnotationReq) error { +func (m *MockITraceService) DeleteManualAnnotation(arg0 context.Context, arg1 *service.DeleteManualAnnotationReq) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteManualAnnotation", ctx, req) + ret := m.ctrl.Call(m, "DeleteManualAnnotation", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // DeleteManualAnnotation indicates an expected call of DeleteManualAnnotation. -func (mr *MockITraceServiceMockRecorder) DeleteManualAnnotation(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) DeleteManualAnnotation(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteManualAnnotation", reflect.TypeOf((*MockITraceService)(nil).DeleteManualAnnotation), arg0, arg1) +} + +// ExtractSpanInfo mocks base method. +func (m *MockITraceService) ExtractSpanInfo(arg0 context.Context, arg1 *service.ExtractSpanInfoRequest) (*service.ExtractSpanInfoResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExtractSpanInfo", arg0, arg1) + ret0, _ := ret[0].(*service.ExtractSpanInfoResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExtractSpanInfo indicates an expected call of ExtractSpanInfo. +func (mr *MockITraceServiceMockRecorder) ExtractSpanInfo(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteManualAnnotation", reflect.TypeOf((*MockITraceService)(nil).DeleteManualAnnotation), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExtractSpanInfo", reflect.TypeOf((*MockITraceService)(nil).ExtractSpanInfo), arg0, arg1) } // GetTrace mocks base method. -func (m *MockITraceService) GetTrace(ctx context.Context, req *service.GetTraceReq) (*service.GetTraceResp, error) { +func (m *MockITraceService) GetTrace(arg0 context.Context, arg1 *service.GetTraceReq) (*service.GetTraceResp, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTrace", ctx, req) + ret := m.ctrl.Call(m, "GetTrace", arg0, arg1) ret0, _ := ret[0].(*service.GetTraceResp) ret1, _ := ret[1].(error) return ret0, ret1 } // GetTrace indicates an expected call of GetTrace. -func (mr *MockITraceServiceMockRecorder) GetTrace(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) GetTrace(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTrace", reflect.TypeOf((*MockITraceService)(nil).GetTrace), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTrace", reflect.TypeOf((*MockITraceService)(nil).GetTrace), arg0, arg1) } // GetTracesAdvanceInfo mocks base method. -func (m *MockITraceService) GetTracesAdvanceInfo(ctx context.Context, req *service.GetTracesAdvanceInfoReq) (*service.GetTracesAdvanceInfoResp, error) { +func (m *MockITraceService) GetTracesAdvanceInfo(arg0 context.Context, arg1 *service.GetTracesAdvanceInfoReq) (*service.GetTracesAdvanceInfoResp, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTracesAdvanceInfo", ctx, req) + ret := m.ctrl.Call(m, "GetTracesAdvanceInfo", arg0, arg1) ret0, _ := ret[0].(*service.GetTracesAdvanceInfoResp) ret1, _ := ret[1].(error) return ret0, ret1 } // GetTracesAdvanceInfo indicates an expected call of GetTracesAdvanceInfo. 
-func (mr *MockITraceServiceMockRecorder) GetTracesAdvanceInfo(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) GetTracesAdvanceInfo(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTracesAdvanceInfo", reflect.TypeOf((*MockITraceService)(nil).GetTracesAdvanceInfo), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTracesAdvanceInfo", reflect.TypeOf((*MockITraceService)(nil).GetTracesAdvanceInfo), arg0, arg1) } // GetTracesMetaInfo mocks base method. -func (m *MockITraceService) GetTracesMetaInfo(ctx context.Context, req *service.GetTracesMetaInfoReq) (*service.GetTracesMetaInfoResp, error) { +func (m *MockITraceService) GetTracesMetaInfo(arg0 context.Context, arg1 *service.GetTracesMetaInfoReq) (*service.GetTracesMetaInfoResp, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTracesMetaInfo", ctx, req) + ret := m.ctrl.Call(m, "GetTracesMetaInfo", arg0, arg1) ret0, _ := ret[0].(*service.GetTracesMetaInfoResp) ret1, _ := ret[1].(error) return ret0, ret1 } // GetTracesMetaInfo indicates an expected call of GetTracesMetaInfo. -func (mr *MockITraceServiceMockRecorder) GetTracesMetaInfo(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) GetTracesMetaInfo(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTracesMetaInfo", reflect.TypeOf((*MockITraceService)(nil).GetTracesMetaInfo), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTracesMetaInfo", reflect.TypeOf((*MockITraceService)(nil).GetTracesMetaInfo), arg0, arg1) } // IngestTraces mocks base method. -func (m *MockITraceService) IngestTraces(ctx context.Context, req *service.IngestTracesReq) error { +func (m *MockITraceService) IngestTraces(arg0 context.Context, arg1 *service.IngestTracesReq) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IngestTraces", ctx, req) + ret := m.ctrl.Call(m, "IngestTraces", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // IngestTraces indicates an expected call of IngestTraces. -func (mr *MockITraceServiceMockRecorder) IngestTraces(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) IngestTraces(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IngestTraces", reflect.TypeOf((*MockITraceService)(nil).IngestTraces), arg0, arg1) +} + +// ListAnnotationEvaluators mocks base method. +func (m *MockITraceService) ListAnnotationEvaluators(arg0 context.Context, arg1 *service.ListAnnotationEvaluatorsRequest) (*service.ListAnnotationEvaluatorsResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAnnotationEvaluators", arg0, arg1) + ret0, _ := ret[0].(*service.ListAnnotationEvaluatorsResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAnnotationEvaluators indicates an expected call of ListAnnotationEvaluators. +func (mr *MockITraceServiceMockRecorder) ListAnnotationEvaluators(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IngestTraces", reflect.TypeOf((*MockITraceService)(nil).IngestTraces), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAnnotationEvaluators", reflect.TypeOf((*MockITraceService)(nil).ListAnnotationEvaluators), arg0, arg1) } // ListAnnotations mocks base method. 
-func (m *MockITraceService) ListAnnotations(ctx context.Context, req *service.ListAnnotationsReq) (*service.ListAnnotationsResp, error) { +func (m *MockITraceService) ListAnnotations(arg0 context.Context, arg1 *service.ListAnnotationsReq) (*service.ListAnnotationsResp, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListAnnotations", ctx, req) + ret := m.ctrl.Call(m, "ListAnnotations", arg0, arg1) ret0, _ := ret[0].(*service.ListAnnotationsResp) ret1, _ := ret[1].(error) return ret0, ret1 } // ListAnnotations indicates an expected call of ListAnnotations. -func (mr *MockITraceServiceMockRecorder) ListAnnotations(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) ListAnnotations(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAnnotations", reflect.TypeOf((*MockITraceService)(nil).ListAnnotations), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAnnotations", reflect.TypeOf((*MockITraceService)(nil).ListAnnotations), arg0, arg1) } // ListSpans mocks base method. -func (m *MockITraceService) ListSpans(ctx context.Context, req *service.ListSpansReq) (*service.ListSpansResp, error) { +func (m *MockITraceService) ListSpans(arg0 context.Context, arg1 *service.ListSpansReq) (*service.ListSpansResp, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListSpans", ctx, req) + ret := m.ctrl.Call(m, "ListSpans", arg0, arg1) ret0, _ := ret[0].(*service.ListSpansResp) ret1, _ := ret[1].(error) return ret0, ret1 } // ListSpans indicates an expected call of ListSpans. -func (mr *MockITraceServiceMockRecorder) ListSpans(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) ListSpans(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSpans", reflect.TypeOf((*MockITraceService)(nil).ListSpans), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSpans", reflect.TypeOf((*MockITraceService)(nil).ListSpans), arg0, arg1) } // ListSpansOApi mocks base method. -func (m *MockITraceService) ListSpansOApi(ctx context.Context, req *service.ListSpansOApiReq) (*service.ListSpansOApiResp, error) { +func (m *MockITraceService) ListSpansOApi(arg0 context.Context, arg1 *service.ListSpansOApiReq) (*service.ListSpansOApiResp, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListSpansOApi", ctx, req) + ret := m.ctrl.Call(m, "ListSpansOApi", arg0, arg1) ret0, _ := ret[0].(*service.ListSpansOApiResp) ret1, _ := ret[1].(error) return ret0, ret1 } // ListSpansOApi indicates an expected call of ListSpansOApi. -func (mr *MockITraceServiceMockRecorder) ListSpansOApi(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) ListSpansOApi(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSpansOApi", reflect.TypeOf((*MockITraceService)(nil).ListSpansOApi), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSpansOApi", reflect.TypeOf((*MockITraceService)(nil).ListSpansOApi), arg0, arg1) } // SearchTraceOApi mocks base method. 
-func (m *MockITraceService) SearchTraceOApi(ctx context.Context, req *service.SearchTraceOApiReq) (*service.SearchTraceOApiResp, error) { +func (m *MockITraceService) SearchTraceOApi(arg0 context.Context, arg1 *service.SearchTraceOApiReq) (*service.SearchTraceOApiResp, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SearchTraceOApi", ctx, req) + ret := m.ctrl.Call(m, "SearchTraceOApi", arg0, arg1) ret0, _ := ret[0].(*service.SearchTraceOApiResp) ret1, _ := ret[1].(error) return ret0, ret1 } // SearchTraceOApi indicates an expected call of SearchTraceOApi. -func (mr *MockITraceServiceMockRecorder) SearchTraceOApi(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) SearchTraceOApi(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchTraceOApi", reflect.TypeOf((*MockITraceService)(nil).SearchTraceOApi), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchTraceOApi", reflect.TypeOf((*MockITraceService)(nil).SearchTraceOApi), arg0, arg1) } // Send mocks base method. -func (m *MockITraceService) Send(ctx context.Context, msg *entity.AnnotationEvent) error { +func (m *MockITraceService) Send(arg0 context.Context, arg1 *entity.AnnotationEvent) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", ctx, msg) + ret := m.ctrl.Call(m, "Send", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // Send indicates an expected call of Send. -func (mr *MockITraceServiceMockRecorder) Send(ctx, msg any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) Send(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockITraceService)(nil).Send), ctx, msg) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockITraceService)(nil).Send), arg0, arg1) } // UpdateManualAnnotation mocks base method. -func (m *MockITraceService) UpdateManualAnnotation(ctx context.Context, req *service.UpdateManualAnnotationReq) error { +func (m *MockITraceService) UpdateManualAnnotation(arg0 context.Context, arg1 *service.UpdateManualAnnotationReq) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateManualAnnotation", ctx, req) + ret := m.ctrl.Call(m, "UpdateManualAnnotation", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // UpdateManualAnnotation indicates an expected call of UpdateManualAnnotation. 
-func (mr *MockITraceServiceMockRecorder) UpdateManualAnnotation(ctx, req any) *gomock.Call { +func (mr *MockITraceServiceMockRecorder) UpdateManualAnnotation(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateManualAnnotation", reflect.TypeOf((*MockITraceService)(nil).UpdateManualAnnotation), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateManualAnnotation", reflect.TypeOf((*MockITraceService)(nil).UpdateManualAnnotation), arg0, arg1) } diff --git a/backend/modules/observability/domain/trace/service/trace_export_service.go b/backend/modules/observability/domain/trace/service/trace_export_service.go index a553f9575..2ad5b0a2d 100644 --- a/backend/modules/observability/domain/trace/service/trace_export_service.go +++ b/backend/modules/observability/domain/trace/service/trace_export_service.go @@ -217,6 +217,7 @@ func (r *TraceExportServiceImpl) createOrUpdateDataset(ctx context.Context, work *config.DatasetName, category, config.DatasetSchema, + nil, )) if err != nil { return nil, err @@ -240,6 +241,7 @@ func (r *TraceExportServiceImpl) createOrUpdateDataset(ctx context.Context, work "", category, config.DatasetSchema, + nil, )); err != nil { return nil, err } @@ -435,7 +437,7 @@ func (r *TraceExportServiceImpl) buildDatasetItems(ctx context.Context, spans [] func (r *TraceExportServiceImpl) buildItem(ctx context.Context, span *loop_span.Span, i int, fieldMappings []entity.FieldMapping, workspaceID int64, dataset *entity.Dataset, ) *entity.DatasetItem { - item := entity.NewDatasetItem(workspaceID, dataset.ID, span) + item := entity.NewDatasetItem(workspaceID, dataset.ID, span, nil) for _, mapping := range fieldMappings { value, err := span.ExtractByJsonpath(ctx, mapping.TraceFieldKey, mapping.TraceFieldJsonpath) if err != nil { @@ -477,6 +479,7 @@ func (r *TraceExportServiceImpl) buildPreviewDataset(ctx context.Context, worksp "", category, schema, + nil, ) if config.DatasetID != nil { dataset.ID = *config.DatasetID @@ -488,7 +491,7 @@ func (r *TraceExportServiceImpl) buildPreviewDataset(ctx context.Context, worksp } func (r *TraceExportServiceImpl) getDatasetProvider(category entity.DatasetCategory) rpc.IDatasetProvider { - return r.DatasetServiceAdaptor.getDatasetProvider(category) + return r.DatasetServiceAdaptor.GetDatasetProvider(category) } type DatasetServiceAdaptor struct { @@ -506,7 +509,7 @@ func (d *DatasetServiceAdaptor) Register(category entity.DatasetCategory, provid d.datasetServiceMap[category] = provider } -func (d *DatasetServiceAdaptor) getDatasetProvider(category entity.DatasetCategory) rpc.IDatasetProvider { +func (d *DatasetServiceAdaptor) GetDatasetProvider(category entity.DatasetCategory) rpc.IDatasetProvider { datasetProvider, ok := d.datasetServiceMap[category] if !ok { return rpc.NoopDatasetProvider diff --git a/backend/modules/observability/domain/trace/service/trace_service.go b/backend/modules/observability/domain/trace/service/trace_service.go index 6edd0e644..4be5e3d3d 100644 --- a/backend/modules/observability/domain/trace/service/trace_service.go +++ b/backend/modules/observability/domain/trace/service/trace_service.go @@ -10,12 +10,21 @@ import ( "sync" "time" + tconv "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor/task" + taskRepo "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/repo" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql" "golang.org/x/sync/errgroup" + 
"github.com/bytedance/gg/gptr" "github.com/coze-dev/coze-loop/backend/infra/middleware/session" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/annotation" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/dataset" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/trace" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/config" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/metrics" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/mq" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/tenant" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/entity/loop_span" @@ -24,10 +33,12 @@ import ( "github.com/coze-dev/coze-loop/backend/modules/observability/domain/trace/service/trace/span_processor" obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/json" "github.com/coze-dev/coze-loop/backend/pkg/lang/goroutine" "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" "github.com/coze-dev/coze-loop/backend/pkg/logs" time_util "github.com/coze-dev/coze-loop/backend/pkg/time" + "github.com/samber/lo" ) type ListSpansReq struct { @@ -195,6 +206,37 @@ type ListAnnotationsResp struct { Annotations loop_span.AnnotationList } +type ChangeEvaluatorScoreRequest struct { + WorkspaceID int64 + AnnotationID string + SpanID string + StartTime int64 + PlatformType loop_span.PlatformType + Correction *annotation.Correction +} +type ChangeEvaluatorScoreResp struct { + Annotation *annotation.Annotation +} +type ListAnnotationEvaluatorsRequest struct { + WorkspaceID int64 + Name *string +} +type ListAnnotationEvaluatorsResp struct { + Evaluators []*annotation.AnnotationEvaluator +} +type ExtractSpanInfoRequest struct { + WorkspaceID int64 + TraceID string + SpanIds []string + StartTime int64 + EndTime int64 + PlatformType loop_span.PlatformType + FieldMappings []entity.FieldMapping +} +type ExtractSpanInfoResp struct { + SpanInfos []*trace.SpanInfo +} + type IAnnotationEvent interface { Send(ctx context.Context, msg *entity.AnnotationEvent) error } @@ -215,6 +257,9 @@ type ITraceService interface { UpdateManualAnnotation(ctx context.Context, req *UpdateManualAnnotationReq) error DeleteManualAnnotation(ctx context.Context, req *DeleteManualAnnotationReq) error IAnnotationEvent + ChangeEvaluatorScore(ctx context.Context, req *ChangeEvaluatorScoreRequest) (*ChangeEvaluatorScoreResp, error) + ListAnnotationEvaluators(ctx context.Context, req *ListAnnotationEvaluatorsRequest) (*ListAnnotationEvaluatorsResp, error) + ExtractSpanInfo(ctx context.Context, req *ExtractSpanInfoRequest) (*ExtractSpanInfoResp, error) } func NewTraceServiceImpl( @@ -225,6 +270,8 @@ func NewTraceServiceImpl( metrics metrics.ITraceMetrics, buildHelper TraceFilterProcessorBuilder, tenantProvider tenant.ITenantProvider, + evalSvc rpc.IEvaluatorRPCAdapter, + taskRepo taskRepo.ITaskRepo, ) (ITraceService, error) { return &TraceServiceImpl{ traceRepo: tRepo, @@ -234,6 +281,8 @@ func NewTraceServiceImpl( buildHelper: buildHelper, tenantProvider: 
tenantProvider, metrics: metrics, + evalSvc: evalSvc, + taskRepo: taskRepo, }, nil } @@ -245,6 +294,8 @@ type TraceServiceImpl struct { metrics metrics.ITraceMetrics buildHelper TraceFilterProcessorBuilder tenantProvider tenant.ITenantProvider + evalSvc rpc.IEvaluatorRPCAdapter + taskRepo taskRepo.ITaskRepo } func (r *TraceServiceImpl) GetTrace(ctx context.Context, req *GetTraceReq) (*GetTraceResp, error) { @@ -587,9 +638,9 @@ func (r *TraceServiceImpl) CreateManualAnnotation(ctx context.Context, req *Crea if err != nil { return nil, err } - span, err := r.getSpan(ctx, + spans, err := r.getSpan(ctx, tenants, - req.Annotation.SpanID, + []string{req.Annotation.SpanID}, req.Annotation.TraceID, req.Annotation.WorkspaceID, req.Annotation.StartTime.Add(-time.Second).UnixMilli(), @@ -597,10 +648,11 @@ func (r *TraceServiceImpl) CreateManualAnnotation(ctx context.Context, req *Crea ) if err != nil { return nil, err - } else if span == nil { + } else if len(spans) == 0 { logs.CtxWarn(ctx, "no span found for span_id %s trace_id %s", req.Annotation.SpanID, req.Annotation.TraceID) return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode) } + span := spans[0] annotation, err := span.BuildFeedback( loop_span.AnnotationTypeManualFeedback, req.Annotation.Key, @@ -629,9 +681,9 @@ func (r *TraceServiceImpl) UpdateManualAnnotation(ctx context.Context, req *Upda if err != nil { return err } - span, err := r.getSpan(ctx, + spans, err := r.getSpan(ctx, tenants, - req.Annotation.SpanID, + []string{req.Annotation.SpanID}, req.Annotation.TraceID, req.Annotation.WorkspaceID, req.Annotation.StartTime.Add(-time.Second).UnixMilli(), @@ -639,10 +691,11 @@ func (r *TraceServiceImpl) UpdateManualAnnotation(ctx context.Context, req *Upda ) if err != nil { return err - } else if span == nil { + } else if len(spans) == 0 { logs.CtxWarn(ctx, "no span found for span_id %s trace_id %s", req.Annotation.SpanID, req.Annotation.TraceID) return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode) } + span := spans[0] annotation, err := span.BuildFeedback( loop_span.AnnotationTypeManualFeedback, req.Annotation.Key, @@ -651,6 +704,7 @@ func (r *TraceServiceImpl) UpdateManualAnnotation(ctx context.Context, req *Upda session.UserIDInCtxOrEmpty(ctx), false, ) if err != nil || annotation.ID != req.AnnotationID { return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode) } @@ -679,9 +733,9 @@ func (r *TraceServiceImpl) DeleteManualAnnotation(ctx context.Context, req *Dele if err != nil { return err } - span, err := r.getSpan(ctx, + spans, err := r.getSpan(ctx, tenants, - req.SpanID, + []string{req.SpanID}, req.TraceID, strconv.FormatInt(req.WorkspaceID, 10), req.StartTime-time.Second.Milliseconds(), @@ -689,10 +743,11 @@ func (r *TraceServiceImpl) DeleteManualAnnotation(ctx context.Context, req *Dele ) if err != nil { return err - } else if span == nil { + } else if len(spans) == 0 { logs.CtxWarn(ctx, "no span found for span_id %s trace_id %s", req.SpanID, req.TraceID) return errorx.NewByCode(obErrorx.CommercialCommonInternalErrorCodeCode) } + span := spans[0] annotation, err := span.BuildFeedback( loop_span.AnnotationTypeManualFeedback, req.AnnotationKey, @@ -716,9 +771,9 @@ func (r *TraceServiceImpl) CreateAnnotation(ctx context.Context, req *CreateAnno if err != nil { return err } - span, err := r.getSpan(ctx, + spans, err := r.getSpan(ctx, + cfg.Tenants, - req.SpanID, + []string{req.SpanID}, + req.TraceID, + strconv.FormatInt(req.WorkspaceID, 10), 
time.Now().Add(-time.Duration(req.QueryDays)*24*time.Hour).UnixMilli(), @@ -726,7 +781,7 @@ func (r *TraceServiceImpl) CreateAnnotation(ctx context.Context, req *CreateAnno ) if err != nil { return err - } else if span == nil { + } else if len(spans) == 0 { return r.annotationProducer.SendAnnotation(ctx, &entity.AnnotationEvent{ Annotation: &loop_span.Annotation{ SpanID: req.SpanID, @@ -746,6 +801,7 @@ func (r *TraceServiceImpl) CreateAnnotation(ctx context.Context, req *CreateAnno RetryTimes: 3, }) } + span := spans[0] annotation, err := span.BuildFeedback( loop_span.AnnotationType(cfg.AnnotationType), req.AnnotationKey, @@ -779,9 +835,9 @@ func (r *TraceServiceImpl) DeleteAnnotation(ctx context.Context, req *DeleteAnno if err != nil { return err } - span, err := r.getSpan(ctx, + spans, err := r.getSpan(ctx, cfg.Tenants, - req.SpanID, + []string{req.SpanID}, req.TraceID, strconv.FormatInt(req.WorkspaceID, 10), time.Now().Add(-time.Duration(req.QueryDays)*24*time.Hour).UnixMilli(), @@ -789,7 +845,7 @@ func (r *TraceServiceImpl) DeleteAnnotation(ctx context.Context, req *DeleteAnno ) if err != nil { return err - } else if span == nil { + } else if len(spans) == 0 { return r.annotationProducer.SendAnnotation(ctx, &entity.AnnotationEvent{ Annotation: &loop_span.Annotation{ SpanID: req.SpanID, @@ -808,6 +864,7 @@ func (r *TraceServiceImpl) DeleteAnnotation(ctx context.Context, req *DeleteAnno RetryTimes: 3, }) } + span := spans[0] annotation, err := span.BuildFeedback( loop_span.AnnotationType(cfg.AnnotationType), req.AnnotationKey, @@ -839,18 +896,19 @@ func (r *TraceServiceImpl) Send(ctx context.Context, event *entity.AnnotationEve if err != nil { // retry return err } - span, err := r.getSpan(ctx, + spans, err := r.getSpan(ctx, cfg.Tenants, - event.Annotation.SpanID, + []string{event.Annotation.SpanID}, event.Annotation.TraceID, event.Annotation.WorkspaceID, event.StartAt, event.EndAt, ) - if err != nil || span == nil { // retry if not found yet + if err != nil || len(spans) == 0 { // retry if not found yet shouldReSend = true return nil } + span := spans[0] event.Annotation.StartTime = time.UnixMicro(span.StartTime) if err := event.Annotation.GenID(); err != nil { logs.CtxWarn(ctx, "failed to generate annotation id for %+v, %v", event.Annotation, err) @@ -864,46 +922,36 @@ func (r *TraceServiceImpl) Send(ctx context.Context, event *entity.AnnotationEve }) } -func (r *TraceServiceImpl) getSpan(ctx context.Context, tenants []string, spanId, traceId, workspaceId string, startAt, endAt int64) (*loop_span.Span, error) { - if traceId == "" || workspaceId == "" { +func (r *TraceServiceImpl) getSpan(ctx context.Context, tenants []string, spanIds []string, traceId, workspaceId string, startAt, endAt int64) ([]*loop_span.Span, error) { + if len(spanIds) == 0 || workspaceId == "" { return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode) } - filter := &loop_span.FilterFields{ - FilterFields: []*loop_span.FilterField{ - { - FieldName: loop_span.SpanFieldSpaceId, - FieldType: loop_span.FieldTypeString, - Values: []string{workspaceId}, - QueryType: ptr.Of(loop_span.QueryTypeEnumEq), - }, - { - FieldName: loop_span.SpanFieldTraceId, - FieldType: loop_span.FieldTypeString, - Values: []string{traceId}, - QueryType: ptr.Of(loop_span.QueryTypeEnumEq), - }, - }, - } - if spanId != "" { - filter.FilterFields = append(filter.FilterFields, - &loop_span.FilterField{ - FieldName: loop_span.SpanFieldSpanId, - FieldType: loop_span.FieldTypeString, - Values: []string{spanId}, - QueryType: 
ptr.Of(loop_span.QueryTypeEnumEq),
-            })
-    } else {
-        filter.FilterFields = append(filter.FilterFields,
-            &loop_span.FilterField{
-                FieldName: loop_span.SpanFieldParentID,
-                FieldType: loop_span.FieldTypeString,
-                Values: []string{"0", ""},
-                QueryType: ptr.Of(loop_span.QueryTypeEnumIn),
-            })
+    var filterFields []*loop_span.FilterField
+    filterFields = append(filterFields, &loop_span.FilterField{
+        FieldName: loop_span.SpanFieldSpanId,
+        FieldType: loop_span.FieldTypeString,
+        Values: spanIds,
+        QueryType: ptr.Of(loop_span.QueryTypeEnumIn),
+    })
+    filterFields = append(filterFields, &loop_span.FilterField{
+        FieldName: loop_span.SpanFieldSpaceId,
+        FieldType: loop_span.FieldTypeString,
+        Values: []string{workspaceId},
+        QueryType: ptr.Of(loop_span.QueryTypeEnumEq),
+    })
+    if traceId != "" {
+        filterFields = append(filterFields, &loop_span.FilterField{
+            FieldName: loop_span.SpanFieldTraceId,
+            FieldType: loop_span.FieldTypeString,
+            Values: []string{traceId},
+            QueryType: ptr.Of(loop_span.QueryTypeEnumEq),
+        })
    }
    res, err := r.traceRepo.ListSpans(ctx, &repo.ListSpansParam{
-        Tenants: tenants,
-        Filters: filter,
+        Tenants: tenants,
+        Filters: &loop_span.FilterFields{
+            FilterFields: filterFields,
+        },
        StartAt: startAt,
        EndAt: endAt,
        NotQueryAnnotation: true,
@@ -915,7 +963,7 @@ func (r *TraceServiceImpl) getSpan(ctx context.Context, tenants []string, spanId
    } else if len(res.Spans) == 0 {
        return nil, nil
    }
-    return res.Spans[0], nil
+    return res.Spans, nil
}

func (r *TraceServiceImpl) getAnnotationCallerCfg(ctx context.Context, caller string) (*config.AnnotationConfig, error) {
@@ -996,6 +1044,248 @@ func (r *TraceServiceImpl) getTenants(ctx context.Context, platform loop_span.Pl
    return r.tenantProvider.GetTenantsByPlatformType(ctx, platform)
}

+func (r *TraceServiceImpl) ChangeEvaluatorScore(ctx context.Context, req *ChangeEvaluatorScoreRequest) (*ChangeEvaluatorScoreResp, error) {
+    var resp *ChangeEvaluatorScoreResp
+    tenants, err := r.getTenants(ctx, req.PlatformType)
+    if err != nil {
+        return resp, err
+    }
+    spans, err := r.getSpan(ctx,
+        tenants,
+        []string{req.SpanID},
+        "",
+        strconv.FormatInt(req.WorkspaceID, 10),
+        req.StartTime-time.Second.Milliseconds(),
+        req.StartTime+time.Second.Milliseconds(),
+    )
+    if err != nil {
+        return resp, err
+    } else if len(spans) == 0 {
+        logs.CtxWarn(ctx, "no span found for span_id %s", req.SpanID)
+        return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode)
+    }
+    span := spans[0]
+    annotation, err := r.traceRepo.GetAnnotation(ctx, &repo.GetAnnotationParam{
+        Tenants: tenants,
+        ID: req.AnnotationID,
+        StartAt: time.UnixMicro(span.StartTime).Add(-time.Second).UnixMilli(),
+        EndAt: time.UnixMicro(span.StartTime).Add(time.Second).UnixMilli(),
+    })
+    if err != nil {
+        logs.CtxError(ctx, "get annotation %s err %v", req.AnnotationID, err)
+        return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("get annotation error"))
+    }
+    if annotation == nil {
+        return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("annotation not found"))
+    }
+    updateBy := session.UserIDInCtxOrEmpty(ctx)
+    if updateBy == "" {
+        return resp, errorx.NewByCode(obErrorx.UserParseFailedCode)
+    }
+    annotation.CorrectAutoEvaluateScore(req.Correction.GetScore(), req.Correction.GetExplain(), updateBy)
+    // The evaluation record is the primary data: update it first and return a failure immediately on error.
+    if err = r.correctEvaluatorRecords(ctx, r.evalSvc, annotation); err != nil {
+        return resp, err
+    }
+    // Then update the observability data synchronously.
+    param := &repo.UpsertAnnotationParam{
+        Tenant: span.GetTenant(),
+        TTL: span.GetTTL(ctx),
+        Annotations: []*loop_span.Annotation{annotation},
+        IsSync: true,
+    }
+    if err = r.traceRepo.UpsertAnnotation(ctx, param); err != nil {
+        recordID := lo.Ternary(annotation.GetAutoEvaluateMetadata() != nil, annotation.GetAutoEvaluateMetadata().EvaluatorRecordID, 0)
+        // If the synchronous update fails, compensate asynchronously.
+        // TODO: the async path can produce duplicates.
+        logs.CtxWarn(ctx, "Sync upsert annotation failed, try async upsert. span_id=[%v], record_id=[%v], err:%v",
+            annotation.SpanID, recordID, err)
+        return resp, nil
+    }
+    return &ChangeEvaluatorScoreResp{
+        Annotation: annotation.ToFornaxAnnotation(ctx),
+    }, nil
+}
+
+func (r *TraceServiceImpl) correctEvaluatorRecords(ctx context.Context, evalSvc rpc.IEvaluatorRPCAdapter, annotation *loop_span.Annotation) error {
+    if annotation == nil {
+        return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("annotation is nil"))
+    }
+    if annotation.GetAutoEvaluateMetadata() == nil {
+        return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("annotation auto evaluate metadata is nil"))
+    }
+    if len(annotation.Corrections) == 0 {
+        return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("annotation corrections is empty"))
+    }
+    correction := annotation.Corrections[len(annotation.Corrections)-1]
+
+    if err := evalSvc.UpdateEvaluatorRecord(ctx, &rpc.UpdateEvaluatorRecordParam{
+        WorkspaceID: annotation.WorkspaceID,
+        EvaluatorRecordID: annotation.GetAutoEvaluateMetadata().EvaluatorRecordID,
+        Score: correction.Value.FloatValue,
+        Reasoning: correction.Reasoning,
+        UpdatedBy: correction.UpdatedBy,
+    }); err != nil {
+        return err
+    }
+    return nil
+}
+func (r *TraceServiceImpl) ListAnnotationEvaluators(ctx context.Context, req *ListAnnotationEvaluatorsRequest) (*ListAnnotationEvaluatorsResp, error) {
+    resp := &ListAnnotationEvaluatorsResp{}
+    resp.Evaluators = make([]*annotation.AnnotationEvaluator, 0)
+    var (
+        evaluators = make([]*rpc.Evaluator, 0)
+    )
+    var err error
+    if req.Name != nil {
+        // A name was provided: run a fuzzy query directly.
+        evaluators, err = r.evalSvc.ListEvaluators(ctx, &rpc.ListEvaluatorsParam{
+            WorkspaceID: req.WorkspaceID,
+            Name: req.Name,
+        })
+        if err != nil {
+            return resp, err
+        }
+    } else {
+        // No name was provided: look up the tasks first.
+        taskDOs, _, err := r.taskRepo.ListTasks(ctx, mysql.ListTaskParam{
+            WorkspaceIDs: []int64{req.WorkspaceID},
+            ReqLimit: int32(500),
+            ReqOffset: int32(0),
+        })
+        if err != nil {
+            return nil, err
+        }
+        if len(taskDOs) == 0 {
+            logs.CtxInfo(ctx, "GetTasks tasks is nil")
+            return resp, nil
+        }
+
+        evaluatorVersionIDS := make(map[int64]bool)
+        for _, taskDO := range taskDOs {
+            taskConfig := tconv.TaskConfigDO2DTO(taskDO.TaskConfig)
+            if taskConfig == nil {
+                continue
+            }
+            for _, evaluator := range taskConfig.AutoEvaluateConfigs {
+                evaluatorVersionIDS[evaluator.EvaluatorVersionID] = true
+                if len(evaluatorVersionIDS) >= 30 {
+                    break
+                }
+            }
+            if len(evaluatorVersionIDS) >= 30 {
+                break
+            }
+        }
+        evaluatorVersionIDList := make([]int64, 0)
+        for k := range evaluatorVersionIDS {
+            evaluatorVersionIDList = append(evaluatorVersionIDList, k)
+        }
+        evaluators, _, err = r.evalSvc.BatchGetEvaluatorVersions(ctx, &rpc.BatchGetEvaluatorVersionsParam{
+            WorkspaceID: req.WorkspaceID,
+            EvaluatorVersionIds: evaluatorVersionIDList,
+        })
+        if err != nil {
+            return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithMsgParam("evaluatorVersionIDs is invalid, BatchGetEvaluators err: %v", err.Error()))
+        }
+    }
+    for _, evaluator := range evaluators {
+        re := &annotation.AnnotationEvaluator{}
+        if evaluator.EvaluatorVersionID != 0 {
+            re.EvaluatorVersionID = evaluator.EvaluatorVersionID
+        }
+        if evaluator.EvaluatorName != "" {
+            re.EvaluatorName = evaluator.EvaluatorName
+        }
+        if evaluator.EvaluatorVersion != "" {
+            re.EvaluatorVersion = evaluator.EvaluatorVersion
+        }
+        resp.Evaluators = append(resp.Evaluators, re)
+    }
+    return resp, nil
+}
+func (r *TraceServiceImpl) ExtractSpanInfo(ctx context.Context, req *ExtractSpanInfoRequest) (*ExtractSpanInfoResp, error) {
+    resp := &ExtractSpanInfoResp{}
+    var spanInfos []*trace.SpanInfo
+    tenants, err := r.getTenants(ctx, req.PlatformType)
+    if err != nil {
+        return resp, err
+    }
+    spans, err := r.getSpan(ctx,
+        tenants,
+        req.SpanIds,
+        req.TraceID,
+        strconv.FormatInt(req.WorkspaceID, 10),
+        req.StartTime-time.Second.Milliseconds(),
+        req.EndTime+time.Second.Milliseconds(),
+    )
+    if err != nil {
+        return resp, err
+    } else if len(spans) == 0 {
+        logs.CtxWarn(ctx, "no span found for span_ids %v trace_id %s", req.SpanIds, req.TraceID)
+        return resp, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode)
+    }
+    logs.CtxInfo(ctx, "Get spans success, total count:%v", len(spans))
+    for _, span := range spans {
+        var fieldList []*dataset.FieldData
+        for _, mapping := range req.FieldMappings {
+            value, err := buildExtractSpanInfo(ctx, span, &mapping)
+            if err != nil {
+                // buildExtractSpanInfo only errors on a schema mismatch or marshal failure, so bail out here.
+                logs.CtxInfo(ctx, "Extract field failed, err:%v", err)
+                return resp, err
+            }
+            content := buildContent(value)
+            // The frontend sends the field Name, while the evaluation set expects the key, so map between them here.
+            if mapping.FieldSchema.Name == "" {
+                logs.CtxInfo(ctx, "Evaluator field name is nil")
+                continue
+            }
+            fieldList = append(fieldList, &dataset.FieldData{
+                Key: mapping.FieldSchema.Key,
+                Name: gptr.Of(mapping.FieldSchema.Name),
+                Content: content,
+            })
+        }
+        spanInfos = append(spanInfos, &trace.SpanInfo{
+            SpanID: span.SpanID,
+            FieldList: fieldList,
+        })
+    }
+    return &ExtractSpanInfoResp{
+        SpanInfos: spanInfos,
+    }, nil
+}
+func buildExtractSpanInfo(ctx context.Context, span *loop_span.Span, fieldMapping *entity.FieldMapping) (string, error) {
+    value, err := span.ExtractByJsonpath(ctx, fieldMapping.TraceFieldKey, fieldMapping.TraceFieldJsonpath)
+    if err != nil {
+        // Non-JSON content queried with a jsonpath is not treated as an error; leave the value empty.
+        logs.CtxInfo(ctx, "Extract field failed, err:%v", err)
+    }
+    content, errCode := entity.GetContentInfo(ctx, fieldMapping.FieldSchema.ContentType, value)
+    if errCode == entity.DatasetErrorType_MismatchSchema {
+        logs.CtxInfo(ctx, "invalid multi part")
+        return "", errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid multi part"))
+    }
+    valueJSON, err := json.Marshal(content)
+    if err != nil {
+        return "", err
+    }
+    return string(valueJSON), nil
+}
+
+func buildContent(value string) *dataset.Content {
+    var content *dataset.Content
+    err := json.Unmarshal([]byte(value), &content)
+    if err != nil {
+        content = &dataset.Content{
+            ContentType: gptr.Of(common.ContentTypeText),
+            Text: gptr.Of(value),
+        }
+    }
+    return content
+}
+
 func processSpecificFilter(f *loop_span.FilterField) error {
    switch f.FieldName {
    case loop_span.SpanFieldStatus:
diff --git a/backend/modules/observability/infra/config/trace.go b/backend/modules/observability/infra/config/trace.go
index 9d8cd8ed0..cb2b0b509 100644
--- a/backend/modules/observability/infra/config/trace.go
+++ b/backend/modules/observability/infra/config/trace.go
@@ -24,6 +24,7 @@ const (
    traceMaxDurationDay = "trace_max_duration_day"
    annotationSourceCfgKey =
"annotation_source_cfg" queryTraceRateLimitCfgKey = "query_trace_rate_limit_config" + backfillMqProducerCfgKey = "backfill_mq_producer_config" ) type TraceConfigCenter struct { @@ -72,6 +73,14 @@ func (t *TraceConfigCenter) GetAnnotationMqProducerCfg(ctx context.Context) (*co return cfg, nil } +func (t *TraceConfigCenter) GetBackfillMqProducerCfg(ctx context.Context) (*config.MqProducerCfg, error) { + cfg := new(config.MqProducerCfg) + if err := t.UnmarshalKey(context.Background(), backfillMqProducerCfgKey, cfg); err != nil { + return nil, err + } + return cfg, nil +} + func (t *TraceConfigCenter) GetTraceCkCfg(ctx context.Context) (*config.TraceCKCfg, error) { cfg := new(config.TraceCKCfg) if err := t.UnmarshalKey(context.Background(), traceCkCfgKey, cfg); err != nil { diff --git a/backend/modules/observability/infra/mq/consumer/autotask_callback_consumer.go b/backend/modules/observability/infra/mq/consumer/autotask_callback_consumer.go new file mode 100644 index 000000000..5ae8ceee6 --- /dev/null +++ b/backend/modules/observability/infra/mq/consumer/autotask_callback_consumer.go @@ -0,0 +1,59 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package consumer + +import ( + "context" + "time" + + "github.com/coze-dev/coze-loop/backend/infra/mq" + obapp "github.com/coze-dev/coze-loop/backend/modules/observability/application" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/config" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/pkg/conf" + "github.com/coze-dev/coze-loop/backend/pkg/json" + "github.com/coze-dev/coze-loop/backend/pkg/lang/conv" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +type CallbackConsumer struct { + handler obapp.ITaskQueueConsumer + conf.IConfigLoader +} + +func newCallbackConsumer(handler obapp.ITaskQueueConsumer, loader conf.IConfigLoader) mq.IConsumerWorker { + return &CallbackConsumer{ + handler: handler, + IConfigLoader: loader, + } +} + +func (e *CallbackConsumer) ConsumerCfg(ctx context.Context) (*mq.ConsumerConfig, error) { + const key = "autotask_callback_mq_consumer_config" + cfg := &config.MqConsumerCfg{} + if err := e.UnmarshalKey(ctx, key, cfg); err != nil { + return nil, err + } + res := &mq.ConsumerConfig{ + Addr: cfg.Addr, + Topic: cfg.Topic, + ConsumerGroup: cfg.ConsumerGroup, + ConsumeTimeout: time.Duration(cfg.Timeout) * time.Millisecond, + ConsumeGoroutineNums: cfg.WorkerNum, + EnablePPE: cfg.EnablePPE, + } + return res, nil +} + +func (e *CallbackConsumer) HandleMessage(ctx context.Context, ext *mq.MessageExt) error { + logID := logs.NewLogID() + ctx = logs.SetLogID(ctx, logID) + event := new(entity.AutoEvalEvent) + if err := json.Unmarshal(ext.Body, event); err != nil { + logs.CtxError(ctx, "Callback msg json unmarshal fail, raw: %v, err: %s", conv.UnsafeBytesToString(ext.Body), err) + return nil + } + logs.CtxInfo(ctx, "Callback msg, event: %v,msgID: %s", event, ext.MsgID) + return e.handler.CallBack(ctx, event) +} diff --git a/backend/modules/observability/infra/mq/consumer/backfill_consumer.go b/backend/modules/observability/infra/mq/consumer/backfill_consumer.go new file mode 100644 index 000000000..0eb1859eb --- /dev/null +++ b/backend/modules/observability/infra/mq/consumer/backfill_consumer.go @@ -0,0 +1,59 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package consumer + +import ( + "context" + "time" + + 
"github.com/coze-dev/coze-loop/backend/infra/mq" + obapp "github.com/coze-dev/coze-loop/backend/modules/observability/application" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/config" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/pkg/conf" + "github.com/coze-dev/coze-loop/backend/pkg/json" + "github.com/coze-dev/coze-loop/backend/pkg/lang/conv" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +type BackFillConsumer struct { + handler obapp.ITaskQueueConsumer + conf.IConfigLoader +} + +func newBackFillConsumer(handler obapp.ITaskQueueConsumer, loader conf.IConfigLoader) mq.IConsumerWorker { + return &BackFillConsumer{ + handler: handler, + IConfigLoader: loader, + } +} + +func (e *BackFillConsumer) ConsumerCfg(ctx context.Context) (*mq.ConsumerConfig, error) { + const key = "backfill_mq_consumer_config" + cfg := &config.MqConsumerCfg{} + if err := e.UnmarshalKey(ctx, key, cfg); err != nil { + return nil, err + } + res := &mq.ConsumerConfig{ + Addr: cfg.Addr, + Topic: cfg.Topic, + ConsumerGroup: cfg.ConsumerGroup, + ConsumeTimeout: time.Duration(cfg.Timeout) * time.Millisecond, + ConsumeGoroutineNums: cfg.WorkerNum, + EnablePPE: cfg.EnablePPE, + } + return res, nil +} + +func (e *BackFillConsumer) HandleMessage(ctx context.Context, ext *mq.MessageExt) error { + logID := logs.NewLogID() + ctx = logs.SetLogID(ctx, logID) + event := new(entity.BackFillEvent) + if err := json.Unmarshal(ext.Body, event); err != nil { + logs.CtxError(ctx, "BackFill msg json unmarshal fail, raw: %v, err: %s", conv.UnsafeBytesToString(ext.Body), err) + return nil + } + logs.CtxInfo(ctx, "BackFill msg %+v,msgID=%s", event, ext.MsgID) + return e.handler.BackFill(ctx, event) +} diff --git a/backend/modules/observability/infra/mq/consumer/consumer.go b/backend/modules/observability/infra/mq/consumer/consumer.go index 733fbf545..0956fef7a 100644 --- a/backend/modules/observability/infra/mq/consumer/consumer.go +++ b/backend/modules/observability/infra/mq/consumer/consumer.go @@ -12,8 +12,13 @@ import ( func NewConsumerWorkers( loader conf.IConfigLoader, handler application.IAnnotationQueueConsumer, + taskConsumer application.ITaskQueueConsumer, ) ([]mq.IConsumerWorker, error) { return []mq.IConsumerWorker{ newAnnotationConsumer(handler, loader), + newTaskConsumer(taskConsumer, loader), + newCallbackConsumer(taskConsumer, loader), + newCorrectionConsumer(taskConsumer, loader), + newBackFillConsumer(taskConsumer, loader), }, nil } diff --git a/backend/modules/observability/infra/mq/consumer/correction_consumer.go b/backend/modules/observability/infra/mq/consumer/correction_consumer.go new file mode 100644 index 000000000..a72ff61bf --- /dev/null +++ b/backend/modules/observability/infra/mq/consumer/correction_consumer.go @@ -0,0 +1,58 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 +package consumer + +import ( + "context" + "time" + + "github.com/coze-dev/coze-loop/backend/infra/mq" + obapp "github.com/coze-dev/coze-loop/backend/modules/observability/application" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/config" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/pkg/conf" + "github.com/coze-dev/coze-loop/backend/pkg/json" + "github.com/coze-dev/coze-loop/backend/pkg/lang/conv" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +type CorrectionConsumer 
struct { + handler obapp.ITaskQueueConsumer + conf.IConfigLoader +} + +func newCorrectionConsumer(handler obapp.ITaskQueueConsumer, loader conf.IConfigLoader) mq.IConsumerWorker { + return &CorrectionConsumer{ + handler: handler, + IConfigLoader: loader, + } +} + +func (e *CorrectionConsumer) ConsumerCfg(ctx context.Context) (*mq.ConsumerConfig, error) { + const key = "correction_mq_consumer_config" + cfg := &config.MqConsumerCfg{} + if err := e.UnmarshalKey(ctx, key, cfg); err != nil { + return nil, err + } + res := &mq.ConsumerConfig{ + Addr: cfg.Addr, + Topic: cfg.Topic, + ConsumerGroup: cfg.ConsumerGroup, + ConsumeTimeout: time.Duration(cfg.Timeout) * time.Millisecond, + ConsumeGoroutineNums: cfg.WorkerNum, + EnablePPE: cfg.EnablePPE, + } + return res, nil +} + +func (e *CorrectionConsumer) HandleMessage(ctx context.Context, ext *mq.MessageExt) error { + logID := logs.NewLogID() + ctx = logs.SetLogID(ctx, logID) + event := new(entity.CorrectionEvent) + if err := json.Unmarshal(ext.Body, event); err != nil { + logs.CtxError(ctx, "Correction msg json unmarshal fail, raw: %v, err: %s", conv.UnsafeBytesToString(ext.Body), err) + return nil + } + logs.CtxInfo(ctx, "Correction msg, event: %v,msgID=%s", event, ext.MsgID) + return e.handler.Correction(ctx, event) +} diff --git a/backend/modules/observability/infra/mq/consumer/task_consumer.go b/backend/modules/observability/infra/mq/consumer/task_consumer.go new file mode 100644 index 000000000..fa1f261df --- /dev/null +++ b/backend/modules/observability/infra/mq/consumer/task_consumer.go @@ -0,0 +1,60 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package consumer + +import ( + "context" + "time" + + "github.com/coze-dev/coze-loop/backend/infra/mq" + obapp "github.com/coze-dev/coze-loop/backend/modules/observability/application" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/config" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/pkg/conf" + "github.com/coze-dev/coze-loop/backend/pkg/json" + "github.com/coze-dev/coze-loop/backend/pkg/lang/conv" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +type TaskConsumer struct { + handler obapp.ITaskQueueConsumer + conf.IConfigLoader +} + +func newTaskConsumer(handler obapp.ITaskQueueConsumer, loader conf.IConfigLoader) mq.IConsumerWorker { + return &TaskConsumer{ + handler: handler, + IConfigLoader: loader, + } +} + +func (e *TaskConsumer) ConsumerCfg(ctx context.Context) (*mq.ConsumerConfig, error) { + // 【1011】内场的topic,doubaogu过滤 + const key = "task_mq_consumer_config" + cfg := &config.MqConsumerCfg{} + if err := e.UnmarshalKey(ctx, key, cfg); err != nil { + return nil, err + } + res := &mq.ConsumerConfig{ + Addr: cfg.Addr, + Topic: cfg.Topic, + ConsumerGroup: cfg.ConsumerGroup, + ConsumeTimeout: time.Duration(cfg.Timeout) * time.Millisecond, + ConsumeGoroutineNums: cfg.WorkerNum, + EnablePPE: cfg.EnablePPE, + } + return res, nil +} + +func (e *TaskConsumer) HandleMessage(ctx context.Context, ext *mq.MessageExt) error { + logID := logs.NewLogID() + ctx = logs.SetLogID(ctx, logID) + event := new(entity.RawSpan) + if err := json.Unmarshal(ext.Body, event); err != nil { + logs.CtxError(ctx, "Task msg json unmarshal fail, raw: %v, err: %s", conv.UnsafeBytesToString(ext.Body), err) + return nil + } + logs.CtxInfo(ctx, "Task msg,log_id=%s, trace_id=%s, span_id=%s,msgID=%s", event.LogID, event.TraceID, event.SpanID, ext.MsgID) + return 
e.handler.SpanTrigger(ctx, event)
+}
diff --git a/backend/modules/observability/infra/mq/producer/backfill_producer.go b/backend/modules/observability/infra/mq/producer/backfill_producer.go
new file mode 100644
index 000000000..2ac8858a6
--- /dev/null
+++ b/backend/modules/observability/infra/mq/producer/backfill_producer.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2025 coze-dev Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package producer
+
+import (
+    "context"
+    "fmt"
+    "sync"
+    "time"
+
+    "github.com/coze-dev/coze-loop/backend/infra/mq"
+    "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/config"
+    mq2 "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/mq"
+    "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity"
+    obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno"
+    "github.com/coze-dev/coze-loop/backend/pkg/errorx"
+    "github.com/coze-dev/coze-loop/backend/pkg/json"
+    "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr"
+    "github.com/coze-dev/coze-loop/backend/pkg/logs"
+)
+
+var (
+    backfillProducerOnce sync.Once
+    singletonBackfillProducer mq2.IBackfillProducer
+)
+
+type BackfillProducerImpl struct {
+    topic string
+    mqProducer mq.IProducer
+}
+
+func (b *BackfillProducerImpl) SendBackfill(ctx context.Context, message *entity.BackFillEvent) error {
+    bytes, err := json.Marshal(message)
+    if err != nil {
+        return errorx.WrapByCode(err, obErrorx.CommercialCommonInternalErrorCodeCode)
+    }
+    msg := mq.NewDeferMessage(b.topic, 10*time.Second, bytes)
+    sendMsg, err := b.mqProducer.Send(ctx, msg)
+    if err != nil {
+        logs.CtxWarn(ctx, "send backfill msg err: %v", err)
+        return errorx.WrapByCode(err, obErrorx.CommercialCommonRPCErrorCodeCode)
+    }
+    logs.CtxInfo(ctx, "send backfill msg %s successfully, msgId: %s", string(bytes), sendMsg.MessageID)
+    return nil
+}
+
+func NewBackfillProducerImpl(traceConfig config.ITraceConfig, mqFactory mq.IFactory) (mq2.IBackfillProducer, error) {
+    var err error
+    backfillProducerOnce.Do(func() {
+        singletonBackfillProducer, err = newBackfillProducerImpl(traceConfig, mqFactory)
+    })
+    if err != nil {
+        return nil, err
+    } else {
+        return singletonBackfillProducer, nil
+    }
+}
+
+func newBackfillProducerImpl(traceConfig config.ITraceConfig, mqFactory mq.IFactory) (mq2.IBackfillProducer, error) {
+    mqCfg, err := traceConfig.GetBackfillMqProducerCfg(context.Background())
+    if err != nil {
+        return nil, err
+    }
+    if mqCfg.Topic == "" {
+        return nil, fmt.Errorf("backfill topic required")
+    }
+    mqProducer, err := mqFactory.NewProducer(mq.ProducerConfig{
+        Addr: mqCfg.Addr,
+        ProduceTimeout: time.Duration(mqCfg.Timeout) * time.Millisecond,
+        RetryTimes: mqCfg.RetryTimes,
+        ProducerGroup: ptr.Of(mqCfg.ProducerGroup),
+    })
+    if err != nil {
+        return nil, err
+    }
+    if err := mqProducer.Start(); err != nil {
+        return nil, fmt.Errorf("fail to start producer, %v", err)
+    }
+    return &BackfillProducerImpl{
+        topic: mqCfg.Topic,
+        mqProducer: mqProducer,
+    }, nil
+}
diff --git a/backend/modules/observability/infra/repo/mysql/convertor/task.go b/backend/modules/observability/infra/repo/mysql/convertor/task.go
new file mode 100644
index 000000000..2770e8f88
--- /dev/null
+++ b/backend/modules/observability/infra/repo/mysql/convertor/task.go
@@ -0,0 +1,96 @@
+// Copyright (c) 2025 coze-dev Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package convertor
+
+//import (
+//    "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity"
+//
"github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/model" +//) +// +//func TaskDO2PO(task *entity.ObservabilityTask) *model.ObservabilityTask { +// return &model.ObservabilityTask{ +// ID: task.ID, +// WorkspaceID: task.WorkspaceID, +// Name: task.Name, +// Description: task.Description, +// TaskType: task.TaskType, +// TaskStatus: task.TaskStatus, +// TaskDetail: task.TaskDetail, +// SpanFilter: task.SpanFilter, +// EffectiveTime: task.EffectiveTime, +// BackfillEffectiveTime: task.BackfillEffectiveTime, +// Sampler: task.Sampler, +// TaskConfig: task.TaskConfig, +// CreatedAt: task.CreatedAt, +// UpdatedAt: task.UpdatedAt, +// CreatedBy: task.CreatedBy, +// UpdatedBy: task.UpdatedBy, +// } +//} +// +//func TaskPO2DO(task *model.ObservabilityTask) *entity.ObservabilityTask { +// return &entity.ObservabilityTask{ +// ID: task.ID, +// WorkspaceID: task.WorkspaceID, +// Name: task.Name, +// Description: task.Description, +// TaskType: task.TaskType, +// TaskStatus: task.TaskStatus, +// TaskDetail: task.TaskDetail, +// SpanFilter: task.SpanFilter, +// EffectiveTime: task.EffectiveTime, +// BackfillEffectiveTime: task.BackfillEffectiveTime, +// Sampler: task.Sampler, +// TaskConfig: task.TaskConfig, +// CreatedAt: task.CreatedAt, +// UpdatedAt: task.UpdatedAt, +// CreatedBy: task.CreatedBy, +// UpdatedBy: task.UpdatedBy, +// } +//} +// +//func TaskRunDO2PO(taskRun *entity.TaskRun) *model.ObservabilityTaskRun { +// return &model.ObservabilityTaskRun{ +// ID: taskRun.ID, +// TaskID: taskRun.TaskID, +// WorkspaceID: taskRun.WorkspaceID, +// TaskType: taskRun.TaskType, +// RunStatus: taskRun.RunStatus, +// RunDetail: taskRun.RunDetail, +// BackfillDetail: taskRun.BackfillDetail, +// RunStartAt: taskRun.RunStartAt, +// RunEndAt: taskRun.RunEndAt, +// RunConfig: taskRun.RunConfig, +// CreatedAt: taskRun.CreatedAt, +// UpdatedAt: taskRun.UpdatedAt, +// } +//} +// +//func TaskRunPO2DO(taskRun *model.ObservabilityTaskRun) *entity.TaskRun { +// return &entity.TaskRun{ +// ID: taskRun.ID, +// TaskID: taskRun.TaskID, +// WorkspaceID: taskRun.WorkspaceID, +// TaskType: taskRun.TaskType, +// RunStatus: taskRun.RunStatus, +// RunDetail: taskRun.RunDetail, +// BackfillDetail: taskRun.BackfillDetail, +// RunStartAt: taskRun.RunStartAt, +// RunEndAt: taskRun.RunEndAt, +// RunConfig: taskRun.RunConfig, +// CreatedAt: taskRun.CreatedAt, +// UpdatedAt: taskRun.UpdatedAt, +// } +//} +// +//func TaskRunsPO2DO(taskRun []*model.ObservabilityTaskRun) []*entity.TaskRun { +// if taskRun == nil { +// return nil +// } +// resp := make([]*entity.TaskRun, len(taskRun)) +// for i, tr := range taskRun { +// resp[i] = TaskRunPO2DO(tr) +// } +// return resp +//} diff --git a/backend/modules/observability/infra/repo/mysql/convertor/task_copy.go b/backend/modules/observability/infra/repo/mysql/convertor/task_copy.go new file mode 100644 index 000000000..2fcdd5e08 --- /dev/null +++ b/backend/modules/observability/infra/repo/mysql/convertor/task_copy.go @@ -0,0 +1,531 @@ +package convertor + +import ( + "strings" + + "github.com/bytedance/sonic" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/model" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +func TaskDO2PO(task *entity.ObservabilityTask) *model.ObservabilityTask { + 
return &model.ObservabilityTask{ + ID: task.ID, + WorkspaceID: task.WorkspaceID, + Name: task.Name, + Description: task.Description, + TaskType: task.TaskType, + TaskStatus: task.TaskStatus, + TaskDetail: ptr.Of(ToJSONString(task.TaskDetail)), + SpanFilter: ptr.Of(ToJSONString(task.SpanFilter)), + EffectiveTime: ptr.Of(ToJSONString(task.EffectiveTime)), + BackfillEffectiveTime: ptr.Of(ToJSONString(task.BackfillEffectiveTime)), + Sampler: ptr.Of(ToJSONString(task.Sampler)), + TaskConfig: ptr.Of(ToJSONString(task.TaskConfig)), + CreatedAt: task.CreatedAt, + UpdatedAt: task.UpdatedAt, + CreatedBy: task.CreatedBy, + UpdatedBy: task.UpdatedBy, + } +} + +func TaskPO2DO(task *model.ObservabilityTask) *entity.ObservabilityTask { + return &entity.ObservabilityTask{ + ID: task.ID, + WorkspaceID: task.WorkspaceID, + Name: task.Name, + Description: task.Description, + TaskType: task.TaskType, + TaskStatus: task.TaskStatus, + TaskDetail: TaskDetailJSON2DO(task.TaskDetail), + SpanFilter: SpanFilterJSON2DO(task.SpanFilter), + EffectiveTime: EffectiveTimeJSON2DO(task.EffectiveTime), + BackfillEffectiveTime: EffectiveTimeJSON2DO(task.BackfillEffectiveTime), + Sampler: SamplerJSON2DO(task.Sampler), + TaskConfig: TaskConfigJSON2DO(task.TaskConfig), + CreatedAt: task.CreatedAt, + UpdatedAt: task.UpdatedAt, + CreatedBy: task.CreatedBy, + UpdatedBy: task.UpdatedBy, + } +} + +func TaskDetailJSON2DO(taskDetail *string) *entity.RunDetail { + if taskDetail == nil || *taskDetail == "" { + return nil + } + var taskDetailDO *entity.RunDetail + if err := sonic.UnmarshalString(*taskDetail, &taskDetailDO); err != nil { + logs.Error("TaskDetailJSON2DO UnmarshalString err: %v", err) + return nil + } + return taskDetailDO +} + +func SpanFilterJSON2DO(spanFilter *string) *filter.SpanFilterFields { + if spanFilter == nil || *spanFilter == "" { + return nil + } + var spanFilterDO *filter.SpanFilterFields + if err := sonic.UnmarshalString(*spanFilter, &spanFilterDO); err != nil { + logs.Error("SpanFilterJSON2DO UnmarshalString err: %v", err) + return nil + } + return spanFilterDO +} + +func EffectiveTimeJSON2DO(effectiveTime *string) *entity.EffectiveTime { + if effectiveTime == nil || *effectiveTime == "" { + return nil + } + var effectiveTimeDO *entity.EffectiveTime + if err := sonic.UnmarshalString(*effectiveTime, &effectiveTimeDO); err != nil { + logs.Error("EffectiveTimeJSON2DO UnmarshalString err: %v", err) + return nil + } + return effectiveTimeDO +} + +func SamplerJSON2DO(sampler *string) *entity.Sampler { + if sampler == nil || *sampler == "" { + return nil + } + var samplerDO *entity.Sampler + if err := sonic.UnmarshalString(*sampler, &samplerDO); err != nil { + logs.Error("SamplerJSON2DO UnmarshalString err: %v", err) + return nil + } + return samplerDO +} + +func TaskConfigJSON2DO(taskConfig *string) *entity.TaskConfig { + if taskConfig == nil || *taskConfig == "" { + return nil + } + var taskConfigDO *entity.TaskConfig + if err := sonic.UnmarshalString(*taskConfig, &taskConfigDO); err != nil { + logs.Error("TaskConfigJSON2DO UnmarshalString err: %v", err) + return nil + } + return taskConfigDO +} + +func TaskRunDO2PO(taskRun *entity.TaskRun) *model.ObservabilityTaskRun { + return &model.ObservabilityTaskRun{ + ID: taskRun.ID, + TaskID: taskRun.TaskID, + WorkspaceID: taskRun.WorkspaceID, + TaskType: taskRun.TaskType, + RunStatus: taskRun.RunStatus, + RunDetail: ptr.Of(ToJSONString(taskRun.RunDetail)), + BackfillDetail: ptr.Of(ToJSONString(taskRun.BackfillDetail)), + RunStartAt: taskRun.RunStartAt, + RunEndAt: 
taskRun.RunEndAt, + RunConfig: ptr.Of(ToJSONString(taskRun.TaskRunConfig)), + CreatedAt: taskRun.CreatedAt, + UpdatedAt: taskRun.UpdatedAt, + } +} + +func TaskRunPO2DO(taskRun *model.ObservabilityTaskRun) *entity.TaskRun { + return &entity.TaskRun{ + ID: taskRun.ID, + TaskID: taskRun.TaskID, + WorkspaceID: taskRun.WorkspaceID, + TaskType: taskRun.TaskType, + RunStatus: taskRun.RunStatus, + RunDetail: RunDetailJSON2DO(taskRun.RunDetail), + BackfillDetail: BackfillRunDetailJSON2DO(taskRun.BackfillDetail), + RunStartAt: taskRun.RunStartAt, + RunEndAt: taskRun.RunEndAt, + TaskRunConfig: TaskRunConfigJSON2DO(taskRun.RunConfig), + CreatedAt: taskRun.CreatedAt, + UpdatedAt: taskRun.UpdatedAt, + } +} + +func RunDetailJSON2DO(runDetail *string) *entity.RunDetail { + if runDetail == nil || *runDetail == "" { + return nil + } + var runDetailDO *entity.RunDetail + if err := sonic.UnmarshalString(*runDetail, &runDetailDO); err != nil { + logs.Error("RunDetailJSON2DO UnmarshalString err: %v", err) + return nil + } + return runDetailDO +} +func BackfillRunDetailJSON2DO(backfillDetail *string) *entity.BackfillDetail { + if backfillDetail == nil || *backfillDetail == "" { + return nil + } + var backfillDetailDO *entity.BackfillDetail + if err := sonic.UnmarshalString(*backfillDetail, &backfillDetailDO); err != nil { + logs.Error("BackfillRunDetailJSON2DO UnmarshalString err: %v", err) + return nil + } + return backfillDetailDO +} +func TaskRunConfigJSON2DO(taskRunConfig *string) *entity.TaskRunConfig { + if taskRunConfig == nil || *taskRunConfig == "" { + return nil + } + var taskRunConfigDO *entity.TaskRunConfig + if err := sonic.UnmarshalString(*taskRunConfig, &taskRunConfigDO); err != nil { + logs.Error("TaskRunConfigJSON2DO UnmarshalString err: %v", err) + return nil + } + return taskRunConfigDO +} + +func TaskRunsPO2DO(taskRun []*model.ObservabilityTaskRun) []*entity.TaskRun { + if taskRun == nil { + return nil + } + resp := make([]*entity.TaskRun, len(taskRun)) + for i, tr := range taskRun { + resp[i] = TaskRunPO2DO(tr) + } + return resp +} + +//func TaskPOs2DOs(ctx context.Context, taskPOs []*entity.ObservabilityTask, userInfos map[string]*entity_common.UserInfo) []*task.Task { +// var taskList []*task.Task +// if len(taskPOs) == 0 { +// return taskList +// } +// for _, v := range taskPOs { +// taskDO := TaskPO2DTO(ctx, v, userInfos) +// taskList = append(taskList, taskDO) +// } +// return taskList +//} +//func TaskPO2DTO(ctx context.Context, v *entity.ObservabilityTask, userMap map[string]*entity_common.UserInfo) *task.Task { +// if v == nil { +// return nil +// } +// var taskDetail *task.RunDetail +// var totalCount, successCount, failedCount int64 +// for _, tr := range v.TaskRuns { +// trDO := TaskRunPO2DTO(ctx, tr, nil) +// if trDO.RunDetail != nil { +// totalCount += *trDO.RunDetail.TotalCount +// successCount += *trDO.RunDetail.SuccessCount +// failedCount += *trDO.RunDetail.FailedCount +// } +// } +// taskDetail = &task.RunDetail{ +// TotalCount: gptr.Of(totalCount), +// SuccessCount: gptr.Of(successCount), +// FailedCount: gptr.Of(failedCount), +// } +// taskInfo := &task.Task{ +// ID: ptr.Of(v.ID), +// Name: v.Name, +// Description: v.Description, +// WorkspaceID: ptr.Of(v.WorkspaceID), +// TaskType: v.TaskType, +// TaskStatus: ptr.Of(v.TaskStatus), +// Rule: RulePO2DO(ctx, v.SpanFilter, v.EffectiveTime, v.Sampler, v.BackfillEffectiveTime), +// TaskConfig: TaskConfigPO2DO(ctx, v.TaskConfig), +// TaskDetail: taskDetail, +// BaseInfo: &common.BaseInfo{ +// CreatedAt: 
gptr.Of(v.CreatedAt.UnixMilli()), +// UpdatedAt: gptr.Of(v.UpdatedAt.UnixMilli()), +// CreatedBy: UserInfoPO2DO(userMap[v.CreatedBy], v.CreatedBy), +// UpdatedBy: UserInfoPO2DO(userMap[v.UpdatedBy], v.UpdatedBy), +// }, +// } +// return taskInfo +//} +//func UserInfoPO2DO(userInfo *entity_common.UserInfo, userID string) *common.UserInfo { +// if userInfo == nil { +// return &common.UserInfo{ +// UserID: gptr.Of(userID), +// } +// } +// return &common.UserInfo{ +// Name: ptr.Of(userInfo.Name), +// EnName: ptr.Of(userInfo.EnName), +// AvatarURL: ptr.Of(userInfo.AvatarURL), +// AvatarThumb: ptr.Of(userInfo.AvatarThumb), +// OpenID: ptr.Of(userInfo.OpenID), +// UnionID: ptr.Of(userInfo.UnionID), +// UserID: ptr.Of(userInfo.UserID), +// Email: ptr.Of(userInfo.Email), +// } +//} + +//func RulePO2DO(ctx context.Context, spanFilter, effectiveTime, sampler, backFillEffectiveTime *string) *task.Rule { +// var spanFilterDO *filter.SpanFilterFields +// if spanFilter != nil { +// spanFilterDO = SpanFilterPO2DO(ctx, spanFilter) +// } +// rule := &task.Rule{ +// SpanFilters: spanFilterDO, +// EffectiveTime: EffectiveTimePO2DO(ctx, effectiveTime), +// Sampler: SamplerPO2DO(ctx, sampler), +// BackfillEffectiveTime: EffectiveTimePO2DO(ctx, backFillEffectiveTime), +// } +// return rule +//} +//func SamplerPO2DO(ctx context.Context, sampler *string) *task.Sampler { +// if sampler == nil { +// return nil +// } +// var samplerDO task.Sampler +// if err := sonic.Unmarshal([]byte(*sampler), &samplerDO); err != nil { +// logs.CtxError(ctx, "SamplerPO2DO sonic.Unmarshal err:%v", err) +// return nil +// } +// return &samplerDO +//} +//func SpanFilterPO2DO(ctx context.Context, spanFilter *string) *filter.SpanFilterFields { +// if spanFilter == nil { +// return nil +// } +// var spanFilterDO filter.SpanFilterFields +// if err := sonic.Unmarshal([]byte(*spanFilter), &spanFilterDO); err != nil { +// logs.CtxError(ctx, "SpanFilterPO2DO sonic.Unmarshal err:%v", err) +// return nil +// } +// return &spanFilterDO +//} + +//func TaskConfigPO2DO(ctx context.Context, taskConfig *string) *task.TaskConfig { +// if taskConfig == nil { +// return nil +// } +// var taskConfigDO task.TaskConfig +// if err := sonic.Unmarshal([]byte(*taskConfig), &taskConfigDO); err != nil { +// logs.CtxError(ctx, "TaskConfigPO2DO sonic.Unmarshal err:%v", err) +// return nil +// } +// return &taskConfigDO +//} + +// func BatchTaskPO2DTO(ctx context.Context, Tasks []*entity.ObservabilityTask) []*task.Task { +// ret := make([]*task.Task, len(Tasks)) +// for i, v := range Tasks { +// ret[i] = TaskPO2DTO(ctx, v, nil) +// } +// return ret +// } +//func EffectiveTimePO2DO(ctx context.Context, effectiveTime *string) *entity.EffectiveTime { +// if effectiveTime == nil { +// return nil +// } +// var effectiveTimeDO entity.EffectiveTime +// if err := sonic.Unmarshal([]byte(*effectiveTime), &effectiveTimeDO); err != nil { +// logs.CtxError(ctx, "EffectiveTimePO2DO sonic.Unmarshal err:%v", err) +// return nil +// } +// return &effectiveTimeDO +//} +//func CheckEffectiveTime(ctx context.Context, effectiveTime *task.EffectiveTime, taskStatus task.TaskStatus, effectiveTimePO *string) (*task.EffectiveTime, error) { +// effectiveTimeDO := EffectiveTimePO2DO(ctx, effectiveTimePO) +// if effectiveTimeDO == nil { +// logs.CtxError(ctx, "EffectiveTimePO2DO error") +// return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("effective time is nil")) +// } +// var validEffectiveTime task.EffectiveTime +// // 开始时间不能大于结束时间 +// if 
effectiveTime.GetStartAt() >= effectiveTime.GetEndAt() { +// logs.CtxError(ctx, "Start time must be less than end time") +// return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("start time must be less than end time")) +// } +// // 开始、结束时间不能小于当前时间 +// if effectiveTimeDO.StartAt != effectiveTime.GetStartAt() && effectiveTime.GetStartAt() < time.Now().UnixMilli() { +// logs.CtxError(ctx, "update time must be greater than current time") +// return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("start time must be greater than current time")) +// } +// if effectiveTimeDO.EndAt != effectiveTime.GetEndAt() && effectiveTime.GetEndAt() < time.Now().UnixMilli() { +// logs.CtxError(ctx, "update time must be greater than current time") +// return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("start time must be greater than current time")) +// } +// validEffectiveTime.StartAt = ptr.Of(effectiveTimeDO.StartAt) +// validEffectiveTime.EndAt = ptr.Of(effectiveTimeDO.EndAt) +// switch taskStatus { +// case task.TaskStatusUnstarted: +// if validEffectiveTime.StartAt != nil { +// validEffectiveTime.StartAt = effectiveTime.StartAt +// } +// if validEffectiveTime.EndAt != nil { +// validEffectiveTime.EndAt = effectiveTime.EndAt +// } +// case task.TaskStatusRunning, task.TaskStatusPending: +// if validEffectiveTime.EndAt != nil { +// validEffectiveTime.EndAt = effectiveTime.EndAt +// } +// default: +// logs.CtxError(ctx, "Invalid task status:%s", taskStatus) +// return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid task status")) +// } +// return &validEffectiveTime, nil +//} + +//func CheckTaskStatus(ctx context.Context, taskStatus task.TaskStatus, currentTaskStatus task.TaskStatus) (task.TaskStatus, error) { +// var validTaskStatus task.TaskStatus +// // [0530]todo: 任务状态校验 +// switch taskStatus { +// case task.TaskStatusUnstarted: +// if currentTaskStatus == task.TaskStatusUnstarted { +// validTaskStatus = taskStatus +// } else { +// logs.CtxError(ctx, "Invalid task status:%s", taskStatus) +// return "", errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid task status")) +// } +// case task.TaskStatusRunning: +// if currentTaskStatus == task.TaskStatusUnstarted || currentTaskStatus == task.TaskStatusPending { +// validTaskStatus = taskStatus +// } else { +// logs.CtxError(ctx, "Invalid task status:%s,currentTaskStatus:%s", taskStatus, currentTaskStatus) +// return "", errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid task status")) +// } +// case task.TaskStatusPending: +// if currentTaskStatus == task.TaskStatusRunning { +// validTaskStatus = task.TaskStatusPending +// } +// case task.TaskStatusDisabled: +// if currentTaskStatus == task.TaskStatusUnstarted || currentTaskStatus == task.TaskStatusPending { +// validTaskStatus = task.TaskStatusDisabled +// } +// case task.TaskStatusSuccess: +// if currentTaskStatus != task.TaskStatusSuccess { +// validTaskStatus = task.TaskStatusSuccess +// } +// } +// +// return validTaskStatus, nil +//} + +//func TaskDTO2PO(ctx context.Context, taskDO *task.Task, userID string, spanFilters *filter.SpanFilterFields) *entity.ObservabilityTask { +// if taskDO == nil { +// return nil +// } +// var createdBy, updatedBy string +// if taskDO.GetBaseInfo().GetCreatedBy() != nil { +// createdBy = 
taskDO.GetBaseInfo().GetCreatedBy().GetUserID() +// } +// if taskDO.GetBaseInfo().GetUpdatedBy() != nil { +// updatedBy = taskDO.GetBaseInfo().GetUpdatedBy().GetUserID() +// } +// if userID != "" { +// createdBy = userID +// updatedBy = userID +// } else { +// if taskDO.GetBaseInfo().GetCreatedBy() != nil { +// createdBy = taskDO.GetBaseInfo().GetCreatedBy().GetUserID() +// } +// if taskDO.GetBaseInfo().GetUpdatedBy() != nil { +// updatedBy = taskDO.GetBaseInfo().GetUpdatedBy().GetUserID() +// } +// } +// var spanFilterDO *filter.SpanFilterFields +// if spanFilters != nil { +// spanFilterDO = spanFilters +// } else { +// spanFilterDO = taskDO.GetRule().GetSpanFilters() +// } +// +// return &entity.ObservabilityTask{ +// ID: taskDO.GetID(), +// WorkspaceID: taskDO.GetWorkspaceID(), +// Name: taskDO.GetName(), +// Description: ptr.Of(taskDO.GetDescription()), +// TaskType: taskDO.GetTaskType(), +// TaskStatus: taskDO.GetTaskStatus(), +// TaskDetail: ptr.Of(ToJSONString(ctx, taskDO.GetTaskDetail())), +// SpanFilter: SpanFilterDTO2PO(ctx, spanFilterDO), +// EffectiveTime: ptr.Of(ToJSONString(ctx, taskDO.GetRule().GetEffectiveTime())), +// Sampler: ptr.Of(ToJSONString(ctx, taskDO.GetRule().GetSampler())), +// TaskConfig: TaskConfigDTO2PO(ctx, taskDO.GetTaskConfig()), +// CreatedAt: time.Now(), +// UpdatedAt: time.Now(), +// CreatedBy: createdBy, +// UpdatedBy: updatedBy, +// BackfillEffectiveTime: ptr.Of(ToJSONString(ctx, taskDO.GetRule().GetBackfillEffectiveTime())), +// } +//} +//func SpanFilterDTO2PO(ctx context.Context, filters *filter.SpanFilterFields) *string { +// var filtersDO *loop_span.FilterFields +// if filters.GetFilters() != nil { +// filtersDO = convertor.FilterFieldsDTO2DO(filters.GetFilters()) +// } +// filterDO := entity.SpanFilter{ +// PlatformType: filters.GetPlatformType(), +// SpanListType: filters.GetSpanListType(), +// } +// if filtersDO != nil { +// filterDO.Filters = *filtersDO +// } +// +// return ptr.Of(ToJSONString(ctx, filterDO)) +//} + +// func TaskConfigDTO2PO(ctx context.Context, taskConfig *task.TaskConfig) *string { +// if taskConfig == nil { +// return nil +// } +// var evalSetNames []string +// jspnPathMapping := make(map[string]string) +// for _, autoEvaluateConfig := range taskConfig.GetAutoEvaluateConfigs() { +// for _, mapping := range autoEvaluateConfig.GetFieldMappings() { +// jspnPath := fmt.Sprintf("%s.%s", mapping.TraceFieldKey, mapping.TraceFieldJsonpath) +// if _, exits := jspnPathMapping[jspnPath]; exits { +// mapping.EvalSetName = gptr.Of(jspnPathMapping[jspnPath]) +// continue +// } +// evalSetName := getLastPartAfterDot(jspnPath) +// for exists := slices.Contains(evalSetNames, evalSetName); exists; exists = slices.Contains(evalSetNames, evalSetName) { +// evalSetName += "_" +// } +// mapping.EvalSetName = gptr.Of(evalSetName) +// evalSetNames = append(evalSetNames, evalSetName) +// jspnPathMapping[jspnPath] = evalSetName +// } +// } +// +// return gptr.Of(ToJSONString(ctx, taskConfig)) +// } +func getLastPartAfterDot(s string) string { + s = strings.TrimRight(s, ".") + lastDotIndex := strings.LastIndex(s, ".") + if lastDotIndex == -1 { + lastPart := s + return processBracket(lastPart) + } + lastPart := s[lastDotIndex+1:] + return processBracket(lastPart) +} + +// processBracket 处理字符串中的方括号,将其转换为下划线连接的形式 +func processBracket(s string) string { + openBracketIndex := strings.Index(s, "[") + if openBracketIndex == -1 { + return s + } + closeBracketIndex := strings.Index(s, "]") + if closeBracketIndex == -1 { + return s + } + base := 
s[:openBracketIndex] + index := s[openBracketIndex+1 : closeBracketIndex] + return base + "_" + index +} + +// ToJSONString 通用函数,将对象转换为 JSON 字符串指针 +func ToJSONString(obj interface{}) string { + if obj == nil { + return "" + } + jsonData, err := sonic.Marshal(obj) + if err != nil { + return "" + } + jsonStr := string(jsonData) + return jsonStr +} diff --git a/backend/modules/observability/infra/repo/mysql/gorm_gen/model/auto_task_run.gen.go b/backend/modules/observability/infra/repo/mysql/gorm_gen/model/auto_task_run.gen.go new file mode 100644 index 000000000..9d717accd --- /dev/null +++ b/backend/modules/observability/infra/repo/mysql/gorm_gen/model/auto_task_run.gen.go @@ -0,0 +1,32 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. + +package model + +import ( + "time" +) + +const TableNameObservabilityTaskRun = "auto_task_run" + +// ObservabilityTaskRun Task Run信息 +type ObservabilityTaskRun struct { + ID int64 `gorm:"column:id;type:bigint(20) unsigned;primaryKey;comment:TaskRun ID" json:"id"` // TaskRun ID + WorkspaceID int64 `gorm:"column:workspace_id;type:bigint(20) unsigned;not null;index:idx_workspace_task,priority:1;comment:空间ID" json:"workspace_id"` // 空间ID + TaskID int64 `gorm:"column:task_id;type:bigint(20) unsigned;not null;index:idx_task_id_status,priority:1;index:idx_workspace_task,priority:2;comment:Task ID" json:"task_id"` // Task ID + TaskType string `gorm:"column:task_type;type:varchar(64);not null;comment:Task类型" json:"task_type"` // Task类型 + RunStatus string `gorm:"column:run_status;type:varchar(64);not null;index:idx_task_id_status,priority:2;comment:Task Run状态" json:"run_status"` // Task Run状态 + RunDetail *string `gorm:"column:run_detail;type:json;comment:Task Run运行状态详情" json:"run_detail"` // Task Run运行状态详情 + BackfillDetail *string `gorm:"column:backfill_detail;type:json;comment:历史回溯Task Run运行状态详情" json:"backfill_detail"` // 历史回溯Task Run运行状态详情 + RunStartAt time.Time `gorm:"column:run_start_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:任务开始时间" json:"run_start_at"` // 任务开始时间 + RunEndAt time.Time `gorm:"column:run_end_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:任务结束时间" json:"run_end_at"` // 任务结束时间 + RunConfig *string `gorm:"column:run_config;type:json;comment:相关Run的配置信息" json:"run_config"` // 相关Run的配置信息 + CreatedAt time.Time `gorm:"column:created_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:创建时间" json:"created_at"` // 创建时间 + UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:更新时间" json:"updated_at"` // 更新时间 +} + +// TableName ObservabilityTaskRun's table name +func (*ObservabilityTaskRun) TableName() string { + return TableNameObservabilityTaskRun +} diff --git a/backend/modules/observability/infra/repo/mysql/gorm_gen/model/task.gen.go b/backend/modules/observability/infra/repo/mysql/gorm_gen/model/task.gen.go new file mode 100644 index 000000000..89a228b1b --- /dev/null +++ b/backend/modules/observability/infra/repo/mysql/gorm_gen/model/task.gen.go @@ -0,0 +1,36 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. 
+ +package model + +import ( + "time" +) + +const TableNameObservabilityTask = "task" + +// ObservabilityTask 任务信息 +type ObservabilityTask struct { + ID int64 `gorm:"column:id;type:bigint(20) unsigned;primaryKey;comment:Task ID" json:"id"` // Task ID + WorkspaceID int64 `gorm:"column:workspace_id;type:bigint(20) unsigned;not null;index:idx_space_id_status,priority:1;index:idx_space_id_type,priority:1;comment:空间ID" json:"workspace_id"` // 空间ID + Name string `gorm:"column:name;type:varchar(128);not null;comment:任务名称" json:"name"` // 任务名称 + Description *string `gorm:"column:description;type:varchar(2048);comment:任务描述" json:"description"` // 任务描述 + TaskType string `gorm:"column:task_type;type:varchar(64);not null;index:idx_space_id_type,priority:2;comment:任务类型" json:"task_type"` // 任务类型 + TaskStatus string `gorm:"column:task_status;type:varchar(64);not null;index:idx_space_id_status,priority:2;comment:任务状态" json:"task_status"` // 任务状态 + TaskDetail *string `gorm:"column:task_detail;type:json;comment:任务运行状态详情" json:"task_detail"` // 任务运行状态详情 + SpanFilter *string `gorm:"column:span_filter;type:json;comment:span 过滤条件" json:"span_filter"` // span 过滤条件 + EffectiveTime *string `gorm:"column:effective_time;type:json;comment:生效时间" json:"effective_time"` // 生效时间 + BackfillEffectiveTime *string `gorm:"column:backfill_effective_time;type:json;comment:历史回溯生效时间" json:"backfill_effective_time"` // 历史回溯生效时间 + Sampler *string `gorm:"column:sampler;type:json;comment:采样器" json:"sampler"` // 采样器 + TaskConfig *string `gorm:"column:task_config;type:json;comment:相关任务的配置信息" json:"task_config"` // 相关任务的配置信息 + CreatedAt time.Time `gorm:"column:created_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:创建时间" json:"created_at"` // 创建时间 + UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:更新时间" json:"updated_at"` // 更新时间 + CreatedBy string `gorm:"column:created_by;type:varchar(128);not null;comment:创建人" json:"created_by"` // 创建人 + UpdatedBy string `gorm:"column:updated_by;type:varchar(128);not null;comment:更新人" json:"updated_by"` // 更新人 +} + +// TableName ObservabilityTask's table name +func (*ObservabilityTask) TableName() string { + return TableNameObservabilityTask +} diff --git a/backend/modules/observability/infra/repo/mysql/gorm_gen/query/auto_task_run.gen.go b/backend/modules/observability/infra/repo/mysql/gorm_gen/query/auto_task_run.gen.go new file mode 100644 index 000000000..1a0a3f03f --- /dev/null +++ b/backend/modules/observability/infra/repo/mysql/gorm_gen/query/auto_task_run.gen.go @@ -0,0 +1,376 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. + +package query + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "gorm.io/gen" + "gorm.io/gen/field" + + "gorm.io/plugin/dbresolver" + + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/model" +) + +func newObservabilityTaskRun(db *gorm.DB, opts ...gen.DOOption) observabilityTaskRun { + _observabilityTaskRun := observabilityTaskRun{} + + _observabilityTaskRun.observabilityTaskRunDo.UseDB(db, opts...) 
+ _observabilityTaskRun.observabilityTaskRunDo.UseModel(&model.ObservabilityTaskRun{}) + + tableName := _observabilityTaskRun.observabilityTaskRunDo.TableName() + _observabilityTaskRun.ALL = field.NewAsterisk(tableName) + _observabilityTaskRun.ID = field.NewInt64(tableName, "id") + _observabilityTaskRun.WorkspaceID = field.NewInt64(tableName, "workspace_id") + _observabilityTaskRun.TaskID = field.NewInt64(tableName, "task_id") + _observabilityTaskRun.TaskType = field.NewString(tableName, "task_type") + _observabilityTaskRun.RunStatus = field.NewString(tableName, "run_status") + _observabilityTaskRun.RunDetail = field.NewString(tableName, "run_detail") + _observabilityTaskRun.BackfillDetail = field.NewString(tableName, "backfill_detail") + _observabilityTaskRun.RunStartAt = field.NewTime(tableName, "run_start_at") + _observabilityTaskRun.RunEndAt = field.NewTime(tableName, "run_end_at") + _observabilityTaskRun.RunConfig = field.NewString(tableName, "run_config") + _observabilityTaskRun.CreatedAt = field.NewTime(tableName, "created_at") + _observabilityTaskRun.UpdatedAt = field.NewTime(tableName, "updated_at") + + _observabilityTaskRun.fillFieldMap() + + return _observabilityTaskRun +} + +// observabilityTaskRun Task Run信息 +type observabilityTaskRun struct { + observabilityTaskRunDo observabilityTaskRunDo + + ALL field.Asterisk + ID field.Int64 // TaskRun ID + WorkspaceID field.Int64 // 空间ID + TaskID field.Int64 // Task ID + TaskType field.String // Task类型 + RunStatus field.String // Task Run状态 + RunDetail field.String // Task Run运行状态详情 + BackfillDetail field.String // 历史回溯Task Run运行状态详情 + RunStartAt field.Time // 任务开始时间 + RunEndAt field.Time // 任务结束时间 + RunConfig field.String // 相关Run的配置信息 + CreatedAt field.Time // 创建时间 + UpdatedAt field.Time // 更新时间 + + fieldMap map[string]field.Expr +} + +func (o observabilityTaskRun) Table(newTableName string) *observabilityTaskRun { + o.observabilityTaskRunDo.UseTable(newTableName) + return o.updateTableName(newTableName) +} + +func (o observabilityTaskRun) As(alias string) *observabilityTaskRun { + o.observabilityTaskRunDo.DO = *(o.observabilityTaskRunDo.As(alias).(*gen.DO)) + return o.updateTableName(alias) +} + +func (o *observabilityTaskRun) updateTableName(table string) *observabilityTaskRun { + o.ALL = field.NewAsterisk(table) + o.ID = field.NewInt64(table, "id") + o.WorkspaceID = field.NewInt64(table, "workspace_id") + o.TaskID = field.NewInt64(table, "task_id") + o.TaskType = field.NewString(table, "task_type") + o.RunStatus = field.NewString(table, "run_status") + o.RunDetail = field.NewString(table, "run_detail") + o.BackfillDetail = field.NewString(table, "backfill_detail") + o.RunStartAt = field.NewTime(table, "run_start_at") + o.RunEndAt = field.NewTime(table, "run_end_at") + o.RunConfig = field.NewString(table, "run_config") + o.CreatedAt = field.NewTime(table, "created_at") + o.UpdatedAt = field.NewTime(table, "updated_at") + + o.fillFieldMap() + + return o +} + +func (o *observabilityTaskRun) WithContext(ctx context.Context) *observabilityTaskRunDo { + return o.observabilityTaskRunDo.WithContext(ctx) +} + +func (o observabilityTaskRun) TableName() string { return o.observabilityTaskRunDo.TableName() } + +func (o observabilityTaskRun) Alias() string { return o.observabilityTaskRunDo.Alias() } + +func (o observabilityTaskRun) Columns(cols ...field.Expr) gen.Columns { + return o.observabilityTaskRunDo.Columns(cols...) 
+} + +func (o *observabilityTaskRun) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := o.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (o *observabilityTaskRun) fillFieldMap() { + o.fieldMap = make(map[string]field.Expr, 12) + o.fieldMap["id"] = o.ID + o.fieldMap["workspace_id"] = o.WorkspaceID + o.fieldMap["task_id"] = o.TaskID + o.fieldMap["task_type"] = o.TaskType + o.fieldMap["run_status"] = o.RunStatus + o.fieldMap["run_detail"] = o.RunDetail + o.fieldMap["backfill_detail"] = o.BackfillDetail + o.fieldMap["run_start_at"] = o.RunStartAt + o.fieldMap["run_end_at"] = o.RunEndAt + o.fieldMap["run_config"] = o.RunConfig + o.fieldMap["created_at"] = o.CreatedAt + o.fieldMap["updated_at"] = o.UpdatedAt +} + +func (o observabilityTaskRun) clone(db *gorm.DB) observabilityTaskRun { + o.observabilityTaskRunDo.ReplaceConnPool(db.Statement.ConnPool) + return o +} + +func (o observabilityTaskRun) replaceDB(db *gorm.DB) observabilityTaskRun { + o.observabilityTaskRunDo.ReplaceDB(db) + return o +} + +type observabilityTaskRunDo struct{ gen.DO } + +func (o observabilityTaskRunDo) Debug() *observabilityTaskRunDo { + return o.withDO(o.DO.Debug()) +} + +func (o observabilityTaskRunDo) WithContext(ctx context.Context) *observabilityTaskRunDo { + return o.withDO(o.DO.WithContext(ctx)) +} + +func (o observabilityTaskRunDo) ReadDB() *observabilityTaskRunDo { + return o.Clauses(dbresolver.Read) +} + +func (o observabilityTaskRunDo) WriteDB() *observabilityTaskRunDo { + return o.Clauses(dbresolver.Write) +} + +func (o observabilityTaskRunDo) Session(config *gorm.Session) *observabilityTaskRunDo { + return o.withDO(o.DO.Session(config)) +} + +func (o observabilityTaskRunDo) Clauses(conds ...clause.Expression) *observabilityTaskRunDo { + return o.withDO(o.DO.Clauses(conds...)) +} + +func (o observabilityTaskRunDo) Returning(value interface{}, columns ...string) *observabilityTaskRunDo { + return o.withDO(o.DO.Returning(value, columns...)) +} + +func (o observabilityTaskRunDo) Not(conds ...gen.Condition) *observabilityTaskRunDo { + return o.withDO(o.DO.Not(conds...)) +} + +func (o observabilityTaskRunDo) Or(conds ...gen.Condition) *observabilityTaskRunDo { + return o.withDO(o.DO.Or(conds...)) +} + +func (o observabilityTaskRunDo) Select(conds ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.Select(conds...)) +} + +func (o observabilityTaskRunDo) Where(conds ...gen.Condition) *observabilityTaskRunDo { + return o.withDO(o.DO.Where(conds...)) +} + +func (o observabilityTaskRunDo) Order(conds ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.Order(conds...)) +} + +func (o observabilityTaskRunDo) Distinct(cols ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.Distinct(cols...)) +} + +func (o observabilityTaskRunDo) Omit(cols ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.Omit(cols...)) +} + +func (o observabilityTaskRunDo) Join(table schema.Tabler, on ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.Join(table, on...)) +} + +func (o observabilityTaskRunDo) LeftJoin(table schema.Tabler, on ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.LeftJoin(table, on...)) +} + +func (o observabilityTaskRunDo) RightJoin(table schema.Tabler, on ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.RightJoin(table, on...)) +} + +func (o observabilityTaskRunDo) Group(cols ...field.Expr) *observabilityTaskRunDo { + return 
o.withDO(o.DO.Group(cols...)) +} + +func (o observabilityTaskRunDo) Having(conds ...gen.Condition) *observabilityTaskRunDo { + return o.withDO(o.DO.Having(conds...)) +} + +func (o observabilityTaskRunDo) Limit(limit int) *observabilityTaskRunDo { + return o.withDO(o.DO.Limit(limit)) +} + +func (o observabilityTaskRunDo) Offset(offset int) *observabilityTaskRunDo { + return o.withDO(o.DO.Offset(offset)) +} + +func (o observabilityTaskRunDo) Scopes(funcs ...func(gen.Dao) gen.Dao) *observabilityTaskRunDo { + return o.withDO(o.DO.Scopes(funcs...)) +} + +func (o observabilityTaskRunDo) Unscoped() *observabilityTaskRunDo { + return o.withDO(o.DO.Unscoped()) +} + +func (o observabilityTaskRunDo) Create(values ...*model.ObservabilityTaskRun) error { + if len(values) == 0 { + return nil + } + return o.DO.Create(values) +} + +func (o observabilityTaskRunDo) CreateInBatches(values []*model.ObservabilityTaskRun, batchSize int) error { + return o.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (o observabilityTaskRunDo) Save(values ...*model.ObservabilityTaskRun) error { + if len(values) == 0 { + return nil + } + return o.DO.Save(values) +} + +func (o observabilityTaskRunDo) First() (*model.ObservabilityTaskRun, error) { + if result, err := o.DO.First(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTaskRun), nil + } +} + +func (o observabilityTaskRunDo) Take() (*model.ObservabilityTaskRun, error) { + if result, err := o.DO.Take(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTaskRun), nil + } +} + +func (o observabilityTaskRunDo) Last() (*model.ObservabilityTaskRun, error) { + if result, err := o.DO.Last(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTaskRun), nil + } +} + +func (o observabilityTaskRunDo) Find() ([]*model.ObservabilityTaskRun, error) { + result, err := o.DO.Find() + return result.([]*model.ObservabilityTaskRun), err +} + +func (o observabilityTaskRunDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ObservabilityTaskRun, err error) { + buf := make([]*model.ObservabilityTaskRun, 0, batchSize) + err = o.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) 
}() + return fc(tx, batch) + }) + return results, err +} + +func (o observabilityTaskRunDo) FindInBatches(result *[]*model.ObservabilityTaskRun, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return o.DO.FindInBatches(result, batchSize, fc) +} + +func (o observabilityTaskRunDo) Attrs(attrs ...field.AssignExpr) *observabilityTaskRunDo { + return o.withDO(o.DO.Attrs(attrs...)) +} + +func (o observabilityTaskRunDo) Assign(attrs ...field.AssignExpr) *observabilityTaskRunDo { + return o.withDO(o.DO.Assign(attrs...)) +} + +func (o observabilityTaskRunDo) Joins(fields ...field.RelationField) *observabilityTaskRunDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Joins(_f)) + } + return &o +} + +func (o observabilityTaskRunDo) Preload(fields ...field.RelationField) *observabilityTaskRunDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Preload(_f)) + } + return &o +} + +func (o observabilityTaskRunDo) FirstOrInit() (*model.ObservabilityTaskRun, error) { + if result, err := o.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTaskRun), nil + } +} + +func (o observabilityTaskRunDo) FirstOrCreate() (*model.ObservabilityTaskRun, error) { + if result, err := o.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTaskRun), nil + } +} + +func (o observabilityTaskRunDo) FindByPage(offset int, limit int) (result []*model.ObservabilityTaskRun, count int64, err error) { + result, err = o.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = o.Offset(-1).Limit(-1).Count() + return +} + +func (o observabilityTaskRunDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = o.Count() + if err != nil { + return + } + + err = o.Offset(offset).Limit(limit).Scan(result) + return +} + +func (o observabilityTaskRunDo) Scan(result interface{}) (err error) { + return o.DO.Scan(result) +} + +func (o observabilityTaskRunDo) Delete(models ...*model.ObservabilityTaskRun) (result gen.ResultInfo, err error) { + return o.DO.Delete(models) +} + +func (o *observabilityTaskRunDo) withDO(do gen.Dao) *observabilityTaskRunDo { + o.DO = *do.(*gen.DO) + return o +} diff --git a/backend/modules/observability/infra/repo/mysql/gorm_gen/query/gen.go b/backend/modules/observability/infra/repo/mysql/gorm_gen/query/gen.go index e543ee240..647547ef5 100644 --- a/backend/modules/observability/infra/repo/mysql/gorm_gen/query/gen.go +++ b/backend/modules/observability/infra/repo/mysql/gorm_gen/query/gen.go @@ -17,23 +17,29 @@ import ( func Use(db *gorm.DB, opts ...gen.DOOption) *Query { return &Query{ - db: db, - ObservabilityView: newObservabilityView(db, opts...), + db: db, + ObservabilityTask: newObservabilityTask(db, opts...), + ObservabilityTaskRun: newObservabilityTaskRun(db, opts...), + ObservabilityView: newObservabilityView(db, opts...), } } type Query struct { db *gorm.DB - ObservabilityView observabilityView + ObservabilityTask observabilityTask + ObservabilityTaskRun observabilityTaskRun + ObservabilityView observabilityView } func (q *Query) Available() bool { return q.db != nil } func (q *Query) clone(db *gorm.DB) *Query { return &Query{ - db: db, - ObservabilityView: q.ObservabilityView.clone(db), + db: db, + ObservabilityTask: q.ObservabilityTask.clone(db), + ObservabilityTaskRun: q.ObservabilityTaskRun.clone(db), + ObservabilityView: 
q.ObservabilityView.clone(db), } } @@ -47,18 +53,24 @@ func (q *Query) WriteDB() *Query { func (q *Query) ReplaceDB(db *gorm.DB) *Query { return &Query{ - db: db, - ObservabilityView: q.ObservabilityView.replaceDB(db), + db: db, + ObservabilityTask: q.ObservabilityTask.replaceDB(db), + ObservabilityTaskRun: q.ObservabilityTaskRun.replaceDB(db), + ObservabilityView: q.ObservabilityView.replaceDB(db), } } type queryCtx struct { - ObservabilityView *observabilityViewDo + ObservabilityTask *observabilityTaskDo + ObservabilityTaskRun *observabilityTaskRunDo + ObservabilityView *observabilityViewDo } func (q *Query) WithContext(ctx context.Context) *queryCtx { return &queryCtx{ - ObservabilityView: q.ObservabilityView.WithContext(ctx), + ObservabilityTask: q.ObservabilityTask.WithContext(ctx), + ObservabilityTaskRun: q.ObservabilityTaskRun.WithContext(ctx), + ObservabilityView: q.ObservabilityView.WithContext(ctx), } } diff --git a/backend/modules/observability/infra/repo/mysql/gorm_gen/query/task.gen.go b/backend/modules/observability/infra/repo/mysql/gorm_gen/query/task.gen.go new file mode 100644 index 000000000..26e5002f6 --- /dev/null +++ b/backend/modules/observability/infra/repo/mysql/gorm_gen/query/task.gen.go @@ -0,0 +1,392 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. + +package query + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "gorm.io/gen" + "gorm.io/gen/field" + + "gorm.io/plugin/dbresolver" + + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/model" +) + +func newObservabilityTask(db *gorm.DB, opts ...gen.DOOption) observabilityTask { + _observabilityTask := observabilityTask{} + + _observabilityTask.observabilityTaskDo.UseDB(db, opts...) 
+ _observabilityTask.observabilityTaskDo.UseModel(&model.ObservabilityTask{}) + + tableName := _observabilityTask.observabilityTaskDo.TableName() + _observabilityTask.ALL = field.NewAsterisk(tableName) + _observabilityTask.ID = field.NewInt64(tableName, "id") + _observabilityTask.WorkspaceID = field.NewInt64(tableName, "workspace_id") + _observabilityTask.Name = field.NewString(tableName, "name") + _observabilityTask.Description = field.NewString(tableName, "description") + _observabilityTask.TaskType = field.NewString(tableName, "task_type") + _observabilityTask.TaskStatus = field.NewString(tableName, "task_status") + _observabilityTask.TaskDetail = field.NewString(tableName, "task_detail") + _observabilityTask.SpanFilter = field.NewString(tableName, "span_filter") + _observabilityTask.EffectiveTime = field.NewString(tableName, "effective_time") + _observabilityTask.BackfillEffectiveTime = field.NewString(tableName, "backfill_effective_time") + _observabilityTask.Sampler = field.NewString(tableName, "sampler") + _observabilityTask.TaskConfig = field.NewString(tableName, "task_config") + _observabilityTask.CreatedAt = field.NewTime(tableName, "created_at") + _observabilityTask.UpdatedAt = field.NewTime(tableName, "updated_at") + _observabilityTask.CreatedBy = field.NewString(tableName, "created_by") + _observabilityTask.UpdatedBy = field.NewString(tableName, "updated_by") + + _observabilityTask.fillFieldMap() + + return _observabilityTask +} + +// observabilityTask 任务信息 +type observabilityTask struct { + observabilityTaskDo observabilityTaskDo + + ALL field.Asterisk + ID field.Int64 // Task ID + WorkspaceID field.Int64 // 空间ID + Name field.String // 任务名称 + Description field.String // 任务描述 + TaskType field.String // 任务类型 + TaskStatus field.String // 任务状态 + TaskDetail field.String // 任务运行状态详情 + SpanFilter field.String // span 过滤条件 + EffectiveTime field.String // 生效时间 + BackfillEffectiveTime field.String // 历史回溯生效时间 + Sampler field.String // 采样器 + TaskConfig field.String // 相关任务的配置信息 + CreatedAt field.Time // 创建时间 + UpdatedAt field.Time // 更新时间 + CreatedBy field.String // 创建人 + UpdatedBy field.String // 更新人 + + fieldMap map[string]field.Expr +} + +func (o observabilityTask) Table(newTableName string) *observabilityTask { + o.observabilityTaskDo.UseTable(newTableName) + return o.updateTableName(newTableName) +} + +func (o observabilityTask) As(alias string) *observabilityTask { + o.observabilityTaskDo.DO = *(o.observabilityTaskDo.As(alias).(*gen.DO)) + return o.updateTableName(alias) +} + +func (o *observabilityTask) updateTableName(table string) *observabilityTask { + o.ALL = field.NewAsterisk(table) + o.ID = field.NewInt64(table, "id") + o.WorkspaceID = field.NewInt64(table, "workspace_id") + o.Name = field.NewString(table, "name") + o.Description = field.NewString(table, "description") + o.TaskType = field.NewString(table, "task_type") + o.TaskStatus = field.NewString(table, "task_status") + o.TaskDetail = field.NewString(table, "task_detail") + o.SpanFilter = field.NewString(table, "span_filter") + o.EffectiveTime = field.NewString(table, "effective_time") + o.BackfillEffectiveTime = field.NewString(table, "backfill_effective_time") + o.Sampler = field.NewString(table, "sampler") + o.TaskConfig = field.NewString(table, "task_config") + o.CreatedAt = field.NewTime(table, "created_at") + o.UpdatedAt = field.NewTime(table, "updated_at") + o.CreatedBy = field.NewString(table, "created_by") + o.UpdatedBy = field.NewString(table, "updated_by") + + o.fillFieldMap() + + return o +} + +func (o 
*observabilityTask) WithContext(ctx context.Context) *observabilityTaskDo { + return o.observabilityTaskDo.WithContext(ctx) +} + +func (o observabilityTask) TableName() string { return o.observabilityTaskDo.TableName() } + +func (o observabilityTask) Alias() string { return o.observabilityTaskDo.Alias() } + +func (o observabilityTask) Columns(cols ...field.Expr) gen.Columns { + return o.observabilityTaskDo.Columns(cols...) +} + +func (o *observabilityTask) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := o.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (o *observabilityTask) fillFieldMap() { + o.fieldMap = make(map[string]field.Expr, 16) + o.fieldMap["id"] = o.ID + o.fieldMap["workspace_id"] = o.WorkspaceID + o.fieldMap["name"] = o.Name + o.fieldMap["description"] = o.Description + o.fieldMap["task_type"] = o.TaskType + o.fieldMap["task_status"] = o.TaskStatus + o.fieldMap["task_detail"] = o.TaskDetail + o.fieldMap["span_filter"] = o.SpanFilter + o.fieldMap["effective_time"] = o.EffectiveTime + o.fieldMap["backfill_effective_time"] = o.BackfillEffectiveTime + o.fieldMap["sampler"] = o.Sampler + o.fieldMap["task_config"] = o.TaskConfig + o.fieldMap["created_at"] = o.CreatedAt + o.fieldMap["updated_at"] = o.UpdatedAt + o.fieldMap["created_by"] = o.CreatedBy + o.fieldMap["updated_by"] = o.UpdatedBy +} + +func (o observabilityTask) clone(db *gorm.DB) observabilityTask { + o.observabilityTaskDo.ReplaceConnPool(db.Statement.ConnPool) + return o +} + +func (o observabilityTask) replaceDB(db *gorm.DB) observabilityTask { + o.observabilityTaskDo.ReplaceDB(db) + return o +} + +type observabilityTaskDo struct{ gen.DO } + +func (o observabilityTaskDo) Debug() *observabilityTaskDo { + return o.withDO(o.DO.Debug()) +} + +func (o observabilityTaskDo) WithContext(ctx context.Context) *observabilityTaskDo { + return o.withDO(o.DO.WithContext(ctx)) +} + +func (o observabilityTaskDo) ReadDB() *observabilityTaskDo { + return o.Clauses(dbresolver.Read) +} + +func (o observabilityTaskDo) WriteDB() *observabilityTaskDo { + return o.Clauses(dbresolver.Write) +} + +func (o observabilityTaskDo) Session(config *gorm.Session) *observabilityTaskDo { + return o.withDO(o.DO.Session(config)) +} + +func (o observabilityTaskDo) Clauses(conds ...clause.Expression) *observabilityTaskDo { + return o.withDO(o.DO.Clauses(conds...)) +} + +func (o observabilityTaskDo) Returning(value interface{}, columns ...string) *observabilityTaskDo { + return o.withDO(o.DO.Returning(value, columns...)) +} + +func (o observabilityTaskDo) Not(conds ...gen.Condition) *observabilityTaskDo { + return o.withDO(o.DO.Not(conds...)) +} + +func (o observabilityTaskDo) Or(conds ...gen.Condition) *observabilityTaskDo { + return o.withDO(o.DO.Or(conds...)) +} + +func (o observabilityTaskDo) Select(conds ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Select(conds...)) +} + +func (o observabilityTaskDo) Where(conds ...gen.Condition) *observabilityTaskDo { + return o.withDO(o.DO.Where(conds...)) +} + +func (o observabilityTaskDo) Order(conds ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Order(conds...)) +} + +func (o observabilityTaskDo) Distinct(cols ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Distinct(cols...)) +} + +func (o observabilityTaskDo) Omit(cols ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Omit(cols...)) +} + +func (o observabilityTaskDo) Join(table schema.Tabler, on ...field.Expr) 
*observabilityTaskDo { + return o.withDO(o.DO.Join(table, on...)) +} + +func (o observabilityTaskDo) LeftJoin(table schema.Tabler, on ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.LeftJoin(table, on...)) +} + +func (o observabilityTaskDo) RightJoin(table schema.Tabler, on ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.RightJoin(table, on...)) +} + +func (o observabilityTaskDo) Group(cols ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Group(cols...)) +} + +func (o observabilityTaskDo) Having(conds ...gen.Condition) *observabilityTaskDo { + return o.withDO(o.DO.Having(conds...)) +} + +func (o observabilityTaskDo) Limit(limit int) *observabilityTaskDo { + return o.withDO(o.DO.Limit(limit)) +} + +func (o observabilityTaskDo) Offset(offset int) *observabilityTaskDo { + return o.withDO(o.DO.Offset(offset)) +} + +func (o observabilityTaskDo) Scopes(funcs ...func(gen.Dao) gen.Dao) *observabilityTaskDo { + return o.withDO(o.DO.Scopes(funcs...)) +} + +func (o observabilityTaskDo) Unscoped() *observabilityTaskDo { + return o.withDO(o.DO.Unscoped()) +} + +func (o observabilityTaskDo) Create(values ...*model.ObservabilityTask) error { + if len(values) == 0 { + return nil + } + return o.DO.Create(values) +} + +func (o observabilityTaskDo) CreateInBatches(values []*model.ObservabilityTask, batchSize int) error { + return o.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (o observabilityTaskDo) Save(values ...*model.ObservabilityTask) error { + if len(values) == 0 { + return nil + } + return o.DO.Save(values) +} + +func (o observabilityTaskDo) First() (*model.ObservabilityTask, error) { + if result, err := o.DO.First(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTask), nil + } +} + +func (o observabilityTaskDo) Take() (*model.ObservabilityTask, error) { + if result, err := o.DO.Take(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTask), nil + } +} + +func (o observabilityTaskDo) Last() (*model.ObservabilityTask, error) { + if result, err := o.DO.Last(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTask), nil + } +} + +func (o observabilityTaskDo) Find() ([]*model.ObservabilityTask, error) { + result, err := o.DO.Find() + return result.([]*model.ObservabilityTask), err +} + +func (o observabilityTaskDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ObservabilityTask, err error) { + buf := make([]*model.ObservabilityTask, 0, batchSize) + err = o.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) 
}() + return fc(tx, batch) + }) + return results, err +} + +func (o observabilityTaskDo) FindInBatches(result *[]*model.ObservabilityTask, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return o.DO.FindInBatches(result, batchSize, fc) +} + +func (o observabilityTaskDo) Attrs(attrs ...field.AssignExpr) *observabilityTaskDo { + return o.withDO(o.DO.Attrs(attrs...)) +} + +func (o observabilityTaskDo) Assign(attrs ...field.AssignExpr) *observabilityTaskDo { + return o.withDO(o.DO.Assign(attrs...)) +} + +func (o observabilityTaskDo) Joins(fields ...field.RelationField) *observabilityTaskDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Joins(_f)) + } + return &o +} + +func (o observabilityTaskDo) Preload(fields ...field.RelationField) *observabilityTaskDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Preload(_f)) + } + return &o +} + +func (o observabilityTaskDo) FirstOrInit() (*model.ObservabilityTask, error) { + if result, err := o.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTask), nil + } +} + +func (o observabilityTaskDo) FirstOrCreate() (*model.ObservabilityTask, error) { + if result, err := o.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTask), nil + } +} + +func (o observabilityTaskDo) FindByPage(offset int, limit int) (result []*model.ObservabilityTask, count int64, err error) { + result, err = o.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = o.Offset(-1).Limit(-1).Count() + return +} + +func (o observabilityTaskDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = o.Count() + if err != nil { + return + } + + err = o.Offset(offset).Limit(limit).Scan(result) + return +} + +func (o observabilityTaskDo) Scan(result interface{}) (err error) { + return o.DO.Scan(result) +} + +func (o observabilityTaskDo) Delete(models ...*model.ObservabilityTask) (result gen.ResultInfo, err error) { + return o.DO.Delete(models) +} + +func (o *observabilityTaskDo) withDO(do gen.Dao) *observabilityTaskDo { + o.DO = *do.(*gen.DO) + return o +} diff --git a/backend/modules/observability/infra/repo/mysql/task.go b/backend/modules/observability/infra/repo/mysql/task.go new file mode 100644 index 000000000..b8dc41856 --- /dev/null +++ b/backend/modules/observability/infra/repo/mysql/task.go @@ -0,0 +1,438 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package mysql + +import ( + "context" + "errors" + "fmt" + "strconv" + "time" + + "github.com/coze-dev/coze-loop/backend/infra/db" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/filter" + tconv "github.com/coze-dev/coze-loop/backend/modules/observability/application/convertor/task" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/model" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/query" + genquery "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/query" + obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/logs" + "gorm.io/gen/field" + 
"gorm.io/gorm" +) + +// 默认限制条数 +const ( + DefaultLimit = 20 + MaxLimit = 501 + DefaultOffset = 0 +) + +type ListTaskParam struct { + WorkspaceIDs []int64 + TaskFilters *filter.TaskFilterFields + ReqLimit int32 + ReqOffset int32 + OrderBy *common.OrderBy +} + +//go:generate mockgen -destination=mocks/task.go -package=mocks . ITaskDao +type ITaskDao interface { + GetTask(ctx context.Context, id int64, workspaceID *int64, userID *string) (*model.ObservabilityTask, error) + CreateTask(ctx context.Context, po *model.ObservabilityTask) (int64, error) + UpdateTask(ctx context.Context, po *model.ObservabilityTask) error + DeleteTask(ctx context.Context, id int64, workspaceID int64, userID string) error + ListTasks(ctx context.Context, param ListTaskParam) ([]*model.ObservabilityTask, int64, error) + UpdateTaskWithOCC(ctx context.Context, id int64, workspaceID int64, updateMap map[string]interface{}) error + GetObjListWithTask(ctx context.Context) ([]string, []string, []*model.ObservabilityTask, error) +} + +func NewTaskDaoImpl(db db.Provider) ITaskDao { + return &TaskDaoImpl{ + dbMgr: db, + } +} + +type TaskDaoImpl struct { + dbMgr db.Provider +} + +func (v *TaskDaoImpl) GetTask(ctx context.Context, id int64, workspaceID *int64, userID *string) (*model.ObservabilityTask, error) { + q := genquery.Use(v.dbMgr.NewSession(ctx)).ObservabilityTask + qd := q.WithContext(ctx).Where(q.ID.Eq(id)) + if workspaceID != nil { + qd = qd.Where(q.WorkspaceID.Eq(*workspaceID)) + } + if userID != nil { + qd = qd.Where(q.CreatedBy.Eq(*userID)) + } + TaskPo, err := qd.First() + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("Task not found")) + } else { + return nil, errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + } + return TaskPo, nil +} + +func (v *TaskDaoImpl) CreateTask(ctx context.Context, po *model.ObservabilityTask) (int64, error) { + q := genquery.Use(v.dbMgr.NewSession(ctx)).ObservabilityTask + if err := q.WithContext(ctx).Create(po); err != nil { + if errors.Is(err, gorm.ErrDuplicatedKey) { + return 0, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("Task duplicate key")) + } else { + return 0, errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + } else { + return po.ID, nil + } +} + +func (v *TaskDaoImpl) UpdateTask(ctx context.Context, po *model.ObservabilityTask) error { + q := genquery.Use(v.dbMgr.NewSession(ctx)).ObservabilityTask + if err := q.WithContext(ctx).Save(po); err != nil { + return errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } else { + return nil + } +} + +func (v *TaskDaoImpl) DeleteTask(ctx context.Context, id int64, workspaceID int64, userID string) error { + q := genquery.Use(v.dbMgr.NewSession(ctx)).ObservabilityTask + qd := q.WithContext(ctx).Where(q.ID.Eq(id)).Where(q.WorkspaceID.Eq(workspaceID)).Where(q.CreatedBy.Eq(userID)) + info, err := qd.Delete() + if err != nil { + return errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + logs.CtxInfo(ctx, "%d rows deleted", info.RowsAffected) + return nil +} + +func (v *TaskDaoImpl) ListTasks(ctx context.Context, param ListTaskParam) ([]*model.ObservabilityTask, int64, error) { + q := genquery.Use(v.dbMgr.NewSession(ctx)) + qd := q.WithContext(ctx).ObservabilityTask + var total int64 + if len(param.WorkspaceIDs) != 0 { + qd = qd.Where(q.ObservabilityTask.WorkspaceID.In(param.WorkspaceIDs...)) + } + // 应用过滤条件 + qdf, err := v.applyTaskFilters(q, 
param.TaskFilters) + if err != nil { + return nil, 0, err + } + if qdf != nil { + qd = qd.Where(qdf) + } + // 计算总数 + total, err = qd.Count() + if err != nil { + return nil, 0, errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + // order by + qd = qd.Order(v.order(q, param.OrderBy.GetField(), param.OrderBy.GetIsAsc())) + // 计算分页参数 + limit, offset := calculatePagination(param.ReqLimit, param.ReqOffset) + results, err := qd.Limit(limit).Offset(offset).Find() + if err != nil { + return nil, total, errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + return results, total, nil +} + +// 处理任务过滤条件 +func (v *TaskDaoImpl) applyTaskFilters(q *query.Query, taskFilters *filter.TaskFilterFields) (field.Expr, error) { + if taskFilters == nil || len(taskFilters.FilterFields) == 0 { + return nil, nil + } + + // 收集所有过滤条件 + var expressions []field.Expr + + for _, f := range taskFilters.FilterFields { + expr, err := v.buildSingleFilterExpr(q, f) + if err != nil { + return nil, err + } + if expr != nil { + expressions = append(expressions, expr) + } + } + + if len(expressions) == 0 { + return nil, nil + } + + // 根据 QueryAndOr 关系组合条件 + return v.combineExpressions(expressions, taskFilters.GetQueryAndOr()), nil +} + +// 构建单个过滤条件 +func (v *TaskDaoImpl) buildSingleFilterExpr(q *query.Query, f *filter.TaskFilterField) (field.Expr, error) { + if f.FieldName == nil || f.QueryType == nil { + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("field name or query type is nil")) + } + + switch *f.FieldName { + case filter.TaskFieldNameTaskName: + return v.buildTaskNameFilter(q, f) + case filter.TaskFieldNameTaskType: + return v.buildTaskTypeFilter(q, f) + case filter.TaskFieldNameTaskStatus: + return v.buildTaskStatusFilter(q, f) + case filter.TaskFieldNameCreatedBy: + return v.buildCreatedByFilter(q, f) + case filter.TaskFieldNameSampleRate: + return v.buildSampleRateFilter(q, f) + case "task_id": + return v.buildTaskIDFilter(q, f) + case "updated_at": + return v.buildUpdateAtFilter(q, f) + default: + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithMsgParam("invalid filter field name: %s", *f.FieldName)) + } +} + +// 组合多个表达式 +func (v *TaskDaoImpl) combineExpressions(expressions []field.Expr, relation string) field.Expr { + if len(expressions) == 1 { + return expressions[0] + } + + if relation == filter.QueryRelationOr { + return field.Or(expressions...) + } + // 默认使用 AND 关系 + return field.And(expressions...) 
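+	// For example (illustrative, not generated output): two filter expressions on
+	// task_type and created_by combined under QueryRelationOr yield roughly
+	//	WHERE (task_type IN (?) OR created_by IN (?))
+	// while the default path above joins the same expressions with AND.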
+} + +// 构建任务名称过滤条件 +func (v *TaskDaoImpl) buildTaskNameFilter(q *query.Query, f *filter.TaskFilterField) (field.Expr, error) { + if len(f.Values) == 0 { + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("no value provided for task name query")) + } + + switch *f.QueryType { + case filter.QueryTypeEq: + return q.ObservabilityTask.Name.Eq(f.Values[0]), nil + case filter.QueryTypeMatch: + return q.ObservabilityTask.Name.Like(fmt.Sprintf("%%%s%%", f.Values[0])), nil + default: + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("invalid query type for task name")) + } +} + +// 构建任务类型过滤条件 +func (v *TaskDaoImpl) buildTaskTypeFilter(q *query.Query, f *filter.TaskFilterField) (field.Expr, error) { + if len(f.Values) == 0 { + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("no values provided for task type query")) + } + + switch *f.QueryType { + case filter.QueryTypeIn: + return q.ObservabilityTask.TaskType.In(f.Values...), nil + case filter.QueryTypeNotIn: + return q.ObservabilityTask.TaskType.NotIn(f.Values...), nil + default: + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("invalid query type for task type")) + } +} + +// 构建任务状态过滤条件 +func (v *TaskDaoImpl) buildTaskStatusFilter(q *query.Query, f *filter.TaskFilterField) (field.Expr, error) { + if len(f.Values) == 0 { + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("no values provided for task status query")) + } + + switch *f.QueryType { + case filter.QueryTypeIn: + return q.ObservabilityTask.TaskStatus.In(f.Values...), nil + case filter.QueryTypeNotIn: + return q.ObservabilityTask.TaskStatus.NotIn(f.Values...), nil + default: + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("invalid query type for task status")) + } +} + +// 构建创建者过滤条件 +func (v *TaskDaoImpl) buildCreatedByFilter(q *query.Query, f *filter.TaskFilterField) (field.Expr, error) { + if len(f.Values) == 0 { + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("no values provided for created_by query")) + } + + switch *f.QueryType { + case filter.QueryTypeIn: + return q.ObservabilityTask.CreatedBy.In(f.Values...), nil + case filter.QueryTypeNotIn: + return q.ObservabilityTask.CreatedBy.NotIn(f.Values...), nil + default: + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("invalid query type for created_by")) + } +} + +// 构建采样率过滤条件 +func (v *TaskDaoImpl) buildSampleRateFilter(q *query.Query, f *filter.TaskFilterField) (field.Expr, error) { + if len(f.Values) == 0 { + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("no value provided for sample rate")) + } + + // 解析采样率值 + sampleRate, err := strconv.ParseFloat(f.Values[0], 64) + if err != nil { + return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithMsgParam("invalid sample rate: %v", err.Error())) + } + + // 构建 JSON_EXTRACT 表达式 + switch *f.QueryType { + case filter.QueryTypeGte: + return field.NewUnsafeFieldRaw("CAST(JSON_EXTRACT(?, '$.sample_rate') AS DECIMAL(10,4)) >= ?", q.ObservabilityTask.Sampler, sampleRate), nil + case filter.QueryTypeLte: + return field.NewUnsafeFieldRaw("CAST(JSON_EXTRACT(?, '$.sample_rate') AS DECIMAL(10,4)) <= ?", q.ObservabilityTask.Sampler, sampleRate), nil + case filter.QueryTypeEq: + return field.NewUnsafeFieldRaw("CAST(JSON_EXTRACT(?, '$.sample_rate') AS DECIMAL(10,4)) = ?", 
q.ObservabilityTask.Sampler, sampleRate), nil
+	case filter.QueryTypeNotEq:
+		return field.NewUnsafeFieldRaw("CAST(JSON_EXTRACT(?, '$.sample_rate') AS DECIMAL(10,4)) != ?", q.ObservabilityTask.Sampler, sampleRate), nil
+	default:
+		return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("invalid query type for sample rate"))
+	}
+}
+
+// Build the task ID filter condition
+func (v *TaskDaoImpl) buildTaskIDFilter(q *query.Query, f *filter.TaskFilterField) (field.Expr, error) {
+	if len(f.Values) == 0 {
+		return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("no value provided for task id"))
+	}
+
+	var taskIDs []int64
+	for _, value := range f.Values {
+		taskID, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithMsgParam("invalid task id: %v", err.Error()))
+		}
+		taskIDs = append(taskIDs, taskID)
+	}
+
+	return q.ObservabilityTask.ID.In(taskIDs...), nil
+}
+
+// Build the updated_at filter condition
+func (v *TaskDaoImpl) buildUpdateAtFilter(q *query.Query, f *filter.TaskFilterField) (field.Expr, error) {
+	if len(f.Values) == 0 {
+		return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("no value provided for update at"))
+	}
+
+	updateAtLatest, err := strconv.ParseInt(f.Values[0], 10, 64)
+	if err != nil {
+		return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithMsgParam("invalid update at: %v", err.Error()))
+	}
+	switch *f.QueryType {
+	case filter.QueryTypeGt:
+		return q.ObservabilityTask.UpdatedAt.Gt(time.UnixMilli(updateAtLatest)), nil
+	case filter.QueryTypeLt:
+		return q.ObservabilityTask.UpdatedAt.Lt(time.UnixMilli(updateAtLatest)), nil
+	default:
+		return nil, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("invalid query type for update at"))
+	}
+}
+
+// Calculate pagination parameters
+func calculatePagination(reqLimit, reqOffset int32) (int, int) {
+	limit := DefaultLimit
+	if reqLimit > 0 && reqLimit < MaxLimit {
+		limit = int(reqLimit)
+	}
+
+	offset := DefaultOffset
+	if reqOffset > 0 {
+		offset = int(reqOffset)
+	}
+
+	return limit, offset
+}
+
+// order maps the requested sort field to a column expression, defaulting to created_at
+func (d *TaskDaoImpl) order(q *query.Query, orderBy string, asc bool) field.Expr {
+	var orderExpr field.OrderExpr
+	switch orderBy {
+	case "created_at":
+		orderExpr = q.ObservabilityTask.CreatedAt
+	default:
+		orderExpr = q.ObservabilityTask.CreatedAt
+	}
+	if asc {
+		return orderExpr.Asc()
+	}
+	return orderExpr.Desc()
+}
+
+func (v *TaskDaoImpl) UpdateTaskWithOCC(ctx context.Context, id int64, workspaceID int64, updateMap map[string]interface{}) error {
+	// TODO[xun]: optimistic locking
+	logs.CtxInfo(ctx, "UpdateTaskWithOCC, id:%d, workspaceID:%d, updateMap:%+v", id, workspaceID, updateMap)
+	q := genquery.Use(v.dbMgr.NewSession(ctx)).ObservabilityTask
+	qd := q.WithContext(ctx)
+	qd = qd.Where(q.ID.Eq(id)).Where(q.WorkspaceID.Eq(workspaceID))
+	for i := 0; i < MaxRetries; i++ {
+		// Use the original updated_at as the optimistic-lock condition
+		existingTask, err := qd.First()
+		if err != nil {
+			return errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode)
+		}
+		updateMap["updated_at"] = time.Now()
+		info, err := qd.Where(q.UpdatedAt.Eq(existingTask.UpdatedAt)).Updates(updateMap)
+		if err != nil {
+			return errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode)
+		}
+		logs.CtxInfo(ctx, "Task updated with OCC, id:%d, workspaceID:%d, rowsAffected:%d", id, workspaceID, info.RowsAffected)
+		if info.RowsAffected == 1 {
+			return nil
+		}
+		time.Sleep(RetryDelay)
+	}
+	return errorx.NewByCode(obErrorx.CommonMySqlErrorCode, errorx.WithExtraMsg("Task update failed with OCC"))
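+	// Illustrative usage (the status value and variable name here are assumptions,
+	// not taken from this change): callers pass only the columns they want to
+	// update and rely on the updated_at comparison above to detect concurrent
+	// writers, e.g.
+	//
+	//	err := taskDao.UpdateTaskWithOCC(ctx, taskID, workspaceID,
+	//		map[string]interface{}{"task_status": "running"})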
+}
+
+func (v *TaskDaoImpl) GetObjListWithTask(ctx context.Context) ([]string, []string, []*model.ObservabilityTask, error) {
+	q := genquery.Use(v.dbMgr.NewSession(ctx))
+	qd := q.WithContext(ctx).ObservabilityTask
+
+	// Query tasks whose status is not terminal and collect their workspace_id values
+	qd = qd.Where(q.ObservabilityTask.TaskStatus.NotIn("success", "disabled"))
+	//qd = qd.Select(q.ObservabilityTask.WorkspaceID).Distinct()
+
+	results, err := qd.Find()
+	if err != nil {
+		return nil, nil, nil, errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode)
+	}
+
+	// Convert to string slices
+	var spaceList []string
+	var botList []string
+	for _, task := range results {
+		spaceList = append(spaceList, strconv.FormatInt(task.WorkspaceID, 10))
+		spanFilter := tconv.SpanFilterPO2DO(ctx, task.SpanFilter)
+		if spanFilter != nil && spanFilter.Filters.FilterFields != nil {
+			extractBotIDFromFilters(spanFilter.Filters.FilterFields, &botList)
+		}
+	}
+
+	return spaceList, botList, nil, nil
+}
+
+// extractBotIDFromFilters recursively extracts bot_id values from the filters, including SubFilter
+func extractBotIDFromFilters(filterFields []*filter.FilterField, botList *[]string) {
+	for _, filterField := range filterFields {
+		if filterField == nil {
+			continue
+		}
+		// Check the FieldName of the current FilterField
+		if filterField.FieldName != nil && *filterField.FieldName == "bot_id" {
+			*botList = append(*botList, filterField.Values...)
+		}
+		// Recurse into SubFilter
+		if filterField.SubFilter != nil && filterField.SubFilter.FilterFields != nil {
+			extractBotIDFromFilters(filterField.SubFilter.FilterFields, botList)
+		}
+	}
+}
diff --git a/backend/modules/observability/infra/repo/mysql/task_run.go b/backend/modules/observability/infra/repo/mysql/task_run.go
new file mode 100755
index 000000000..6bad215a3
--- /dev/null
+++ b/backend/modules/observability/infra/repo/mysql/task_run.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2025 coze-dev Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package mysql
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"github.com/coze-dev/coze-loop/backend/infra/db"
+	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/common"
+	"github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/observability/domain/task"
+	"github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/model"
+	"github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/query"
+	genquery "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/query"
+	obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno"
+	"github.com/coze-dev/coze-loop/backend/pkg/errorx"
+	"github.com/coze-dev/coze-loop/backend/pkg/logs"
+	"gorm.io/gen/field"
+	"gorm.io/gorm"
+)
+
+// Default pagination limits
+const (
+	DefaultTaskRunLimit  = 20
+	MaxTaskRunLimit      = 501
+	DefaultTaskRunOffset = 0
+)
+
+type ListTaskRunParam struct {
+	WorkspaceID   *int64
+	TaskID        *int64
+	TaskRunStatus *task.RunStatus
+	ReqLimit      int32
+	ReqOffset     int32
+	OrderBy       *common.OrderBy
+}
+
+//go:generate mockgen -destination=mocks/task_run.go -package=mocks .
ITaskRunDao +type ITaskRunDao interface { + // 基础CRUD操作 + GetBackfillTaskRun(ctx context.Context, workspaceID *int64, taskID int64) (*model.ObservabilityTaskRun, error) + GetLatestNewDataTaskRun(ctx context.Context, workspaceID *int64, taskID int64) (*model.ObservabilityTaskRun, error) + CreateTaskRun(ctx context.Context, po *model.ObservabilityTaskRun) (int64, error) + UpdateTaskRun(ctx context.Context, po *model.ObservabilityTaskRun) error + ListTaskRuns(ctx context.Context, param ListTaskRunParam) ([]*model.ObservabilityTaskRun, int64, error) + UpdateTaskRunWithOCC(ctx context.Context, id int64, workspaceID int64, updateMap map[string]interface{}) error +} + +func NewTaskRunDaoImpl(db db.Provider) ITaskRunDao { + return &TaskRunDaoImpl{ + dbMgr: db, + } +} + +type TaskRunDaoImpl struct { + dbMgr db.Provider +} + +// TaskRun非终态状态定义 +var NonFinalTaskRunStatuses = []string{ + "pending", // 等待执行 + "running", // 执行中 + "paused", // 暂停 + "retrying", // 重试中 +} + +// 活跃状态定义(非终态状态的子集) +var ActiveTaskRunStatuses = []string{ + "running", // 执行中 + "retrying", // 重试中 +} + +// 计算分页参数 +func calculateTaskRunPagination(reqLimit, reqOffset int32) (int, int) { + limit := DefaultTaskRunLimit + if reqLimit > 0 && reqLimit < MaxTaskRunLimit { + limit = int(reqLimit) + } + + offset := DefaultTaskRunOffset + if reqOffset > 0 { + offset = int(reqOffset) + } + + return limit, offset +} + +func (v *TaskRunDaoImpl) GetBackfillTaskRun(ctx context.Context, workspaceID *int64, taskID int64) (*model.ObservabilityTaskRun, error) { + q := genquery.Use(v.dbMgr.NewSession(ctx)).ObservabilityTaskRun + qd := q.WithContext(ctx).Where(q.TaskType.Eq(task.TaskRunTypeBackFill)).Where(q.TaskID.Eq(taskID)) + + if workspaceID != nil { + qd = qd.Where(q.WorkspaceID.Eq(*workspaceID)) + } + taskRunPo, err := qd.First() + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } else { + return nil, errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + } + return taskRunPo, nil + +} +func (v *TaskRunDaoImpl) GetLatestNewDataTaskRun(ctx context.Context, workspaceID *int64, taskID int64) (*model.ObservabilityTaskRun, error) { + q := genquery.Use(v.dbMgr.NewSession(ctx)).ObservabilityTaskRun + qd := q.WithContext(ctx).Where(q.TaskType.Eq(task.TaskRunTypeNewData)).Where(q.TaskID.Eq(taskID)) + + if workspaceID != nil { + qd = qd.Where(q.WorkspaceID.Eq(*workspaceID)) + } + taskRunPo, err := qd.Order(q.CreatedAt.Desc()).First() + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return nil, nil + } else { + return nil, errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + } + return taskRunPo, nil + +} + +func (v *TaskRunDaoImpl) CreateTaskRun(ctx context.Context, po *model.ObservabilityTaskRun) (int64, error) { + q := genquery.Use(v.dbMgr.NewSession(ctx)).ObservabilityTaskRun + if err := q.WithContext(ctx).Create(po); err != nil { + if errors.Is(err, gorm.ErrDuplicatedKey) { + return 0, errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("TaskRun duplicate key")) + } else { + return 0, errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + } else { + return po.ID, nil + } +} + +func (v *TaskRunDaoImpl) UpdateTaskRun(ctx context.Context, po *model.ObservabilityTaskRun) error { + q := genquery.Use(v.dbMgr.NewSession(ctx)).ObservabilityTaskRun + if err := q.WithContext(ctx).Save(po); err != nil { + return errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } else { + return nil + } +} + +func (v *TaskRunDaoImpl) ListTaskRuns(ctx context.Context, param 
ListTaskRunParam) ([]*model.ObservabilityTaskRun, int64, error) { + q := genquery.Use(v.dbMgr.NewSession(ctx)) + qd := q.WithContext(ctx).ObservabilityTaskRun + var total int64 + + // TaskID过滤 + if param.TaskID != nil { + qd = qd.Where(q.ObservabilityTaskRun.TaskID.Eq(*param.TaskID)) + } + // TaskRunStatus过滤 + if param.TaskRunStatus != nil { + qd = qd.Where(q.ObservabilityTaskRun.RunStatus.Eq(*param.TaskRunStatus)) + } + // workspaceID过滤 + if param.WorkspaceID != nil { + qd = qd.Where(q.ObservabilityTaskRun.WorkspaceID.Eq(*param.WorkspaceID)) + } + + // 排序 + qd = qd.Order(v.order(q, param.OrderBy.GetField(), param.OrderBy.GetIsAsc())) + + // 计算总数 + total, err := qd.Count() + if err != nil { + return nil, 0, errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + + // 计算分页参数 + limit, offset := calculateTaskRunPagination(param.ReqLimit, param.ReqOffset) + results, err := qd.Limit(limit).Offset(offset).Find() + if err != nil { + return nil, total, errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + return results, total, nil +} + +func (d *TaskRunDaoImpl) order(q *query.Query, orderBy string, asc bool) field.Expr { + var orderExpr field.OrderExpr + switch orderBy { + case "created_at": + orderExpr = q.ObservabilityTaskRun.CreatedAt + case "run_start_at": + orderExpr = q.ObservabilityTaskRun.RunStartAt + case "run_end_at": + orderExpr = q.ObservabilityTaskRun.RunEndAt + case "updated_at": + orderExpr = q.ObservabilityTaskRun.UpdatedAt + default: + orderExpr = q.ObservabilityTaskRun.CreatedAt + } + if asc { + return orderExpr.Asc() + } + return orderExpr.Desc() +} + +const MaxRetries = 3 +const RetryDelay = 100 * time.Millisecond + +// UpdateTaskRunWithOCC 乐观并发控制更新 +func (v *TaskRunDaoImpl) UpdateTaskRunWithOCC(ctx context.Context, id int64, workspaceID int64, updateMap map[string]interface{}) error { + q := genquery.Use(v.dbMgr.NewSession(ctx)).ObservabilityTaskRun + qd := q.WithContext(ctx).Where(q.ID.Eq(id)) + if workspaceID != 0 { + qd = qd.Where(q.WorkspaceID.Eq(workspaceID)) + } + for i := 0; i < MaxRetries; i++ { + // 使用原始 updated_at 作为乐观锁条件 + existingTaskRun, err := qd.First() + if err != nil { + return errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + updateMap["updated_at"] = time.Now() + info, err := qd.Where(q.UpdatedAt.Eq(existingTaskRun.UpdatedAt)).Updates(updateMap) + if err != nil { + return errorx.WrapByCode(err, obErrorx.CommonMySqlErrorCode) + } + logs.CtxInfo(ctx, "TaskRun updated with OCC, id:%d, workspaceID:%d, rowsAffected:%d", id, workspaceID, info.RowsAffected) + if info.RowsAffected == 1 { + return nil + } + time.Sleep(RetryDelay) + } + return errorx.NewByCode(obErrorx.CommonMySqlErrorCode, errorx.WithExtraMsg("TaskRun update failed with OCC")) +} diff --git a/backend/modules/observability/infra/repo/redis/convert/task.go b/backend/modules/observability/infra/repo/redis/convert/task.go new file mode 100644 index 000000000..66bb3edb7 --- /dev/null +++ b/backend/modules/observability/infra/repo/redis/convert/task.go @@ -0,0 +1,37 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package convert + +import ( + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/json" + "github.com/samber/lo" +) + +func NewTaskConverter() *TaskConverter { + return &TaskConverter{} +} + +type TaskConverter struct{} + +func (TaskConverter) FromDO(qse *entity.ObservabilityTask) ([]byte, error) { + bytes, err := 
json.Marshal(qse) + if err != nil { + return nil, errorx.Wrapf(err, "json marshal failed") + } + return bytes, nil +} + +func (TaskConverter) ToDO(b []byte) (*entity.ObservabilityTask, error) { + qse := &entity.ObservabilityTask{} + if err := lo.TernaryF( + len(b) > 0, + func() error { return json.Unmarshal(b, qse) }, + func() error { return nil }, + ); err != nil { + return nil, errorx.Wrapf(err, "TaskExpt json unmarshal failed") + } + return qse, nil +} diff --git a/backend/modules/observability/infra/repo/redis/convert/task_run.go b/backend/modules/observability/infra/repo/redis/convert/task_run.go new file mode 100755 index 000000000..8e1b00aa5 --- /dev/null +++ b/backend/modules/observability/infra/repo/redis/convert/task_run.go @@ -0,0 +1,53 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package convert + +import ( + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/json" + "github.com/coze-dev/coze-loop/backend/pkg/lang/conv" + "github.com/samber/lo" +) + +func NewTaskRunConverter() *TaskRunConverter { + return &TaskRunConverter{} +} + +type TaskRunConverter struct{} + +func (TaskRunConverter) FromDO(taskRun *entity.TaskRun) ([]byte, error) { + bytes, err := json.Marshal(taskRun) + if err != nil { + return nil, errorx.Wrapf(err, "TaskRun json marshal failed") + } + return bytes, nil +} + +func (TaskRunConverter) ToDO(b []byte) (*entity.TaskRun, error) { + taskRun := &entity.TaskRun{} + bytes := toTaskRunBytes(b) + if err := lo.TernaryF( + len(bytes) > 0, + func() error { return json.Unmarshal(bytes, taskRun) }, + func() error { return nil }, + ); err != nil { + return nil, errorx.Wrapf(err, "TaskRun json unmarshal failed") + } + return taskRun, nil +} + +// toTaskRunBytes +// +//nolint:staticcheck,S1034 +func toTaskRunBytes(v any) []byte { + switch v.(type) { + case string: + return conv.UnsafeStringToBytes(v.(string)) + case []byte: + return v.([]byte) + default: + return nil + } +} \ No newline at end of file diff --git a/backend/modules/observability/infra/repo/redis/dao/task.go b/backend/modules/observability/infra/repo/redis/dao/task.go new file mode 100755 index 000000000..acfa9d3f6 --- /dev/null +++ b/backend/modules/observability/infra/repo/redis/dao/task.go @@ -0,0 +1,259 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package dao + +import ( + "context" + "crypto/md5" + "encoding/hex" + "fmt" + "time" + + "github.com/coze-dev/coze-loop/backend/infra/redis" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/redis/convert" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/json" + "github.com/coze-dev/coze-loop/backend/pkg/lang/conv" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +//go:generate mockgen -destination=mocks/Task_dao.go -package=mocks . 
ITaskDAO +type ITaskDAO interface { + // Task相关 + GetTask(ctx context.Context, id int64) (*entity.ObservabilityTask, error) + SetTask(ctx context.Context, task *entity.ObservabilityTask, ttl time.Duration) error + + // TaskCount相关 + GetTaskCount(ctx context.Context, taskID int64) (int64, error) + IncrTaskCount(ctx context.Context, taskID int64, ttl time.Duration) (int64, error) + DecrTaskCount(ctx context.Context, taskID int64, ttl time.Duration) (int64, error) + + // TaskRunCount相关 + GetTaskRunCount(ctx context.Context, taskID, taskRunID int64) (int64, error) + IncrTaskRunCount(ctx context.Context, taskID, taskRunID int64, ttl time.Duration) (int64, error) + DecrTaskRunCount(ctx context.Context, taskID, taskRunID int64, ttl time.Duration) (int64, error) +} + +type TaskDAOImpl struct { + cmdable redis.Cmdable +} + +// NewTaskDAO creates a new TaskDAO instance +func NewTaskDAO(cmdable redis.Cmdable) ITaskDAO { + return &TaskDAOImpl{ + cmdable: cmdable, + } +} + +func (q *TaskDAOImpl) makeTaskConfigKey(taskID int64) string { + return fmt.Sprintf("task_config_%d", taskID) +} +func (q *TaskDAOImpl) makeTaskCountCacheKey(taskID int64) string { + return fmt.Sprintf("count_%d", taskID) +} +func (q *TaskDAOImpl) makeTaskRunCountCacheKey(taskID, taskRunID int64) string { + return fmt.Sprintf("count_%d_%d", taskID, taskRunID) +} + +// generateFilterHash 生成过滤条件的 hash +func (q *TaskDAOImpl) generateFilterHash(param mysql.ListTaskParam) string { + if param.TaskFilters == nil { + return "no_filter" + } + + // 将过滤条件序列化为 JSON 字符串 + filterBytes, err := json.Marshal(param.TaskFilters) + if err != nil { + logs.Error("failed to marshal filter: %v", err) + return "no_filter" + } + + // 生成 MD5 hash + hash := md5.Sum(filterBytes) + return hex.EncodeToString(hash[:]) +} + +// GetTask 获取单个任务缓存 +func (p *TaskDAOImpl) GetTask(ctx context.Context, id int64) (*entity.ObservabilityTask, error) { + key := p.makeTaskConfigKey(id) + got, err := p.cmdable.Get(ctx, key).Result() + if err != nil { + if redis.IsNilError(err) { + return nil, nil // 缓存未命中 + } + return nil, errorx.Wrapf(err, "redis get task fail, key: %v", key) + } + return convert.NewTaskConverter().ToDO(conv.UnsafeStringToBytes(got)) +} + +// SetTask 设置单个任务缓存 +func (p *TaskDAOImpl) SetTask(ctx context.Context, task *entity.ObservabilityTask, ttl time.Duration) error { + bytes, err := convert.NewTaskConverter().FromDO(task) + if err != nil { + return err + } + key := p.makeTaskConfigKey(task.ID) + if err := p.cmdable.Set(ctx, key, bytes, ttl).Err(); err != nil { + logs.CtxError(ctx, "redis set task cache failed", "key", key, "err", err) + return errorx.Wrapf(err, "redis set task key: %v", key) + } + return nil +} + +// DeleteTaskList 删除任务列表缓存(支持模糊匹配) +func (p *TaskDAOImpl) DeleteTaskList(ctx context.Context, pattern string) error { + // 由于 redis.Cmdable 接口没有 Keys 方法,这里简化处理 + // 在实际生产环境中,可能需要使用 SCAN 命令或其他方式来实现模糊删除 + logs.CtxWarn(ctx, "DeleteTaskList with pattern not fully implemented", "pattern", pattern) + return nil +} + +// GetTaskCount 获取任务计数缓存 +func (p *TaskDAOImpl) GetTaskCount(ctx context.Context, taskID int64) (int64, error) { + key := p.makeTaskCountCacheKey(taskID) + got, err := p.cmdable.Get(ctx, key).Int64() + if err != nil { + if redis.IsNilError(err) { + return -1, nil // 缓存未命中,返回-1表示未缓存 + } + return 0, errorx.Wrapf(err, "redis get task count fail, key: %v", key) + } + return got, nil +} + +// GetTaskRunCount 获取任务运行计数缓存 +func (p *TaskDAOImpl) GetTaskRunCount(ctx context.Context, taskID, taskRunID int64) (int64, error) { + key := 
p.makeTaskRunCountCacheKey(taskID, taskRunID) + got, err := p.cmdable.Get(ctx, key).Int64() + if err != nil { + if redis.IsNilError(err) { + return -1, nil // 缓存未命中,返回-1表示未缓存 + } + return 0, errorx.Wrapf(err, "redis get task count fail, key: %v", key) + } + return got, nil +} + +// IncrTaskCount 原子增加任务计数 +func (p *TaskDAOImpl) IncrTaskCount(ctx context.Context, taskID int64, ttl time.Duration) (int64, error) { + key := p.makeTaskCountCacheKey(taskID) + result, err := p.cmdable.Incr(ctx, key).Result() + logs.CtxInfo(ctx, "redis incr task count success, taskID: %v, key: %v, result: %v", taskID, key, result) + if err != nil { + logs.CtxError(ctx, "redis incr task count failed", "key", key, "err", err) + return 0, errorx.Wrapf(err, "redis incr task count key: %v", key) + } + + // 设置TTL + if err = p.cmdable.Expire(ctx, key, ttl).Err(); err != nil { + logs.CtxWarn(ctx, "failed to set TTL for task count", "key", key, "err", err) + } + + return result, nil +} + +// DecrTaskCount 原子减少任务计数,确保不会变为负数 +func (p *TaskDAOImpl) DecrTaskCount(ctx context.Context, taskID int64, ttl time.Duration) (int64, error) { + key := p.makeTaskCountCacheKey(taskID) + // 先获取当前值 + current, err := p.cmdable.Get(ctx, key).Int64() + if err != nil { + if redis.IsNilError(err) { + // 如果key不存在,返回0 + return 0, nil + } + logs.CtxError(ctx, "redis get task count failed before decr", "key", key, "err", err) + return 0, errorx.Wrapf(err, "redis get task count key: %v", key) + } + + // 如果当前值已经是0或负数,不再减少 + if current <= 0 { + return 0, nil + } + + // 执行减操作 + result, err := p.cmdable.Decr(ctx, key).Result() + if err != nil { + logs.CtxError(ctx, "redis decr task count failed", "key", key, "err", err) + return 0, errorx.Wrapf(err, "redis decr task count key: %v", key) + } + logs.CtxInfo(ctx, "redis decr task count success, taskID: %v, key: %v, result: %v", taskID, key, result) + // 如果减少后变为负数,重置为0 + if result < 0 { + if err := p.cmdable.Set(ctx, key, 0, ttl).Err(); err != nil { + logs.CtxError(ctx, "failed to reset negative task count", "key", key, "err", err) + } + return 0, nil + } + + // 设置TTL + if err := p.cmdable.Expire(ctx, key, ttl).Err(); err != nil { + logs.CtxWarn(ctx, "failed to set TTL for task count", "key", key, "err", err) + } + + return result, nil +} + +// IncrTaskRunCount 原子增加任务运行计数 +func (p *TaskDAOImpl) IncrTaskRunCount(ctx context.Context, taskID, taskRunID int64, ttl time.Duration) (int64, error) { + key := p.makeTaskRunCountCacheKey(taskID, taskRunID) + result, err := p.cmdable.Incr(ctx, key).Result() + logs.CtxInfo(ctx, "redis incr task run count success, taskID: %v,taskRunID: %v, key: %v, result: %v", taskID, taskRunID, key, result) + if err != nil { + logs.CtxError(ctx, "redis incr task run count failed", "key", key, "err", err) + return 0, errorx.Wrapf(err, "redis incr task run count key: %v", key) + } + + // 设置TTL + if err := p.cmdable.Expire(ctx, key, ttl).Err(); err != nil { + logs.CtxWarn(ctx, "failed to set TTL for task run count", "key", key, "err", err) + } + + return result, nil +} + +// DecrTaskRunCount 原子减少任务运行计数,确保不会变为负数 +func (p *TaskDAOImpl) DecrTaskRunCount(ctx context.Context, taskID, taskRunID int64, ttl time.Duration) (int64, error) { + key := p.makeTaskRunCountCacheKey(taskID, taskRunID) + + // 先获取当前值 + current, err := p.cmdable.Get(ctx, key).Int64() + if err != nil { + if redis.IsNilError(err) { + // 如果key不存在,返回0 + return 0, nil + } + logs.CtxError(ctx, "redis get task run count failed before decr", "key", key, "err", err) + return 0, errorx.Wrapf(err, "redis get task run count key: %v", key) 
+ } + + // 如果当前值已经是0或负数,不再减少 + if current <= 0 { + return 0, nil + } + + // 执行减操作 + result, err := p.cmdable.Decr(ctx, key).Result() + if err != nil { + logs.CtxError(ctx, "redis decr task run count failed", "key", key, "err", err) + return 0, errorx.Wrapf(err, "redis decr task run count key: %v", key) + } + + // 如果减少后变为负数,重置为0 + if result < 0 { + if err := p.cmdable.Set(ctx, key, 0, ttl).Err(); err != nil { + logs.CtxError(ctx, "failed to reset negative task run count", "key", key, "err", err) + } + return 0, nil + } + + // 设置TTL + if err := p.cmdable.Expire(ctx, key, ttl).Err(); err != nil { + logs.CtxWarn(ctx, "failed to set TTL for task run count", "key", key, "err", err) + } + + return result, nil +} diff --git a/backend/modules/observability/infra/repo/redis/dao/task_run.go b/backend/modules/observability/infra/repo/redis/dao/task_run.go new file mode 100755 index 000000000..641dab054 --- /dev/null +++ b/backend/modules/observability/infra/repo/redis/dao/task_run.go @@ -0,0 +1,97 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package dao + +import ( + "context" + "fmt" + + "github.com/coze-dev/coze-loop/backend/infra/redis" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +//go:generate mockgen -destination=mocks/task_run_dao.go -package=mocks . ITaskRunDAO +type ITaskRunDAO interface { + // 成功/失败计数操作 + IncrTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) error + DecrTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) error + IncrTaskRunFailCount(ctx context.Context, taskID, taskRunID int64) error + GetTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) (int64, error) + GetTaskRunFailCount(ctx context.Context, taskID, taskRunID int64) (int64, error) +} + +type TaskRunDAOImpl struct { + cmdable redis.Cmdable +} + +// NewTaskRunDAO creates a new TaskRunDAO instance +func NewTaskRunDAO(cmdable redis.Cmdable) ITaskRunDAO { + return &TaskRunDAOImpl{ + cmdable: cmdable, + } +} + +// 缓存键生成方法 +func (q *TaskRunDAOImpl) makeTaskRunSuccessCountKey(taskID, taskRunID int64) string { + return fmt.Sprintf("taskrun:success_count:%d:%d", taskID, taskRunID) +} + +func (q *TaskRunDAOImpl) makeTaskRunFailCountKey(taskID, taskRunID int64) string { + return fmt.Sprintf("taskrun:fail_count:%d:%d", taskID, taskRunID) +} + +// IncrTaskRunSuccessCount 增加成功计数 +func (p *TaskRunDAOImpl) IncrTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) error { + key := p.makeTaskRunSuccessCountKey(taskID, taskRunID) + if err := p.cmdable.Incr(ctx, key).Err(); err != nil { + logs.CtxError(ctx, "redis incr taskrun success count failed, key:%v, err:%v", key, err) + return errorx.Wrapf(err, "redis incr taskrun success count key: %v", key) + } + return nil +} + +// IncrTaskRunFailCount 增加失败计数 +func (p *TaskRunDAOImpl) IncrTaskRunFailCount(ctx context.Context, taskID, taskRunID int64) error { + key := p.makeTaskRunFailCountKey(taskID, taskRunID) + if err := p.cmdable.Incr(ctx, key).Err(); err != nil { + logs.CtxError(ctx, "redis incr taskrun fail count failed, key:", "key", key, "err", err) + return errorx.Wrapf(err, "redis incr taskrun fail count key: %v", key) + } + return nil +} + +// GetTaskRunSuccessCount 获取成功计数 +func (p *TaskRunDAOImpl) GetTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) (int64, error) { + key := p.makeTaskRunSuccessCountKey(taskID, taskRunID) + got, err := p.cmdable.Get(ctx, key).Int64() + if err != nil { + if redis.IsNilError(err) { 
+ return 0, nil // 缓存未命中,返回0 + } + return 0, errorx.Wrapf(err, "redis get taskrun success count fail, key: %v", key) + } + return got, nil +} + +// GetTaskRunFailCount 获取失败计数 +func (p *TaskRunDAOImpl) GetTaskRunFailCount(ctx context.Context, taskID, taskRunID int64) (int64, error) { + key := p.makeTaskRunFailCountKey(taskID, taskRunID) + got, err := p.cmdable.Get(ctx, key).Int64() + if err != nil { + if redis.IsNilError(err) { + return 0, nil // 缓存未命中,返回0 + } + return 0, errorx.Wrapf(err, "redis get taskrun fail count fail, key: %v", key) + } + return got, nil +} +func (p *TaskRunDAOImpl) DecrTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) error { + key := p.makeTaskRunSuccessCountKey(taskID, taskRunID) + if err := p.cmdable.Decr(ctx, key).Err(); err != nil { + logs.CtxError(ctx, "redis decr taskrun success count failed", "key", key, "err", err) + return errorx.Wrapf(err, "redis decr taskrun success count key: %v", key) + } + return nil +} diff --git a/backend/modules/observability/infra/repo/task.go b/backend/modules/observability/infra/repo/task.go new file mode 100644 index 000000000..2dff0c2ae --- /dev/null +++ b/backend/modules/observability/infra/repo/task.go @@ -0,0 +1,329 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package repo + +import ( + "context" + "time" + + "github.com/coze-dev/coze-loop/backend/infra/idgen" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/entity" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/task/repo" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/convertor" + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/redis/dao" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +func NewTaskRepoImpl(TaskDao mysql.ITaskDao, idGenerator idgen.IIDGenerator, taskRedisDao dao.ITaskDAO, taskRunDao mysql.ITaskRunDao, taskRunRedisDao dao.ITaskRunDAO) repo.ITaskRepo { + return &TaskRepoImpl{ + TaskDao: TaskDao, + idGenerator: idGenerator, + TaskRedisDao: taskRedisDao, + TaskRunDao: taskRunDao, + TaskRunRedisDao: taskRunRedisDao, + } +} + +type TaskRepoImpl struct { + TaskDao mysql.ITaskDao + TaskRunDao mysql.ITaskRunDao + TaskRedisDao dao.ITaskDAO + TaskRunRedisDao dao.ITaskRunDAO + idGenerator idgen.IIDGenerator +} + +// 缓存 TTL 常量 +const ( + TaskDetailTTL = 30 * time.Minute // 单个任务缓存30分钟 + NonFinalTaskListTTL = 1 * time.Minute // 非最终状态任务缓存1分钟 + TaskCountTTL = 10 * time.Minute // 任务计数缓存10分钟 +) + +// 任务运行计数TTL常量 +const ( + TaskRunCountTTL = 10 * time.Minute // 任务运行计数缓存10分钟 +) + +func (v *TaskRepoImpl) GetTask(ctx context.Context, id int64, workspaceID *int64, userID *string) (*entity.ObservabilityTask, error) { + // 先查 Redis 缓存 + cachedTask, err := v.TaskRedisDao.GetTask(ctx, id) + if err != nil { + logs.CtxWarn(ctx, "failed to get task from redis cache", "id", id, "err", err) + } else if cachedTask != nil { + // 验证权限(workspaceID 和 userID) + if workspaceID != nil && cachedTask.WorkspaceID != *workspaceID { + return nil, nil // 权限不符,返回空 + } + if userID != nil && cachedTask.CreatedBy != *userID { + return nil, nil // 权限不符,返回空 + } + return cachedTask, nil + } + + // 缓存未命中,查询数据库 + TaskPO, err := v.TaskDao.GetTask(ctx, id, workspaceID, userID) + if err != nil { + return nil, err + } + + taskDO := convertor.TaskPO2DO(TaskPO) + + TaskRunPO, _, err := v.TaskRunDao.ListTaskRuns(ctx, 
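+		// Note: runs are fetched with a fixed page of 1000 below, so a task with
+		// more than 1000 runs will have the remainder omitted from TaskRuns.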
mysql.ListTaskRunParam{
+		WorkspaceID: ptr.Of(taskDO.WorkspaceID),
+		TaskID:      ptr.Of(taskDO.ID),
+		ReqLimit:    1000,
+		ReqOffset:   0,
+	})
+	if err != nil {
+		return nil, err
+	}
+	taskDO.TaskRuns = convertor.TaskRunsPO2DO(TaskRunPO)
+
+	// cache to Redis asynchronously
+	go func() {
+		if len(taskDO.TaskRuns) > 0 {
+			if err := v.TaskRedisDao.SetTask(context.Background(), taskDO, TaskDetailTTL); err != nil {
+				logs.CtxError(context.Background(), "failed to set task cache, id: %v, err: %v", id, err)
+			}
+		}
+	}()
+
+	return taskDO, nil
+}
+func (v *TaskRepoImpl) ListTasks(ctx context.Context, param mysql.ListTaskParam) ([]*entity.ObservabilityTask, int64, error) {
+	results, total, err := v.TaskDao.ListTasks(ctx, param)
+	if err != nil {
+		return nil, 0, err
+	}
+	resp := make([]*entity.ObservabilityTask, len(results))
+	for i, result := range results {
+		resp[i] = convertor.TaskPO2DO(result)
+	}
+	for _, t := range resp {
+		taskRuns, _, err := v.TaskRunDao.ListTaskRuns(ctx, mysql.ListTaskRunParam{
+			WorkspaceID: ptr.Of(t.WorkspaceID),
+			TaskID:      ptr.Of(t.ID),
+			ReqLimit:    param.ReqLimit,
+			ReqOffset:   param.ReqOffset,
+		})
+		if err != nil {
+			logs.CtxError(ctx, "ListTaskRuns err, taskID:%d, err:%v", t.ID, err)
+			continue
+		}
+		t.TaskRuns = convertor.TaskRunsPO2DO(taskRuns)
+	}
+
+	return resp, total, nil
+}
+func (v *TaskRepoImpl) CreateTask(ctx context.Context, do *entity.ObservabilityTask) (int64, error) {
+	id, err := v.idGenerator.GenID(ctx)
+	if err != nil {
+		return 0, err
+	}
+	TaskPo := convertor.TaskDO2PO(do)
+	TaskPo.ID = id
+
+	// write to the database first
+	createdID, err := v.TaskDao.CreateTask(ctx, TaskPo)
+	if err != nil {
+		return 0, err
+	}
+
+	return createdID, nil
+}
+func (v *TaskRepoImpl) UpdateTask(ctx context.Context, do *entity.ObservabilityTask) error {
+	TaskPo := convertor.TaskDO2PO(do)
+
+	// write to the database first
+	err := v.TaskDao.UpdateTask(ctx, TaskPo)
+	if err != nil {
+		return err
+	}
+	for _, tr := range do.TaskRuns {
+		TaskRunPo := convertor.TaskRunDO2PO(tr)
+		err = v.TaskRunDao.UpdateTaskRun(ctx, TaskRunPo)
+		if err != nil {
+			return err
+		}
+	}
+
+	// refresh the cache after the database writes succeed
+	go func() {
+		// refresh the single-task cache
+		if len(do.TaskRuns) > 0 {
+			if err := v.TaskRedisDao.SetTask(context.Background(), do, TaskDetailTTL); err != nil {
+				logs.CtxError(context.Background(), "failed to update task cache, id: %v, err: %v", do.ID, err)
+				return
+			}
+		}
+	}()
+
+	return nil
+}
+func (v *TaskRepoImpl) UpdateTaskWithOCC(ctx context.Context, id int64, workspaceID int64, updateMap map[string]interface{}) error {
+	// write to the database first
+	logs.CtxInfo(ctx, "UpdateTaskWithOCC, id:%d, workspaceID:%d, updateMap:%+v", id, workspaceID, updateMap)
+	err := v.TaskDao.UpdateTaskWithOCC(ctx, id, workspaceID, updateMap)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+func (v *TaskRepoImpl) GetObjListWithTask(ctx context.Context) ([]string, []string, []*entity.ObservabilityTask) {
+	var tasks []*entity.ObservabilityTask
+	spaceList, botList, results, err := v.TaskDao.GetObjListWithTask(ctx)
+	if err != nil {
+		logs.CtxWarn(ctx, "failed to get obj list with task from mysql, err: %v", err)
+		return nil, nil, nil
+	}
+	tasks = make([]*entity.ObservabilityTask, len(results))
+	for i, result := range results {
+		tasks[i] = convertor.TaskPO2DO(result)
+	}
+
+	return spaceList, botList, tasks
+}
+func (v *TaskRepoImpl) DeleteTask(ctx context.Context, do *entity.ObservabilityTask) error {
+	// delete from the database first
+	err := v.TaskDao.DeleteTask(ctx, do.ID, do.WorkspaceID, do.CreatedBy)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (v *TaskRepoImpl) CreateTaskRun(ctx context.Context, do *entity.TaskRun) (int64, error) {
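+	// Note: this path only writes MySQL; the task-detail cache that GetTask
+	// populates (TaskDetailTTL) is neither refreshed nor invalidated here, so a
+	// cached task may not reflect a newly created run until that entry expires
+	// or UpdateTask rewrites it.
+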
// 1. 生成ID + id, err := v.idGenerator.GenID(ctx) + if err != nil { + return 0, err + } + + // 2. 转换并设置ID + taskRunPo := convertor.TaskRunDO2PO(do) + taskRunPo.ID = id + + // 3. 数据库创建 + createdID, err := v.TaskRunDao.CreateTaskRun(ctx, taskRunPo) + if err != nil { + return 0, err + } + + // 4. 异步更新缓存 + do.ID = createdID + return createdID, nil +} +func (v *TaskRepoImpl) UpdateTaskRun(ctx context.Context, do *entity.TaskRun) error { + // 1. 转换并更新数据库 + taskRunPo := convertor.TaskRunDO2PO(do) + err := v.TaskRunDao.UpdateTaskRun(ctx, taskRunPo) + if err != nil { + return err + } + return nil +} +func (v *TaskRepoImpl) UpdateTaskRunWithOCC(ctx context.Context, id int64, workspaceID int64, updateMap map[string]interface{}) error { + // 先执行数据库操作 + logs.CtxInfo(ctx, "UpdateTaskRunWithOCC, id:%d, workspaceID:%d, updateMap:%+v", id, workspaceID, updateMap) + err := v.TaskRunDao.UpdateTaskRunWithOCC(ctx, id, workspaceID, updateMap) + if err != nil { + return err + } + + return nil +} +func (v *TaskRepoImpl) GetBackfillTaskRun(ctx context.Context, workspaceID *int64, taskID int64) (*entity.TaskRun, error) { + taskRunPo, err := v.TaskRunDao.GetBackfillTaskRun(ctx, workspaceID, taskID) + if err != nil { + return nil, err + } + if taskRunPo == nil { + return nil, nil + } + return convertor.TaskRunPO2DO(taskRunPo), nil +} +func (v *TaskRepoImpl) GetLatestNewDataTaskRun(ctx context.Context, workspaceID *int64, taskID int64) (*entity.TaskRun, error) { + taskRunPo, err := v.TaskRunDao.GetLatestNewDataTaskRun(ctx, workspaceID, taskID) + if err != nil { + return nil, err + } + if taskRunPo == nil { + return nil, nil + } + return convertor.TaskRunPO2DO(taskRunPo), nil +} + +func (v *TaskRepoImpl) GetTaskCount(ctx context.Context, taskID int64) (int64, error) { + count, err := v.TaskRedisDao.GetTaskCount(ctx, taskID) + if err != nil { + logs.CtxWarn(ctx, "failed to get task count from redis cache", "taskID", taskID, "err", err) + } else if count != 0 { + return count, nil + } + return count, nil +} +func (v *TaskRepoImpl) IncrTaskCount(ctx context.Context, taskID, ttl int64) error { + _, err := v.TaskRedisDao.IncrTaskCount(ctx, taskID, time.Duration(ttl)*time.Second) + if err != nil { + logs.CtxError(ctx, "failed to increment task count", "taskID", taskID, "err", err) + return err + } + return nil +} +func (v *TaskRepoImpl) DecrTaskCount(ctx context.Context, taskID, ttl int64) error { + _, err := v.TaskRedisDao.DecrTaskCount(ctx, taskID, time.Duration(ttl)*time.Second) + if err != nil { + logs.CtxError(ctx, "failed to decrement task count", "taskID", taskID, "err", err) + return err + } + return nil +} + +func (v *TaskRepoImpl) GetTaskRunCount(ctx context.Context, taskID, taskRunID int64) (int64, error) { + count, err := v.TaskRedisDao.GetTaskRunCount(ctx, taskID, taskRunID) + if err != nil { + logs.CtxWarn(ctx, "failed to get task run count from redis cache", "taskID", taskID, "err", err) + } else if count != 0 { + return count, nil + } + return count, nil +} +func (v *TaskRepoImpl) IncrTaskRunCount(ctx context.Context, taskID, taskRunID int64, ttl int64) error { + _, err := v.TaskRedisDao.IncrTaskRunCount(ctx, taskID, taskRunID, time.Duration(ttl)*time.Second) + if err != nil { + logs.CtxError(ctx, "failed to increment task run count", "taskID", taskID, "taskRunID", taskRunID, "err", err) + return err + } + return nil +} +func (v *TaskRepoImpl) DecrTaskRunCount(ctx context.Context, taskID, taskRunID int64, ttl int64) error { + _, err := v.TaskRedisDao.DecrTaskRunCount(ctx, taskID, taskRunID, 
time.Duration(ttl)*time.Second) + if err != nil { + logs.CtxError(ctx, "failed to decrement task run count", "taskID", taskID, "taskRunID", taskRunID, "err", err) + return err + } + return nil +} + +func (v *TaskRepoImpl) GetTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) (int64, error) { + return v.TaskRunRedisDao.GetTaskRunSuccessCount(ctx, taskID, taskRunID) +} +func (v *TaskRepoImpl) IncrTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) error { + return v.TaskRunRedisDao.IncrTaskRunSuccessCount(ctx, taskID, taskRunID) +} +func (v *TaskRepoImpl) DecrTaskRunSuccessCount(ctx context.Context, taskID, taskRunID int64) error { + return v.TaskRunRedisDao.DecrTaskRunSuccessCount(ctx, taskID, taskRunID) +} + +func (v *TaskRepoImpl) GetTaskRunFailCount(ctx context.Context, taskID, taskRunID int64) (int64, error) { + return v.TaskRunRedisDao.GetTaskRunFailCount(ctx, taskID, taskRunID) +} +func (v *TaskRepoImpl) IncrTaskRunFailCount(ctx context.Context, taskID, taskRunID int64) error { + return v.TaskRunRedisDao.IncrTaskRunFailCount(ctx, taskID, taskRunID) +} diff --git a/backend/modules/observability/infra/repo/trace.go b/backend/modules/observability/infra/repo/trace.go index e87020973..6f315205b 100644 --- a/backend/modules/observability/infra/repo/trace.go +++ b/backend/modules/observability/infra/repo/trace.go @@ -272,6 +272,9 @@ func (t *TraceCkRepoImpl) InsertAnnotations(ctx context.Context, param *repo.Ins Annotations: pos, }) } +func (t *TraceCkRepoImpl) UpsertAnnotation(ctx context.Context, param *repo.UpsertAnnotationParam) error { + return nil +} type queryTableCfg struct { SpanTables []string diff --git a/backend/modules/observability/infra/rpc/auth/auth.go b/backend/modules/observability/infra/rpc/auth/auth.go index dd01cb3f0..8ee836340 100644 --- a/backend/modules/observability/infra/rpc/auth/auth.go +++ b/backend/modules/observability/infra/rpc/auth/auth.go @@ -7,6 +7,8 @@ import ( "context" "strconv" + "github.com/bytedance/gg/gptr" + "github.com/coze-dev/coze-loop/backend/infra/middleware/session" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/auth" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/auth/authservice" authentity "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/foundation/domain/auth" @@ -121,6 +123,55 @@ func (a *AuthProviderImpl) CheckQueryPermission(ctx context.Context, workspaceId return a.CheckWorkspacePermission(ctx, rpc.AuthActionTraceList, workspaceId, true) } +func (a *AuthProviderImpl) CheckTaskPermission(ctx context.Context, action, workspaceId, taskId string) error { + userID := session.UserIDInCtxOrEmpty(ctx) + authInfos := make([]*authentity.SubjectActionObjects, 0) + authInfos = append(authInfos, &authentity.SubjectActionObjects{ + Subject: &authentity.AuthPrincipal{ + AuthPrincipalType: ptr.Of(authentity.AuthPrincipalType_User), + AuthUser: &authentity.AuthUser{ + UserID: gptr.Of(userID), + }, + }, + Action: ptr.Of(action), + Objects: []*authentity.AuthEntity{ + { + ID: ptr.Of(taskId), + EntityType: ptr.Of(authentity.AuthEntityTypeTraceTask), + SpaceID: gptr.Of(workspaceId), + OwnerUserID: gptr.Of(userID), + }, + }, + }) + + // 将workspaceId字符串转换为int64 + spaceID, err := strconv.ParseInt(workspaceId, 10, 64) + if err != nil { + return errorx.NewByCode(obErrorx.CommonInternalErrorCode) + } + + req := &auth.MCheckPermissionRequest{ + Auths: authInfos, + SpaceID: ptr.Of(spaceID), + } + resp, err := a.cli.MCheckPermission(ctx, req) + if err != nil { + return 
errorx.WrapByCode(err, obErrorx.CommercialCommonRPCErrorCodeCode) + } else if resp == nil { + logs.CtxWarn(ctx, "MCheckPermission returned nil response") + return errorx.NewByCode(obErrorx.CommercialCommonRPCErrorCodeCode) + } else if resp.BaseResp != nil && resp.BaseResp.StatusCode != 0 { + logs.CtxWarn(ctx, "MCheckPermission returned non-zero status code %d", resp.BaseResp.StatusCode) + return errorx.NewByCode(obErrorx.CommercialCommonRPCErrorCodeCode) + } + for _, r := range resp.AuthRes { + if r != nil && !r.GetIsAllowed() { + return errorx.NewByCode(obErrorx.CommonNoPermissionCode) + } + } + return nil +} + func NewAuthProvider(cli authservice.Client) rpc.IAuthProvider { return &AuthProviderImpl{ cli: cli, diff --git a/backend/modules/observability/infra/rpc/evaluation/evaluation.go b/backend/modules/observability/infra/rpc/evaluation/evaluation.go new file mode 100644 index 000000000..9e09b43c7 --- /dev/null +++ b/backend/modules/observability/infra/rpc/evaluation/evaluation.go @@ -0,0 +1,106 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package evaluation + +import ( + "context" + + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/experimentservice" + "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/expt" + "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc" + obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" + "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +type EvaluationProvider struct { + client experimentservice.Client +} + +func NewEvaluationRPCProvider(client experimentservice.Client) rpc.IEvaluationRPCAdapter { + return &EvaluationProvider{client: client} +} + +func (e *EvaluationProvider) SubmitExperiment(ctx context.Context, param *rpc.SubmitExperimentReq) (exptID, exptRunID int64, err error) { + if param.WorkspaceID == 0 { + return 0, 0, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("workspace ID is nil")) + } + logs.CtxInfo(ctx, "SubmitExperiment, param: %+v", param) + resp, err := e.client.SubmitExperiment(ctx, &expt.SubmitExperimentRequest{ + WorkspaceID: param.WorkspaceID, + EvalSetVersionID: param.EvalSetVersionID, + TargetVersionID: param.TargetVersionID, + EvaluatorVersionIds: param.EvaluatorVersionIds, + Name: param.Name, + Desc: param.Desc, + EvalSetID: param.EvalSetID, + TargetID: param.TargetID, + TargetFieldMapping: param.TargetFieldMapping, + EvaluatorFieldMapping: param.EvaluatorFieldMapping, + ItemConcurNum: param.ItemConcurNum, + EvaluatorsConcurNum: param.EvaluatorsConcurNum, + CreateEvalTargetParam: param.CreateEvalTargetParam, + ExptType: param.ExptType, + MaxAliveTime: param.MaxAliveTime, + SourceType: param.SourceType, + SourceID: param.SourceID, + Session: param.Session, + }) + if err != nil { + logs.CtxError(ctx, "SubmitExperiment failed, err: %v", err) + return 0, 0, errorx.NewByCode(obErrorx.CommonRPCErrorCode, errorx.WithExtraMsg("SubmitExperiment failed")) + } + return resp.GetExperiment().GetID(), resp.GetRunID(), nil +} + +func (e *EvaluationProvider) InvokeExperiment(ctx context.Context, param *rpc.InvokeExperimentReq) (addedItems int64, err error) { + if param.WorkspaceID == 0 { + return 0, errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("workspace ID is nil")) + } + if param.EvaluationSetID == 0 { + return 0, 
errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("evaluation set ID is nil")) + } + logs.CtxInfo(ctx, "InvokeExperiment, param: %+v", param) + resp, err := e.client.InvokeExperiment(ctx, &expt.InvokeExperimentRequest{ + WorkspaceID: param.WorkspaceID, + EvaluationSetID: param.EvaluationSetID, + Items: param.Items, + SkipInvalidItems: param.SkipInvalidItems, + AllowPartialAdd: param.AllowPartialAdd, + ExperimentID: param.ExperimentID, + ExperimentRunID: param.ExperimentRunID, + Ext: param.Ext, + Session: param.Session, + }) + if err != nil { + logs.CtxError(ctx, "InvokeExperiment failed, err: %v", err) + return 0, errorx.NewByCode(obErrorx.CommonRPCErrorCode, errorx.WithExtraMsg("InvokeExperiment failed")) + } + return int64(len(resp.GetAddedItems())), nil +} + +func (e *EvaluationProvider) FinishExperiment(ctx context.Context, param *rpc.FinishExperimentReq) (err error) { + if param.WorkspaceID == 0 { + return errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("workspace ID is nil")) + } + if param.ExperimentID == 0 { + return errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("experiment ID is nil")) + } + if param.ExperimentRunID == 0 { + return errorx.NewByCode(obErrorx.CommonInvalidParamCode, errorx.WithExtraMsg("experiment run ID is nil")) + } + logs.CtxInfo(ctx, "FinishExperiment, param: %+v", param) + _, err = e.client.FinishExperiment(ctx, &expt.FinishExperimentRequest{ + WorkspaceID: ptr.Of(param.WorkspaceID), + ExperimentID: ptr.Of(param.ExperimentID), + ExperimentRunID: ptr.Of(param.ExperimentRunID), + Session: param.Session, + }) + if err != nil { + logs.CtxError(ctx, "FinishExperiment failed, err: %v", err) + return errorx.NewByCode(obErrorx.CommonRPCErrorCode, errorx.WithExtraMsg("FinishExperiment failed")) + } + return nil +} diff --git a/backend/modules/observability/infra/rpc/evaluationset/evaluation_set.go b/backend/modules/observability/infra/rpc/evaluationset/evaluation_set.go index 6248b2aae..72a73b53b 100644 --- a/backend/modules/observability/infra/rpc/evaluationset/evaluation_set.go +++ b/backend/modules/observability/infra/rpc/evaluationset/evaluation_set.go @@ -42,20 +42,26 @@ func (d *EvaluationSetProvider) CreateDataset(ctx context.Context, dataset *enti if dataset.Name == "" { return 0, errorx.NewByCode(errno.CommonInvalidParamCode, errorx.WithExtraMsg("dataset name is required")) } - - userIDStr, _ := session.UserIDInCtx(ctx) - userID, err := strconv.ParseInt(userIDStr, 10, 64) - if err != nil { - return 0, errorx.NewByCode(errno.CommonInvalidParamCode, errorx.WithExtraMsg("userid is required")) + var sessionInfo *common.Session + if dataset.Seesion == nil { + userIDStr, _ := session.UserIDInCtx(ctx) + userID, err := strconv.ParseInt(userIDStr, 10, 64) + if err != nil { + return 0, errorx.NewByCode(errno.CommonInvalidParamCode, errorx.WithExtraMsg("userid is required")) + } + sessionInfo = &common.Session{ + UserID: gptr.Of(userID), + } + } else { + sessionInfo = dataset.Seesion } + // 构造请求 req := &eval_set.CreateEvaluationSetRequest{ WorkspaceID: dataset.WorkspaceID, Name: &dataset.Name, Description: &dataset.Description, - Session: &common.Session{ - UserID: lo.ToPtr(userID), - }, + Session: sessionInfo, } // 设置BizCategory @@ -68,7 +74,6 @@ func (d *EvaluationSetProvider) CreateDataset(ctx context.Context, dataset *enti if len(dataset.DatasetVersion.DatasetSchema.FieldSchemas) > 0 { req.EvaluationSetSchema = datasetSchemaDO2DTO(&dataset.DatasetVersion.DatasetSchema) } - resp, err := 
d.client.CreateEvaluationSet(ctx, req) if err != nil { logs.CtxError(ctx, "CreateEvaluationSet failed, workspace_id=%d, err=%#v", dataset.WorkspaceID, err) @@ -136,6 +141,11 @@ func (d *EvaluationSetProvider) GetDataset(ctx context.Context, workspaceID, dat return dataset, nil } +// SearchDatasets 搜索数据集 +func (d *EvaluationSetProvider) SearchDatasets(ctx context.Context, workspaceID int64, datasetID int64, category entity.DatasetCategory, name string) ([]*entity.Dataset, error) { + return nil, nil +} + // ClearDatasetItems 清空数据集项 func (d *EvaluationSetProvider) ClearDatasetItems(ctx context.Context, workspaceID, datasetID int64, category entity.DatasetCategory) error { if workspaceID == 0 { diff --git a/backend/modules/observability/infra/rpc/evaluator/evaluator.go b/backend/modules/observability/infra/rpc/evaluator/evaluator.go index 7cf9eae35..1ce48fc31 100644 --- a/backend/modules/observability/infra/rpc/evaluator/evaluator.go +++ b/backend/modules/observability/infra/rpc/evaluator/evaluator.go @@ -5,10 +5,15 @@ package evaluator import ( "context" + "strconv" + "github.com/bytedance/gg/gptr" + doevaluator "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/domain/evaluator" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/evaluator" "github.com/coze-dev/coze-loop/backend/kitex_gen/coze/loop/evaluation/evaluatorservice" "github.com/coze-dev/coze-loop/backend/modules/observability/domain/component/rpc" + obErrorx "github.com/coze-dev/coze-loop/backend/modules/observability/pkg/errno" + "github.com/coze-dev/coze-loop/backend/pkg/errorx" "github.com/coze-dev/coze-loop/backend/pkg/lang/ptr" "github.com/coze-dev/coze-loop/backend/pkg/logs" "github.com/samber/lo" @@ -50,3 +55,48 @@ func (r *EvaluatorRPCAdapter) BatchGetEvaluatorVersions(ctx context.Context, par }) return evalInfos, evalMap, nil } + +func (r *EvaluatorRPCAdapter) UpdateEvaluatorRecord(ctx context.Context, param *rpc.UpdateEvaluatorRecordParam) error { + workspaceID, err := strconv.ParseInt(param.WorkspaceID, 10, 64) + if err != nil { + return errorx.NewByCode(obErrorx.CommercialCommonInvalidParamCodeCode, errorx.WithExtraMsg("invalid workspace ID")) + } + _, err = r.client.UpdateEvaluatorRecord(ctx, &evaluator.UpdateEvaluatorRecordRequest{ + WorkspaceID: workspaceID, + EvaluatorRecordID: param.EvaluatorRecordID, + Correction: &doevaluator.Correction{ + Score: lo.ToPtr(param.Score), + Explain: lo.ToPtr(param.Reasoning), + UpdatedBy: lo.ToPtr(param.UpdatedBy), + }, + }) + if err != nil { + logs.CtxWarn(ctx, "update evaluator record failed: %v", err) + return err + } + + return nil +} + +func (r *EvaluatorRPCAdapter) ListEvaluators(ctx context.Context, param *rpc.ListEvaluatorsParam) ([]*rpc.Evaluator, error) { + resp, err := r.client.ListEvaluators(ctx, &evaluator.ListEvaluatorsRequest{ + WorkspaceID: param.WorkspaceID, + SearchName: param.Name, + PageSize: gptr.Of(int32(500)), + PageNumber: gptr.Of(int32(1)), + WithVersion: gptr.Of(true), + }) + if err != nil { + return nil, errorx.WrapByCode(err, obErrorx.CommercialCommonRPCErrorCodeCode) + } + logs.CtxInfo(ctx, "ListEvaluators: %v", resp.GetEvaluators()) + evalInfos := make([]*rpc.Evaluator, 0) + for _, eval := range resp.GetEvaluators() { + evalInfos = append(evalInfos, &rpc.Evaluator{ + EvaluatorVersionID: eval.GetCurrentVersion().GetID(), + EvaluatorName: eval.GetName(), + EvaluatorVersion: eval.GetCurrentVersion().GetVersion(), + }) + } + return evalInfos, nil +} diff --git a/backend/script/gorm_gen/generate.go 
b/backend/script/gorm_gen/generate.go index 86b3e8eae..266f48a51 100644 --- a/backend/script/gorm_gen/generate.go +++ b/backend/script/gorm_gen/generate.go @@ -14,13 +14,13 @@ import ( func main() { db := initDB() - generateForPrompt(db) - generateForData(db) - generateForEvaluationTarget(db) - generateForEvaluationEvaluator(db) - generateForEvaluationExpt(db) + //generateForPrompt(db) + //generateForData(db) + //generateForEvaluationTarget(db) + //generateForEvaluationEvaluator(db) + //generateForEvaluationExpt(db) generateForObservability(db) - generateForFoundation(db) + //generateForFoundation(db) } func initDB() *gorm.DB { @@ -250,8 +250,12 @@ func generateForObservability(db *gorm.DB) { // 为 observability_view 表添加软删除字段 observabilityView := g.GenerateModelAs("observability_view", "ObservabilityView") + observabilityTask := g.GenerateModelAs("task", "ObservabilityTask") + observabilityTaskRun := g.GenerateModelAs("auto_task_run", "ObservabilityTaskRun") g.ApplyBasic(observabilityView) + g.ApplyBasic(observabilityTask) + g.ApplyBasic(observabilityTaskRun) g.Execute() } diff --git a/backend/script/gorm_gen/modules/observability/infra/repo/mysql/cache/cache_manager.go b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/cache/cache_manager.go new file mode 100755 index 000000000..0174d836e --- /dev/null +++ b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/cache/cache_manager.go @@ -0,0 +1,246 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package cache + +import ( + "context" + "encoding/json" + "time" + + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/model" + "github.com/coze-dev/coze-loop/backend/pkg/logs" +) + +// TTL策略常量 +const ( + TaskTTL = 30 * time.Minute // 单个任务/任务运行记录:30分钟 + TaskListTTL = 10 * time.Minute // 任务列表:10分钟 + TaskRunTTL = 30 * time.Minute // 单个任务运行记录:30分钟 + TaskRunListTTL = 5 * time.Minute // 任务运行记录列表:5分钟 +) + +// CacheManager 缓存管理器接口 +type CacheManager interface { + // 任务缓存操作 + GetTask(ctx context.Context, id int64) (*model.ObservabilityTask, error) + SetTask(ctx context.Context, task *model.ObservabilityTask) error + DeleteTask(ctx context.Context, id int64) error + + // 任务列表缓存操作 + GetTaskList(ctx context.Context, key string) ([]*model.ObservabilityTask, error) + SetTaskList(ctx context.Context, key string, tasks []*model.ObservabilityTask) error + DeleteTaskListByPattern(ctx context.Context, pattern string) error + + // 任务运行记录缓存操作 + GetTaskRun(ctx context.Context, id int64) (*model.ObservabilityTaskRun, error) + SetTaskRun(ctx context.Context, taskRun *model.ObservabilityTaskRun) error + DeleteTaskRun(ctx context.Context, id int64) error + + // 任务运行记录列表缓存操作 + GetTaskRunList(ctx context.Context, key string) ([]*model.ObservabilityTaskRun, error) + SetTaskRunList(ctx context.Context, key string, taskRuns []*model.ObservabilityTaskRun) error + DeleteTaskRunListByPattern(ctx context.Context, pattern string) error +} + +// cacheManagerImpl 缓存管理器实现 +type cacheManagerImpl struct { + redisClient RedisClient + keyGenerator KeyGenerator +} + +// NewCacheManager 创建缓存管理器 +func NewCacheManager(redisClient RedisClient, keyGenerator KeyGenerator) CacheManager { + return &cacheManagerImpl{ + redisClient: redisClient, + keyGenerator: keyGenerator, + } +} + +// GetTask 获取任务缓存 +func (c *cacheManagerImpl) GetTask(ctx context.Context, id int64) (*model.ObservabilityTask, error) { + key := c.keyGenerator.TaskKey(id) + data, err := c.redisClient.Get(ctx, key) + if err != nil { + 
logs.CtxWarn(ctx, "Failed to get task from cache: %v", err) + return nil, err + } + + if data == "" { + return nil, nil // 缓存未命中 + } + + var task model.ObservabilityTask + if err := json.Unmarshal([]byte(data), &task); err != nil { + logs.CtxWarn(ctx, "Failed to unmarshal task from cache: %v", err) + return nil, err + } + + return &task, nil +} + +// SetTask 设置任务缓存 +func (c *cacheManagerImpl) SetTask(ctx context.Context, task *model.ObservabilityTask) error { + key := c.keyGenerator.TaskKey(task.ID) + data, err := json.Marshal(task) + if err != nil { + logs.CtxWarn(ctx, "Failed to marshal task for cache: %v", err) + return err + } + + if err := c.redisClient.Set(ctx, key, string(data), TaskTTL); err != nil { + logs.CtxWarn(ctx, "Failed to set task cache: %v", err) + return err + } + + return nil +} + +// DeleteTask 删除任务缓存 +func (c *cacheManagerImpl) DeleteTask(ctx context.Context, id int64) error { + key := c.keyGenerator.TaskKey(id) + if err := c.redisClient.Del(ctx, key); err != nil { + logs.CtxWarn(ctx, "Failed to delete task cache: %v", err) + return err + } + return nil +} + +// GetTaskList 获取任务列表缓存 +func (c *cacheManagerImpl) GetTaskList(ctx context.Context, key string) ([]*model.ObservabilityTask, error) { + data, err := c.redisClient.Get(ctx, key) + if err != nil { + logs.CtxWarn(ctx, "Failed to get task list from cache: %v", err) + return nil, err + } + + if data == "" { + return nil, nil // 缓存未命中 + } + + var tasks []*model.ObservabilityTask + if err := json.Unmarshal([]byte(data), &tasks); err != nil { + logs.CtxWarn(ctx, "Failed to unmarshal task list from cache: %v", err) + return nil, err + } + + return tasks, nil +} + +// SetTaskList 设置任务列表缓存 +func (c *cacheManagerImpl) SetTaskList(ctx context.Context, key string, tasks []*model.ObservabilityTask) error { + data, err := json.Marshal(tasks) + if err != nil { + logs.CtxWarn(ctx, "Failed to marshal task list for cache: %v", err) + return err + } + + if err := c.redisClient.Set(ctx, key, string(data), TaskListTTL); err != nil { + logs.CtxWarn(ctx, "Failed to set task list cache: %v", err) + return err + } + + return nil +} + +// DeleteTaskListByPattern 按模式删除任务列表缓存 +func (c *cacheManagerImpl) DeleteTaskListByPattern(ctx context.Context, pattern string) error { + // TODO: 实现按模式删除缓存的逻辑 + // 这里需要根据具体的Redis客户端实现来扫描和删除匹配的key + logs.CtxInfo(ctx, "Deleting task list cache by pattern: %s", pattern) + return nil +} + +// GetTaskRun 获取任务运行记录缓存 +func (c *cacheManagerImpl) GetTaskRun(ctx context.Context, id int64) (*model.ObservabilityTaskRun, error) { + key := c.keyGenerator.TaskRunKey(id) + data, err := c.redisClient.Get(ctx, key) + if err != nil { + logs.CtxWarn(ctx, "Failed to get task run from cache: %v", err) + return nil, err + } + + if data == "" { + return nil, nil // 缓存未命中 + } + + var taskRun model.ObservabilityTaskRun + if err := json.Unmarshal([]byte(data), &taskRun); err != nil { + logs.CtxWarn(ctx, "Failed to unmarshal task run from cache: %v", err) + return nil, err + } + + return &taskRun, nil +} + +// SetTaskRun 设置任务运行记录缓存 +func (c *cacheManagerImpl) SetTaskRun(ctx context.Context, taskRun *model.ObservabilityTaskRun) error { + key := c.keyGenerator.TaskRunKey(taskRun.ID) + data, err := json.Marshal(taskRun) + if err != nil { + logs.CtxWarn(ctx, "Failed to marshal task run for cache: %v", err) + return err + } + + if err := c.redisClient.Set(ctx, key, string(data), TaskRunTTL); err != nil { + logs.CtxWarn(ctx, "Failed to set task run cache: %v", err) + return err + } + + return nil +} + +// DeleteTaskRun 删除任务运行记录缓存 +func (c 
*cacheManagerImpl) DeleteTaskRun(ctx context.Context, id int64) error { + key := c.keyGenerator.TaskRunKey(id) + if err := c.redisClient.Del(ctx, key); err != nil { + logs.CtxWarn(ctx, "Failed to delete task run cache: %v", err) + return err + } + return nil +} + +// GetTaskRunList 获取任务运行记录列表缓存 +func (c *cacheManagerImpl) GetTaskRunList(ctx context.Context, key string) ([]*model.ObservabilityTaskRun, error) { + data, err := c.redisClient.Get(ctx, key) + if err != nil { + logs.CtxWarn(ctx, "Failed to get task run list from cache: %v", err) + return nil, err + } + + if data == "" { + return nil, nil // 缓存未命中 + } + + var taskRuns []*model.ObservabilityTaskRun + if err := json.Unmarshal([]byte(data), &taskRuns); err != nil { + logs.CtxWarn(ctx, "Failed to unmarshal task run list from cache: %v", err) + return nil, err + } + + return taskRuns, nil +} + +// SetTaskRunList 设置任务运行记录列表缓存 +func (c *cacheManagerImpl) SetTaskRunList(ctx context.Context, key string, taskRuns []*model.ObservabilityTaskRun) error { + data, err := json.Marshal(taskRuns) + if err != nil { + logs.CtxWarn(ctx, "Failed to marshal task run list for cache: %v", err) + return err + } + + if err := c.redisClient.Set(ctx, key, string(data), TaskRunListTTL); err != nil { + logs.CtxWarn(ctx, "Failed to set task run list cache: %v", err) + return err + } + + return nil +} + +// DeleteTaskRunListByPattern 按模式删除任务运行记录列表缓存 +func (c *cacheManagerImpl) DeleteTaskRunListByPattern(ctx context.Context, pattern string) error { + // TODO: 实现按模式删除缓存的逻辑 + // 这里需要根据具体的Redis客户端实现来扫描和删除匹配的key + logs.CtxInfo(ctx, "Deleting task run list cache by pattern: %s", pattern) + return nil +} diff --git a/backend/script/gorm_gen/modules/observability/infra/repo/mysql/cache/key_generator.go b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/cache/key_generator.go new file mode 100755 index 000000000..ab995bac0 --- /dev/null +++ b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/cache/key_generator.go @@ -0,0 +1,55 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package cache + +import ( + "crypto/md5" + "fmt" + "strconv" +) + +// KeyGenerator 缓存Key生成器接口 +type KeyGenerator interface { + TaskKey(id int64) string + TaskListKey(workspaceID int64, filterHash string) string + TaskRunKey(id int64) string + TaskRunListKey(taskID int64, filterHash string) string + GenerateFilterHash(param interface{}) string +} + +// keyGeneratorImpl Key生成器实现 +type keyGeneratorImpl struct{} + +// NewKeyGenerator 创建Key生成器 +func NewKeyGenerator() KeyGenerator { + return &keyGeneratorImpl{} +} + +// TaskKey 生成任务缓存Key +func (k *keyGeneratorImpl) TaskKey(id int64) string { + return fmt.Sprintf("observability:task:%d", id) +} + +// TaskListKey 生成任务列表缓存Key +func (k *keyGeneratorImpl) TaskListKey(workspaceID int64, filterHash string) string { + return fmt.Sprintf("observability:task:list:%d:%s", workspaceID, filterHash) +} + +// TaskRunKey 生成任务运行记录缓存Key +func (k *keyGeneratorImpl) TaskRunKey(id int64) string { + return fmt.Sprintf("observability:taskrun:%d", id) +} + +// TaskRunListKey 生成任务运行记录列表缓存Key +func (k *keyGeneratorImpl) TaskRunListKey(taskID int64, filterHash string) string { + return fmt.Sprintf("observability:taskrun:list:%d:%s", taskID, filterHash) +} + +// GenerateFilterHash 基于查询参数生成哈希值 +func (k *keyGeneratorImpl) GenerateFilterHash(param interface{}) string { + // 将参数转换为字符串并生成MD5哈希 + paramStr := fmt.Sprintf("%+v", param) + hash := md5.Sum([]byte(paramStr)) + return fmt.Sprintf("%x", hash) +} \ No newline at end of 
file diff --git a/backend/script/gorm_gen/modules/observability/infra/repo/mysql/cache/redis_client.go b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/cache/redis_client.go new file mode 100755 index 000000000..733fb051e --- /dev/null +++ b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/cache/redis_client.go @@ -0,0 +1,52 @@ +// Copyright (c) 2025 coze-dev Authors +// SPDX-License-Identifier: Apache-2.0 + +package cache + +import ( + "context" + "time" +) + +// RedisClient Redis客户端接口 +type RedisClient interface { + Get(ctx context.Context, key string) (string, error) + Set(ctx context.Context, key string, value interface{}, expiration time.Duration) error + Del(ctx context.Context, keys ...string) error + Exists(ctx context.Context, keys ...string) (int64, error) +} + +// redisClientImpl Redis客户端实现 +type redisClientImpl struct { + // 这里可以注入具体的Redis客户端,比如go-redis或者内部的Redis SDK + // 为了示例,我们暂时使用接口,实际使用时需要注入具体实现 +} + +// NewRedisClient 创建Redis客户端 +func NewRedisClient() RedisClient { + return &redisClientImpl{} +} + +func (r *redisClientImpl) Get(ctx context.Context, key string) (string, error) { + // TODO: 实现Redis GET操作 + // 实际实现时需要注入具体的Redis客户端 + return "", nil +} + +func (r *redisClientImpl) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) error { + // TODO: 实现Redis SET操作 + // 实际实现时需要注入具体的Redis客户端 + return nil +} + +func (r *redisClientImpl) Del(ctx context.Context, keys ...string) error { + // TODO: 实现Redis DEL操作 + // 实际实现时需要注入具体的Redis客户端 + return nil +} + +func (r *redisClientImpl) Exists(ctx context.Context, keys ...string) (int64, error) { + // TODO: 实现Redis EXISTS操作 + // 实际实现时需要注入具体的Redis客户端 + return 0, nil +} \ No newline at end of file diff --git a/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/model/auto_task_run.gen.go b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/model/auto_task_run.gen.go new file mode 100644 index 000000000..9d717accd --- /dev/null +++ b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/model/auto_task_run.gen.go @@ -0,0 +1,32 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. 
+ +package model + +import ( + "time" +) + +const TableNameObservabilityTaskRun = "auto_task_run" + +// ObservabilityTaskRun Task Run信息 +type ObservabilityTaskRun struct { + ID int64 `gorm:"column:id;type:bigint(20) unsigned;primaryKey;comment:TaskRun ID" json:"id"` // TaskRun ID + WorkspaceID int64 `gorm:"column:workspace_id;type:bigint(20) unsigned;not null;index:idx_workspace_task,priority:1;comment:空间ID" json:"workspace_id"` // 空间ID + TaskID int64 `gorm:"column:task_id;type:bigint(20) unsigned;not null;index:idx_task_id_status,priority:1;index:idx_workspace_task,priority:2;comment:Task ID" json:"task_id"` // Task ID + TaskType string `gorm:"column:task_type;type:varchar(64);not null;comment:Task类型" json:"task_type"` // Task类型 + RunStatus string `gorm:"column:run_status;type:varchar(64);not null;index:idx_task_id_status,priority:2;comment:Task Run状态" json:"run_status"` // Task Run状态 + RunDetail *string `gorm:"column:run_detail;type:json;comment:Task Run运行状态详情" json:"run_detail"` // Task Run运行状态详情 + BackfillDetail *string `gorm:"column:backfill_detail;type:json;comment:历史回溯Task Run运行状态详情" json:"backfill_detail"` // 历史回溯Task Run运行状态详情 + RunStartAt time.Time `gorm:"column:run_start_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:任务开始时间" json:"run_start_at"` // 任务开始时间 + RunEndAt time.Time `gorm:"column:run_end_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:任务结束时间" json:"run_end_at"` // 任务结束时间 + RunConfig *string `gorm:"column:run_config;type:json;comment:相关Run的配置信息" json:"run_config"` // 相关Run的配置信息 + CreatedAt time.Time `gorm:"column:created_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:创建时间" json:"created_at"` // 创建时间 + UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:更新时间" json:"updated_at"` // 更新时间 +} + +// TableName ObservabilityTaskRun's table name +func (*ObservabilityTaskRun) TableName() string { + return TableNameObservabilityTaskRun +} diff --git a/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/model/observability_view.gen.go b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/model/observability_view.gen.go new file mode 100644 index 000000000..42b21d1cc --- /dev/null +++ b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/model/observability_view.gen.go @@ -0,0 +1,36 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. 
+ +package model + +import ( + "time" + + "gorm.io/gorm" +) + +const TableNameObservabilityView = "observability_view" + +// ObservabilityView 观测视图信息 +type ObservabilityView struct { + ID int64 `gorm:"column:id;type:bigint(20) unsigned;primaryKey;autoIncrement:true;comment:主键ID" json:"id"` // 主键ID + EnterpriseID string `gorm:"column:enterprise_id;type:varchar(200);not null;comment:企业id" json:"enterprise_id"` // 企业id + WorkspaceID int64 `gorm:"column:workspace_id;type:bigint(20) unsigned;not null;index:idx_space_id_created_by,priority:1;comment:空间 ID" json:"workspace_id"` // 空间 ID + ViewName string `gorm:"column:view_name;type:varchar(256);not null;comment:视图名称" json:"view_name"` // 视图名称 + PlatformType string `gorm:"column:platform_type;type:varchar(128);not null;comment:数据来源" json:"platform_type"` // 数据来源 + SpanListType string `gorm:"column:span_list_type;type:varchar(128);not null;comment:列表信息" json:"span_list_type"` // 列表信息 + Filters string `gorm:"column:filters;type:varchar(2048);not null;comment:过滤条件信息" json:"filters"` // 过滤条件信息 + CreatedAt time.Time `gorm:"column:created_at;type:datetime;not null;default:CURRENT_TIMESTAMP;comment:创建时间" json:"created_at"` // 创建时间 + CreatedBy string `gorm:"column:created_by;type:varchar(128);not null;index:idx_space_id_created_by,priority:2;comment:创建人" json:"created_by"` // 创建人 + UpdatedAt time.Time `gorm:"column:updated_at;type:datetime;not null;default:CURRENT_TIMESTAMP;comment:修改时间" json:"updated_at"` // 修改时间 + UpdatedBy string `gorm:"column:updated_by;type:varchar(128);not null;comment:修改人" json:"updated_by"` // 修改人 + IsDeleted bool `gorm:"column:is_deleted;type:tinyint(1);not null;comment:是否删除, 0 表示未删除, 1 表示已删除" json:"is_deleted"` // 是否删除, 0 表示未删除, 1 表示已删除 + DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:datetime;comment:删除时间" json:"deleted_at"` // 删除时间 + DeletedBy string `gorm:"column:deleted_by;type:varchar(128);not null;comment:删除人" json:"deleted_by"` // 删除人 +} + +// TableName ObservabilityView's table name +func (*ObservabilityView) TableName() string { + return TableNameObservabilityView +} diff --git a/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/model/task.gen.go b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/model/task.gen.go new file mode 100644 index 000000000..bf3d8ebb9 --- /dev/null +++ b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/model/task.gen.go @@ -0,0 +1,35 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. 
+ +package model + +import ( + "time" +) + +const TableNameObservabilityTask = "task" + +// ObservabilityTask 任务信息 +type ObservabilityTask struct { + ID int64 `gorm:"column:id;type:bigint(20) unsigned;primaryKey;comment:Task ID" json:"id"` // Task ID + WorkspaceID int64 `gorm:"column:workspace_id;type:bigint(20) unsigned;not null;index:idx_space_id_status,priority:1;index:idx_space_id_type,priority:1;comment:空间ID" json:"workspace_id"` // 空间ID + Name string `gorm:"column:name;type:varchar(128);not null;comment:任务名称" json:"name"` // 任务名称 + Description *string `gorm:"column:description;type:varchar(2048);comment:任务描述" json:"description"` // 任务描述 + TaskType string `gorm:"column:task_type;type:varchar(64);not null;index:idx_space_id_type,priority:2;comment:任务类型" json:"task_type"` // 任务类型 + TaskStatus string `gorm:"column:task_status;type:varchar(64);not null;index:idx_space_id_status,priority:2;comment:任务状态" json:"task_status"` // 任务状态 + TaskDetail *string `gorm:"column:task_detail;type:json;comment:任务运行状态详情" json:"task_detail"` // 任务运行状态详情 + SpanFilter *string `gorm:"column:span_filter;type:json;comment:span 过滤条件" json:"span_filter"` // span 过滤条件 + EffectiveTime *string `gorm:"column:effective_time;type:json;comment:生效时间" json:"effective_time"` // 生效时间 + Sampler *string `gorm:"column:sampler;type:json;comment:采样器" json:"sampler"` // 采样器 + TaskConfig *string `gorm:"column:task_config;type:json;comment:相关任务的配置信息" json:"task_config"` // 相关任务的配置信息 + CreatedAt time.Time `gorm:"column:created_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:创建时间" json:"created_at"` // 创建时间 + UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;not null;default:CURRENT_TIMESTAMP;comment:更新时间" json:"updated_at"` // 更新时间 + CreatedBy string `gorm:"column:created_by;type:varchar(128);not null;comment:创建人" json:"created_by"` // 创建人 + UpdatedBy string `gorm:"column:updated_by;type:varchar(128);not null;comment:更新人" json:"updated_by"` // 更新人 +} + +// TableName ObservabilityTask's table name +func (*ObservabilityTask) TableName() string { + return TableNameObservabilityTask +} diff --git a/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/query/auto_task_run.gen.go b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/query/auto_task_run.gen.go new file mode 100644 index 000000000..1a0a3f03f --- /dev/null +++ b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/query/auto_task_run.gen.go @@ -0,0 +1,376 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. + +package query + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "gorm.io/gen" + "gorm.io/gen/field" + + "gorm.io/plugin/dbresolver" + + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/model" +) + +func newObservabilityTaskRun(db *gorm.DB, opts ...gen.DOOption) observabilityTaskRun { + _observabilityTaskRun := observabilityTaskRun{} + + _observabilityTaskRun.observabilityTaskRunDo.UseDB(db, opts...) 
+ _observabilityTaskRun.observabilityTaskRunDo.UseModel(&model.ObservabilityTaskRun{}) + + tableName := _observabilityTaskRun.observabilityTaskRunDo.TableName() + _observabilityTaskRun.ALL = field.NewAsterisk(tableName) + _observabilityTaskRun.ID = field.NewInt64(tableName, "id") + _observabilityTaskRun.WorkspaceID = field.NewInt64(tableName, "workspace_id") + _observabilityTaskRun.TaskID = field.NewInt64(tableName, "task_id") + _observabilityTaskRun.TaskType = field.NewString(tableName, "task_type") + _observabilityTaskRun.RunStatus = field.NewString(tableName, "run_status") + _observabilityTaskRun.RunDetail = field.NewString(tableName, "run_detail") + _observabilityTaskRun.BackfillDetail = field.NewString(tableName, "backfill_detail") + _observabilityTaskRun.RunStartAt = field.NewTime(tableName, "run_start_at") + _observabilityTaskRun.RunEndAt = field.NewTime(tableName, "run_end_at") + _observabilityTaskRun.RunConfig = field.NewString(tableName, "run_config") + _observabilityTaskRun.CreatedAt = field.NewTime(tableName, "created_at") + _observabilityTaskRun.UpdatedAt = field.NewTime(tableName, "updated_at") + + _observabilityTaskRun.fillFieldMap() + + return _observabilityTaskRun +} + +// observabilityTaskRun Task Run信息 +type observabilityTaskRun struct { + observabilityTaskRunDo observabilityTaskRunDo + + ALL field.Asterisk + ID field.Int64 // TaskRun ID + WorkspaceID field.Int64 // 空间ID + TaskID field.Int64 // Task ID + TaskType field.String // Task类型 + RunStatus field.String // Task Run状态 + RunDetail field.String // Task Run运行状态详情 + BackfillDetail field.String // 历史回溯Task Run运行状态详情 + RunStartAt field.Time // 任务开始时间 + RunEndAt field.Time // 任务结束时间 + RunConfig field.String // 相关Run的配置信息 + CreatedAt field.Time // 创建时间 + UpdatedAt field.Time // 更新时间 + + fieldMap map[string]field.Expr +} + +func (o observabilityTaskRun) Table(newTableName string) *observabilityTaskRun { + o.observabilityTaskRunDo.UseTable(newTableName) + return o.updateTableName(newTableName) +} + +func (o observabilityTaskRun) As(alias string) *observabilityTaskRun { + o.observabilityTaskRunDo.DO = *(o.observabilityTaskRunDo.As(alias).(*gen.DO)) + return o.updateTableName(alias) +} + +func (o *observabilityTaskRun) updateTableName(table string) *observabilityTaskRun { + o.ALL = field.NewAsterisk(table) + o.ID = field.NewInt64(table, "id") + o.WorkspaceID = field.NewInt64(table, "workspace_id") + o.TaskID = field.NewInt64(table, "task_id") + o.TaskType = field.NewString(table, "task_type") + o.RunStatus = field.NewString(table, "run_status") + o.RunDetail = field.NewString(table, "run_detail") + o.BackfillDetail = field.NewString(table, "backfill_detail") + o.RunStartAt = field.NewTime(table, "run_start_at") + o.RunEndAt = field.NewTime(table, "run_end_at") + o.RunConfig = field.NewString(table, "run_config") + o.CreatedAt = field.NewTime(table, "created_at") + o.UpdatedAt = field.NewTime(table, "updated_at") + + o.fillFieldMap() + + return o +} + +func (o *observabilityTaskRun) WithContext(ctx context.Context) *observabilityTaskRunDo { + return o.observabilityTaskRunDo.WithContext(ctx) +} + +func (o observabilityTaskRun) TableName() string { return o.observabilityTaskRunDo.TableName() } + +func (o observabilityTaskRun) Alias() string { return o.observabilityTaskRunDo.Alias() } + +func (o observabilityTaskRun) Columns(cols ...field.Expr) gen.Columns { + return o.observabilityTaskRunDo.Columns(cols...) 
+} + +func (o *observabilityTaskRun) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := o.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (o *observabilityTaskRun) fillFieldMap() { + o.fieldMap = make(map[string]field.Expr, 12) + o.fieldMap["id"] = o.ID + o.fieldMap["workspace_id"] = o.WorkspaceID + o.fieldMap["task_id"] = o.TaskID + o.fieldMap["task_type"] = o.TaskType + o.fieldMap["run_status"] = o.RunStatus + o.fieldMap["run_detail"] = o.RunDetail + o.fieldMap["backfill_detail"] = o.BackfillDetail + o.fieldMap["run_start_at"] = o.RunStartAt + o.fieldMap["run_end_at"] = o.RunEndAt + o.fieldMap["run_config"] = o.RunConfig + o.fieldMap["created_at"] = o.CreatedAt + o.fieldMap["updated_at"] = o.UpdatedAt +} + +func (o observabilityTaskRun) clone(db *gorm.DB) observabilityTaskRun { + o.observabilityTaskRunDo.ReplaceConnPool(db.Statement.ConnPool) + return o +} + +func (o observabilityTaskRun) replaceDB(db *gorm.DB) observabilityTaskRun { + o.observabilityTaskRunDo.ReplaceDB(db) + return o +} + +type observabilityTaskRunDo struct{ gen.DO } + +func (o observabilityTaskRunDo) Debug() *observabilityTaskRunDo { + return o.withDO(o.DO.Debug()) +} + +func (o observabilityTaskRunDo) WithContext(ctx context.Context) *observabilityTaskRunDo { + return o.withDO(o.DO.WithContext(ctx)) +} + +func (o observabilityTaskRunDo) ReadDB() *observabilityTaskRunDo { + return o.Clauses(dbresolver.Read) +} + +func (o observabilityTaskRunDo) WriteDB() *observabilityTaskRunDo { + return o.Clauses(dbresolver.Write) +} + +func (o observabilityTaskRunDo) Session(config *gorm.Session) *observabilityTaskRunDo { + return o.withDO(o.DO.Session(config)) +} + +func (o observabilityTaskRunDo) Clauses(conds ...clause.Expression) *observabilityTaskRunDo { + return o.withDO(o.DO.Clauses(conds...)) +} + +func (o observabilityTaskRunDo) Returning(value interface{}, columns ...string) *observabilityTaskRunDo { + return o.withDO(o.DO.Returning(value, columns...)) +} + +func (o observabilityTaskRunDo) Not(conds ...gen.Condition) *observabilityTaskRunDo { + return o.withDO(o.DO.Not(conds...)) +} + +func (o observabilityTaskRunDo) Or(conds ...gen.Condition) *observabilityTaskRunDo { + return o.withDO(o.DO.Or(conds...)) +} + +func (o observabilityTaskRunDo) Select(conds ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.Select(conds...)) +} + +func (o observabilityTaskRunDo) Where(conds ...gen.Condition) *observabilityTaskRunDo { + return o.withDO(o.DO.Where(conds...)) +} + +func (o observabilityTaskRunDo) Order(conds ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.Order(conds...)) +} + +func (o observabilityTaskRunDo) Distinct(cols ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.Distinct(cols...)) +} + +func (o observabilityTaskRunDo) Omit(cols ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.Omit(cols...)) +} + +func (o observabilityTaskRunDo) Join(table schema.Tabler, on ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.Join(table, on...)) +} + +func (o observabilityTaskRunDo) LeftJoin(table schema.Tabler, on ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.LeftJoin(table, on...)) +} + +func (o observabilityTaskRunDo) RightJoin(table schema.Tabler, on ...field.Expr) *observabilityTaskRunDo { + return o.withDO(o.DO.RightJoin(table, on...)) +} + +func (o observabilityTaskRunDo) Group(cols ...field.Expr) *observabilityTaskRunDo { + return 
o.withDO(o.DO.Group(cols...)) +} + +func (o observabilityTaskRunDo) Having(conds ...gen.Condition) *observabilityTaskRunDo { + return o.withDO(o.DO.Having(conds...)) +} + +func (o observabilityTaskRunDo) Limit(limit int) *observabilityTaskRunDo { + return o.withDO(o.DO.Limit(limit)) +} + +func (o observabilityTaskRunDo) Offset(offset int) *observabilityTaskRunDo { + return o.withDO(o.DO.Offset(offset)) +} + +func (o observabilityTaskRunDo) Scopes(funcs ...func(gen.Dao) gen.Dao) *observabilityTaskRunDo { + return o.withDO(o.DO.Scopes(funcs...)) +} + +func (o observabilityTaskRunDo) Unscoped() *observabilityTaskRunDo { + return o.withDO(o.DO.Unscoped()) +} + +func (o observabilityTaskRunDo) Create(values ...*model.ObservabilityTaskRun) error { + if len(values) == 0 { + return nil + } + return o.DO.Create(values) +} + +func (o observabilityTaskRunDo) CreateInBatches(values []*model.ObservabilityTaskRun, batchSize int) error { + return o.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (o observabilityTaskRunDo) Save(values ...*model.ObservabilityTaskRun) error { + if len(values) == 0 { + return nil + } + return o.DO.Save(values) +} + +func (o observabilityTaskRunDo) First() (*model.ObservabilityTaskRun, error) { + if result, err := o.DO.First(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTaskRun), nil + } +} + +func (o observabilityTaskRunDo) Take() (*model.ObservabilityTaskRun, error) { + if result, err := o.DO.Take(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTaskRun), nil + } +} + +func (o observabilityTaskRunDo) Last() (*model.ObservabilityTaskRun, error) { + if result, err := o.DO.Last(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTaskRun), nil + } +} + +func (o observabilityTaskRunDo) Find() ([]*model.ObservabilityTaskRun, error) { + result, err := o.DO.Find() + return result.([]*model.ObservabilityTaskRun), err +} + +func (o observabilityTaskRunDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ObservabilityTaskRun, err error) { + buf := make([]*model.ObservabilityTaskRun, 0, batchSize) + err = o.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) 
}() + return fc(tx, batch) + }) + return results, err +} + +func (o observabilityTaskRunDo) FindInBatches(result *[]*model.ObservabilityTaskRun, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return o.DO.FindInBatches(result, batchSize, fc) +} + +func (o observabilityTaskRunDo) Attrs(attrs ...field.AssignExpr) *observabilityTaskRunDo { + return o.withDO(o.DO.Attrs(attrs...)) +} + +func (o observabilityTaskRunDo) Assign(attrs ...field.AssignExpr) *observabilityTaskRunDo { + return o.withDO(o.DO.Assign(attrs...)) +} + +func (o observabilityTaskRunDo) Joins(fields ...field.RelationField) *observabilityTaskRunDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Joins(_f)) + } + return &o +} + +func (o observabilityTaskRunDo) Preload(fields ...field.RelationField) *observabilityTaskRunDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Preload(_f)) + } + return &o +} + +func (o observabilityTaskRunDo) FirstOrInit() (*model.ObservabilityTaskRun, error) { + if result, err := o.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTaskRun), nil + } +} + +func (o observabilityTaskRunDo) FirstOrCreate() (*model.ObservabilityTaskRun, error) { + if result, err := o.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTaskRun), nil + } +} + +func (o observabilityTaskRunDo) FindByPage(offset int, limit int) (result []*model.ObservabilityTaskRun, count int64, err error) { + result, err = o.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = o.Offset(-1).Limit(-1).Count() + return +} + +func (o observabilityTaskRunDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = o.Count() + if err != nil { + return + } + + err = o.Offset(offset).Limit(limit).Scan(result) + return +} + +func (o observabilityTaskRunDo) Scan(result interface{}) (err error) { + return o.DO.Scan(result) +} + +func (o observabilityTaskRunDo) Delete(models ...*model.ObservabilityTaskRun) (result gen.ResultInfo, err error) { + return o.DO.Delete(models) +} + +func (o *observabilityTaskRunDo) withDO(do gen.Dao) *observabilityTaskRunDo { + o.DO = *do.(*gen.DO) + return o +} diff --git a/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/query/observability_view.gen.go b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/query/observability_view.gen.go new file mode 100644 index 000000000..56e6404eb --- /dev/null +++ b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/query/observability_view.gen.go @@ -0,0 +1,384 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. + +package query + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "gorm.io/gen" + "gorm.io/gen/field" + + "gorm.io/plugin/dbresolver" + + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/model" +) + +func newObservabilityView(db *gorm.DB, opts ...gen.DOOption) observabilityView { + _observabilityView := observabilityView{} + + _observabilityView.observabilityViewDo.UseDB(db, opts...) 
+ _observabilityView.observabilityViewDo.UseModel(&model.ObservabilityView{}) + + tableName := _observabilityView.observabilityViewDo.TableName() + _observabilityView.ALL = field.NewAsterisk(tableName) + _observabilityView.ID = field.NewInt64(tableName, "id") + _observabilityView.EnterpriseID = field.NewString(tableName, "enterprise_id") + _observabilityView.WorkspaceID = field.NewInt64(tableName, "workspace_id") + _observabilityView.ViewName = field.NewString(tableName, "view_name") + _observabilityView.PlatformType = field.NewString(tableName, "platform_type") + _observabilityView.SpanListType = field.NewString(tableName, "span_list_type") + _observabilityView.Filters = field.NewString(tableName, "filters") + _observabilityView.CreatedAt = field.NewTime(tableName, "created_at") + _observabilityView.CreatedBy = field.NewString(tableName, "created_by") + _observabilityView.UpdatedAt = field.NewTime(tableName, "updated_at") + _observabilityView.UpdatedBy = field.NewString(tableName, "updated_by") + _observabilityView.IsDeleted = field.NewBool(tableName, "is_deleted") + _observabilityView.DeletedAt = field.NewField(tableName, "deleted_at") + _observabilityView.DeletedBy = field.NewString(tableName, "deleted_by") + + _observabilityView.fillFieldMap() + + return _observabilityView +} + +// observabilityView 观测视图信息 +type observabilityView struct { + observabilityViewDo observabilityViewDo + + ALL field.Asterisk + ID field.Int64 // 主键ID + EnterpriseID field.String // 企业id + WorkspaceID field.Int64 // 空间 ID + ViewName field.String // 视图名称 + PlatformType field.String // 数据来源 + SpanListType field.String // 列表信息 + Filters field.String // 过滤条件信息 + CreatedAt field.Time // 创建时间 + CreatedBy field.String // 创建人 + UpdatedAt field.Time // 修改时间 + UpdatedBy field.String // 修改人 + IsDeleted field.Bool // 是否删除, 0 表示未删除, 1 表示已删除 + DeletedAt field.Field // 删除时间 + DeletedBy field.String // 删除人 + + fieldMap map[string]field.Expr +} + +func (o observabilityView) Table(newTableName string) *observabilityView { + o.observabilityViewDo.UseTable(newTableName) + return o.updateTableName(newTableName) +} + +func (o observabilityView) As(alias string) *observabilityView { + o.observabilityViewDo.DO = *(o.observabilityViewDo.As(alias).(*gen.DO)) + return o.updateTableName(alias) +} + +func (o *observabilityView) updateTableName(table string) *observabilityView { + o.ALL = field.NewAsterisk(table) + o.ID = field.NewInt64(table, "id") + o.EnterpriseID = field.NewString(table, "enterprise_id") + o.WorkspaceID = field.NewInt64(table, "workspace_id") + o.ViewName = field.NewString(table, "view_name") + o.PlatformType = field.NewString(table, "platform_type") + o.SpanListType = field.NewString(table, "span_list_type") + o.Filters = field.NewString(table, "filters") + o.CreatedAt = field.NewTime(table, "created_at") + o.CreatedBy = field.NewString(table, "created_by") + o.UpdatedAt = field.NewTime(table, "updated_at") + o.UpdatedBy = field.NewString(table, "updated_by") + o.IsDeleted = field.NewBool(table, "is_deleted") + o.DeletedAt = field.NewField(table, "deleted_at") + o.DeletedBy = field.NewString(table, "deleted_by") + + o.fillFieldMap() + + return o +} + +func (o *observabilityView) WithContext(ctx context.Context) *observabilityViewDo { + return o.observabilityViewDo.WithContext(ctx) +} + +func (o observabilityView) TableName() string { return o.observabilityViewDo.TableName() } + +func (o observabilityView) Alias() string { return o.observabilityViewDo.Alias() } + +func (o observabilityView) Columns(cols ...field.Expr) 
gen.Columns { + return o.observabilityViewDo.Columns(cols...) +} + +func (o *observabilityView) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := o.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (o *observabilityView) fillFieldMap() { + o.fieldMap = make(map[string]field.Expr, 14) + o.fieldMap["id"] = o.ID + o.fieldMap["enterprise_id"] = o.EnterpriseID + o.fieldMap["workspace_id"] = o.WorkspaceID + o.fieldMap["view_name"] = o.ViewName + o.fieldMap["platform_type"] = o.PlatformType + o.fieldMap["span_list_type"] = o.SpanListType + o.fieldMap["filters"] = o.Filters + o.fieldMap["created_at"] = o.CreatedAt + o.fieldMap["created_by"] = o.CreatedBy + o.fieldMap["updated_at"] = o.UpdatedAt + o.fieldMap["updated_by"] = o.UpdatedBy + o.fieldMap["is_deleted"] = o.IsDeleted + o.fieldMap["deleted_at"] = o.DeletedAt + o.fieldMap["deleted_by"] = o.DeletedBy +} + +func (o observabilityView) clone(db *gorm.DB) observabilityView { + o.observabilityViewDo.ReplaceConnPool(db.Statement.ConnPool) + return o +} + +func (o observabilityView) replaceDB(db *gorm.DB) observabilityView { + o.observabilityViewDo.ReplaceDB(db) + return o +} + +type observabilityViewDo struct{ gen.DO } + +func (o observabilityViewDo) Debug() *observabilityViewDo { + return o.withDO(o.DO.Debug()) +} + +func (o observabilityViewDo) WithContext(ctx context.Context) *observabilityViewDo { + return o.withDO(o.DO.WithContext(ctx)) +} + +func (o observabilityViewDo) ReadDB() *observabilityViewDo { + return o.Clauses(dbresolver.Read) +} + +func (o observabilityViewDo) WriteDB() *observabilityViewDo { + return o.Clauses(dbresolver.Write) +} + +func (o observabilityViewDo) Session(config *gorm.Session) *observabilityViewDo { + return o.withDO(o.DO.Session(config)) +} + +func (o observabilityViewDo) Clauses(conds ...clause.Expression) *observabilityViewDo { + return o.withDO(o.DO.Clauses(conds...)) +} + +func (o observabilityViewDo) Returning(value interface{}, columns ...string) *observabilityViewDo { + return o.withDO(o.DO.Returning(value, columns...)) +} + +func (o observabilityViewDo) Not(conds ...gen.Condition) *observabilityViewDo { + return o.withDO(o.DO.Not(conds...)) +} + +func (o observabilityViewDo) Or(conds ...gen.Condition) *observabilityViewDo { + return o.withDO(o.DO.Or(conds...)) +} + +func (o observabilityViewDo) Select(conds ...field.Expr) *observabilityViewDo { + return o.withDO(o.DO.Select(conds...)) +} + +func (o observabilityViewDo) Where(conds ...gen.Condition) *observabilityViewDo { + return o.withDO(o.DO.Where(conds...)) +} + +func (o observabilityViewDo) Order(conds ...field.Expr) *observabilityViewDo { + return o.withDO(o.DO.Order(conds...)) +} + +func (o observabilityViewDo) Distinct(cols ...field.Expr) *observabilityViewDo { + return o.withDO(o.DO.Distinct(cols...)) +} + +func (o observabilityViewDo) Omit(cols ...field.Expr) *observabilityViewDo { + return o.withDO(o.DO.Omit(cols...)) +} + +func (o observabilityViewDo) Join(table schema.Tabler, on ...field.Expr) *observabilityViewDo { + return o.withDO(o.DO.Join(table, on...)) +} + +func (o observabilityViewDo) LeftJoin(table schema.Tabler, on ...field.Expr) *observabilityViewDo { + return o.withDO(o.DO.LeftJoin(table, on...)) +} + +func (o observabilityViewDo) RightJoin(table schema.Tabler, on ...field.Expr) *observabilityViewDo { + return o.withDO(o.DO.RightJoin(table, on...)) +} + +func (o observabilityViewDo) Group(cols ...field.Expr) *observabilityViewDo { + 
return o.withDO(o.DO.Group(cols...)) +} + +func (o observabilityViewDo) Having(conds ...gen.Condition) *observabilityViewDo { + return o.withDO(o.DO.Having(conds...)) +} + +func (o observabilityViewDo) Limit(limit int) *observabilityViewDo { + return o.withDO(o.DO.Limit(limit)) +} + +func (o observabilityViewDo) Offset(offset int) *observabilityViewDo { + return o.withDO(o.DO.Offset(offset)) +} + +func (o observabilityViewDo) Scopes(funcs ...func(gen.Dao) gen.Dao) *observabilityViewDo { + return o.withDO(o.DO.Scopes(funcs...)) +} + +func (o observabilityViewDo) Unscoped() *observabilityViewDo { + return o.withDO(o.DO.Unscoped()) +} + +func (o observabilityViewDo) Create(values ...*model.ObservabilityView) error { + if len(values) == 0 { + return nil + } + return o.DO.Create(values) +} + +func (o observabilityViewDo) CreateInBatches(values []*model.ObservabilityView, batchSize int) error { + return o.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (o observabilityViewDo) Save(values ...*model.ObservabilityView) error { + if len(values) == 0 { + return nil + } + return o.DO.Save(values) +} + +func (o observabilityViewDo) First() (*model.ObservabilityView, error) { + if result, err := o.DO.First(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityView), nil + } +} + +func (o observabilityViewDo) Take() (*model.ObservabilityView, error) { + if result, err := o.DO.Take(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityView), nil + } +} + +func (o observabilityViewDo) Last() (*model.ObservabilityView, error) { + if result, err := o.DO.Last(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityView), nil + } +} + +func (o observabilityViewDo) Find() ([]*model.ObservabilityView, error) { + result, err := o.DO.Find() + return result.([]*model.ObservabilityView), err +} + +func (o observabilityViewDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ObservabilityView, err error) { + buf := make([]*model.ObservabilityView, 0, batchSize) + err = o.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) 
}() + return fc(tx, batch) + }) + return results, err +} + +func (o observabilityViewDo) FindInBatches(result *[]*model.ObservabilityView, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return o.DO.FindInBatches(result, batchSize, fc) +} + +func (o observabilityViewDo) Attrs(attrs ...field.AssignExpr) *observabilityViewDo { + return o.withDO(o.DO.Attrs(attrs...)) +} + +func (o observabilityViewDo) Assign(attrs ...field.AssignExpr) *observabilityViewDo { + return o.withDO(o.DO.Assign(attrs...)) +} + +func (o observabilityViewDo) Joins(fields ...field.RelationField) *observabilityViewDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Joins(_f)) + } + return &o +} + +func (o observabilityViewDo) Preload(fields ...field.RelationField) *observabilityViewDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Preload(_f)) + } + return &o +} + +func (o observabilityViewDo) FirstOrInit() (*model.ObservabilityView, error) { + if result, err := o.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityView), nil + } +} + +func (o observabilityViewDo) FirstOrCreate() (*model.ObservabilityView, error) { + if result, err := o.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityView), nil + } +} + +func (o observabilityViewDo) FindByPage(offset int, limit int) (result []*model.ObservabilityView, count int64, err error) { + result, err = o.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = o.Offset(-1).Limit(-1).Count() + return +} + +func (o observabilityViewDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = o.Count() + if err != nil { + return + } + + err = o.Offset(offset).Limit(limit).Scan(result) + return +} + +func (o observabilityViewDo) Scan(result interface{}) (err error) { + return o.DO.Scan(result) +} + +func (o observabilityViewDo) Delete(models ...*model.ObservabilityView) (result gen.ResultInfo, err error) { + return o.DO.Delete(models) +} + +func (o *observabilityViewDo) withDO(do gen.Dao) *observabilityViewDo { + o.DO = *do.(*gen.DO) + return o +} diff --git a/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/query/task.gen.go b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/query/task.gen.go new file mode 100644 index 000000000..6822b179b --- /dev/null +++ b/backend/script/gorm_gen/modules/observability/infra/repo/mysql/gorm_gen/query/task.gen.go @@ -0,0 +1,388 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. + +package query + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "gorm.io/gen" + "gorm.io/gen/field" + + "gorm.io/plugin/dbresolver" + + "github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/model" +) + +func newObservabilityTask(db *gorm.DB, opts ...gen.DOOption) observabilityTask { + _observabilityTask := observabilityTask{} + + _observabilityTask.observabilityTaskDo.UseDB(db, opts...) 
+ _observabilityTask.observabilityTaskDo.UseModel(&model.ObservabilityTask{}) + + tableName := _observabilityTask.observabilityTaskDo.TableName() + _observabilityTask.ALL = field.NewAsterisk(tableName) + _observabilityTask.ID = field.NewInt64(tableName, "id") + _observabilityTask.WorkspaceID = field.NewInt64(tableName, "workspace_id") + _observabilityTask.Name = field.NewString(tableName, "name") + _observabilityTask.Description = field.NewString(tableName, "description") + _observabilityTask.TaskType = field.NewString(tableName, "task_type") + _observabilityTask.TaskStatus = field.NewString(tableName, "task_status") + _observabilityTask.TaskDetail = field.NewString(tableName, "task_detail") + _observabilityTask.SpanFilter = field.NewString(tableName, "span_filter") + _observabilityTask.EffectiveTime = field.NewString(tableName, "effective_time") + _observabilityTask.Sampler = field.NewString(tableName, "sampler") + _observabilityTask.TaskConfig = field.NewString(tableName, "task_config") + _observabilityTask.CreatedAt = field.NewTime(tableName, "created_at") + _observabilityTask.UpdatedAt = field.NewTime(tableName, "updated_at") + _observabilityTask.CreatedBy = field.NewString(tableName, "created_by") + _observabilityTask.UpdatedBy = field.NewString(tableName, "updated_by") + + _observabilityTask.fillFieldMap() + + return _observabilityTask +} + +// observabilityTask 任务信息 +type observabilityTask struct { + observabilityTaskDo observabilityTaskDo + + ALL field.Asterisk + ID field.Int64 // Task ID + WorkspaceID field.Int64 // 空间ID + Name field.String // 任务名称 + Description field.String // 任务描述 + TaskType field.String // 任务类型 + TaskStatus field.String // 任务状态 + TaskDetail field.String // 任务运行状态详情 + SpanFilter field.String // span 过滤条件 + EffectiveTime field.String // 生效时间 + Sampler field.String // 采样器 + TaskConfig field.String // 相关任务的配置信息 + CreatedAt field.Time // 创建时间 + UpdatedAt field.Time // 更新时间 + CreatedBy field.String // 创建人 + UpdatedBy field.String // 更新人 + + fieldMap map[string]field.Expr +} + +func (o observabilityTask) Table(newTableName string) *observabilityTask { + o.observabilityTaskDo.UseTable(newTableName) + return o.updateTableName(newTableName) +} + +func (o observabilityTask) As(alias string) *observabilityTask { + o.observabilityTaskDo.DO = *(o.observabilityTaskDo.As(alias).(*gen.DO)) + return o.updateTableName(alias) +} + +func (o *observabilityTask) updateTableName(table string) *observabilityTask { + o.ALL = field.NewAsterisk(table) + o.ID = field.NewInt64(table, "id") + o.WorkspaceID = field.NewInt64(table, "workspace_id") + o.Name = field.NewString(table, "name") + o.Description = field.NewString(table, "description") + o.TaskType = field.NewString(table, "task_type") + o.TaskStatus = field.NewString(table, "task_status") + o.TaskDetail = field.NewString(table, "task_detail") + o.SpanFilter = field.NewString(table, "span_filter") + o.EffectiveTime = field.NewString(table, "effective_time") + o.Sampler = field.NewString(table, "sampler") + o.TaskConfig = field.NewString(table, "task_config") + o.CreatedAt = field.NewTime(table, "created_at") + o.UpdatedAt = field.NewTime(table, "updated_at") + o.CreatedBy = field.NewString(table, "created_by") + o.UpdatedBy = field.NewString(table, "updated_by") + + o.fillFieldMap() + + return o +} + +func (o *observabilityTask) WithContext(ctx context.Context) *observabilityTaskDo { + return o.observabilityTaskDo.WithContext(ctx) +} + +func (o observabilityTask) TableName() string { return o.observabilityTaskDo.TableName() } + +func 
(o observabilityTask) Alias() string { return o.observabilityTaskDo.Alias() } + +func (o observabilityTask) Columns(cols ...field.Expr) gen.Columns { + return o.observabilityTaskDo.Columns(cols...) +} + +func (o *observabilityTask) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := o.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (o *observabilityTask) fillFieldMap() { + o.fieldMap = make(map[string]field.Expr, 15) + o.fieldMap["id"] = o.ID + o.fieldMap["workspace_id"] = o.WorkspaceID + o.fieldMap["name"] = o.Name + o.fieldMap["description"] = o.Description + o.fieldMap["task_type"] = o.TaskType + o.fieldMap["task_status"] = o.TaskStatus + o.fieldMap["task_detail"] = o.TaskDetail + o.fieldMap["span_filter"] = o.SpanFilter + o.fieldMap["effective_time"] = o.EffectiveTime + o.fieldMap["sampler"] = o.Sampler + o.fieldMap["task_config"] = o.TaskConfig + o.fieldMap["created_at"] = o.CreatedAt + o.fieldMap["updated_at"] = o.UpdatedAt + o.fieldMap["created_by"] = o.CreatedBy + o.fieldMap["updated_by"] = o.UpdatedBy +} + +func (o observabilityTask) clone(db *gorm.DB) observabilityTask { + o.observabilityTaskDo.ReplaceConnPool(db.Statement.ConnPool) + return o +} + +func (o observabilityTask) replaceDB(db *gorm.DB) observabilityTask { + o.observabilityTaskDo.ReplaceDB(db) + return o +} + +type observabilityTaskDo struct{ gen.DO } + +func (o observabilityTaskDo) Debug() *observabilityTaskDo { + return o.withDO(o.DO.Debug()) +} + +func (o observabilityTaskDo) WithContext(ctx context.Context) *observabilityTaskDo { + return o.withDO(o.DO.WithContext(ctx)) +} + +func (o observabilityTaskDo) ReadDB() *observabilityTaskDo { + return o.Clauses(dbresolver.Read) +} + +func (o observabilityTaskDo) WriteDB() *observabilityTaskDo { + return o.Clauses(dbresolver.Write) +} + +func (o observabilityTaskDo) Session(config *gorm.Session) *observabilityTaskDo { + return o.withDO(o.DO.Session(config)) +} + +func (o observabilityTaskDo) Clauses(conds ...clause.Expression) *observabilityTaskDo { + return o.withDO(o.DO.Clauses(conds...)) +} + +func (o observabilityTaskDo) Returning(value interface{}, columns ...string) *observabilityTaskDo { + return o.withDO(o.DO.Returning(value, columns...)) +} + +func (o observabilityTaskDo) Not(conds ...gen.Condition) *observabilityTaskDo { + return o.withDO(o.DO.Not(conds...)) +} + +func (o observabilityTaskDo) Or(conds ...gen.Condition) *observabilityTaskDo { + return o.withDO(o.DO.Or(conds...)) +} + +func (o observabilityTaskDo) Select(conds ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Select(conds...)) +} + +func (o observabilityTaskDo) Where(conds ...gen.Condition) *observabilityTaskDo { + return o.withDO(o.DO.Where(conds...)) +} + +func (o observabilityTaskDo) Order(conds ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Order(conds...)) +} + +func (o observabilityTaskDo) Distinct(cols ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Distinct(cols...)) +} + +func (o observabilityTaskDo) Omit(cols ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Omit(cols...)) +} + +func (o observabilityTaskDo) Join(table schema.Tabler, on ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Join(table, on...)) +} + +func (o observabilityTaskDo) LeftJoin(table schema.Tabler, on ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.LeftJoin(table, on...)) +} + +func (o observabilityTaskDo) RightJoin(table schema.Tabler, on 
...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.RightJoin(table, on...)) +} + +func (o observabilityTaskDo) Group(cols ...field.Expr) *observabilityTaskDo { + return o.withDO(o.DO.Group(cols...)) +} + +func (o observabilityTaskDo) Having(conds ...gen.Condition) *observabilityTaskDo { + return o.withDO(o.DO.Having(conds...)) +} + +func (o observabilityTaskDo) Limit(limit int) *observabilityTaskDo { + return o.withDO(o.DO.Limit(limit)) +} + +func (o observabilityTaskDo) Offset(offset int) *observabilityTaskDo { + return o.withDO(o.DO.Offset(offset)) +} + +func (o observabilityTaskDo) Scopes(funcs ...func(gen.Dao) gen.Dao) *observabilityTaskDo { + return o.withDO(o.DO.Scopes(funcs...)) +} + +func (o observabilityTaskDo) Unscoped() *observabilityTaskDo { + return o.withDO(o.DO.Unscoped()) +} + +func (o observabilityTaskDo) Create(values ...*model.ObservabilityTask) error { + if len(values) == 0 { + return nil + } + return o.DO.Create(values) +} + +func (o observabilityTaskDo) CreateInBatches(values []*model.ObservabilityTask, batchSize int) error { + return o.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (o observabilityTaskDo) Save(values ...*model.ObservabilityTask) error { + if len(values) == 0 { + return nil + } + return o.DO.Save(values) +} + +func (o observabilityTaskDo) First() (*model.ObservabilityTask, error) { + if result, err := o.DO.First(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTask), nil + } +} + +func (o observabilityTaskDo) Take() (*model.ObservabilityTask, error) { + if result, err := o.DO.Take(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTask), nil + } +} + +func (o observabilityTaskDo) Last() (*model.ObservabilityTask, error) { + if result, err := o.DO.Last(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTask), nil + } +} + +func (o observabilityTaskDo) Find() ([]*model.ObservabilityTask, error) { + result, err := o.DO.Find() + return result.([]*model.ObservabilityTask), err +} + +func (o observabilityTaskDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ObservabilityTask, err error) { + buf := make([]*model.ObservabilityTask, 0, batchSize) + err = o.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) 
}() + return fc(tx, batch) + }) + return results, err +} + +func (o observabilityTaskDo) FindInBatches(result *[]*model.ObservabilityTask, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return o.DO.FindInBatches(result, batchSize, fc) +} + +func (o observabilityTaskDo) Attrs(attrs ...field.AssignExpr) *observabilityTaskDo { + return o.withDO(o.DO.Attrs(attrs...)) +} + +func (o observabilityTaskDo) Assign(attrs ...field.AssignExpr) *observabilityTaskDo { + return o.withDO(o.DO.Assign(attrs...)) +} + +func (o observabilityTaskDo) Joins(fields ...field.RelationField) *observabilityTaskDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Joins(_f)) + } + return &o +} + +func (o observabilityTaskDo) Preload(fields ...field.RelationField) *observabilityTaskDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Preload(_f)) + } + return &o +} + +func (o observabilityTaskDo) FirstOrInit() (*model.ObservabilityTask, error) { + if result, err := o.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTask), nil + } +} + +func (o observabilityTaskDo) FirstOrCreate() (*model.ObservabilityTask, error) { + if result, err := o.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*model.ObservabilityTask), nil + } +} + +func (o observabilityTaskDo) FindByPage(offset int, limit int) (result []*model.ObservabilityTask, count int64, err error) { + result, err = o.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = o.Offset(-1).Limit(-1).Count() + return +} + +func (o observabilityTaskDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = o.Count() + if err != nil { + return + } + + err = o.Offset(offset).Limit(limit).Scan(result) + return +} + +func (o observabilityTaskDo) Scan(result interface{}) (err error) { + return o.DO.Scan(result) +} + +func (o observabilityTaskDo) Delete(models ...*model.ObservabilityTask) (result gen.ResultInfo, err error) { + return o.DO.Delete(models) +} + +func (o *observabilityTaskDo) withDO(do gen.Dao) *observabilityTaskDo { + o.DO = *do.(*gen.DO) + return o +} diff --git a/idl/thrift/coze/loop/apis/coze.loop.apis.thrift b/idl/thrift/coze/loop/apis/coze.loop.apis.thrift index f9cf28ecb..3e9dd504d 100644 --- a/idl/thrift/coze/loop/apis/coze.loop.apis.thrift +++ b/idl/thrift/coze/loop/apis/coze.loop.apis.thrift @@ -20,6 +20,7 @@ include "../llm/coze.loop.llm.manage.thrift" include "../observability/coze.loop.observability.trace.thrift" include "../data/coze.loop.data.tag.thrift" include "../observability/coze.loop.observability.openapi.thrift" +include "../observability/coze.loop.observability.task.thrift" service EvaluationSetService extends coze.loop.evaluation.eval_set.EvaluationSetService{} service EvaluatorService extends coze.loop.evaluation.evaluator.EvaluatorService{} @@ -38,6 +39,7 @@ service LLMManageService extends coze.loop.llm.manage.LLMManageService {} service LLMRuntimeService extends coze.loop.llm.runtime.LLMRuntimeService {} service ObservabilityTraceService extends coze.loop.observability.trace.TraceService{} service ObservabilityOpenAPIService extends coze.loop.observability.openapi.OpenAPIService{} +service ObservabilityTaskService extends coze.loop.observability.task.TaskService{} service FoundationAuthService extends coze.loop.foundation.auth.AuthService{} service FoundationAuthNService 
extends coze.loop.foundation.authn.AuthNService{} diff --git a/idl/thrift/coze/loop/data/domain/dataset.thrift b/idl/thrift/coze/loop/data/domain/dataset.thrift index a6196b307..9925f774f 100644 --- a/idl/thrift/coze/loop/data/domain/dataset.thrift +++ b/idl/thrift/coze/loop/data/domain/dataset.thrift @@ -285,4 +285,11 @@ struct ItemErrorGroup { 2: optional string summary 3: optional i32 error_count // 错误条数 4: optional list details // 批量写入时,每类错误至多提供 5 个错误详情;导入任务,至多提供 10 个错误详情 +} + +struct CreateDatasetItemOutput { + 1: optional i32 item_index // item 在 BatchCreateDatasetItemsReq.items 中的索引 + 2: optional string item_key + 3: optional i64 item_id (agw.js_conv = "str") + 4: optional bool is_new_item // 是否是新的 Item。提供 itemKey 时,如果 itemKey 在数据集中已存在数据,则不算做「新 Item」,该字段为 false。 } \ No newline at end of file diff --git a/idl/thrift/coze/loop/evaluation/coze.loop.evaluation.eval_set.thrift b/idl/thrift/coze/loop/evaluation/coze.loop.evaluation.eval_set.thrift index 47c9ba495..119e2a49c 100644 --- a/idl/thrift/coze/loop/evaluation/coze.loop.evaluation.eval_set.thrift +++ b/idl/thrift/coze/loop/evaluation/coze.loop.evaluation.eval_set.thrift @@ -13,7 +13,7 @@ struct CreateEvaluationSetRequest { 4: optional eval_set.EvaluationSetSchema evaluation_set_schema, 5: optional eval_set.BizCategory biz_category (vt.max_size = "128") // 业务分类 - 200: optional common.Session session + 200: optional common.Session session (api.none = 'true') 255: optional base.Base Base } @@ -193,6 +193,8 @@ struct BatchCreateEvaluationSetItemsResponse { 1: optional map added_items (api.js_conv='true', go.tag='json:"added_items"') // key: item 在 items 中的索引 2: optional list errors + 3: optional list item_outputs + 255: base.BaseResp BaseResp } diff --git a/idl/thrift/coze/loop/evaluation/coze.loop.evaluation.expt.thrift b/idl/thrift/coze/loop/evaluation/coze.loop.evaluation.expt.thrift index a869d32fe..8fdd6247a 100644 --- a/idl/thrift/coze/loop/evaluation/coze.loop.evaluation.expt.thrift +++ b/idl/thrift/coze/loop/evaluation/coze.loop.evaluation.expt.thrift @@ -285,6 +285,8 @@ struct InvokeExperimentResponse { 1: optional map added_items // key: item 在 items 中的索引 2: optional list errors + 3: optional list item_outputs + 255: base.BaseResp BaseResp } diff --git a/idl/thrift/coze/loop/foundation/domain/auth.thrift b/idl/thrift/coze/loop/foundation/domain/auth.thrift index 201145b28..6abcc8e07 100644 --- a/idl/thrift/coze/loop/foundation/domain/auth.thrift +++ b/idl/thrift/coze/loop/foundation/domain/auth.thrift @@ -45,6 +45,7 @@ const AuthEntityType AuthEntityType_EvaluationTarget = "EvaluationTarget" const AuthEntityType AuthEntityType_TraceView = "TraceView" const AuthEntityType AuthEntityType_Model = "Model" const AuthEntityType AuthEntityType_Annotation = "Annotation" +const AuthEntityType AuthEntityType_TraceTask = "Task" // 鉴权资源,客体 struct AuthEntity { diff --git a/idl/thrift/coze/loop/observability/coze.loop.observability.task.thrift b/idl/thrift/coze/loop/observability/coze.loop.observability.task.thrift new file mode 100644 index 000000000..4c887775b --- /dev/null +++ b/idl/thrift/coze/loop/observability/coze.loop.observability.task.thrift @@ -0,0 +1,83 @@ +namespace go coze.loop.observability.task + +include "../../../base.thrift" +include "./domain/filter.thrift" +include "./domain/task.thrift" +include "./domain/common.thrift" + +struct CreateTaskRequest { + 1: required task.Task task (api.body = "task"), + + 255: optional base.Base base, +} + +struct CreateTaskResponse { + 1: optional i64 task_id (api.js_conv="true" 
api.body="task_id"), + + 255: optional base.BaseResp BaseResp +} + +struct UpdateTaskRequest { + 1: required i64 task_id (api.js_conv="true" api.path="task_id"), + 2: required i64 workspace_id (api.js_conv='true', go.tag='json:"workspace_id"', api.body="workspace_id", vt.gt="0") + 3: optional task.TaskStatus task_status (api.body = "task_status"), + 4: optional string description (api.body = "description"), + 5: optional task.EffectiveTime effective_time (api.body = "effective_time"), + 6: optional double sample_rate (api.body = "sample_rate"), + + 255: optional base.Base base, +} + +struct UpdateTaskResponse { + 255: optional base.BaseResp BaseResp +} + +struct ListTasksRequest { + 1: required i64 workspace_id (api.js_conv='true', go.tag='json:"workspace_id"', api.body="workspace_id", vt.gt="0") + 2: optional filter.TaskFilterFields task_filters (api.body = "task_filters"), + + 101: optional i32 limit (api.body = "limit") /* default 20 max 200 */ + 102: optional i32 offset (api.body = "offset") + 103: optional common.OrderBy order_by (api.body = "order_by") + 255: optional base.Base base, +} + +struct ListTasksResponse { + 1: optional list<task.Task> tasks (api.body = "tasks"), + + 100: optional i64 total (api.js_conv="true" api.body="total"), + 255: optional base.BaseResp BaseResp +} + +struct GetTaskRequest { + 1: required i64 task_id (api.path = "task_id" api.js_conv="true"), + 2: required i64 workspace_id (api.js_conv='true', go.tag='json:"workspace_id"', api.query="workspace_id", vt.gt="0") + + 255: optional base.Base base, +} + +struct GetTaskResponse { + 1: optional task.Task task (api.body="task"), + + 255: optional base.BaseResp BaseResp +} + +struct CheckTaskNameRequest { + 1: required i64 workspace_id (api.js_conv='true', go.tag='json:"workspace_id"', api.body="workspace_id", vt.gt="0") + 2: required string name (api.body='name') + 255: optional base.Base Base +} + +struct CheckTaskNameResponse { + 1: optional bool Pass (agw.key='pass') + 2: optional string Message (agw.key='message') + 255: base.BaseResp BaseResp +} + +service TaskService { + CheckTaskNameResponse CheckTaskName(1: CheckTaskNameRequest req) (api.post = '/api/observability/v1/tasks/check_name') + CreateTaskResponse CreateTask(1: CreateTaskRequest req) (api.post = '/api/observability/v1/tasks') + UpdateTaskResponse UpdateTask(1: UpdateTaskRequest req) (api.put = '/api/observability/v1/tasks/:task_id') + ListTasksResponse ListTasks(1: ListTasksRequest req) (api.post = '/api/observability/v1/tasks/list') + GetTaskResponse GetTask(1: GetTaskRequest req) (api.get = '/api/observability/v1/tasks/:task_id') +} \ No newline at end of file diff --git a/idl/thrift/coze/loop/observability/coze.loop.observability.thrift b/idl/thrift/coze/loop/observability/coze.loop.observability.thrift index d9a06328d..2f2115211 100644 --- a/idl/thrift/coze/loop/observability/coze.loop.observability.thrift +++ b/idl/thrift/coze/loop/observability/coze.loop.observability.thrift @@ -2,6 +2,8 @@ namespace go coze.loop.observability include "coze.loop.observability.trace.thrift" include "coze.loop.observability.openapi.thrift" +include "coze.loop.observability.task.thrift" service ObservabilityTraceService extends coze.loop.observability.trace.TraceService{} -service ObservabilityOpenAPIService extends coze.loop.observability.openapi.OpenAPIService{} \ No newline at end of file +service ObservabilityOpenAPIService extends coze.loop.observability.openapi.OpenAPIService{} +service ObservabilityTaskService extends coze.loop.observability.task.TaskService{} \
No newline at end of file diff --git a/idl/thrift/coze/loop/observability/coze.loop.observability.trace.thrift b/idl/thrift/coze/loop/observability/coze.loop.observability.trace.thrift index 176825b3d..39eb51161 100644 --- a/idl/thrift/coze/loop/observability/coze.loop.observability.trace.thrift +++ b/idl/thrift/coze/loop/observability/coze.loop.observability.trace.thrift @@ -8,6 +8,7 @@ include "./domain/filter.thrift" include "./domain/view.thrift" include "./domain/annotation.thrift" include "./domain/export_dataset.thrift" +include "./domain/task.thrift" struct ListSpansRequest { 1: required i64 workspace_id (api.js_conv='true', go.tag='json:"workspace_id"', api.body="workspace_id") @@ -289,6 +290,61 @@ struct PreviewExportTracesToDatasetResponse { 257: optional string Msg (agw.key = "msg") // 仅供http请求使用; 内部RPC不予使用,统一通过BaseResp获取Code和Msg } +struct ChangeEvaluatorScoreRequest { + 1: required i64 workspace_id (api.js_conv='true', go.tag='json:"workspace_id"', api.body="workspace_id", vt.gt="0") + 2: required string annotation_id (api.body="annotation_id", vt.min_size="1") + 3: required string span_id (api.body="span_id", vt.min_size="1") + 4: required i64 start_time (api.js_conv='true', go.tag='json:"start_time"', api.body="start_time", vt.gt="0") + 5: required annotation.Correction correction (api.body="correction") + 6: optional common.PlatformType platform_type (api.body="platform_type") + + 255: optional base.Base Base +} + +struct ChangeEvaluatorScoreResponse { + 1: required annotation.Annotation annotation + + 255: optional base.BaseResp BaseResp +} + + + +struct ListAnnotationEvaluatorsRequest { + 1: required i64 workspace_id (api.js_conv='true', go.tag='json:"workspace_id"', api.query="workspace_id", vt.gt="0") + 2: optional string name (api.body = "name") + + 255: optional base.Base Base (api.none="true") +} + +struct ListAnnotationEvaluatorsResponse { + 1: required list<annotation.AnnotationEvaluator> evaluators + + 255: optional base.BaseResp BaseResp +} + +struct ExtractSpanInfoRequest { + 1: required i64 workspace_id (api.js_conv='true', go.tag='json:"workspace_id"', api.body="workspace_id", vt.gt="0") + 2: required string trace_id (api.body = "trace_id" vt.min_size = "1") + 3: required list<string> span_ids (api.body="span_ids", vt.min_size="1", vt.max_size="500") + 4: optional i64 start_time (api.js_conv='true', go.tag='json:"start_time"', api.body="start_time", vt.gt="0") + 5: optional i64 end_time (api.js_conv='true', go.tag='json:"end_time"', api.body="end_time", vt.gt="0") + 6: optional common.PlatformType platform_type (api.body="platform_type") + 7: optional list field_mappings (vt.min_size="1", vt.max_size="100") + + 255: optional base.Base Base (api.none="true") +} + +struct SpanInfo { + 1: required string span_id + 2: required list field_list +} +struct ExtractSpanInfoResponse { + 1: required list<SpanInfo> span_infos + + 255: optional base.BaseResp BaseResp +} + + service TraceService { ListSpansResponse ListSpans(1: ListSpansRequest req) (api.post = '/api/observability/v1/spans/list') GetTraceResponse GetTrace(1: GetTraceRequest req) (api.get = '/api/observability/v1/traces/:trace_id') @@ -305,4 +361,7 @@ service TraceService { ListAnnotationsResponse ListAnnotations(1: ListAnnotationsRequest req) (api.post = '/api/observability/v1/annotations/list') ExportTracesToDatasetResponse ExportTracesToDataset(1: ExportTracesToDatasetRequest Req)(api.post = '/api/observability/v1/traces/export_to_dataset') PreviewExportTracesToDatasetResponse PreviewExportTracesToDataset(1: PreviewExportTracesToDatasetRequest
Req)(api.post = '/api/observability/v1/traces/preview_export_to_dataset') + ChangeEvaluatorScoreResponse ChangeEvaluatorScore(1: ChangeEvaluatorScoreRequest req) (api.post = '/api/observability/v1/traces/change_eval_score') + ListAnnotationEvaluatorsResponse ListAnnotationEvaluators(1: ListAnnotationEvaluatorsRequest req) (api.get = '/api/observability/v1/annotation/list_evaluators') + ExtractSpanInfoResponse ExtractSpanInfo(1: ExtractSpanInfoRequest req) (api.post = '/api/observability/v1/trace/extract_span_info') } diff --git a/idl/thrift/coze/loop/observability/domain/annotation.thrift b/idl/thrift/coze/loop/observability/domain/annotation.thrift index 5fcce6778..ca54ce19d 100644 --- a/idl/thrift/coze/loop/observability/domain/annotation.thrift +++ b/idl/thrift/coze/loop/observability/domain/annotation.thrift @@ -61,4 +61,10 @@ struct Annotation { 100: optional common.BaseInfo base_info 101: optional AutoEvaluate auto_evaluate 102: optional ManualFeedback manual_feedback +} + +struct AnnotationEvaluator { + 1: required i64 evaluator_version_id, + 2: required string evaluator_name, + 3: required string evaluator_version, } \ No newline at end of file diff --git a/idl/thrift/coze/loop/observability/domain/filter.thrift b/idl/thrift/coze/loop/observability/domain/filter.thrift index b16aeecaa..f3794ae1a 100644 --- a/idl/thrift/coze/loop/observability/domain/filter.thrift +++ b/idl/thrift/coze/loop/observability/domain/filter.thrift @@ -1,5 +1,7 @@ namespace go coze.loop.observability.domain.filter +include "common.thrift" + typedef string QueryType (ts.enum="true") const QueryType QueryType_Match = "match" const QueryType QueryType_Eq = "eq" @@ -24,6 +26,14 @@ const FieldType FieldType_Long = "long" const FieldType FieldType_Double = "double" const FieldType FieldType_Bool = "bool" +typedef string TaskFieldName +const TaskFieldName TaskFieldName_TaskStatus = "task_status" +const TaskFieldName TaskFieldName_TaskName = "task_name" +const TaskFieldName TaskFieldName_TaskType = "task_type" +const TaskFieldName TaskFieldName_SampleRate = "sample_rate" +const TaskFieldName TaskFieldName_CreatedBy = "created_by" + + struct FilterFields { 1: optional QueryRelation query_and_or 2: required list<FilterField> filter_fields @@ -43,3 +53,22 @@ struct FieldOptions { 3: optional list<double> f64_list 4: optional list<string> string_list } + +struct TaskFilterFields { + 1: optional QueryRelation query_and_or + 2: required list<TaskFilterField> filter_fields +} + +struct TaskFilterField { + 1: optional TaskFieldName field_name + 2: optional FieldType field_type + 3: optional list<string> values + 4: optional QueryType query_type + 5: optional QueryRelation query_and_or + 6: optional TaskFilterField sub_filter +} +struct SpanFilterFields { + 1: optional FilterFields filters // Span 过滤条件 + 2: optional common.PlatformType platform_type // 平台类型,不填默认是fornax + 3: optional common.SpanListType span_list_type // 查询的 span 标签页类型,不填默认是 root span +} \ No newline at end of file diff --git a/idl/thrift/coze/loop/observability/domain/task.thrift b/idl/thrift/coze/loop/observability/domain/task.thrift new file mode 100644 index 000000000..ff9df13cd --- /dev/null +++ b/idl/thrift/coze/loop/observability/domain/task.thrift @@ -0,0 +1,149 @@ +namespace go coze.loop.observability.domain.task + +include "common.thrift" +include "filter.thrift" +include "export_dataset.thrift" + +typedef string TimeUnit (ts.enum="true") +const TimeUnit TimeUnit_Day = "day" +const TimeUnit TimeUnit_Week = "week" +const TimeUnit TimeUnit_Null = "null" + +typedef string TaskType (ts.enum="true")
+const TaskType TaskType_AutoEval = "auto_evaluate" // 自动评测 +const TaskType TaskType_AutoDataReflow = "auto_data_reflow" // 数据回流 + +typedef string TaskRunType (ts.enum="true") +const TaskRunType TaskRunType_BackFill = "back_fill" // 历史数据回填 +const TaskRunType TaskRunType_NewData = "new_data" // 新数据 + +typedef string TaskStatus (ts.enum="true") +const TaskStatus TaskStatus_Unstarted = "unstarted" // 未启动 +const TaskStatus TaskStatus_Running = "running" // 正在运行 +const TaskStatus TaskStatus_Failed = "failed" // 失败 +const TaskStatus TaskStatus_Success = "success" // 成功 +const TaskStatus TaskStatus_Pending = "pending" // 中止 +const TaskStatus TaskStatus_Disabled = "disabled" // 禁用 + +typedef string RunStatus (ts.enum="true") +const RunStatus RunStatus_Running = "running" // 正在运行 +const RunStatus RunStatus_Done = "done" // 完成运行 + +// Task +struct Task { + 1: optional i64 id (api.js_conv="true", go.tag='json:"id"') // 任务 id + 2: required string name // 名称 + 3: optional string description // 描述 + 4: optional i64 workspace_id (api.js_conv="true", go.tag='json:"workspace_id"') // 所在空间 + 5: required TaskType task_type // 类型 + 6: optional TaskStatus task_status // 状态 + 7: optional Rule rule // 规则 + 8: optional TaskConfig task_config // 配置 + 9: optional RunDetail task_detail // 任务状态详情 + 10: optional RunDetail backfill_task_detail // 任务历史数据执行详情 + + 100: optional common.BaseInfo base_info // 基础信息 +} + +// Rule +struct Rule { + 1: optional filter.SpanFilterFields span_filters // Span 过滤条件 + 2: optional Sampler sampler // 采样配置 + 3: optional EffectiveTime effective_time // 生效时间窗口 + 4: optional EffectiveTime backfill_effective_time // 历史数据生效时间窗口 +} + +struct Sampler { + 1: optional double sample_rate // 采样率 + 2: optional i64 sample_size // 采样上限 + 3: optional bool is_cycle // 是否启动任务循环 + 4: optional i64 cycle_count // 采样单次上限 + 5: optional i64 cycle_interval // 循环间隔 + 6: optional TimeUnit cycle_time_unit // 循环时间单位 +} + +struct EffectiveTime { + 1: optional i64 start_at (api.js_conv="true", go.tag='json:"start_at"') // ms timestamp + 2: optional i64 end_at (api.js_conv="true", go.tag='json:"end_at"') // ms timestamp +} + + +// TaskConfig +struct TaskConfig { + 1: optional list<AutoEvaluateConfig> auto_evaluate_configs // 配置的评测规则信息 + 2: optional list<DataReflowConfig> data_reflow_config // 配置的数据回流的数据集信息 +} + +struct DataReflowConfig { + 1: optional i64 dataset_id (api.js_conv="true", go.tag='json:"dataset_id"') // 数据集id,新增数据集时可为空 + 2: optional string dataset_name // 数据集名称 + 3: optional export_dataset.DatasetSchema dataset_schema (vt.not_nil="true") // 数据集列数据schema + 4: optional list field_mappings (vt.min_size="1", vt.max_size="100") +} + +struct AutoEvaluateConfig { + 1: required i64 evaluator_version_id (api.js_conv="true", go.tag='json:"evaluator_version_id"') + 2: required i64 evaluator_id (api.js_conv="true", go.tag='json:"evaluator_id"') + 3: required list<EvaluateFieldMapping> field_mappings +} + +// RunDetail +struct RunDetail { + 1: optional i64 success_count + 2: optional i64 failed_count + 3: optional i64 total_count +} + +struct BackfillDetail { + 1: optional i64 success_count + 2: optional i64 failed_count + 3: optional i64 total_count + 4: optional RunStatus backfill_status + 5: optional string last_span_page_token +} + +struct EvaluateFieldMapping { + 1: required export_dataset.FieldSchema field_schema // 数据集字段约束 + 2: required string trace_field_key + 3: required string trace_field_jsonpath + 4: optional string eval_set_name +} + +// TaskRun +struct TaskRun { + 1: required i64 id (api.js_conv="true", go.tag='json:"id"') // 任务 run id + 2: required i64
workspace_id (api.js_conv="true", go.tag='json:"workspace_id"') // 所在空间 + 3: required i64 task_id (api.js_conv="true", go.tag='json:"task_id"') // 任务 id + 4: required TaskRunType task_type // 类型 + 5: required RunStatus run_status // 状态 + 6: optional RunDetail run_detail // 任务状态详情 + 7: optional BackfillDetail backfill_run_detail // 任务历史数据执行详情 + 8: required i64 run_start_at (api.js_conv="true", go.tag='json:"run_start_at"') + 9: required i64 run_end_at (api.js_conv="true", go.tag='json:"run_end_at"') + 10: optional TaskRunConfig task_run_config // 配置 + + 100: optional common.BaseInfo base_info // 基础信息 +} +struct TaskRunConfig { + 1: optional AutoEvaluateRunConfig auto_evaluate_run_config // 自动评测对应的运行配置信息 + 2: optional DataReflowRunConfig data_reflow_run_config // 数据回流对应的运行配置信息 +} +struct AutoEvaluateRunConfig { + 1: required i64 expt_id (api.js_conv="true", go.tag='json:"expt_id"') + 2: required i64 expt_run_id (api.js_conv="true", go.tag='json:"expt_run_id"') + 3: required i64 eval_id (api.js_conv="true", go.tag='json:"eval_id"') + 4: required i64 schema_id (api.js_conv="true", go.tag='json:"schema_id"') + 5: optional string schema + 6: required i64 end_at (api.js_conv="true", go.tag='json:"end_at"') + 7: required i64 cycle_start_at (api.js_conv="true", go.tag='json:"cycle_start_at"') + 8: required i64 cycle_end_at (api.js_conv="true", go.tag='json:"cycle_end_at"') + 9: required string status +} +struct DataReflowRunConfig { + 1: required i64 dataset_id (api.js_conv="true", go.tag='json:"dataset_id"') + 2: required i64 dataset_run_id (api.js_conv="true", go.tag='json:"dataset_run_id"') + 3: required i64 end_at (api.js_conv="true", go.tag='json:"end_at"') + 4: required i64 cycle_start_at (api.js_conv="true", go.tag='json:"cycle_start_at"') + 5: required i64 cycle_end_at (api.js_conv="true", go.tag='json:"cycle_end_at"') + 6: required string status +} \ No newline at end of file diff --git a/release/deployment/docker-compose/bootstrap/mysql-init/init-sql/auto_task_run.sql b/release/deployment/docker-compose/bootstrap/mysql-init/init-sql/auto_task_run.sql new file mode 100644 index 000000000..4c2de5a2b --- /dev/null +++ b/release/deployment/docker-compose/bootstrap/mysql-init/init-sql/auto_task_run.sql @@ -0,0 +1,17 @@ +CREATE TABLE auto_task_run ( + `id` bigint unsigned NOT NULL COMMENT 'TaskRun ID', + `workspace_id` bigint unsigned NOT NULL COMMENT '空间ID', + `task_id` bigint unsigned NOT NULL COMMENT 'Task ID', + `task_type` varchar(64) NOT NULL DEFAULT '' COMMENT 'Task类型', + `run_status` varchar(64) NOT NULL DEFAULT '' COMMENT 'Task Run状态', + `run_detail` json DEFAULT NULL COMMENT 'Task Run运行状态详情', + `backfill_detail` json DEFAULT NULL COMMENT '历史回溯Task Run运行状态详情', + `run_start_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务开始时间', + `run_end_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务结束时间', + `run_config` json DEFAULT NULL COMMENT '相关Run的配置信息', + `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间', + PRIMARY KEY (`id`), + KEY `idx_task_id_status` (`task_id`,`run_status`), + KEY `idx_workspace_task` (`workspace_id`, `task_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci COMMENT='Task Run信息'; \ No newline at end of file diff --git a/release/deployment/docker-compose/bootstrap/mysql-init/init-sql/task.sql b/release/deployment/docker-compose/bootstrap/mysql-init/init-sql/task.sql new file mode 100644 index 
000000000..dccf63377 --- /dev/null +++ b/release/deployment/docker-compose/bootstrap/mysql-init/init-sql/task.sql @@ -0,0 +1,21 @@ +CREATE TABLE `task` ( + `id` bigint unsigned NOT NULL COMMENT 'Task ID', + `workspace_id` bigint unsigned NOT NULL COMMENT '空间ID', + `name` varchar(128) COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT '任务名称', + `description` varchar(2048) COLLATE utf8mb4_general_ci DEFAULT '' COMMENT '任务描述', + `task_type` varchar(64) COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT '任务类型', + `task_status` varchar(64) COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT '任务状态', + `task_detail` json DEFAULT NULL COMMENT '任务运行状态详情', + `span_filter` json DEFAULT NULL COMMENT 'span 过滤条件', + `effective_time` json DEFAULT NULL COMMENT '生效时间', + `backfill_effective_time` json DEFAULT NULL COMMENT '历史回溯生效时间', + `sampler` json DEFAULT NULL COMMENT '采样器', + `task_config` json DEFAULT NULL COMMENT '相关任务的配置信息', + `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', + `updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '更新时间', + `created_by` varchar(128) COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT '创建人', + `updated_by` varchar(128) COLLATE utf8mb4_general_ci NOT NULL DEFAULT '' COMMENT '更新人', + PRIMARY KEY (`id`), + KEY `idx_space_id_status` (`workspace_id`,`task_status`), + KEY `idx_space_id_type` (`workspace_id`,`task_type`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci COMMENT='任务信息' \ No newline at end of file
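
Reviewer note (not part of the patch): the sketch below shows how the gorm-gen DAOs added above are typically driven. It is a minimal example under stated assumptions: the query.Use(db) entry point and the ObservabilityTask field on the returned Query follow gorm.io/gen's default conventions (the generated gen.go is not included in this diff), the query package import path mirrors the model package path referenced by the generated files, and the DSN is a placeholder.

package main

import (
	"context"
	"fmt"

	"gorm.io/driver/mysql"
	"gorm.io/gorm"

	// Assumed location of the generated query package; it sits alongside the
	// model package imported by the generated files in this diff.
	"github.com/coze-dev/coze-loop/backend/modules/observability/infra/repo/mysql/gorm_gen/query"
)

func main() {
	// Placeholder DSN for illustration only.
	db, err := gorm.Open(mysql.Open("user:pass@tcp(127.0.0.1:3306)/coze_loop?parseTime=true"), &gorm.Config{})
	if err != nil {
		panic(err)
	}

	// query.Use and the ObservabilityTask field are gen's default entry points
	// for the generated DAOs (assumed here, since gen.go is not shown above).
	q := query.Use(db)
	taskDo := q.ObservabilityTask.WithContext(context.Background())

	// FindByPage, as generated above, runs Offset+Limit+Find and then a Count
	// with the limits reset, returning both the page and the total row count.
	tasks, total, err := taskDo.
		Where(q.ObservabilityTask.WorkspaceID.Eq(123)).
		Order(q.ObservabilityTask.CreatedAt.Desc()).
		FindByPage(0, 20)
	if err != nil {
		panic(err)
	}
	fmt.Printf("page has %d tasks, %d total\n", len(tasks), total)
}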