diff --git a/client/servicediscovery/service_discovery.go b/client/servicediscovery/service_discovery.go index bef80e28a37..f5ac665b7cd 100644 --- a/client/servicediscovery/service_discovery.go +++ b/client/servicediscovery/service_discovery.go @@ -74,7 +74,7 @@ const ( type serviceType int const ( - apiService serviceType = iota + pdService serviceType = iota tsoService ) @@ -678,7 +678,7 @@ func (*serviceDiscovery) GetKeyspaceGroupID() uint32 { // DiscoverMicroservice discovers the microservice with the specified type and returns the server urls. func (c *serviceDiscovery) discoverMicroservice(svcType serviceType) (urls []string, err error) { switch svcType { - case apiService: + case pdService: urls = c.GetServiceURLs() case tsoService: leaderURL := c.getLeaderURL() diff --git a/cmd/pd-server/main.go b/cmd/pd-server/main.go index 24ca46e7d5e..e9abef1fd4a 100644 --- a/cmd/pd-server/main.go +++ b/cmd/pd-server/main.go @@ -218,11 +218,7 @@ func start(cmd *cobra.Command, args []string, services ...string) { // Flushing any buffered log entries defer log.Sync() memory.InitMemoryHook() - if len(services) != 0 { - versioninfo.Log(server.PDServiceMode) - } else { - versioninfo.Log(server.PDMode) - } + versioninfo.Log(server.PD) for _, msg := range cfg.WarningMsgs { log.Warn(msg) diff --git a/pkg/mcs/registry/registry.go b/pkg/mcs/registry/registry.go index 2ffa04b1bf9..dc0fafbbd33 100644 --- a/pkg/mcs/registry/registry.go +++ b/pkg/mcs/registry/registry.go @@ -85,18 +85,18 @@ func (r *ServiceRegistry) InstallAllRESTHandler(srv bs.Server, h map[string]http serviceName := createServiceName(prefix, name) if l, ok := r.services[serviceName]; ok { if err := l.RegisterRESTHandler(h); err != nil { - log.Error("register restful PD service failed", zap.String("prefix", prefix), zap.String("service-name", name), zap.Error(err)) + log.Error("register restful PD failed", zap.String("prefix", prefix), zap.String("service-name", name), zap.Error(err)) } else { - log.Info("restful PD service already registered", zap.String("prefix", prefix), zap.String("service-name", name)) + log.Info("restful PD already registered", zap.String("prefix", prefix), zap.String("service-name", name)) } continue } l := builder(srv) r.services[serviceName] = l if err := l.RegisterRESTHandler(h); err != nil { - log.Error("register restful PD service failed", zap.String("prefix", prefix), zap.String("service-name", name), zap.Error(err)) + log.Error("register restful PD failed", zap.String("prefix", prefix), zap.String("service-name", name), zap.Error(err)) } else { - log.Info("restful PD service registered successfully", zap.String("prefix", prefix), zap.String("service-name", name)) + log.Info("restful PD registered successfully", zap.String("prefix", prefix), zap.String("service-name", name)) } } } diff --git a/pkg/mcs/scheduling/server/cluster.go b/pkg/mcs/scheduling/server/cluster.go index 6f80572673c..43e0179412e 100644 --- a/pkg/mcs/scheduling/server/cluster.go +++ b/pkg/mcs/scheduling/server/cluster.go @@ -257,8 +257,8 @@ func (c *Cluster) triggerMembershipCheck() { } } -// SwitchPDServiceLeader switches the PD service leader. -func (c *Cluster) SwitchPDServiceLeader(new pdpb.PDClient) bool { +// SwitchPDLeader switches the PD leader. 
+func (c *Cluster) SwitchPDLeader(new pdpb.PDClient) bool { old := c.pdLeader.Load() return c.pdLeader.CompareAndSwap(old, new) } diff --git a/pkg/mcs/scheduling/server/config/config.go b/pkg/mcs/scheduling/server/config/config.go index 413a6c601cc..2b3e350552f 100644 --- a/pkg/mcs/scheduling/server/config/config.go +++ b/pkg/mcs/scheduling/server/config/config.go @@ -243,7 +243,7 @@ func NewPersistConfig(cfg *Config, ttl *cache.TTLString) *PersistConfig { o.SetClusterVersion(&cfg.ClusterVersion) o.schedule.Store(&cfg.Schedule) o.replication.Store(&cfg.Replication) - // storeConfig will be fetched from TiKV by PD service, + // storeConfig will be fetched from TiKV by PD, // so we just set an empty value here first. o.storeConfig.Store(&sc.StoreConfig{}) o.ttl = ttl @@ -748,11 +748,11 @@ func (o *PersistConfig) IsRaftKV2() bool { // TODO: implement the following methods // AddSchedulerCfg adds the scheduler configurations. -// This method is a no-op since we only use configurations derived from one-way synchronization from PD service now. +// This method is a no-op since we only use configurations derived from one-way synchronization from PD now. func (*PersistConfig) AddSchedulerCfg(types.CheckerSchedulerType, []string) {} // RemoveSchedulerCfg removes the scheduler configurations. -// This method is a no-op since we only use configurations derived from one-way synchronization from PD service now. +// This method is a no-op since we only use configurations derived from one-way synchronization from PD now. func (*PersistConfig) RemoveSchedulerCfg(types.CheckerSchedulerType) {} // CheckLabelProperty checks if the label property is satisfied. diff --git a/pkg/mcs/scheduling/server/config/watcher.go b/pkg/mcs/scheduling/server/config/watcher.go index 9db2d47d0f4..1c87076429e 100644 --- a/pkg/mcs/scheduling/server/config/watcher.go +++ b/pkg/mcs/scheduling/server/config/watcher.go @@ -36,7 +36,7 @@ import ( "github.com/tikv/pd/pkg/utils/keypath" ) -// Watcher is used to watch the PD service for any configuration changes. +// Watcher is used to watch the PD for any configuration changes. type Watcher struct { wg sync.WaitGroup ctx context.Context @@ -76,7 +76,7 @@ type persistedConfig struct { Store sc.StoreConfig `json:"store"` } -// NewWatcher creates a new watcher to watch the config meta change from PD service. +// NewWatcher creates a new watcher to watch the config meta change from PD. func NewWatcher( ctx context.Context, etcdClient *clientv3.Client, diff --git a/pkg/mcs/scheduling/server/grpc_service.go b/pkg/mcs/scheduling/server/grpc_service.go index bd2cc40c21d..033397e3b50 100644 --- a/pkg/mcs/scheduling/server/grpc_service.go +++ b/pkg/mcs/scheduling/server/grpc_service.go @@ -159,7 +159,7 @@ func (s *Service) RegionHeartbeat(stream schedulingpb.Scheduling_RegionHeartbeat region := core.RegionFromHeartbeat(request, 0) err = c.HandleRegionHeartbeat(region) if err != nil { - // TODO: if we need to send the error back to PD service. + // TODO: if we need to send the error back to PD. log.Error("failed handle region heartbeat", zap.Error(err)) continue } diff --git a/pkg/mcs/scheduling/server/meta/watcher.go b/pkg/mcs/scheduling/server/meta/watcher.go index c51f10027d7..048e2716cd1 100644 --- a/pkg/mcs/scheduling/server/meta/watcher.go +++ b/pkg/mcs/scheduling/server/meta/watcher.go @@ -33,7 +33,7 @@ import ( "github.com/tikv/pd/pkg/utils/keypath" ) -// Watcher is used to watch the PD service for any meta changes. +// Watcher is used to watch the PD for any meta changes. 
type Watcher struct { wg sync.WaitGroup ctx context.Context @@ -48,7 +48,7 @@ type Watcher struct { storeWatcher *etcdutil.LoopWatcher } -// NewWatcher creates a new watcher to watch the meta change from PD service. +// NewWatcher creates a new watcher to watch the meta change from PD. func NewWatcher( ctx context.Context, etcdClient *clientv3.Client, diff --git a/pkg/mcs/scheduling/server/rule/watcher.go b/pkg/mcs/scheduling/server/rule/watcher.go index 014a3abc2be..f20c4fa7f99 100644 --- a/pkg/mcs/scheduling/server/rule/watcher.go +++ b/pkg/mcs/scheduling/server/rule/watcher.go @@ -34,7 +34,7 @@ import ( "github.com/tikv/pd/pkg/utils/keypath" ) -// Watcher is used to watch the PD service for any Placement Rule changes. +// Watcher is used to watch the PD for any Placement Rule changes. type Watcher struct { ctx context.Context cancel context.CancelFunc @@ -74,7 +74,7 @@ type Watcher struct { patch *placement.RuleConfigPatch } -// NewWatcher creates a new watcher to watch the Placement Rule change from PD service. +// NewWatcher creates a new watcher to watch the Placement Rule change from PD. func NewWatcher( ctx context.Context, etcdClient *clientv3.Client, diff --git a/pkg/mcs/scheduling/server/server.go b/pkg/mcs/scheduling/server/server.go index 80156c1e26b..482bc48d2b4 100644 --- a/pkg/mcs/scheduling/server/server.go +++ b/pkg/mcs/scheduling/server/server.go @@ -110,7 +110,7 @@ type Server struct { hbStreams *hbstream.HeartbeatStreams storage *endpoint.StorageEndpoint - // for watching the PD service meta info updates that are related to the scheduling. + // for watching the PD meta info updates that are related to the scheduling. configWatcher *config.Watcher ruleWatcher *rule.Watcher metaWatcher *meta.Watcher @@ -169,10 +169,10 @@ func (s *Server) startServerLoop() { s.serverLoopCtx, s.serverLoopCancel = context.WithCancel(s.Context()) s.serverLoopWg.Add(2) go s.primaryElectionLoop() - go s.updatePDServiceMemberLoop() + go s.updatePDMemberLoop() } -func (s *Server) updatePDServiceMemberLoop() { +func (s *Server) updatePDMemberLoop() { defer logutil.LogPanic() defer s.serverLoopWg.Done() @@ -220,7 +220,7 @@ func (s *Server) updatePDServiceMemberLoop() { // double check break } - if s.cluster.SwitchPDServiceLeader(pdpb.NewPDClient(cc)) { + if s.cluster.SwitchPDLeader(pdpb.NewPDClient(cc)) { if status.Leader != curLeader { log.Info("switch leader", zap.String("leader-id", fmt.Sprintf("%x", ep.ID)), zap.String("endpoint", ep.ClientURLs[0])) } diff --git a/pkg/member/election_leader.go b/pkg/member/election_leader.go index 5cdc7b4cd9b..76d977eddbd 100644 --- a/pkg/member/election_leader.go +++ b/pkg/member/election_leader.go @@ -21,7 +21,7 @@ import ( ) // ElectionLeader defines the common interface of the leader, which is the pdpb.Member -// for in PD/PD service or the tsopb.Participant in the microservices. +// for in PD or the tsopb.Participant in the microservices. type ElectionLeader interface { // GetListenUrls returns the listen urls GetListenUrls() []string diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go index 80299bf1e25..0f09b119939 100644 --- a/pkg/schedule/coordinator.go +++ b/pkg/schedule/coordinator.go @@ -389,7 +389,7 @@ func (c *Coordinator) LoadPlugin(pluginPath string, ch chan string) { return } log.Info("create scheduler", zap.String("scheduler-name", s.GetName())) - // TODO: handle the plugin in PD service mode. + // TODO: handle the plugin in microservice env. 
if err = c.schedulers.AddScheduler(s); err != nil { log.Error("can't add scheduler", zap.String("scheduler-name", s.GetName()), errs.ZapError(err)) return diff --git a/pkg/schedule/schedulers/scheduler_controller.go b/pkg/schedule/schedulers/scheduler_controller.go index 5f461d326c5..a6d37c9c834 100644 --- a/pkg/schedule/schedulers/scheduler_controller.go +++ b/pkg/schedule/schedulers/scheduler_controller.go @@ -52,10 +52,10 @@ type Controller struct { cluster sche.SchedulerCluster storage endpoint.ConfigStorage // schedulers are used to manage all schedulers, which will only be initialized - // and used in the PD leader service mode now. + // and used in the non-microservice env now. schedulers map[string]*ScheduleController // schedulerHandlers is used to manage the HTTP handlers of schedulers, - // which will only be initialized and used in the PD service mode now. + // which will only be initialized and used in the microservice env now. schedulerHandlers map[string]http.Handler opController *operator.Controller } diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index 9793939fa17..4817aeeeddf 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -334,7 +334,7 @@ type KeyspaceGroupManager struct { // Value: discover.ServiceRegistryEntry tsoServiceKey string // legacySvcRootPath defines the legacy root path for all etcd paths which derives from - // the PD/PD service. It's in the format of "/pd/{cluster_id}". + // the PD. It's in the format of "/pd/{cluster_id}". // The main paths for different usages include: // 1. The path, used by the default keyspace group, for LoadTimestamp/SaveTimestamp in the // storage endpoint. diff --git a/pkg/utils/apiutil/serverapi/middleware.go b/pkg/utils/apiutil/serverapi/middleware.go index 0b03c787e6b..089acd47470 100644 --- a/pkg/utils/apiutil/serverapi/middleware.go +++ b/pkg/utils/apiutil/serverapi/middleware.go @@ -116,7 +116,7 @@ func MicroserviceRedirectRule(matchPath, targetPath, targetServiceName string, } func (h *redirector) matchMicroserviceRedirectRules(r *http.Request) (bool, string) { - if !h.s.IsPDServiceMode() { + if !h.s.IsKeyspaceGroupEnabled() { return false, "" } if len(h.microserviceRedirectRules) == 0 { @@ -223,7 +223,7 @@ func (h *redirector) ServeHTTP(w http.ResponseWriter, r *http.Request, next http clientUrls = leader.GetClientUrls() r.Header.Set(apiutil.PDRedirectorHeader, h.s.Name()) } else { - // Prevent more than one redirection among PD/PD service. + // Prevent more than one redirection among PD. 
log.Error("redirect but server is not leader", zap.String("from", name), zap.String("server", h.s.Name()), errs.ZapError(errs.ErrRedirectToNotLeader)) http.Error(w, errs.ErrRedirectToNotLeader.FastGenByArgs().Error(), http.StatusInternalServerError) return diff --git a/server/api/admin.go b/server/api/admin.go index 561f4ec4bff..b5612999dd0 100644 --- a/server/api/admin.go +++ b/server/api/admin.go @@ -254,5 +254,5 @@ func (h *adminHandler) deleteRegionCacheInSchedulingServer(id ...uint64) error { } func buildMsg(err error) string { - return fmt.Sprintf("This operation was executed in PD service but needs to be re-executed on scheduling server due to the following error: %s", err.Error()) + return fmt.Sprintf("This operation was executed in PD but needs to be re-executed on scheduling server due to the following error: %s", err.Error()) } diff --git a/server/api/server.go b/server/api/server.go index 8e352b6a36e..a9464bdb2e3 100644 --- a/server/api/server.go +++ b/server/api/server.go @@ -63,7 +63,7 @@ func NewHandler(_ context.Context, svr *server.Server) (http.Handler, apiutil.AP // Following requests are **not** redirected: // "/schedulers", http.MethodPost // "/schedulers/{name}", http.MethodDelete - // Because the writing of all the config of the scheduling service is in the PD service, + // Because the writing of all the config of the scheduling service is in the PD, // we should not post and delete the scheduler directly in the scheduling service. router.PathPrefix(apiPrefix).Handler(negroni.New( serverapi.NewRuntimeServiceValidator(svr, group), @@ -153,7 +153,7 @@ func NewHandler(_ context.Context, svr *server.Server) (http.Handler, apiutil.AP scheapi.APIPathPrefix+"/config/placement-rule", constant.SchedulingServiceName, []string{http.MethodGet}), - // because the writing of all the meta information of the scheduling service is in the PD service, + // because the writing of all the meta information of the scheduling service is in the PD, // we should not post and delete the scheduler directly in the scheduling service. 
serverapi.MicroserviceRedirectRule( prefix+"/schedulers", diff --git a/server/apiv2/handlers/micro_service.go b/server/apiv2/handlers/micro_service.go index 3a0d6a2c2a6..25a2b17b18c 100644 --- a/server/apiv2/handlers/micro_service.go +++ b/server/apiv2/handlers/micro_service.go @@ -39,7 +39,7 @@ func RegisterMicroservice(r *gin.RouterGroup) { // @Router /ms/members/{service} [get] func GetMembers(c *gin.Context) { svr := c.MustGet(middlewares.ServerContextKey).(*server.Server) - if !svr.IsPDServiceMode() { + if !svr.IsKeyspaceGroupEnabled() { c.AbortWithStatusJSON(http.StatusNotFound, "not support microservice") return } @@ -65,7 +65,7 @@ func GetMembers(c *gin.Context) { // @Router /ms/primary/{service} [get] func GetPrimary(c *gin.Context) { svr := c.MustGet(middlewares.ServerContextKey).(*server.Server) - if !svr.IsPDServiceMode() { + if !svr.IsKeyspaceGroupEnabled() { c.AbortWithStatusJSON(http.StatusNotFound, "not support microservice") return } diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 4ad97dfd0cc..8923efef9a6 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -131,7 +131,7 @@ type Server interface { GetMembers() ([]*pdpb.Member, error) ReplicateFileToMember(ctx context.Context, member *pdpb.Member, name string, data []byte) error GetKeyspaceGroupManager() *keyspace.GroupManager - IsPDServiceMode() bool + IsKeyspaceGroupEnabled() bool GetSafePointV2Manager() *gc.SafePointV2Manager } @@ -156,12 +156,12 @@ type RaftCluster struct { etcdClient *clientv3.Client httpClient *http.Client - running bool - isPDServiceMode bool - meta *metapb.Cluster - storage storage.Storage - minResolvedTS atomic.Value // Store as uint64 - externalTS atomic.Value // Store as uint64 + running bool + isKeyspaceGroupEnabled bool + meta *metapb.Cluster + storage storage.Storage + minResolvedTS atomic.Value // Store as uint64 + externalTS atomic.Value // Store as uint64 // Keep the previous store limit settings when removing a store. prevStoreLimit map[uint64]map[storelimit.Type]float64 @@ -325,7 +325,7 @@ func (c *RaftCluster) Start(s Server, bootstrap bool) (err error) { log.Warn("raft cluster has already been started") return nil } - c.isPDServiceMode = s.IsPDServiceMode() + c.isKeyspaceGroupEnabled = s.IsKeyspaceGroupEnabled() err = c.InitCluster(s.GetAllocator(), s.GetPersistOptions(), s.GetHBStreams(), s.GetKeyspaceGroupManager()) if err != nil { return err @@ -376,7 +376,7 @@ func (c *RaftCluster) Start(s Server, bootstrap bool) (err error) { c.loadExternalTS() c.loadMinResolvedTS() - if c.isPDServiceMode { + if c.isKeyspaceGroupEnabled { // bootstrap keyspace group manager after starting other parts successfully. // This order avoids a stuck goroutine in keyspaceGroupManager when it fails to create raftcluster. err = c.keyspaceGroupManager.Bootstrap(c.ctx) @@ -404,7 +404,7 @@ func (c *RaftCluster) Start(s Server, bootstrap bool) (err error) { } func (c *RaftCluster) checkSchedulingService() { - if c.isPDServiceMode { + if c.isKeyspaceGroupEnabled { servers, err := discovery.Discover(c.etcdClient, constant.SchedulingServiceName) if c.opt.GetMicroserviceConfig().IsSchedulingFallbackEnabled() && (err != nil || len(servers) == 0) { c.startSchedulingJobs(c, c.hbstreams) @@ -425,7 +425,7 @@ func (c *RaftCluster) checkSchedulingService() { // checkTSOService checks the TSO service. 
func (c *RaftCluster) checkTSOService() { - if c.isPDServiceMode { + if c.isKeyspaceGroupEnabled { if c.opt.GetMicroserviceConfig().IsTSODynamicSwitchingEnabled() { servers, err := discovery.Discover(c.etcdClient, constant.TSOServiceName) if err != nil || len(servers) == 0 { diff --git a/server/config/config.go b/server/config/config.go index 3d0d16bc376..a0ff5967259 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -860,7 +860,7 @@ func (c *MicroserviceConfig) Clone() *MicroserviceConfig { return &cfg } -// IsSchedulingFallbackEnabled returns whether to enable scheduling service fallback to PD service. +// IsSchedulingFallbackEnabled returns whether to enable scheduling service fallback to PD. func (c *MicroserviceConfig) IsSchedulingFallbackEnabled() bool { return c.EnableSchedulingFallback } diff --git a/server/config/service_middleware_persist_options.go b/server/config/service_middleware_persist_options.go index ae4c0ef9b7a..d59d23146f7 100644 --- a/server/config/service_middleware_persist_options.go +++ b/server/config/service_middleware_persist_options.go @@ -45,7 +45,7 @@ func (o *ServiceMiddlewarePersistOptions) GetAuditConfig() *AuditConfig { return o.audit.Load().(*AuditConfig) } -// SetAuditConfig sets the PD service middleware configuration. +// SetAuditConfig sets the PD middleware configuration. func (o *ServiceMiddlewarePersistOptions) SetAuditConfig(cfg *AuditConfig) { o.audit.Store(cfg) } @@ -55,12 +55,12 @@ func (o *ServiceMiddlewarePersistOptions) IsAuditEnabled() bool { return o.GetAuditConfig().EnableAudit } -// GetRateLimitConfig returns pd service middleware configurations. +// GetRateLimitConfig returns PD middleware configurations. func (o *ServiceMiddlewarePersistOptions) GetRateLimitConfig() *RateLimitConfig { return o.rateLimit.Load().(*RateLimitConfig) } -// SetRateLimitConfig sets the PD service middleware configuration. +// SetRateLimitConfig sets the PD middleware configuration. func (o *ServiceMiddlewarePersistOptions) SetRateLimitConfig(cfg *RateLimitConfig) { o.rateLimit.Store(cfg) } @@ -70,12 +70,12 @@ func (o *ServiceMiddlewarePersistOptions) IsRateLimitEnabled() bool { return o.GetRateLimitConfig().EnableRateLimit } -// GetGRPCRateLimitConfig returns pd service middleware configurations. +// GetGRPCRateLimitConfig returns PD middleware configurations. func (o *ServiceMiddlewarePersistOptions) GetGRPCRateLimitConfig() *GRPCRateLimitConfig { return o.grpcRateLimit.Load().(*GRPCRateLimitConfig) } -// SetGRPCRateLimitConfig sets the PD service middleware configuration. +// SetGRPCRateLimitConfig sets the PD middleware configuration. func (o *ServiceMiddlewarePersistOptions) SetGRPCRateLimitConfig(cfg *GRPCRateLimitConfig) { o.grpcRateLimit.Store(cfg) } diff --git a/server/grpc_service.go b/server/grpc_service.go index 118b3d84748..5cf3df9e5eb 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -271,8 +271,8 @@ func (s *GrpcServer) GetClusterInfo(context.Context, *pdpb.GetClusterInfoRequest }, nil } -// GetMinTS implements gRPC PDServer. In PD mode, it simply returns a timestamp. -// In PD service mode, it queries all tso servers and gets the minimum timestamp across +// GetMinTS implements gRPC PDServer. Normally, it simply returns a timestamp. +// If there are TSO servers, it queries all of them and gets the minimum timestamp across // all keyspace groups. 
func (s *GrpcServer) GetMinTS( ctx context.Context, request *pdpb.GetMinTSRequest, diff --git a/server/server.go b/server/server.go index 7ecf309b1d9..4ebc43d2388 100644 --- a/server/server.go +++ b/server/server.go @@ -99,10 +99,8 @@ const ( recoveringMarkPath = "cluster/markers/snapshot-recovering" - // PDMode represents that server is in PD mode. - PDMode = "PD" - // PDServiceMode represents that server is in PD service mode which is in microservice architecture. - PDServiceMode = "PD Service" + // PD is the name of the PD member. + PD = "PD" // maxRetryTimesGetServicePrimary is the max retry times for getting primary addr. // Note: it need to be less than client.defaultPDTimeout @@ -227,7 +225,7 @@ type Server struct { auditBackends []audit.Backend registry *registry.ServiceRegistry - mode string + isKeyspaceGroupEnabled bool servicePrimaryMap sync.Map /* Store as map[string]string */ tsoPrimaryWatcher *etcdutil.LoopWatcher schedulingPrimaryWatcher *etcdutil.LoopWatcher @@ -241,13 +239,8 @@ type HandlerBuilder func(context.Context, *Server) (http.Handler, apiutil.APISer // CreateServer creates the UNINITIALIZED pd server with given configuration. func CreateServer(ctx context.Context, cfg *config.Config, services []string, legacyServiceBuilders ...HandlerBuilder) (*Server, error) { - var mode string - if len(services) != 0 { - mode = PDServiceMode - } else { - mode = PDMode - } - log.Info(fmt.Sprintf("%s config", mode), zap.Reflect("config", cfg)) + isKeyspaceGroupEnabled := len(services) != 0 + log.Info("PD config", zap.Bool("enable-keyspace-group", isKeyspaceGroupEnabled), zap.Reflect("config", cfg)) serviceMiddlewareCfg := config.NewServiceMiddlewareConfig() s := &Server{ @@ -259,7 +252,7 @@ func CreateServer(ctx context.Context, cfg *config.Config, services []string, le ctx: ctx, startTimestamp: time.Now().Unix(), DiagnosticsServer: sysutil.NewDiagnosticsServer(cfg.Log.File.Filename), - mode: mode, + isKeyspaceGroupEnabled: isKeyspaceGroupEnabled, tsoClientPool: struct { syncutil.RWMutex clients map[string]tsopb.TSO_TsoClient @@ -478,7 +471,7 @@ func (s *Server) startServer(ctx context.Context) error { Member: s.member.MemberValue(), Step: keyspace.AllocStep, }) - if s.IsPDServiceMode() { + if s.IsKeyspaceGroupEnabled() { s.keyspaceGroupManager = keyspace.NewKeyspaceGroupManager(s.ctx, s.storage, s.client) } s.keyspaceManager = keyspace.NewKeyspaceManager(s.ctx, s.storage, s.cluster, keyspaceIDAllocator, &s.cfg.Keyspace, s.keyspaceGroupManager) @@ -530,7 +523,7 @@ func (s *Server) Close() { s.cgMonitor.StopMonitor() s.stopServerLoop() - if s.IsPDServiceMode() { + if s.IsKeyspaceGroupEnabled() { s.keyspaceGroupManager.Close() } @@ -641,7 +634,7 @@ func (s *Server) startServerLoop(ctx context.Context) { go s.etcdLeaderLoop() go s.serverMetricsLoop() go s.encryptionKeyManagerLoop() - if s.IsPDServiceMode() { + if s.IsKeyspaceGroupEnabled() { s.initTSOPrimaryWatcher() s.initSchedulingPrimaryWatcher() } @@ -788,9 +781,9 @@ func (s *Server) stopRaftCluster() { s.cluster.Stop() } -// IsPDServiceMode return whether the server is in PD service mode. -func (s *Server) IsPDServiceMode() bool { - return s.mode == PDServiceMode +// IsKeyspaceGroupEnabled returns whether the keyspace group is enabled. +func (s *Server) IsKeyspaceGroupEnabled() bool { + return s.isKeyspaceGroupEnabled } // GetAddr returns the server urls for clients. @@ -1390,7 +1383,7 @@ func (s *Server) GetRaftCluster() *cluster.RaftCluster { // IsServiceIndependent returns whether the service is independent. 
func (s *Server) IsServiceIndependent(name string) bool { - if s.mode == PDServiceMode && !s.IsClosed() { + if s.isKeyspaceGroupEnabled && !s.IsClosed() { if name == constant.TSOServiceName && !s.GetMicroserviceConfig().IsTSODynamicSwitchingEnabled() { return true } @@ -1595,7 +1588,7 @@ func (s *Server) leaderLoop() { for { if s.IsClosed() { - log.Info(fmt.Sprintf("server is closed, return %s leader loop", s.mode)) + log.Info("server is closed, return PD leader loop") return } @@ -1664,13 +1657,13 @@ func (s *Server) leaderLoop() { } func (s *Server) campaignLeader() { - log.Info(fmt.Sprintf("start to campaign %s leader", s.mode), zap.String("campaign-leader-name", s.Name())) + log.Info("start to campaign PD leader", zap.String("campaign-leader-name", s.Name())) if err := s.member.CampaignLeader(s.ctx, s.cfg.LeaderLease); err != nil { if err.Error() == errs.ErrEtcdTxnConflict.Error() { - log.Info(fmt.Sprintf("campaign %s leader meets error due to txn conflict, another PD/PD service may campaign successfully", s.mode), + log.Info("campaign PD leader meets error due to txn conflict, another PD may campaign successfully", zap.String("campaign-leader-name", s.Name())) } else { - log.Error(fmt.Sprintf("campaign %s leader meets error due to etcd error", s.mode), + log.Error("campaign PD leader meets error due to etcd error", zap.String("campaign-leader-name", s.Name()), errs.ZapError(err)) } @@ -1690,7 +1683,7 @@ func (s *Server) campaignLeader() { // maintain the PD leadership, after this, TSO can be service. s.member.KeepLeader(ctx) - log.Info(fmt.Sprintf("campaign %s leader ok", s.mode), zap.String("campaign-leader-name", s.Name())) + log.Info("campaign PD leader ok", zap.String("campaign-leader-name", s.Name())) if err := s.reloadConfigFromKV(); err != nil { log.Error("failed to reload configuration", errs.ZapError(err)) @@ -1727,17 +1720,17 @@ func (s *Server) campaignLeader() { } // EnableLeader to accept the remaining service, such as GetStore, GetRegion. s.member.EnableLeader() - member.ServiceMemberGauge.WithLabelValues(s.mode).Set(1) + member.ServiceMemberGauge.WithLabelValues(PD).Set(1) defer resetLeaderOnce.Do(func() { // as soon as cancel the leadership keepalive, then other member have chance // to be new leader. 
cancel() s.member.ResetLeader() - member.ServiceMemberGauge.WithLabelValues(s.mode).Set(0) + member.ServiceMemberGauge.WithLabelValues(PD).Set(0) }) CheckPDVersionWithClusterVersion(s.persistOptions) - log.Info(fmt.Sprintf("%s leader is ready to serve", s.mode), zap.String("leader-name", s.Name())) + log.Info("PD leader is ready to serve", zap.String("leader-name", s.Name())) leaderTicker := time.NewTicker(constant.LeaderTickInterval) defer leaderTicker.Stop() @@ -1746,7 +1739,7 @@ func (s *Server) campaignLeader() { select { case <-leaderTicker.C: if !s.member.IsLeader() { - log.Info("no longer a leader because lease has expired, pd leader will step down") + log.Info("no longer a leader because lease has expired, PD leader will step down") return } // add failpoint to test exit leader, failpoint judge the member is the give value, then break diff --git a/server/server_test.go b/server/server_test.go index 28839b89389..2c7e1fedef2 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -272,7 +272,7 @@ func TestAPIService(t *testing.T) { err = svr.Run() re.NoError(err) MustWaitLeader(re, []*Server{svr}) - re.True(svr.IsPDServiceMode()) + re.True(svr.IsKeyspaceGroupEnabled()) } func TestIsPathInDirectory(t *testing.T) { diff --git a/tests/cluster.go b/tests/cluster.go index 4189b43902a..66d8ce60e7b 100644 --- a/tests/cluster.go +++ b/tests/cluster.go @@ -429,8 +429,8 @@ func NewTestCluster(ctx context.Context, initialServerCount int, opts ...ConfigO return createTestCluster(ctx, initialServerCount, nil, opts...) } -// NewTestPDServiceCluster creates a new TestCluster with PD service. -func NewTestPDServiceCluster(ctx context.Context, initialServerCount int, opts ...ConfigOption) (*TestCluster, error) { +// NewTestClusterWithKeyspaceGroup creates a new TestCluster with keyspace group enabled. +func NewTestClusterWithKeyspaceGroup(ctx context.Context, initialServerCount int, opts ...ConfigOption) (*TestCluster, error) { return createTestCluster(ctx, initialServerCount, []string{constant.PDServiceName}, opts...) } @@ -461,13 +461,13 @@ func createTestCluster(ctx context.Context, initialServerCount int, services []s }, nil } -// RestartTestAPICluster restarts the API test cluster. -func RestartTestAPICluster(ctx context.Context, cluster *TestCluster) (*TestCluster, error) { +// RestartTestPDCluster restarts the PD test cluster. +func RestartTestPDCluster(ctx context.Context, cluster *TestCluster) (*TestCluster, error) { return restartTestCluster(ctx, cluster, true) } func restartTestCluster( - ctx context.Context, cluster *TestCluster, isPDServiceMode bool, + ctx context.Context, cluster *TestCluster, isKeyspaceGroupEnabled bool, ) (newTestCluster *TestCluster, err error) { schedulers.Register() newTestCluster = &TestCluster{ @@ -494,7 +494,7 @@ func restartTestCluster( newServer *TestServer serverErr error ) - if isPDServiceMode { + if isKeyspaceGroupEnabled { newServer, serverErr = NewTestServer(ctx, serverCfg, []string{constant.PDServiceName}) } else { newServer, serverErr = NewTestServer(ctx, serverCfg, nil) @@ -729,8 +729,8 @@ func (c *TestCluster) Join(ctx context.Context, opts ...ConfigOption) (*TestServ return s, nil } -// JoinPDServer is used to add a new TestServer into the cluster. -func (c *TestCluster) JoinPDServer(ctx context.Context, opts ...ConfigOption) (*TestServer, error) { +// JoinWithKeyspaceGroup is used to add a new TestServer into the cluster with keyspace group enabled. 
+func (c *TestCluster) JoinWithKeyspaceGroup(ctx context.Context, opts ...ConfigOption) (*TestServer, error) { conf, err := c.config.join().Generate(opts...) if err != nil { return nil, err diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index ab3874a33a7..6bd25567f81 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -361,7 +361,7 @@ func TestTSOFollowerProxyWithTSOService(t *testing.T) { re.NoError(failpoint.Enable("github.com/tikv/pd/client/servicediscovery/fastUpdateServiceMode", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster, err := tests.NewTestPDServiceCluster(ctx, 1) + cluster, err := tests.NewTestClusterWithKeyspaceGroup(ctx, 1) re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() diff --git a/tests/integrations/mcs/discovery/register_test.go b/tests/integrations/mcs/discovery/register_test.go index eb8933e10d8..217d8137e5b 100644 --- a/tests/integrations/mcs/discovery/register_test.go +++ b/tests/integrations/mcs/discovery/register_test.go @@ -54,7 +54,7 @@ func (suite *serverRegisterTestSuite) SetupSuite() { re := suite.Require() suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() @@ -84,8 +84,7 @@ func (suite *serverRegisterTestSuite) checkServerRegister(serviceName string) { addr := s.GetAddr() client := suite.pdLeader.GetEtcdClient() - // test PD service discovery - + // test PD discovery endpoints, err := discovery.Discover(client, serviceName) re.NoError(err) returnedEntry := &discovery.ServiceRegistryEntry{} @@ -98,7 +97,7 @@ func (suite *serverRegisterTestSuite) checkServerRegister(serviceName string) { re.True(exist) re.Equal(expectedPrimary, primary) - // test PD service discovery after unregister + // test PD discovery after unregister cleanup() endpoints, err = discovery.Discover(client, serviceName) re.NoError(err) @@ -140,7 +139,7 @@ func (suite *serverRegisterTestSuite) checkServerPrimaryChange(serviceName strin delete(serverMap, primary) expectedPrimary = tests.WaitForPrimaryServing(re, serverMap) - // test PD service discovery + // test PD discovery client := suite.pdLeader.GetEtcdClient() endpoints, err := discovery.Discover(client, serviceName) re.NoError(err) diff --git a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go index b31d919324d..4644b2131d1 100644 --- a/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go +++ b/tests/integrations/mcs/keyspace/tso_keyspace_group_test.go @@ -60,7 +60,7 @@ func (suite *keyspaceGroupTestSuite) SetupTest() { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) suite.ctx = ctx - cluster, err := tests.NewTestPDServiceCluster(suite.ctx, 1) + cluster, err := tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1) suite.cluster = cluster re.NoError(err) re.NoError(cluster.RunInitialServers()) diff --git a/tests/integrations/mcs/members/member_test.go b/tests/integrations/mcs/members/member_test.go index 0ec0b323d7a..7da0bf652bb 100644 --- a/tests/integrations/mcs/members/member_test.go +++ b/tests/integrations/mcs/members/member_test.go @@ -64,7 +64,7 @@ func (suite 
*memberTestSuite) SetupTest() { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`)) ctx, cancel := context.WithCancel(context.Background()) suite.ctx = ctx - cluster, err := tests.NewTestPDServiceCluster(suite.ctx, 1) + cluster, err := tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1) suite.cluster = cluster re.NoError(err) re.NoError(cluster.RunInitialServers()) diff --git a/tests/integrations/mcs/scheduling/api_test.go b/tests/integrations/mcs/scheduling/api_test.go index e52ced10011..55407831df2 100644 --- a/tests/integrations/mcs/scheduling/api_test.go +++ b/tests/integrations/mcs/scheduling/api_test.go @@ -56,7 +56,7 @@ func (suite *apiTestSuite) TearDownSuite() { } func (suite *apiTestSuite) TestGetCheckerByName() { - suite.env.RunTestInPDServiceMode(suite.checkGetCheckerByName) + suite.env.RunTestInMicroserviceEnv(suite.checkGetCheckerByName) } func (suite *apiTestSuite) checkGetCheckerByName(cluster *tests.TestCluster) { @@ -102,7 +102,7 @@ func (suite *apiTestSuite) checkGetCheckerByName(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestAPIForward() { - suite.env.RunTestInPDServiceMode(suite.checkAPIForward) + suite.env.RunTestInMicroserviceEnv(suite.checkAPIForward) } func (suite *apiTestSuite) checkAPIForward(cluster *tests.TestCluster) { @@ -378,7 +378,7 @@ func (suite *apiTestSuite) checkAPIForward(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestConfig() { - suite.env.RunTestInPDServiceMode(suite.checkConfig) + suite.env.RunTestInMicroserviceEnv(suite.checkConfig) } func (suite *apiTestSuite) checkConfig(cluster *tests.TestCluster) { @@ -401,7 +401,7 @@ func (suite *apiTestSuite) checkConfig(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestConfigForward() { - suite.env.RunTestInPDServiceMode(suite.checkConfigForward) + suite.env.RunTestInMicroserviceEnv(suite.checkConfigForward) } func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { @@ -413,7 +413,7 @@ func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { urlPrefix := fmt.Sprintf("%s/pd/api/v1/config", addr) // Test config forward - // Expect to get same config in scheduling server and PD service + // Expect to get same config in scheduling server and PD testutil.Eventually(re, func() bool { testutil.ReadGetJSON(re, tests.TestDialClient, urlPrefix, &cfg) re.Equal(cfg["schedule"].(map[string]any)["leader-schedule-limit"], @@ -421,8 +421,8 @@ func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { return cfg["replication"].(map[string]any)["max-replicas"] == float64(opts.GetReplicationConfig().MaxReplicas) }) - // Test to change config in PD service - // Expect to get new config in scheduling server and PD service + // Test to change config in PD + // Expect to get new config in scheduling server and PD reqData, err := json.Marshal(map[string]any{ "max-replicas": 4, }) @@ -436,7 +436,7 @@ func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { }) // Test to change config only in scheduling server - // Expect to get new config in scheduling server but not old config in PD service + // Expect to get new config in scheduling server but not old config in PD scheCfg := opts.GetScheduleConfig().Clone() scheCfg.LeaderScheduleLimit = 100 opts.SetScheduleConfig(scheCfg) @@ -452,7 +452,7 @@ func (suite *apiTestSuite) checkConfigForward(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestAdminRegionCache() { - 
suite.env.RunTestInPDServiceMode(suite.checkAdminRegionCache) + suite.env.RunTestInMicroserviceEnv(suite.checkAdminRegionCache) } func (suite *apiTestSuite) checkAdminRegionCache(cluster *tests.TestCluster) { @@ -479,7 +479,7 @@ func (suite *apiTestSuite) checkAdminRegionCache(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestAdminRegionCacheForward() { - suite.env.RunTestInPDServiceMode(suite.checkAdminRegionCacheForward) + suite.env.RunTestInMicroserviceEnv(suite.checkAdminRegionCacheForward) } func (suite *apiTestSuite) checkAdminRegionCacheForward(cluster *tests.TestCluster) { @@ -510,7 +510,7 @@ func (suite *apiTestSuite) checkAdminRegionCacheForward(cluster *tests.TestClust } func (suite *apiTestSuite) TestFollowerForward() { - suite.env.RunTestBasedOnMode(suite.checkFollowerForward) + suite.env.RunTest(suite.checkFollowerForward) suite.TearDownSuite() suite.SetupSuite() } @@ -520,7 +520,7 @@ func (suite *apiTestSuite) checkFollowerForward(cluster *tests.TestCluster) { leaderAddr := cluster.GetLeaderServer().GetAddr() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - follower, err := cluster.JoinPDServer(ctx) + follower, err := cluster.Join(ctx) re.NoError(err) re.NoError(follower.Run()) re.NotEmpty(cluster.WaitLeader()) @@ -558,7 +558,7 @@ func (suite *apiTestSuite) checkFollowerForward(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestMetrics() { - suite.env.RunTestInPDServiceMode(suite.checkMetrics) + suite.env.RunTestInMicroserviceEnv(suite.checkMetrics) } func (suite *apiTestSuite) checkMetrics(cluster *tests.TestCluster) { @@ -577,7 +577,7 @@ func (suite *apiTestSuite) checkMetrics(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestStatus() { - suite.env.RunTestInPDServiceMode(suite.checkStatus) + suite.env.RunTestInMicroserviceEnv(suite.checkStatus) } func (suite *apiTestSuite) checkStatus(cluster *tests.TestCluster) { @@ -600,7 +600,7 @@ func (suite *apiTestSuite) checkStatus(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestStores() { - suite.env.RunTestInPDServiceMode(suite.checkStores) + suite.env.RunTestInMicroserviceEnv(suite.checkStores) } func (suite *apiTestSuite) checkStores(cluster *tests.TestCluster) { @@ -647,8 +647,8 @@ func (suite *apiTestSuite) checkStores(cluster *tests.TestCluster) { tests.MustPutStore(re, cluster, store) } // Test /stores - pdServiceAddr := cluster.GetLeaderServer().GetAddr() - urlPrefix := fmt.Sprintf("%s/pd/api/v1/stores", pdServiceAddr) + pdAddr := cluster.GetLeaderServer().GetAddr() + urlPrefix := fmt.Sprintf("%s/pd/api/v1/stores", pdAddr) var resp map[string]any err := testutil.ReadGetJSON(re, tests.TestDialClient, urlPrefix, &resp) re.NoError(err) @@ -682,7 +682,7 @@ func (suite *apiTestSuite) checkStores(cluster *tests.TestCluster) { } func (suite *apiTestSuite) TestRegions() { - suite.env.RunTestInPDServiceMode(suite.checkRegions) + suite.env.RunTestInMicroserviceEnv(suite.checkRegions) } func (suite *apiTestSuite) checkRegions(cluster *tests.TestCluster) { @@ -691,8 +691,8 @@ func (suite *apiTestSuite) checkRegions(cluster *tests.TestCluster) { tests.MustPutRegion(re, cluster, 2, 2, []byte("c"), []byte("d")) tests.MustPutRegion(re, cluster, 3, 1, []byte("e"), []byte("f")) // Test /regions - pdServiceAddr := cluster.GetLeaderServer().GetAddr() - urlPrefix := fmt.Sprintf("%s/pd/api/v1/regions", pdServiceAddr) + pdAddr := cluster.GetLeaderServer().GetAddr() + urlPrefix := fmt.Sprintf("%s/pd/api/v1/regions", pdAddr) var resp map[string]any err := 
testutil.ReadGetJSON(re, tests.TestDialClient, urlPrefix, &resp) re.NoError(err) diff --git a/tests/integrations/mcs/scheduling/config_test.go b/tests/integrations/mcs/scheduling/config_test.go index 6c770d3e4c1..caf86a665cb 100644 --- a/tests/integrations/mcs/scheduling/config_test.go +++ b/tests/integrations/mcs/scheduling/config_test.go @@ -62,7 +62,7 @@ func (suite *configTestSuite) SetupSuite() { schedulers.Register() var err error suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() re.NoError(err) @@ -132,7 +132,7 @@ func (suite *configTestSuite) TestConfigWatch() { watcher.Close() } -// Manually trigger the config persistence in the PD service side. +// Manually trigger the config persistence in the PD side. func persistConfig(re *require.Assertions, pdLeaderServer *tests.TestServer) { err := pdLeaderServer.GetPersistOptions().Persist(pdLeaderServer.GetServer().GetStorage()) re.NoError(err) diff --git a/tests/integrations/mcs/scheduling/meta_test.go b/tests/integrations/mcs/scheduling/meta_test.go index 8df576b82ca..b9c371bf12c 100644 --- a/tests/integrations/mcs/scheduling/meta_test.go +++ b/tests/integrations/mcs/scheduling/meta_test.go @@ -53,7 +53,7 @@ func (suite *metaTestSuite) SetupSuite() { re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) var err error suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() re.NoError(err) diff --git a/tests/integrations/mcs/scheduling/rule_test.go b/tests/integrations/mcs/scheduling/rule_test.go index 706c5784831..b741f147061 100644 --- a/tests/integrations/mcs/scheduling/rule_test.go +++ b/tests/integrations/mcs/scheduling/rule_test.go @@ -54,7 +54,7 @@ func (suite *ruleTestSuite) SetupSuite() { var err error suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() re.NoError(err) @@ -97,7 +97,7 @@ func (suite *ruleTestSuite) TestRuleWatch() { re.Equal(placement.DefaultGroupID, ruleGroups[0].ID) re.Equal(0, ruleGroups[0].Index) re.False(ruleGroups[0].Override) - // Set a new rule via the PD service. + // Set a new rule via the PD. 
apiRuleManager := suite.pdLeaderServer.GetRaftCluster().GetRuleManager() rule := &placement.Rule{ GroupID: "2", diff --git a/tests/integrations/mcs/scheduling/server_test.go b/tests/integrations/mcs/scheduling/server_test.go index 7271111bd79..d79bcb47228 100644 --- a/tests/integrations/mcs/scheduling/server_test.go +++ b/tests/integrations/mcs/scheduling/server_test.go @@ -66,7 +66,7 @@ func (suite *serverTestSuite) SetupSuite() { re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/scheduling/server/changeRunCollectWaitTime", `return(true)`)) re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`)) suite.ctx, suite.cancel = context.WithCancel(context.Background()) - suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1) + suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1) re.NoError(err) err = suite.cluster.RunInitialServers() @@ -220,7 +220,7 @@ func (suite *serverTestSuite) TestSchedulingServiceFallback() { // Change back to the default value. conf.EnableSchedulingFallback = true leaderServer.SetMicroserviceConfig(*conf) - // PD service will execute scheduling jobs since there is no scheduling server. + // PD will execute scheduling jobs since there is no scheduling server. testutil.Eventually(re, func() bool { return suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -229,7 +229,7 @@ func (suite *serverTestSuite) TestSchedulingServiceFallback() { re.NoError(err) defer tc.Destroy() tc.WaitForPrimaryServing(re) - // After scheduling server is started, PD service will not execute scheduling jobs. + // After scheduling server is started, PD will not execute scheduling jobs. testutil.Eventually(re, func() bool { return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -238,7 +238,7 @@ func (suite *serverTestSuite) TestSchedulingServiceFallback() { return tc.GetPrimaryServer().GetCluster().IsBackgroundJobsRunning() }) tc.GetPrimaryServer().Close() - // Stop scheduling server. PD service will execute scheduling jobs again. + // Stop scheduling server. PD will execute scheduling jobs again. testutil.Eventually(re, func() bool { return suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -246,7 +246,7 @@ func (suite *serverTestSuite) TestSchedulingServiceFallback() { re.NoError(err) defer tc1.Destroy() tc1.WaitForPrimaryServing(re) - // After scheduling server is started, PD service will not execute scheduling jobs. + // After scheduling server is started, PD will not execute scheduling jobs. testutil.Eventually(re, func() bool { return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -259,21 +259,21 @@ func (suite *serverTestSuite) TestSchedulingServiceFallback() { func (suite *serverTestSuite) TestDisableSchedulingServiceFallback() { re := suite.Require() - // PD service will execute scheduling jobs since there is no scheduling server. + // PD will execute scheduling jobs since there is no scheduling server. testutil.Eventually(re, func() bool { re.NotNil(suite.pdLeader.GetServer()) re.NotNil(suite.pdLeader.GetServer().GetRaftCluster()) return suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) leaderServer := suite.pdLeader.GetServer() - // After Disabling scheduling service fallback, the PD service will stop scheduling. + // After Disabling scheduling service fallback, the PD will stop scheduling. 
conf := leaderServer.GetMicroserviceConfig().Clone() conf.EnableSchedulingFallback = false leaderServer.SetMicroserviceConfig(*conf) testutil.Eventually(re, func() bool { return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) - // Enable scheduling service fallback again, the PD service will restart scheduling. + // Enable scheduling service fallback again, the PD will restart scheduling. conf.EnableSchedulingFallback = true leaderServer.SetMicroserviceConfig(*conf) testutil.Eventually(re, func() bool { @@ -284,7 +284,7 @@ func (suite *serverTestSuite) TestDisableSchedulingServiceFallback() { re.NoError(err) defer tc.Destroy() tc.WaitForPrimaryServing(re) - // After scheduling server is started, PD service will not execute scheduling jobs. + // After scheduling server is started, PD will not execute scheduling jobs. testutil.Eventually(re, func() bool { return !suite.pdLeader.GetServer().GetRaftCluster().IsSchedulingControllerRunning() }) @@ -292,7 +292,7 @@ func (suite *serverTestSuite) TestDisableSchedulingServiceFallback() { testutil.Eventually(re, func() bool { return tc.GetPrimaryServer().GetCluster().IsBackgroundJobsRunning() }) - // Disable scheduling service fallback and stop scheduling server. PD service won't execute scheduling jobs again. + // Disable scheduling service fallback and stop scheduling server. PD won't execute scheduling jobs again. conf.EnableSchedulingFallback = false leaderServer.SetMicroserviceConfig(*conf) tc.GetPrimaryServer().Close() @@ -310,14 +310,14 @@ func (suite *serverTestSuite) TestSchedulerSync() { tc.WaitForPrimaryServing(re) schedulersController := tc.GetPrimaryServer().GetCluster().GetCoordinator().GetSchedulersController() checkEvictLeaderSchedulerExist(re, schedulersController, false) - // Add a new evict-leader-scheduler through the PD service. + // Add a new evict-leader-scheduler through the PD. api.MustAddScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) // Check if the evict-leader-scheduler is added. checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1}) - // Add a store_id to the evict-leader-scheduler through the PD service. + // Add a store_id to the evict-leader-scheduler through the PD. err = suite.pdLeader.GetServer().GetRaftCluster().PutMetaStore( &metapb.Store{ Id: 2, @@ -334,18 +334,18 @@ func (suite *serverTestSuite) TestSchedulerSync() { }) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1, 2}) - // Delete a store_id from the evict-leader-scheduler through the PD service. + // Delete a store_id from the evict-leader-scheduler through the PD. api.MustDeleteScheduler(re, suite.backendEndpoints, fmt.Sprintf("%s-%d", types.EvictLeaderScheduler.String(), 1)) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{2}) - // Add a store_id to the evict-leader-scheduler through the PD service by the scheduler handler. + // Add a store_id to the evict-leader-scheduler through the PD by the scheduler handler. 
api.MustCallSchedulerConfigAPI(re, http.MethodPost, suite.backendEndpoints, types.EvictLeaderScheduler.String(), []string{"config"}, map[string]any{ "name": types.EvictLeaderScheduler.String(), "store_id": 1, }) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1, 2}) - // Delete a store_id from the evict-leader-scheduler through the PD service by the scheduler handler. + // Delete a store_id from the evict-leader-scheduler through the PD by the scheduler handler. api.MustCallSchedulerConfigAPI(re, http.MethodDelete, suite.backendEndpoints, types.EvictLeaderScheduler.String(), []string{"delete", "2"}, nil) checkEvictLeaderSchedulerExist(re, schedulersController, true) checkEvictLeaderStoreIDs(re, schedulersController, []uint64{1}) @@ -354,7 +354,7 @@ func (suite *serverTestSuite) TestSchedulerSync() { // Check if the scheduler is removed. checkEvictLeaderSchedulerExist(re, schedulersController, false) - // Delete the evict-leader-scheduler through the PD service by removing the last store_id. + // Delete the evict-leader-scheduler through the PD by removing the last store_id. api.MustAddScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) @@ -363,7 +363,7 @@ func (suite *serverTestSuite) TestSchedulerSync() { api.MustDeleteScheduler(re, suite.backendEndpoints, fmt.Sprintf("%s-%d", types.EvictLeaderScheduler.String(), 1)) checkEvictLeaderSchedulerExist(re, schedulersController, false) - // Delete the evict-leader-scheduler through the PD service. + // Delete the evict-leader-scheduler through the PD. api.MustAddScheduler(re, suite.backendEndpoints, types.EvictLeaderScheduler.String(), map[string]any{ "store_id": 1, }) @@ -551,7 +551,7 @@ func (suite *serverTestSuite) TestStoreLimit() { leaderServer.GetRaftCluster().SetStoreLimit(1, storelimit.RemovePeer, 60) leaderServer.GetRaftCluster().SetStoreLimit(2, storelimit.AddPeer, 60) leaderServer.GetRaftCluster().SetStoreLimit(2, storelimit.RemovePeer, 60) - // There is a time window between setting store limit in PD service side and capturing the change in scheduling service. + // There is a time window between setting store limit in PD side and capturing the change in scheduling service. 
 waitSyncFinish(re, tc, storelimit.AddPeer, 60)
 for i := uint64(1); i <= 5; i++ {
 op := operator.NewTestOperator(2, &metapb.RegionEpoch{}, operator.OpRegion, operator.AddPeer{ToStore: 2, PeerID: 100})
@@ -636,7 +636,7 @@ func (suite *multipleServerTestSuite) SetupSuite() {
 re := suite.Require()
 re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`))
 suite.ctx, suite.cancel = context.WithCancel(context.Background())
- suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 2)
+ suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 2)
 re.NoError(err)
 err = suite.cluster.RunInitialServers()
diff --git a/tests/integrations/mcs/tso/api_test.go b/tests/integrations/mcs/tso/api_test.go
index 97f56cbcee1..aa153a7eb9f 100644
--- a/tests/integrations/mcs/tso/api_test.go
+++ b/tests/integrations/mcs/tso/api_test.go
@@ -62,7 +62,7 @@ func (suite *tsoAPITestSuite) SetupTest() {
 var err error
 suite.ctx, suite.cancel = context.WithCancel(context.Background())
- suite.pdCluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1)
+ suite.pdCluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1)
 re.NoError(err)
 err = suite.pdCluster.RunInitialServers()
 re.NoError(err)
@@ -137,7 +137,7 @@ func TestTSOServerStartFirst(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
- apiCluster, err := tests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) {
+ apiCluster, err := tests.NewTestClusterWithKeyspaceGroup(ctx, 1, func(conf *config.Config, _ string) {
 conf.Keyspace.PreAlloc = []string{"k1", "k2"}
 })
 defer apiCluster.Destroy()
@@ -200,7 +200,7 @@ func TestForwardOnlyTSONoScheduling(t *testing.T) {
 re := require.New(t)
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
- tc, err := tests.NewTestPDServiceCluster(ctx, 1)
+ tc, err := tests.NewTestClusterWithKeyspaceGroup(ctx, 1)
 defer tc.Destroy()
 re.NoError(err)
 err = tc.RunInitialServers()
diff --git a/tests/integrations/mcs/tso/keyspace_group_manager_test.go b/tests/integrations/mcs/tso/keyspace_group_manager_test.go
index ecbc0295845..51ace0f08c4 100644
--- a/tests/integrations/mcs/tso/keyspace_group_manager_test.go
+++ b/tests/integrations/mcs/tso/keyspace_group_manager_test.go
@@ -82,7 +82,7 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) SetupSuite() {
 var err error
 suite.ctx, suite.cancel = context.WithCancel(context.Background())
- suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1)
+ suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1)
 re.NoError(err)
 err = suite.cluster.RunInitialServers()
 re.NoError(err)
@@ -537,8 +537,8 @@ func TestTwiceSplitKeyspaceGroup(t *testing.T) {
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`))
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/tso/fastGroupSplitPatroller", `return(true)`))
- // Init PD service config but not start.
- tc, err := tests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) {
+ // Init PD config but do not start it.
+ tc, err := tests.NewTestClusterWithKeyspaceGroup(ctx, 1, func(conf *config.Config, _ string) {
 conf.Keyspace.PreAlloc = []string{
 "keyspace_a", "keyspace_b",
 }
@@ -546,7 +546,7 @@
 re.NoError(err)
 pdAddr := tc.GetConfig().GetClientURL()
- // Start PD service and tso server.
+ // Start PD and tso server.
 err = tc.RunInitialServers()
 re.NoError(err)
 defer tc.Destroy()
@@ -734,8 +734,8 @@ func TestGetTSOImmediately(t *testing.T) {
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/acceleratedAllocNodes", `return(true)`))
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/tso/fastGroupSplitPatroller", `return(true)`))
- // Init PD service config but not start.
- tc, err := tests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) {
+ // Init PD config but do not start it.
+ tc, err := tests.NewTestClusterWithKeyspaceGroup(ctx, 1, func(conf *config.Config, _ string) {
 conf.Keyspace.PreAlloc = []string{
 "keyspace_a", "keyspace_b",
 }
@@ -743,7 +743,7 @@
 re.NoError(err)
 pdAddr := tc.GetConfig().GetClientURL()
- // Start PD service and tso server.
+ // Start PD and tso server.
 err = tc.RunInitialServers()
 re.NoError(err)
 defer tc.Destroy()
diff --git a/tests/integrations/mcs/tso/proxy_test.go b/tests/integrations/mcs/tso/proxy_test.go
index 50583ebbbb4..99213742e6c 100644
--- a/tests/integrations/mcs/tso/proxy_test.go
+++ b/tests/integrations/mcs/tso/proxy_test.go
@@ -62,7 +62,7 @@ func (s *tsoProxyTestSuite) SetupSuite() {
 var err error
 s.ctx, s.cancel = context.WithCancel(context.Background())
 // Create an API cluster with 1 server
- s.apiCluster, err = tests.NewTestPDServiceCluster(s.ctx, 1)
+ s.apiCluster, err = tests.NewTestClusterWithKeyspaceGroup(s.ctx, 1)
 re.NoError(err)
 err = s.apiCluster.RunInitialServers()
 re.NoError(err)
diff --git a/tests/integrations/mcs/tso/server_test.go b/tests/integrations/mcs/tso/server_test.go
index 4c9bb4248e5..168741204de 100644
--- a/tests/integrations/mcs/tso/server_test.go
+++ b/tests/integrations/mcs/tso/server_test.go
@@ -75,7 +75,7 @@ func (suite *tsoServerTestSuite) SetupSuite() {
 re := suite.Require()
 suite.ctx, suite.cancel = context.WithCancel(context.Background())
- suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1)
+ suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1)
 re.NoError(err)
 err = suite.cluster.RunInitialServers()
@@ -156,19 +156,19 @@ func (suite *tsoServerTestSuite) TestParticipantStartWithAdvertiseListenAddr() {
 func TestTSOPath(t *testing.T) {
 re := require.New(t)
- checkTSOPath(re, true /*isPDServiceMode*/)
- checkTSOPath(re, false /*isPDServiceMode*/)
+ checkTSOPath(re, true /*isKeyspaceGroupEnabled*/)
+ checkTSOPath(re, false /*isKeyspaceGroupEnabled*/)
 }
-func checkTSOPath(re *require.Assertions, isPDServiceMode bool) {
+func checkTSOPath(re *require.Assertions, isKeyspaceGroupEnabled bool) {
 var (
 cluster *tests.TestCluster
 err error
 )
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
- if isPDServiceMode {
- cluster, err = tests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) {
+ if isKeyspaceGroupEnabled {
+ cluster, err = tests.NewTestClusterWithKeyspaceGroup(ctx, 1, func(conf *config.Config, _ string) {
 conf.Microservice.EnableTSODynamicSwitching = false
 })
 } else {
@@ -184,7 +184,7 @@ func checkTSOPath(re *require.Assertions, isPDServiceMode bool) {
 re.NoError(pdLeader.BootstrapCluster())
 backendEndpoints := pdLeader.GetAddr()
 client := pdLeader.GetEtcdClient()
- if isPDServiceMode {
+ if isKeyspaceGroupEnabled {
 re.Equal(0, getEtcdTimestampKeyNum(re, client))
 } else {
 re.Equal(1, getEtcdTimestampKeyNum(re, client))
@@ -233,7 +233,7 @@ func NewPDServiceForward(re *require.Assertions) PDServiceForward {
 }
 var err error
 suite.ctx, suite.cancel = context.WithCancel(context.Background())
- suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 3)
+ suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 3)
 re.NoError(err)
 err = suite.cluster.RunInitialServers()
@@ -512,7 +512,7 @@ func (suite *CommonTestSuite) SetupSuite() {
 var err error
 re := suite.Require()
 suite.ctx, suite.cancel = context.WithCancel(context.Background())
- suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, 1)
+ suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1)
 re.NoError(err)
 err = suite.cluster.RunInitialServers()
@@ -576,7 +576,7 @@ func (suite *CommonTestSuite) TestBootstrapDefaultKeyspaceGroup() {
 }
 check()
- s, err := suite.cluster.JoinPDServer(suite.ctx)
+ s, err := suite.cluster.JoinWithKeyspaceGroup(suite.ctx)
 re.NoError(err)
 re.NoError(s.Run())
@@ -598,7 +598,7 @@ func TestTSOServiceSwitch(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
- tc, err := tests.NewTestPDServiceCluster(ctx, 1,
+ tc, err := tests.NewTestClusterWithKeyspaceGroup(ctx, 1,
 func(conf *config.Config, _ string) {
 conf.Microservice.EnableTSODynamicSwitching = true
 },
diff --git a/tests/integrations/tso/client_test.go b/tests/integrations/tso/client_test.go
index 87ddd9e226b..4822d6439ca 100644
--- a/tests/integrations/tso/client_test.go
+++ b/tests/integrations/tso/client_test.go
@@ -98,7 +98,7 @@ func (suite *tsoClientTestSuite) SetupSuite() {
 if suite.legacy {
 suite.cluster, err = tests.NewTestCluster(suite.ctx, serverCount)
 } else {
- suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, serverCount, func(conf *config.Config, _ string) {
+ suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, serverCount, func(conf *config.Config, _ string) {
 conf.Microservice.EnableTSODynamicSwitching = false
 })
 }
@@ -510,9 +510,9 @@ func TestMixedTSODeployment(t *testing.T) {
 re.NotNil(leaderServer)
 backendEndpoints := leaderServer.GetAddr()
- apiSvr, err := cluster.JoinPDServer(ctx)
+ pdSvr, err := cluster.Join(ctx)
 re.NoError(err)
- err = apiSvr.Run()
+ err = pdSvr.Run()
 re.NoError(err)
 s, cleanup := tests.StartSingleTSOTestServer(ctx, re, backendEndpoints, tempurl.Alloc())
@@ -537,20 +537,20 @@ func TestMixedTSODeployment(t *testing.T) {
 wg.Wait()
 }
-// TestUpgradingAPIandTSOClusters tests the scenario that after we restart the API cluster
+// TestUpgradingPDAndTSOClusters tests the scenario that after we restart the PD cluster
 // then restart the TSO cluster, the TSO service can still serve TSO requests normally.
-func TestUpgradingAPIandTSOClusters(t *testing.T) {
+func TestUpgradingPDAndTSOClusters(t *testing.T) {
 re := require.New(t)
 ctx, cancel := context.WithCancel(context.Background())
- // Create an API cluster which has 3 servers
- apiCluster, err := tests.NewTestPDServiceCluster(ctx, 3)
+ // Create a PD cluster which has 3 servers
+ pdCluster, err := tests.NewTestClusterWithKeyspaceGroup(ctx, 3)
 re.NoError(err)
- err = apiCluster.RunInitialServers()
+ err = pdCluster.RunInitialServers()
 re.NoError(err)
- leaderName := apiCluster.WaitLeader()
+ leaderName := pdCluster.WaitLeader()
 re.NotEmpty(leaderName)
- pdLeader := apiCluster.GetServer(leaderName)
+ pdLeader := pdCluster.GetServer(leaderName)
 backendEndpoints := pdLeader.GetAddr()
 // Create a pd client in PD mode to let the API leader to forward requests to the TSO cluster.
@@ -569,7 +569,7 @@ func TestUpgradingAPIandTSOClusters(t *testing.T) {
 mcs.WaitForTSOServiceAvailable(ctx, re, pdClient)
 // Restart the API cluster
- apiCluster, err = tests.RestartTestAPICluster(ctx, apiCluster)
+ pdCluster, err = tests.RestartTestPDCluster(ctx, pdCluster)
 re.NoError(err)
 // The TSO service should be eventually healthy
 mcs.WaitForTSOServiceAvailable(ctx, re, pdClient)
@@ -581,7 +581,7 @@ func TestUpgradingAPIandTSOClusters(t *testing.T) {
 mcs.WaitForTSOServiceAvailable(ctx, re, pdClient)
 tsoCluster.Destroy()
- apiCluster.Destroy()
+ pdCluster.Destroy()
 cancel()
 re.NoError(failpoint.Disable("github.com/tikv/pd/client/servicediscovery/usePDServiceMode"))
 }
diff --git a/tests/integrations/tso/consistency_test.go b/tests/integrations/tso/consistency_test.go
index b29ae696f26..b4c3ffef790 100644
--- a/tests/integrations/tso/consistency_test.go
+++ b/tests/integrations/tso/consistency_test.go
@@ -76,7 +76,7 @@ func (suite *tsoConsistencyTestSuite) SetupSuite() {
 if suite.legacy {
 suite.cluster, err = tests.NewTestCluster(suite.ctx, serverCount)
 } else {
- suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, serverCount)
+ suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, serverCount)
 }
 re.NoError(err)
 err = suite.cluster.RunInitialServers()
diff --git a/tests/integrations/tso/server_test.go b/tests/integrations/tso/server_test.go
index 1428dbcd1a6..3860f054c31 100644
--- a/tests/integrations/tso/server_test.go
+++ b/tests/integrations/tso/server_test.go
@@ -74,7 +74,7 @@ func (suite *tsoServerTestSuite) SetupSuite() {
 if suite.legacy {
 suite.cluster, err = tests.NewTestCluster(suite.ctx, serverCount)
 } else {
- suite.cluster, err = tests.NewTestPDServiceCluster(suite.ctx, serverCount)
+ suite.cluster, err = tests.NewTestClusterWithKeyspaceGroup(suite.ctx, serverCount)
 }
 re.NoError(err)
 err = suite.cluster.RunInitialServers()
diff --git a/tests/server/api/checker_test.go b/tests/server/api/checker_test.go
index 14077ec30ae..5444fbf730b 100644
--- a/tests/server/api/checker_test.go
+++ b/tests/server/api/checker_test.go
@@ -45,7 +45,7 @@ func (suite *checkerTestSuite) TearDownSuite() {
 }
 func (suite *checkerTestSuite) TestAPI() {
- suite.env.RunTestBasedOnMode(suite.checkAPI)
+ suite.env.RunTest(suite.checkAPI)
 }
 func (suite *checkerTestSuite) checkAPI(cluster *tests.TestCluster) {
diff --git a/tests/server/api/operator_test.go b/tests/server/api/operator_test.go
index 54abdf2d236..9e87215b490 100644
--- a/tests/server/api/operator_test.go
+++ b/tests/server/api/operator_test.go
@@ -57,7 +57,7 @@ func (suite *operatorTestSuite) TearDownSuite() {
 }
 func (suite *operatorTestSuite) TestAddRemovePeer() {
- suite.env.RunTestBasedOnMode(suite.checkAddRemovePeer)
+ suite.env.RunTest(suite.checkAddRemovePeer)
 }
 func (suite *operatorTestSuite) checkAddRemovePeer(cluster *tests.TestCluster) {
@@ -167,7 +167,7 @@ func (suite *operatorTestSuite) checkAddRemovePeer(cluster *tests.TestCluster) {
 }
 func (suite *operatorTestSuite) TestMergeRegionOperator() {
- suite.env.RunTestBasedOnMode(suite.checkMergeRegionOperator)
+ suite.env.RunTest(suite.checkMergeRegionOperator)
 }
 func (suite *operatorTestSuite) checkMergeRegionOperator(cluster *tests.TestCluster) {
@@ -227,7 +227,7 @@ func (suite *operatorTestSuite) TestTransferRegionWithPlacementRule() {
 func(conf *config.Config, _ string) {
 conf.Replication.MaxReplicas = 3
 })
- env.RunTestBasedOnMode(suite.checkTransferRegionWithPlacementRule)
+ env.RunTest(suite.checkTransferRegionWithPlacementRule)
 env.Cleanup()
 }
@@ -507,7 +507,7 @@ func (suite *operatorTestSuite) TestGetOperatorsAsObject() {
 func(conf *config.Config, _ string) {
 conf.Replication.MaxReplicas = 1
 })
- env.RunTestBasedOnMode(suite.checkGetOperatorsAsObject)
+ env.RunTest(suite.checkGetOperatorsAsObject)
 env.Cleanup()
 }
@@ -604,7 +604,7 @@ func (suite *operatorTestSuite) checkGetOperatorsAsObject(cluster *tests.TestClu
 }
 func (suite *operatorTestSuite) TestRemoveOperators() {
- suite.env.RunTestBasedOnMode(suite.checkRemoveOperators)
+ suite.env.RunTest(suite.checkRemoveOperators)
 }
 func (suite *operatorTestSuite) checkRemoveOperators(cluster *tests.TestCluster) {
diff --git a/tests/server/api/region_test.go b/tests/server/api/region_test.go
index b98b4419a80..ccd70aaae87 100644
--- a/tests/server/api/region_test.go
+++ b/tests/server/api/region_test.go
@@ -93,13 +93,13 @@ func (suite *regionTestSuite) TearDownTest() {
 return true
 })
 }
- suite.env.RunTestBasedOnMode(cleanFunc)
+ suite.env.RunTest(cleanFunc)
 }
 func (suite *regionTestSuite) TestSplitRegions() {
 // use a new environment to avoid affecting other tests
 env := tests.NewSchedulingTestEnvironment(suite.T())
- env.RunTestBasedOnMode(suite.checkSplitRegions)
+ env.RunTest(suite.checkSplitRegions)
 env.Cleanup()
 }
@@ -142,7 +142,7 @@ func (suite *regionTestSuite) checkSplitRegions(cluster *tests.TestCluster) {
 func (suite *regionTestSuite) TestAccelerateRegionsScheduleInRange() {
 re := suite.Require()
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/checker/skipCheckSuspectRanges", "return(true)"))
- suite.env.RunTestBasedOnMode(suite.checkAccelerateRegionsScheduleInRange)
+ suite.env.RunTest(suite.checkAccelerateRegionsScheduleInRange)
 re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/schedule/checker/skipCheckSuspectRanges"))
 }
@@ -180,7 +180,7 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRange(cluster *tes
 func (suite *regionTestSuite) TestAccelerateRegionsScheduleInRanges() {
 re := suite.Require()
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/checker/skipCheckSuspectRanges", "return(true)"))
- suite.env.RunTestBasedOnMode(suite.checkAccelerateRegionsScheduleInRanges)
+ suite.env.RunTest(suite.checkAccelerateRegionsScheduleInRanges)
 re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/schedule/checker/skipCheckSuspectRanges"))
 }
@@ -219,7 +219,7 @@ func (suite *regionTestSuite) checkAccelerateRegionsScheduleInRanges(cluster *te
 func (suite *regionTestSuite) TestScatterRegions() {
 // use a new environment to avoid affecting other tests
 env := tests.NewSchedulingTestEnvironment(suite.T())
- env.RunTestBasedOnMode(suite.checkScatterRegions)
+ env.RunTest(suite.checkScatterRegions)
 env.Cleanup()
 }
@@ -266,7 +266,7 @@ func (suite *regionTestSuite) checkScatterRegions(cluster *tests.TestCluster) {
 }
 func (suite *regionTestSuite) TestCheckRegionsReplicated() {
- suite.env.RunTestBasedOnMode(suite.checkRegionsReplicated)
+ suite.env.RunTest(suite.checkRegionsReplicated)
 }
 func (suite *regionTestSuite) checkRegionsReplicated(cluster *tests.TestCluster) {
diff --git a/tests/server/api/rule_test.go b/tests/server/api/rule_test.go
index 3982e61b6ea..58832c26b42 100644
--- a/tests/server/api/rule_test.go
+++ b/tests/server/api/rule_test.go
@@ -76,11 +76,11 @@ func (suite *ruleTestSuite) TearDownTest() {
 err = tu.CheckPostJSON(tests.TestDialClient, urlPrefix+"/pd/api/v1/config/placement-rule", data, tu.StatusOK(re))
 re.NoError(err)
 }
- suite.env.RunTestBasedOnMode(cleanFunc)
+ suite.env.RunTest(cleanFunc)
 }
 func (suite *ruleTestSuite) TestSet() {
- suite.env.RunTestBasedOnMode(suite.checkSet)
+ suite.env.RunTest(suite.checkSet)
 }
 func (suite *ruleTestSuite) checkSet(cluster *tests.TestCluster) {
@@ -196,7 +196,7 @@ func (suite *ruleTestSuite) checkSet(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestGet() {
- suite.env.RunTestBasedOnMode(suite.checkGet)
+ suite.env.RunTest(suite.checkGet)
 }
 func (suite *ruleTestSuite) checkGet(cluster *tests.TestCluster) {
@@ -247,7 +247,7 @@ func (suite *ruleTestSuite) checkGet(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestGetAll() {
- suite.env.RunTestBasedOnMode(suite.checkGetAll)
+ suite.env.RunTest(suite.checkGetAll)
 }
 func (suite *ruleTestSuite) checkGetAll(cluster *tests.TestCluster) {
@@ -269,7 +269,7 @@ func (suite *ruleTestSuite) checkGetAll(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestSetAll() {
- suite.env.RunTestBasedOnMode(suite.checkSetAll)
+ suite.env.RunTest(suite.checkSetAll)
 }
 func (suite *ruleTestSuite) checkSetAll(cluster *tests.TestCluster) {
@@ -385,7 +385,7 @@ func (suite *ruleTestSuite) checkSetAll(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestGetAllByGroup() {
- suite.env.RunTestBasedOnMode(suite.checkGetAllByGroup)
+ suite.env.RunTest(suite.checkGetAllByGroup)
 }
 func (suite *ruleTestSuite) checkGetAllByGroup(cluster *tests.TestCluster) {
@@ -442,7 +442,7 @@ func (suite *ruleTestSuite) checkGetAllByGroup(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestGetAllByRegion() {
- suite.env.RunTestBasedOnMode(suite.checkGetAllByRegion)
+ suite.env.RunTest(suite.checkGetAllByRegion)
 }
 func (suite *ruleTestSuite) checkGetAllByRegion(cluster *tests.TestCluster) {
@@ -507,7 +507,7 @@ func (suite *ruleTestSuite) checkGetAllByRegion(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestGetAllByKey() {
- suite.env.RunTestBasedOnMode(suite.checkGetAllByKey)
+ suite.env.RunTest(suite.checkGetAllByKey)
 }
 func (suite *ruleTestSuite) checkGetAllByKey(cluster *tests.TestCluster) {
@@ -566,7 +566,7 @@ func (suite *ruleTestSuite) checkGetAllByKey(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestDelete() {
- suite.env.RunTestBasedOnMode(suite.checkDelete)
+ suite.env.RunTest(suite.checkDelete)
 }
 func (suite *ruleTestSuite) checkDelete(cluster *tests.TestCluster) {
@@ -632,7 +632,7 @@ func (suite *ruleTestSuite) checkDelete(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestBatch() {
- suite.env.RunTestBasedOnMode(suite.checkBatch)
+ suite.env.RunTest(suite.checkBatch)
 }
 func (suite *ruleTestSuite) checkBatch(cluster *tests.TestCluster) {
@@ -761,7 +761,7 @@ func (suite *ruleTestSuite) checkBatch(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestBundle() {
- suite.env.RunTestBasedOnMode(suite.checkBundle)
+ suite.env.RunTest(suite.checkBundle)
 }
 func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) {
@@ -871,7 +871,7 @@ func (suite *ruleTestSuite) checkBundle(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestBundleBadRequest() {
- suite.env.RunTestBasedOnMode(suite.checkBundleBadRequest)
+ suite.env.RunTest(suite.checkBundleBadRequest)
 }
 func (suite *ruleTestSuite) checkBundleBadRequest(cluster *tests.TestCluster) {
@@ -902,7 +902,7 @@ func (suite *ruleTestSuite) checkBundleBadRequest(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestLeaderAndVoter() {
- suite.env.RunTestBasedOnMode(suite.checkLeaderAndVoter)
+ suite.env.RunTest(suite.checkLeaderAndVoter)
 }
 func (suite *ruleTestSuite) checkLeaderAndVoter(cluster *tests.TestCluster) {
@@ -993,7 +993,7 @@ func (suite *ruleTestSuite) checkLeaderAndVoter(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestDeleteAndUpdate() {
- suite.env.RunTestBasedOnMode(suite.checkDeleteAndUpdate)
+ suite.env.RunTest(suite.checkDeleteAndUpdate)
 }
 func (suite *ruleTestSuite) checkDeleteAndUpdate(cluster *tests.TestCluster) {
@@ -1074,7 +1074,7 @@ func (suite *ruleTestSuite) checkDeleteAndUpdate(cluster *tests.TestCluster) {
 }
 func (suite *ruleTestSuite) TestConcurrency() {
- suite.env.RunTestBasedOnMode(suite.checkConcurrency)
+ suite.env.RunTest(suite.checkConcurrency)
 }
 func (suite *ruleTestSuite) checkConcurrency(cluster *tests.TestCluster) {
@@ -1163,7 +1163,7 @@ func (suite *ruleTestSuite) checkConcurrencyWith(cluster *tests.TestCluster,
 }
 func (suite *ruleTestSuite) TestLargeRules() {
- suite.env.RunTestBasedOnMode(suite.checkLargeRules)
+ suite.env.RunTest(suite.checkLargeRules)
 }
 func (suite *ruleTestSuite) checkLargeRules(cluster *tests.TestCluster) {
@@ -1292,7 +1292,7 @@ func (suite *regionRuleTestSuite) TearDownSuite() {
 }
 func (suite *regionRuleTestSuite) TestRegionPlacementRule() {
- suite.env.RunTestBasedOnMode(suite.checkRegionPlacementRule)
+ suite.env.RunTest(suite.checkRegionPlacementRule)
 }
 func (suite *regionRuleTestSuite) checkRegionPlacementRule(cluster *tests.TestCluster) {
diff --git a/tests/server/api/scheduler_test.go b/tests/server/api/scheduler_test.go
index d1d5b06ceb4..b01e569e2f0 100644
--- a/tests/server/api/scheduler_test.go
+++ b/tests/server/api/scheduler_test.go
@@ -43,19 +43,19 @@ const apiPrefix = "/pd"
 type scheduleTestSuite struct {
 suite.Suite
- env *tests.SchedulingTestEnvironment
- runMode tests.SchedulerMode
+ te *tests.SchedulingTestEnvironment
+ env tests.Env
 }
-func TestPDSchedulingTestSuite(t *testing.T) {
+func TestNonMicroserviceSchedulingTestSuite(t *testing.T) {
 suite.Run(t, &scheduleTestSuite{
- runMode: tests.PDMode,
+ env: tests.NonMicroserviceEnv,
 })
 }
-func TestAPISchedulingTestSuite(t *testing.T) {
+func TestMicroserviceSchedulingTestSuite(t *testing.T) {
 suite.Run(t, &scheduleTestSuite{
- runMode: tests.PDServiceMode,
+ env: tests.MicroserviceEnv,
 })
 }
@@ -63,19 +63,19 @@ func (suite *scheduleTestSuite) SetupSuite() {
 re := suite.Require()
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker", `return(true)`))
 re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/skipStoreConfigSync", `return(true)`))
- suite.env = tests.NewSchedulingTestEnvironment(suite.T())
- suite.env.RunMode = suite.runMode
+ suite.te = tests.NewSchedulingTestEnvironment(suite.T())
+ suite.te.Env = suite.env
 }
 func (suite *scheduleTestSuite) TearDownSuite() {
 re := suite.Require()
- suite.env.Cleanup()
+ suite.te.Cleanup()
 re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/skipStoreConfigSync"))
 re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/schedule/changeCoordinatorTicker"))
 }
 func (suite *scheduleTestSuite) TestOriginAPI() {
- suite.env.RunTestBasedOnMode(suite.checkOriginAPI)
+ suite.te.RunTest(suite.checkOriginAPI)
 }
 func (suite *scheduleTestSuite) checkOriginAPI(cluster *tests.TestCluster) {
@@ -157,7 +157,7 @@ func (suite *scheduleTestSuite) checkOriginAPI(cluster *tests.TestCluster) {
 }
 func (suite *scheduleTestSuite) TestAPI() {
- suite.env.RunTestBasedOnMode(suite.checkAPI)
+ suite.te.RunTest(suite.checkAPI)
 }
 func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) {
@@ -654,7 +654,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) {
 }
 func (suite *scheduleTestSuite) TestDisable() {
- suite.env.RunTestBasedOnMode(suite.checkDisable)
+ suite.te.RunTest(suite.checkDisable)
 }
 func (suite *scheduleTestSuite) checkDisable(cluster *tests.TestCluster) {
@@ -765,7 +765,7 @@ func (suite *scheduleTestSuite) testPauseOrResume(re *require.Assertions, urlPre
 }
 func (suite *scheduleTestSuite) TestEmptySchedulers() {
- suite.env.RunTestBasedOnMode(suite.checkEmptySchedulers)
+ suite.te.RunTest(suite.checkEmptySchedulers)
 }
 func (suite *scheduleTestSuite) checkEmptySchedulers(cluster *tests.TestCluster) {
diff --git a/tests/server/apiv2/handlers/tso_keyspace_group_test.go b/tests/server/apiv2/handlers/tso_keyspace_group_test.go
index 851df9b5fd1..17f326f4a4d 100644
--- a/tests/server/apiv2/handlers/tso_keyspace_group_test.go
+++ b/tests/server/apiv2/handlers/tso_keyspace_group_test.go
@@ -42,7 +42,7 @@ func TestKeyspaceGroupTestSuite(t *testing.T) {
 func (suite *keyspaceGroupTestSuite) SetupTest() {
 re := suite.Require()
 suite.ctx, suite.cancel = context.WithCancel(context.Background())
- cluster, err := tests.NewTestPDServiceCluster(suite.ctx, 1)
+ cluster, err := tests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1)
 suite.cluster = cluster
 re.NoError(err)
 re.NoError(cluster.RunInitialServers())
diff --git a/tests/server/config/config_test.go b/tests/server/config/config_test.go
index bb387b68030..ced0a6b8261 100644
--- a/tests/server/config/config_test.go
+++ b/tests/server/config/config_test.go
@@ -92,7 +92,7 @@ func (suite *configTestSuite) TearDownSuite() {
 }
 func (suite *configTestSuite) TestConfigAll() {
- suite.env.RunTestBasedOnMode(suite.checkConfigAll)
+ suite.env.RunTest(suite.checkConfigAll)
 }
 func (suite *configTestSuite) checkConfigAll(cluster *tests.TestCluster) {
@@ -213,7 +213,7 @@ func (suite *configTestSuite) checkConfigAll(cluster *tests.TestCluster) {
 }
 func (suite *configTestSuite) TestConfigSchedule() {
- suite.env.RunTestBasedOnMode(suite.checkConfigSchedule)
+ suite.env.RunTest(suite.checkConfigSchedule)
 }
 func (suite *configTestSuite) checkConfigSchedule(cluster *tests.TestCluster) {
@@ -239,7 +239,7 @@ func (suite *configTestSuite) checkConfigSchedule(cluster *tests.TestCluster) {
 }
 func (suite *configTestSuite) TestConfigReplication() {
- suite.env.RunTestBasedOnMode(suite.checkConfigReplication)
+ suite.env.RunTest(suite.checkConfigReplication)
 }
 func (suite *configTestSuite) checkConfigReplication(cluster *tests.TestCluster) {
@@ -282,7 +282,7 @@ func (suite *configTestSuite) checkConfigReplication(cluster *tests.TestCluster)
 }
 func (suite *configTestSuite) TestConfigLabelProperty() {
- suite.env.RunTestBasedOnMode(suite.checkConfigLabelProperty)
+ suite.env.RunTest(suite.checkConfigLabelProperty)
 }
 func (suite *configTestSuite) checkConfigLabelProperty(cluster *tests.TestCluster) {
@@ -334,7 +334,7 @@ func (suite *configTestSuite) checkConfigLabelProperty(cluster *tests.TestCluste
 }
 func (suite *configTestSuite) TestConfigDefault() {
- suite.env.RunTestBasedOnMode(suite.checkConfigDefault)
+ suite.env.RunTest(suite.checkConfigDefault)
 }
 func (suite *configTestSuite) checkConfigDefault(cluster *tests.TestCluster) {
@@ -378,7 +378,7 @@ func (suite *configTestSuite) checkConfigDefault(cluster *tests.TestCluster) {
 }
 func (suite *configTestSuite) TestConfigPDServer() {
- suite.env.RunTestBasedOnMode(suite.checkConfigPDServer)
+ suite.env.RunTest(suite.checkConfigPDServer)
 }
 func (suite *configTestSuite) checkConfigPDServer(cluster *tests.TestCluster) {
@@ -508,7 +508,7 @@ func createTTLUrl(url string, ttl int) string {
 }
 func (suite *configTestSuite) TestConfigTTL() {
- suite.env.RunTestBasedOnMode(suite.checkConfigTTL)
+ suite.env.RunTest(suite.checkConfigTTL)
 }
 func (suite *configTestSuite) checkConfigTTL(cluster *tests.TestCluster) {
@@ -571,7 +571,7 @@ func (suite *configTestSuite) checkConfigTTL(cluster *tests.TestCluster) {
 }
 func (suite *configTestSuite) TestTTLConflict() {
- suite.env.RunTestBasedOnMode(suite.checkTTLConflict)
+ suite.env.RunTest(suite.checkTTLConflict)
 }
 func (suite *configTestSuite) checkTTLConflict(cluster *tests.TestCluster) {
diff --git a/tests/testutil.go b/tests/testutil.go
index 4bbfa8155b4..406e09345b9 100644
--- a/tests/testutil.go
+++ b/tests/testutil.go
@@ -271,25 +271,25 @@ func MustReportBuckets(re *require.Assertions, cluster *TestCluster, regionID ui
 return buckets
 }
-// SchedulerMode is used for test purpose.
-type SchedulerMode int
+// Env is used for test purpose.
+type Env int
 const (
- // Both represents both PD mode and API mode.
- Both SchedulerMode = iota
- // PDMode represents PD mode.
- PDMode
- // PDServiceMode represents API mode.
- PDServiceMode
+ // Both represents both scheduler environments.
+ Both Env = iota
+ // NonMicroserviceEnv represents non-microservice env.
+ NonMicroserviceEnv
+ // MicroserviceEnv represents microservice env.
+ MicroserviceEnv
 )
 // SchedulingTestEnvironment is used for test purpose.
 type SchedulingTestEnvironment struct {
 t *testing.T
 opts []ConfigOption
- clusters map[SchedulerMode]*TestCluster
+ clusters map[Env]*TestCluster
 cancels []context.CancelFunc
- RunMode SchedulerMode
+ Env Env
 }
 // NewSchedulingTestEnvironment is to create a new SchedulingTestEnvironment.
@@ -297,38 +297,38 @@ func NewSchedulingTestEnvironment(t *testing.T, opts ...ConfigOption) *Schedulin
 return &SchedulingTestEnvironment{
 t: t,
 opts: opts,
- clusters: make(map[SchedulerMode]*TestCluster),
+ clusters: make(map[Env]*TestCluster),
 cancels: make([]context.CancelFunc, 0),
 }
 }
-// RunTestBasedOnMode runs test based on mode.
-// If mode not set, it will run test in both PD mode and API mode.
-func (s *SchedulingTestEnvironment) RunTestBasedOnMode(test func(*TestCluster)) {
- switch s.RunMode {
- case PDMode:
- s.RunTestInPDMode(test)
- case PDServiceMode:
- s.RunTestInPDServiceMode(test)
+// RunTest is to run test based on the environment.
+// If env not set, it will run test in both non-microservice env and microservice env.
+func (s *SchedulingTestEnvironment) RunTest(test func(*TestCluster)) {
+ switch s.Env {
+ case NonMicroserviceEnv:
+ s.RunTestInNonMicroserviceEnv(test)
+ case MicroserviceEnv:
+ s.RunTestInMicroserviceEnv(test)
 default:
- s.RunTestInPDMode(test)
- s.RunTestInPDServiceMode(test)
+ s.RunTestInNonMicroserviceEnv(test)
+ s.RunTestInMicroserviceEnv(test)
 }
 }
-// RunTestInPDMode is to run test in pd mode.
-func (s *SchedulingTestEnvironment) RunTestInPDMode(test func(*TestCluster)) {
- s.t.Logf("start test %s in pd mode", getTestName())
- if _, ok := s.clusters[PDMode]; !ok {
- s.startCluster(PDMode)
+// RunTestInNonMicroserviceEnv is to run test in non-microservice environment.
+func (s *SchedulingTestEnvironment) RunTestInNonMicroserviceEnv(test func(*TestCluster)) {
+ s.t.Logf("start test %s in non-microservice environment", getTestName())
+ if _, ok := s.clusters[NonMicroserviceEnv]; !ok {
+ s.startCluster(NonMicroserviceEnv)
 }
- test(s.clusters[PDMode])
+ test(s.clusters[NonMicroserviceEnv])
 }
 func getTestName() string {
 pc, _, _, _ := runtime.Caller(2)
 caller := runtime.FuncForPC(pc)
- if caller == nil || strings.Contains(caller.Name(), "RunTestBasedOnMode") {
+ if caller == nil || strings.Contains(caller.Name(), "RunTest") {
 pc, _, _, _ = runtime.Caller(3)
 caller = runtime.FuncForPC(pc)
 }
@@ -339,8 +339,8 @@ func getTestName() string {
 return ""
 }
-// RunTestInPDServiceMode is to run test in pd service mode.
-func (s *SchedulingTestEnvironment) RunTestInPDServiceMode(test func(*TestCluster)) {
+// RunTestInMicroserviceEnv is to run test in microservice environment.
+func (s *SchedulingTestEnvironment) RunTestInMicroserviceEnv(test func(*TestCluster)) {
 re := require.New(s.t)
 re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs", `return(true)`))
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember", `return(true)`))
@@ -348,11 +348,11 @@ func (s *SchedulingTestEnvironment) RunTestInPDServiceMode(test func(*TestCluste
 re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/mcs/scheduling/server/fastUpdateMember"))
 re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/highFrequencyClusterJobs"))
 }()
- s.t.Logf("start test %s in pd service mode", getTestName())
- if _, ok := s.clusters[PDServiceMode]; !ok {
- s.startCluster(PDServiceMode)
+ s.t.Logf("start test %s in microservice environment", getTestName())
+ if _, ok := s.clusters[MicroserviceEnv]; !ok {
+ s.startCluster(MicroserviceEnv)
 }
- test(s.clusters[PDServiceMode])
+ test(s.clusters[MicroserviceEnv])
 }
 // Cleanup is to cleanup the environment.
@@ -365,12 +365,12 @@ func (s *SchedulingTestEnvironment) Cleanup() {
 }
 }
-func (s *SchedulingTestEnvironment) startCluster(m SchedulerMode) {
+func (s *SchedulingTestEnvironment) startCluster(m Env) {
 re := require.New(s.t)
 ctx, cancel := context.WithCancel(context.Background())
 s.cancels = append(s.cancels, cancel)
 switch m {
- case PDMode:
+ case NonMicroserviceEnv:
 cluster, err := NewTestCluster(ctx, 1, s.opts...)
 re.NoError(err)
 err = cluster.RunInitialServers()
@@ -378,9 +378,9 @@ func (s *SchedulingTestEnvironment) startCluster(m SchedulerMode) {
 re.NotEmpty(cluster.WaitLeader())
 leaderServer := cluster.GetServer(cluster.GetLeader())
 re.NoError(leaderServer.BootstrapCluster())
- s.clusters[PDMode] = cluster
- case PDServiceMode:
- cluster, err := NewTestPDServiceCluster(ctx, 1, s.opts...)
+ s.clusters[NonMicroserviceEnv] = cluster
+ case MicroserviceEnv:
+ cluster, err := NewTestClusterWithKeyspaceGroup(ctx, 1, s.opts...)
 re.NoError(err)
 err = cluster.RunInitialServers()
 re.NoError(err)
@@ -398,7 +398,7 @@ func (s *SchedulingTestEnvironment) startCluster(m SchedulerMode) {
 testutil.Eventually(re, func() bool {
 return cluster.GetLeaderServer().GetServer().IsServiceIndependent(constant.SchedulingServiceName)
 })
- s.clusters[PDServiceMode] = cluster
+ s.clusters[MicroserviceEnv] = cluster
 }
 }
diff --git a/tools/pd-ctl/pdctl/command/config_command.go b/tools/pd-ctl/pdctl/command/config_command.go
index c49b0e3b9e5..64da096a2f0 100644
--- a/tools/pd-ctl/pdctl/command/config_command.go
+++ b/tools/pd-ctl/pdctl/command/config_command.go
@@ -49,8 +49,8 @@ const (
 ruleBundlePrefix = "pd/api/v1/config/placement-rule"
 pdServerPrefix = "pd/api/v1/config/pd-server"
 serviceMiddlewareConfigPrefix = "pd/api/v1/service-middleware/config"
- // flagFromPDService has no influence for pd mode, but it is useful for us to debug in pd service mode.
- flagFromPDService = "from_pd_service"
+ // flagFromPD has no influence in non-microservice mode, but it is useful for us to debug in microservice mode.
+ flagFromPD = "from_pd"
 )
 // NewConfigCommand return a config subcommand of rootCmd
@@ -81,7 +81,7 @@ func NewShowConfigCommand() *cobra.Command {
 sc.AddCommand(newShowReplicationModeCommand())
 sc.AddCommand(NewShowServerConfigCommand())
 sc.AddCommand(NewShowServiceMiddlewareConfigCommand())
- sc.Flags().Bool(flagFromPDService, false, "read data from PD service rather than microservice")
+ sc.Flags().Bool(flagFromPD, false, "read data from PD rather than microservice")
 return sc
 }
@@ -92,7 +92,7 @@ func NewShowAllConfigCommand() *cobra.Command {
 Short: "show all config of PD",
 Run: showAllConfigCommandFunc,
 }
- sc.Flags().Bool(flagFromPDService, false, "read data from PD service rather than microservice")
+ sc.Flags().Bool(flagFromPD, false, "read data from PD rather than microservice")
 return sc
 }
@@ -103,7 +103,7 @@ func NewShowScheduleConfigCommand() *cobra.Command {
 Short: "show schedule config of PD",
 Run: showScheduleConfigCommandFunc,
 }
- sc.Flags().Bool(flagFromPDService, false, "read data from PD service rather than microservice")
+ sc.Flags().Bool(flagFromPD, false, "read data from PD rather than microservice")
 return sc
 }
@@ -114,7 +114,7 @@ func NewShowReplicationConfigCommand() *cobra.Command {
 Short: "show replication config of PD",
 Run: showReplicationConfigCommandFunc,
 }
- sc.Flags().Bool(flagFromPDService, false, "read data from PD service rather than microservice")
+ sc.Flags().Bool(flagFromPD, false, "read data from PD rather than microservice")
 return sc
 }
@@ -528,7 +528,7 @@ func NewPlacementRulesCommand() *cobra.Command {
 show.Flags().String("id", "", "rule id")
 show.Flags().String("region", "", "region id")
 show.Flags().Bool("detail", false, "detailed match info for region")
- show.Flags().Bool(flagFromPDService, false, "read data from PD service rather than microservice")
+ show.Flags().Bool(flagFromPD, false, "read data from PD rather than microservice")
 load := &cobra.Command{
 Use: "load",
 Short: "load placement rules to a file",
@@ -538,7 +538,7 @@ func NewPlacementRulesCommand() *cobra.Command {
 load.Flags().String("id", "", "rule id")
 load.Flags().String("region", "", "region id")
 load.Flags().String("out", "rules.json", "the filename contains rules")
- load.Flags().Bool(flagFromPDService, false, "read data from PD service rather than microservice")
+ load.Flags().Bool(flagFromPD, false, "read data from PD rather than microservice")
 save := &cobra.Command{
 Use: "save",
 Short: "save rules from file",
@@ -554,7 +554,7 @@ func NewPlacementRulesCommand() *cobra.Command {
 Short: "show rule group configuration(s)",
 Run: showRuleGroupFunc,
 }
- ruleGroupShow.Flags().Bool(flagFromPDService, false, "read data from PD service rather than microservice")
+ ruleGroupShow.Flags().Bool(flagFromPD, false, "read data from PD rather than microservice")
 ruleGroupSet := &cobra.Command{
 Use: "set ",
 Short: "update rule group configuration",
@@ -577,7 +577,7 @@ func NewPlacementRulesCommand() *cobra.Command {
 Run: getRuleBundle,
 }
 ruleBundleGet.Flags().String("out", "", "the output file")
- ruleBundleGet.Flags().Bool(flagFromPDService, false, "read data from PD service rather than microservice")
+ ruleBundleGet.Flags().Bool(flagFromPD, false, "read data from PD rather than microservice")
 ruleBundleSet := &cobra.Command{
 Use: "set",
 Short: "set rule group config and its rules from file",
@@ -596,7 +596,7 @@ func NewPlacementRulesCommand() *cobra.Command {
 Run: loadRuleBundle,
 }
 ruleBundleLoad.Flags().String("out", "rules.json", "the output file")
- ruleBundleLoad.Flags().Bool(flagFromPDService, false, "read data from PD service rather than microservice")
+ ruleBundleLoad.Flags().Bool(flagFromPD, false, "read data from PD rather than microservice")
 ruleBundleSave := &cobra.Command{
 Use: "save",
 Short: "save all group configs and rules from file",
@@ -895,7 +895,7 @@ func saveRuleBundle(cmd *cobra.Command, _ []string) {
 func buildHeader(cmd *cobra.Command) http.Header {
 header := http.Header{}
- forbiddenRedirectToMicroservice, err := cmd.Flags().GetBool(flagFromPDService)
+ forbiddenRedirectToMicroservice, err := cmd.Flags().GetBool(flagFromPD)
 if err == nil && forbiddenRedirectToMicroservice {
 header.Add(apiutil.XForbiddenForwardToMicroserviceHeader, "true")
 }
diff --git a/tools/pd-ctl/tests/config/config_test.go b/tools/pd-ctl/tests/config/config_test.go
index 3b622beecbf..7bdce225cbf 100644
--- a/tools/pd-ctl/tests/config/config_test.go
+++ b/tools/pd-ctl/tests/config/config_test.go
@@ -99,14 +99,14 @@ func (suite *configTestSuite) TearDownTest() {
 err = testutil.CheckPostJSON(testDialClient, urlPrefix+"/pd/api/v1/config/placement-rule", data, testutil.StatusOK(re))
 re.NoError(err)
 }
- suite.env.RunTestBasedOnMode(cleanFunc)
+ suite.env.RunTest(cleanFunc)
 suite.env.Cleanup()
 }
 func (suite *configTestSuite) TestConfig() {
 re := suite.Require()
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/dashboard/adapter/skipDashboardLoop", `return(true)`))
- suite.env.RunTestBasedOnMode(suite.checkConfig)
+ suite.env.RunTest(suite.checkConfig)
 re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/dashboard/adapter/skipDashboardLoop"))
 }
@@ -369,7 +369,7 @@ func (suite *configTestSuite) checkConfig(cluster *pdTests.TestCluster) {
 func (suite *configTestSuite) TestConfigForwardControl() {
 re := suite.Require()
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/dashboard/adapter/skipDashboardLoop", `return(true)`))
- suite.env.RunTestBasedOnMode(suite.checkConfigForwardControl)
+ suite.env.RunTest(suite.checkConfigForwardControl)
 re.NoError(failpoint.Disable("github.com/tikv/pd/pkg/dashboard/adapter/skipDashboardLoop"))
 }
@@ -449,7 +449,7 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu
 args := []string{"-u", pdAddr, "config", "show"}
 args = append(args, options...)
 if isFromPDService {
- args = append(args, "--from_pd_service")
+ args = append(args, "--from_pd")
 }
 output, err := tests.ExecuteCommand(cmd, args...)
 re.NoError(err)
@@ -478,7 +478,7 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu
 args := []string{"-u", pdAddr, "config", "placement-rules"}
 args = append(args, options...)
 if isFromPDService {
- args = append(args, "--from_pd_service")
+ args = append(args, "--from_pd")
 }
 output, err := tests.ExecuteCommand(cmd, args...)
 re.NoError(err)
@@ -522,13 +522,13 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu
 re.Equal(uint64(233), sche.GetPersistConfig().GetLeaderScheduleLimit())
 re.Equal(7, sche.GetPersistConfig().GetMaxReplicas())
 }
- // show config from PD service rather than scheduling server
+ // show config from PD rather than scheduling server
 testConfig()
- // show all config from PD service rather than scheduling server
+ // show all config from PD rather than scheduling server
 testConfig("all")
- // show replication config from PD service rather than scheduling server
+ // show replication config from PD rather than scheduling server
 testConfig("replication")
- // show schedule config from PD service rather than scheduling server
+ // show schedule config from PD rather than scheduling server
 testConfig("schedule")
 // Test Rule
@@ -572,7 +572,7 @@ func (suite *configTestSuite) checkConfigForwardControl(cluster *pdTests.TestClu
 }
 func (suite *configTestSuite) TestPlacementRules() {
- suite.env.RunTestBasedOnMode(suite.checkPlacementRules)
+ suite.env.RunTest(suite.checkPlacementRules)
 }
 func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster) {
@@ -638,7 +638,7 @@ func (suite *configTestSuite) checkPlacementRules(cluster *pdTests.TestCluster)
 }
 func (suite *configTestSuite) TestPlacementRuleGroups() {
- suite.env.RunTestBasedOnMode(suite.checkPlacementRuleGroups)
+ suite.env.RunTest(suite.checkPlacementRuleGroups)
 }
 func (suite *configTestSuite) checkPlacementRuleGroups(cluster *pdTests.TestCluster) {
@@ -715,7 +715,7 @@ func (suite *configTestSuite) checkPlacementRuleGroups(cluster *pdTests.TestClus
 }
 func (suite *configTestSuite) TestPlacementRuleBundle() {
- suite.env.RunTestBasedOnMode(suite.checkPlacementRuleBundle)
+ suite.env.RunTest(suite.checkPlacementRuleBundle)
 }
 func (suite *configTestSuite) checkPlacementRuleBundle(cluster *pdTests.TestCluster) {
@@ -1052,7 +1052,7 @@ func TestServiceMiddlewareConfig(t *testing.T) {
 }
 func (suite *configTestSuite) TestUpdateDefaultReplicaConfig() {
- suite.env.RunTestBasedOnMode(suite.checkUpdateDefaultReplicaConfig)
+ suite.env.RunTest(suite.checkUpdateDefaultReplicaConfig)
 }
 func (suite *configTestSuite) checkUpdateDefaultReplicaConfig(cluster *pdTests.TestCluster) {
@@ -1201,7 +1201,7 @@ func (suite *configTestSuite) checkUpdateDefaultReplicaConfig(cluster *pdTests.T
 }
 func (suite *configTestSuite) TestPDServerConfig() {
- suite.env.RunTestBasedOnMode(suite.checkPDServerConfig)
+ suite.env.RunTest(suite.checkPDServerConfig)
 }
 func (suite *configTestSuite) checkPDServerConfig(cluster *pdTests.TestCluster) {
@@ -1234,7 +1234,7 @@ func (suite *configTestSuite) checkPDServerConfig(cluster *pdTests.TestCluster)
 }
 func (suite *configTestSuite) TestMicroserviceConfig() {
- suite.env.RunTestBasedOnMode(suite.checkMicroserviceConfig)
+ suite.env.RunTest(suite.checkMicroserviceConfig)
 }
 func (suite *configTestSuite) checkMicroserviceConfig(cluster *pdTests.TestClust) {
@@ -1264,7 +1264,7 @@ func (suite *configTestSuite) TestRegionRules() {
- suite.env.RunTestBasedOnMode(suite.checkRegionRules)
+ suite.env.RunTest(suite.checkRegionRules)
 }
 func (suite *configTestSuite) checkRegionRules(cluster *pdTests.TestCluster) {
diff --git a/tools/pd-ctl/tests/hot/hot_test.go b/tools/pd-ctl/tests/hot/hot_test.go
index d01f861d861..9254c31dfba 100644
--- a/tools/pd-ctl/tests/hot/hot_test.go
+++ b/tools/pd-ctl/tests/hot/hot_test.go
@@ -73,11 +73,11 @@ func (suite *hotTestSuite) TearDownTest() {
 }
 hotStat.HotCache.CleanCache()
 }
- suite.env.RunTestBasedOnMode(cleanFunc)
+ suite.env.RunTest(cleanFunc)
 }
 func (suite *hotTestSuite) TestHot() {
- suite.env.RunTestBasedOnMode(suite.checkHot)
+ suite.env.RunTest(suite.checkHot)
 }
 func (suite *hotTestSuite) checkHot(cluster *pdTests.TestCluster) {
@@ -247,7 +247,7 @@ func (suite *hotTestSuite) checkHot(cluster *pdTests.TestCluster) {
 }
 func (suite *hotTestSuite) TestHotWithStoreID() {
- suite.env.RunTestBasedOnMode(suite.checkHotWithStoreID)
+ suite.env.RunTest(suite.checkHotWithStoreID)
 }
 func (suite *hotTestSuite) checkHotWithStoreID(cluster *pdTests.TestCluster) {
@@ -314,7 +314,7 @@ func (suite *hotTestSuite) checkHotWithStoreID(cluster *pdTests.TestCluster) {
 }
 func (suite *hotTestSuite) TestHotWithoutHotPeer() {
- suite.env.RunTestBasedOnMode(suite.checkHotWithoutHotPeer)
+ suite.env.RunTest(suite.checkHotWithoutHotPeer)
 }
 func (suite *hotTestSuite) checkHotWithoutHotPeer(cluster *pdTests.TestCluster) {
diff --git a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go
index fff95856931..88ee782ab4b 100644
--- a/tools/pd-ctl/tests/keyspace/keyspace_group_test.go
+++ b/tools/pd-ctl/tests/keyspace/keyspace_group_test.go
@@ -41,7 +41,7 @@ func TestKeyspaceGroup(t *testing.T) {
 re := require.New(t)
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
- tc, err := pdTests.NewTestPDServiceCluster(ctx, 1)
+ tc, err := pdTests.NewTestClusterWithKeyspaceGroup(ctx, 1)
 re.NoError(err)
 defer tc.Destroy()
 err = tc.RunInitialServers()
@@ -102,7 +102,7 @@ func TestSplitKeyspaceGroup(t *testing.T) {
 for i := range 129 {
 keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i))
 }
- tc, err := pdTests.NewTestPDServiceCluster(ctx, 3, func(conf *config.Config, _ string) {
+ tc, err := pdTests.NewTestClusterWithKeyspaceGroup(ctx, 3, func(conf *config.Config, _ string) {
 conf.Keyspace.PreAlloc = keyspaces
 })
 re.NoError(err)
@@ -157,7 +157,7 @@ func TestExternalAllocNodeWhenStart(t *testing.T) {
 for i := range 10 {
 keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i))
 }
- tc, err := pdTests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) {
+ tc, err := pdTests.NewTestClusterWithKeyspaceGroup(ctx, 1, func(conf *config.Config, _ string) {
 conf.Keyspace.PreAlloc = keyspaces
 })
 re.NoError(err)
@@ -197,7 +197,7 @@ func TestSetNodeAndPriorityKeyspaceGroup(t *testing.T) {
 for i := range 10 {
 keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i))
 }
- tc, err := pdTests.NewTestPDServiceCluster(ctx, 3, func(conf *config.Config, _ string) {
+ tc, err := pdTests.NewTestClusterWithKeyspaceGroup(ctx, 3, func(conf *config.Config, _ string) {
 conf.Keyspace.PreAlloc = keyspaces
 })
 re.NoError(err)
@@ -301,7 +301,7 @@ func TestMergeKeyspaceGroup(t *testing.T) {
 for i := range 129 {
 keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i))
 }
- tc, err := pdTests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) {
+ tc, err := pdTests.NewTestClusterWithKeyspaceGroup(ctx, 1, func(conf *config.Config, _ string) {
 conf.Keyspace.PreAlloc = keyspaces
 })
 re.NoError(err)
@@ -420,7 +420,7 @@ func TestKeyspaceGroupState(t *testing.T) {
 for i := range 10 {
 keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i))
 }
- tc, err := pdTests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) {
+ tc, err := pdTests.NewTestClusterWithKeyspaceGroup(ctx, 1, func(conf *config.Config, _ string) {
 conf.Keyspace.PreAlloc = keyspaces
 })
 re.NoError(err)
@@ -511,7 +511,7 @@ func TestShowKeyspaceGroupPrimary(t *testing.T) {
 for i := range 10 {
 keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i))
 }
- tc, err := pdTests.NewTestPDServiceCluster(ctx, 1, func(conf *config.Config, _ string) {
+ tc, err := pdTests.NewTestClusterWithKeyspaceGroup(ctx, 1, func(conf *config.Config, _ string) {
 conf.Keyspace.PreAlloc = keyspaces
 })
 re.NoError(err)
diff --git a/tools/pd-ctl/tests/keyspace/keyspace_test.go b/tools/pd-ctl/tests/keyspace/keyspace_test.go
index 6a523ced7b8..e6c28de599f 100644
--- a/tools/pd-ctl/tests/keyspace/keyspace_test.go
+++ b/tools/pd-ctl/tests/keyspace/keyspace_test.go
@@ -49,7 +49,7 @@ func TestKeyspace(t *testing.T) {
 for i := 1; i < 10; i++ {
 keyspaces = append(keyspaces, fmt.Sprintf("keyspace_%d", i))
 }
- tc, err := pdTests.NewTestPDServiceCluster(ctx, 3, func(conf *config.Config, _ string) {
+ tc, err := pdTests.NewTestClusterWithKeyspaceGroup(ctx, 3, func(conf *config.Config, _ string) {
 conf.Keyspace.PreAlloc = keyspaces
 })
 re.NoError(err)
@@ -155,7 +155,7 @@ func (suite *keyspaceTestSuite) SetupTest() {
 suite.ctx, suite.cancel = context.WithCancel(context.Background())
 re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayStartServerLoop", `return(true)`))
 re.NoError(failpoint.Enable("github.com/tikv/pd/pkg/keyspace/skipSplitRegion", "return(true)"))
- tc, err := pdTests.NewTestPDServiceCluster(suite.ctx, 1)
+ tc, err := pdTests.NewTestClusterWithKeyspaceGroup(suite.ctx, 1)
 re.NoError(err)
 re.NoError(tc.RunInitialServers())
 tc.WaitLeader()
diff --git a/tools/pd-ctl/tests/operator/operator_test.go b/tools/pd-ctl/tests/operator/operator_test.go
index 9e8c374ea49..39b8b04b758 100644
--- a/tools/pd-ctl/tests/operator/operator_test.go
+++ b/tools/pd-ctl/tests/operator/operator_test.go
@@ -59,7 +59,7 @@ func (suite *operatorTestSuite) TearDownSuite() {
 }
 func (suite *operatorTestSuite) TestOperator() {
- suite.env.RunTestBasedOnMode(suite.checkOperator)
+ suite.env.RunTest(suite.checkOperator)
 }
 func (suite *operatorTestSuite) checkOperator(cluster *pdTests.TestCluster) {
diff --git a/tools/pd-ctl/tests/scheduler/scheduler_test.go b/tools/pd-ctl/tests/scheduler/scheduler_test.go
index f3a81845921..9de62f2b1db 100644
--- a/tools/pd-ctl/tests/scheduler/scheduler_test.go
+++ b/tools/pd-ctl/tests/scheduler/scheduler_test.go
@@ -96,7 +96,7 @@ func (suite *schedulerTestSuite) TearDownTest() {
 }
 }
 }
- suite.env.RunTestBasedOnMode(cleanFunc)
+ suite.env.RunTest(cleanFunc)
 suite.env.Cleanup()
 }
@@ -109,7 +109,7 @@ func (suite *schedulerTestSuite) checkDefaultSchedulers(re *require.Assertions,
 }
 func (suite *schedulerTestSuite) TestScheduler() {
- suite.env.RunTestBasedOnMode(suite.checkScheduler)
+ suite.env.RunTest(suite.checkScheduler)
 }
 func (suite *schedulerTestSuite) checkScheduler(cluster *pdTests.TestCluster) {
@@ -408,7 +408,7 @@ func (suite *schedulerTestSuite) checkScheduler(cluster *pdTests.TestCluster) {
 }
 func (suite *schedulerTestSuite) TestSchedulerConfig() {
- suite.env.RunTestBasedOnMode(suite.checkSchedulerConfig)
+ suite.env.RunTest(suite.checkSchedulerConfig)
 }
 func (suite *schedulerTestSuite) checkSchedulerConfig(cluster *pdTests.TestCluster) {
@@ -569,7 +569,7 @@ func (suite *schedulerTestSuite) checkSchedulerConfig(cluster *pdTests.TestClust
 }
 func (suite *schedulerTestSuite) TestGrantHotRegionScheduler() {
- suite.env.RunTestBasedOnMode(suite.checkGrantHotRegionScheduler)
+ suite.env.RunTest(suite.checkGrantHotRegionScheduler)
 }
 func (suite *schedulerTestSuite) checkGrantHotRegionScheduler(cluster *pdTests.TestCluster) {
@@ -685,7 +685,7 @@ func (suite *schedulerTestSuite) checkGrantHotRegionScheduler(cluster *pdTests.T
 }
 func (suite *schedulerTestSuite) TestHotRegionSchedulerConfig() {
- suite.env.RunTestBasedOnMode(suite.checkHotRegionSchedulerConfig)
+ suite.env.RunTest(suite.checkHotRegionSchedulerConfig)
 }
 func (suite *schedulerTestSuite) checkHotRegionSchedulerConfig(cluster *pdTests.TestCluster) {
@@ -849,7 +849,7 @@ func (suite *schedulerTestSuite) checkHotRegionSchedulerConfig(cluster *pdTests.
 }
 func (suite *schedulerTestSuite) TestSchedulerDiagnostic() {
- suite.env.RunTestBasedOnMode(suite.checkSchedulerDiagnostic)
+ suite.env.RunTest(suite.checkSchedulerDiagnostic)
 }
 func (suite *schedulerTestSuite) checkSchedulerDiagnostic(cluster *pdTests.TestCluster) {
@@ -913,7 +913,7 @@ func (suite *schedulerTestSuite) checkSchedulerDiagnostic(cluster *pdTests.TestC
 }
 func (suite *schedulerTestSuite) TestEvictLeaderScheduler() {
- suite.env.RunTestBasedOnMode(suite.checkEvictLeaderScheduler)
+ suite.env.RunTest(suite.checkEvictLeaderScheduler)
 }
 func (suite *schedulerTestSuite) checkEvictLeaderScheduler(cluster *pdTests.TestCluster) {