From e0f0e97bbbc1f443fef182c1a1c87ec5618ede28 Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Wed, 4 Dec 2024 18:19:06 +0800
Subject: [PATCH] debug

Signed-off-by: Ryan Leung
---
 pkg/mcs/scheduling/server/grpc_service.go  | 1 +
 pkg/schedule/checker/checker_controller.go | 2 ++
 pkg/schedule/handler/handler.go            | 2 ++
 server/cluster/cluster_worker.go           | 1 +
 4 files changed, 6 insertions(+)

diff --git a/pkg/mcs/scheduling/server/grpc_service.go b/pkg/mcs/scheduling/server/grpc_service.go
index f4fe606b4036..589be52fc188 100644
--- a/pkg/mcs/scheduling/server/grpc_service.go
+++ b/pkg/mcs/scheduling/server/grpc_service.go
@@ -325,6 +325,7 @@ func (s *Service) AskBatchSplit(_ context.Context, request *schedulingpb.AskBatc
 	// If region splits during the scheduling process, regions with abnormal
 	// status may be left, and these regions need to be checked with higher
 	// priority.
+	log.Info("AskBatchSplit", zap.Reflect("recordRegions", recordRegions))
 	c.GetCoordinator().GetCheckerController().AddPendingProcessedRegions(false, recordRegions...)
 
 	return &schedulingpb.AskBatchSplitResponse{
diff --git a/pkg/schedule/checker/checker_controller.go b/pkg/schedule/checker/checker_controller.go
index 587cf2f80cf7..3b879d1649e2 100644
--- a/pkg/schedule/checker/checker_controller.go
+++ b/pkg/schedule/checker/checker_controller.go
@@ -363,6 +363,7 @@ func (c *Controller) tryAddOperators(region *core.RegionInfo) {
 		c.opController.AddWaitingOperator(ops...)
 		c.RemovePendingProcessedRegion(id)
 	} else {
+		log.Info("tryAddOperators exceed store limit", zap.Uint64("region-id", id))
 		c.AddPendingProcessedRegions(true, id)
 	}
 }
@@ -439,6 +440,7 @@ func (c *Controller) CheckSuspectRanges() {
 		if lastRegion.GetEndKey() != nil && bytes.Compare(lastRegion.GetEndKey(), keyRange[1]) < 0 {
 			c.AddSuspectKeyRange(lastRegion.GetEndKey(), keyRange[1])
 		}
+		log.Info("CheckSuspectRanges", zap.Reflect("regionIDList", regionIDList))
 		c.AddPendingProcessedRegions(false, regionIDList...)
 	}
 }
diff --git a/pkg/schedule/handler/handler.go b/pkg/schedule/handler/handler.go
index a8540b4b5f41..60a0550f9a52 100644
--- a/pkg/schedule/handler/handler.go
+++ b/pkg/schedule/handler/handler.go
@@ -1126,6 +1126,7 @@ func (h *Handler) AccelerateRegionsScheduleInRange(rawStartKey, rawEndKey string
 		for _, region := range regions {
 			regionsIDList = append(regionsIDList, region.GetID())
 		}
+		log.Info("AccelerateRegionsScheduleInRange", zap.Reflect("regionIDList", regionsIDList))
 		co.GetCheckerController().AddPendingProcessedRegions(false, regionsIDList...)
 	}
 	return nil
@@ -1153,6 +1154,7 @@ func (h *Handler) AccelerateRegionsScheduleInRanges(startKeys [][]byte, endKeys
 		for _, region := range regions {
 			regionsIDList = append(regionsIDList, region.GetID())
 		}
+		log.Info("AccelerateRegionsScheduleInRanges", zap.Reflect("regionIDList", regionsIDList))
 		co.GetCheckerController().AddPendingProcessedRegions(false, regionsIDList...)
 	}
 	return nil
diff --git a/server/cluster/cluster_worker.go b/server/cluster/cluster_worker.go
index c14ceff2153b..34070d7b60df 100644
--- a/server/cluster/cluster_worker.go
+++ b/server/cluster/cluster_worker.go
@@ -165,6 +165,7 @@ func (c *RaftCluster) HandleAskBatchSplit(request *pdpb.AskBatchSplitRequest) (*
 	// If region splits during the scheduling process, regions with abnormal
 	// status may be left, and these regions need to be checked with higher
 	// priority.
+	log.Info("HandleAskBatchSplit", zap.Reflect("recordRegions", recordRegions))
 	c.AddPendingProcessedRegions(false, recordRegions...)
 
 	resp := &pdpb.AskBatchSplitResponse{Ids: splitIDs}
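
For context, a minimal standalone sketch of the zap field constructors used by the log.Info calls added in this patch (zap.Reflect for slice-valued fields, zap.Uint64 for a single region ID). It calls go.uber.org/zap directly; zap.NewExample and the sample IDs are assumptions for a self-contained demo, while PD itself logs through the pingcap/log wrapper.

package main

import "go.uber.org/zap"

func main() {
	// Assumed example logger; PD constructs its zap logger via pingcap/log instead.
	logger := zap.NewExample()
	defer logger.Sync()

	// Hypothetical sample data standing in for recordRegions / regionsIDList.
	recordRegions := []uint64{101, 102, 103}

	// zap.Reflect serializes an arbitrary value via reflection, matching the
	// slice-valued fields added in the patch.
	logger.Info("AskBatchSplit", zap.Reflect("recordRegions", recordRegions))

	// zap.Uint64 attaches a single numeric field, matching the store-limit branch.
	logger.Info("tryAddOperators exceed store limit", zap.Uint64("region-id", 42))
}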