diff --git a/pkg/mcs/scheduling/server/grpc_service.go b/pkg/mcs/scheduling/server/grpc_service.go
index f4fe606b403..589be52fc18 100644
--- a/pkg/mcs/scheduling/server/grpc_service.go
+++ b/pkg/mcs/scheduling/server/grpc_service.go
@@ -325,6 +325,7 @@ func (s *Service) AskBatchSplit(_ context.Context, request *schedulingpb.AskBatc
 	// If region splits during the scheduling process, regions with abnormal
 	// status may be left, and these regions need to be checked with higher
 	// priority.
+	log.Info("AskBatchSplit", zap.Reflect("recordRegions", recordRegions))
 	c.GetCoordinator().GetCheckerController().AddPendingProcessedRegions(false, recordRegions...)

 	return &schedulingpb.AskBatchSplitResponse{
diff --git a/pkg/schedule/checker/checker_controller.go b/pkg/schedule/checker/checker_controller.go
index 587cf2f80cf..3b879d1649e 100644
--- a/pkg/schedule/checker/checker_controller.go
+++ b/pkg/schedule/checker/checker_controller.go
@@ -363,6 +363,7 @@ func (c *Controller) tryAddOperators(region *core.RegionInfo) {
 		c.opController.AddWaitingOperator(ops...)
 		c.RemovePendingProcessedRegion(id)
 	} else {
+		log.Info("tryAddOperators exceed store limit", zap.Uint64("region-id", id))
 		c.AddPendingProcessedRegions(true, id)
 	}
 }
@@ -439,6 +440,7 @@ func (c *Controller) CheckSuspectRanges() {
 		if lastRegion.GetEndKey() != nil && bytes.Compare(lastRegion.GetEndKey(), keyRange[1]) < 0 {
 			c.AddSuspectKeyRange(lastRegion.GetEndKey(), keyRange[1])
 		}
+		log.Info("CheckSuspectRanges", zap.Reflect("regionIDList", regionIDList))
 		c.AddPendingProcessedRegions(false, regionIDList...)
 	}
 }
diff --git a/pkg/schedule/handler/handler.go b/pkg/schedule/handler/handler.go
index a8540b4b5f4..60a0550f9a5 100644
--- a/pkg/schedule/handler/handler.go
+++ b/pkg/schedule/handler/handler.go
@@ -1126,6 +1126,7 @@ func (h *Handler) AccelerateRegionsScheduleInRange(rawStartKey, rawEndKey string
 		for _, region := range regions {
 			regionsIDList = append(regionsIDList, region.GetID())
 		}
+		log.Info("AccelerateRegionsScheduleInRange", zap.Reflect("regionIDList", regionsIDList))
 		co.GetCheckerController().AddPendingProcessedRegions(false, regionsIDList...)
 	}
 	return nil
@@ -1153,6 +1154,7 @@ func (h *Handler) AccelerateRegionsScheduleInRanges(startKeys [][]byte, endKeys
 		for _, region := range regions {
 			regionsIDList = append(regionsIDList, region.GetID())
 		}
+		log.Info("AccelerateRegionsScheduleInRanges", zap.Reflect("regionIDList", regionsIDList))
 		co.GetCheckerController().AddPendingProcessedRegions(false, regionsIDList...)
 	}
 	return nil
diff --git a/server/cluster/cluster_worker.go b/server/cluster/cluster_worker.go
index c14ceff2153..34070d7b60d 100644
--- a/server/cluster/cluster_worker.go
+++ b/server/cluster/cluster_worker.go
@@ -165,6 +165,7 @@ func (c *RaftCluster) HandleAskBatchSplit(request *pdpb.AskBatchSplitRequest) (*
 	// If region splits during the scheduling process, regions with abnormal
 	// status may be left, and these regions need to be checked with higher
 	// priority.
+	log.Info("HandleAskBatchSplit", zap.Reflect("recordRegions", recordRegions))
 	c.AddPendingProcessedRegions(false, recordRegions...)

 	resp := &pdpb.AskBatchSplitResponse{Ids: splitIDs}