diff --git a/pkg/schedule/checker/rule_checker.go b/pkg/schedule/checker/rule_checker.go
index 7350d92cf58..453a9d25ee4 100644
--- a/pkg/schedule/checker/rule_checker.go
+++ b/pkg/schedule/checker/rule_checker.go
@@ -312,7 +312,7 @@ func (c *RuleChecker) fixLooseMatchPeer(region *core.RegionInfo, fit *placement.
 	if region.GetLeader().GetId() != peer.GetId() && rf.Rule.Role == placement.Leader {
 		ruleCheckerFixLeaderRoleCounter.Inc()
 		if c.allowLeader(fit, peer) {
-			return operator.CreateTransferLeaderOperator("fix-leader-role", c.cluster, region, peer.GetStoreId(), []uint64{}, 0)
+			return operator.CreateTransferLeaderOperator("fix-leader-role", c.cluster, region, peer.GetStoreId(), 0)
 		}
 		ruleCheckerNotAllowLeaderCounter.Inc()
 		return nil, errs.ErrPeerCannotBeLeader
@@ -321,7 +321,7 @@ func (c *RuleChecker) fixLooseMatchPeer(region *core.RegionInfo, fit *placement.
 		ruleCheckerFixFollowerRoleCounter.Inc()
 		for _, p := range region.GetPeers() {
 			if c.allowLeader(fit, p) {
-				return operator.CreateTransferLeaderOperator("fix-follower-role", c.cluster, region, p.GetStoreId(), []uint64{}, 0)
+				return operator.CreateTransferLeaderOperator("fix-follower-role", c.cluster, region, p.GetStoreId(), 0)
 			}
 		}
 		ruleCheckerNoNewLeaderCounter.Inc()
diff --git a/pkg/schedule/filter/comparer.go b/pkg/schedule/filter/comparer.go
index 58d3032f36d..75877066835 100644
--- a/pkg/schedule/filter/comparer.go
+++ b/pkg/schedule/filter/comparer.go
@@ -27,6 +27,7 @@ type StoreComparer func(a, b *core.StoreInfo) int
 // score.
 func RegionScoreComparer(conf config.SharedConfigProvider) StoreComparer {
 	return func(a, b *core.StoreInfo) int {
+		// TODO: we should use the real time delta data to calculate the score.
 		sa := a.RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), 0)
 		sb := b.RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), 0)
 		switch {
diff --git a/pkg/schedule/filter/comparer_test.go b/pkg/schedule/filter/comparer_test.go
new file mode 100644
index 00000000000..b318808b192
--- /dev/null
+++ b/pkg/schedule/filter/comparer_test.go
@@ -0,0 +1,45 @@
+// Copyright 2025 TiKV Project Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filter
+
+import (
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/pingcap/kvproto/pkg/metapb"
+
+	"github.com/tikv/pd/pkg/core"
+	"github.com/tikv/pd/pkg/mock/mockconfig"
+)
+
+func TestRegionCompare(t *testing.T) {
+	re := require.New(t)
+	ids := []uint64{1, 2, 3, 4, 5}
+	stores := make([]*core.StoreInfo, 0, len(ids))
+	for _, id := range ids {
+		stores = append(stores, core.NewStoreInfo(
+			&metapb.Store{Id: id},
+			core.SetRegionSize(int64(6-id)*1000),
+		))
+	}
+	cs := NewCandidates(rand.New(rand.NewSource(time.Now().UnixNano())), stores)
+	cfg := mockconfig.NewTestOptions()
+	re.Equal(uint64(1), cs.PickFirst().GetID())
+	cs.Sort(RegionScoreComparer(cfg))
+	re.Equal(uint64(5), cs.PickFirst().GetID())
+}
diff --git a/pkg/schedule/handler/handler.go b/pkg/schedule/handler/handler.go
index 042adf96611..1f44f45a300 100644
--- a/pkg/schedule/handler/handler.go
+++ b/pkg/schedule/handler/handler.go
@@ -420,7 +420,7 @@ func (h *Handler) AddTransferLeaderOperator(regionID uint64, storeID uint64) err
 		return errors.Errorf("region has no voter in store %v", storeID)
 	}
 
-	op, err := operator.CreateTransferLeaderOperator("admin-transfer-leader", c, region, newLeader.GetStoreId(), []uint64{}, operator.OpAdmin)
+	op, err := operator.CreateTransferLeaderOperator("admin-transfer-leader", c, region, newLeader.GetStoreId(), operator.OpAdmin)
 	if err != nil {
 		log.Debug("fail to create transfer leader operator", errs.ZapError(err))
 		return err
diff --git a/pkg/schedule/operator/builder.go b/pkg/schedule/operator/builder.go
index 8fd2393da2e..faa8c0b3992 100644
--- a/pkg/schedule/operator/builder.go
+++ b/pkg/schedule/operator/builder.go
@@ -297,25 +297,6 @@ func (b *Builder) SetLeader(storeID uint64) *Builder {
 	return b
 }
 
-// SetLeaders records all valid target leaders in Builder.
-func (b *Builder) SetLeaders(storeIDs []uint64) *Builder {
-	if b.err != nil {
-		return b
-	}
-	sort.Slice(storeIDs, func(i, j int) bool { return storeIDs[i] < storeIDs[j] })
-	for _, storeID := range storeIDs {
-		peer := b.targetPeers[storeID]
-		if peer == nil || core.IsLearner(peer) || b.unhealthyPeers[storeID] != nil {
-			continue
-		}
-		b.targetLeaderStoreIDs = append(b.targetLeaderStoreIDs, storeID)
-	}
-	// Don't need to check if there's valid target, because `targetLeaderStoreIDs`
-	// can be empty if this is not a multi-target evict leader operation. Besides,
-	// `targetLeaderStoreID` must be valid and there must be at least one valid target.
-	return b
-}
-
 // SetPeers resets the target peer list.
 //
 // If peer's ID is 0, the builder will allocate a new ID later. If current
diff --git a/pkg/schedule/operator/create_operator.go b/pkg/schedule/operator/create_operator.go
index 2558ca8d75b..a1bf503bfa6 100644
--- a/pkg/schedule/operator/create_operator.go
+++ b/pkg/schedule/operator/create_operator.go
@@ -80,10 +80,9 @@ func CreateRemovePeerOperator(desc string, ci sche.SharedCluster, kind OpKind, r
 }
 
 // CreateTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store.
-func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) {
+func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, targetStoreID uint64, kind OpKind) (*Operator, error) {
 	return NewBuilder(desc, ci, region, SkipOriginJointStateCheck).
 		SetLeader(targetStoreID).
-		SetLeaders(targetStoreIDs).
 		Build(kind)
 }
 
diff --git a/pkg/schedule/operator/create_operator_test.go b/pkg/schedule/operator/create_operator_test.go
index 0143b1b8011..29e80e9ef34 100644
--- a/pkg/schedule/operator/create_operator_test.go
+++ b/pkg/schedule/operator/create_operator_test.go
@@ -425,7 +425,7 @@ func (suite *createOperatorTestSuite) TestCreateTransferLeaderOperator() {
 	}
 	for _, testCase := range testCases {
 		region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: testCase.originPeers}, testCase.originPeers[0])
-		op, err := CreateTransferLeaderOperator("test", suite.cluster, region, testCase.targetLeaderStoreID, []uint64{}, 0)
+		op, err := CreateTransferLeaderOperator("test", suite.cluster, region, testCase.targetLeaderStoreID, 0)
 
 		if testCase.isErr {
 			re.Error(err)
diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go
index 2884e8a486f..73e85068f8b 100644
--- a/pkg/schedule/schedulers/balance_leader.go
+++ b/pkg/schedule/schedulers/balance_leader.go
@@ -537,7 +537,7 @@ func (s *balanceLeaderScheduler) createOperator(solver *solver, collector *plan.
 	}
 	solver.Step++
 	defer func() { solver.Step-- }()
-	op, err := operator.CreateTransferLeaderOperator(s.GetName(), solver, solver.Region, solver.targetStoreID(), []uint64{}, operator.OpLeader)
+	op, err := operator.CreateTransferLeaderOperator(s.GetName(), solver, solver.Region, solver.targetStoreID(), operator.OpLeader)
 	if err != nil {
 		log.Debug("fail to create balance leader operator", errs.ZapError(err))
 		if collector != nil {
diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go
index 45285b51137..bf06ee4dd11 100644
--- a/pkg/schedule/schedulers/evict_leader.go
+++ b/pkg/schedule/schedulers/evict_leader.go
@@ -17,6 +17,7 @@ package schedulers
 import (
 	"math/rand"
 	"net/http"
+	"sort"
 	"strconv"
 
 	"github.com/gorilla/mux"
@@ -288,7 +289,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster)
 // Schedule implements the Scheduler interface.
 func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
 	evictLeaderCounter.Inc()
-	return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf), nil
+	return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf, s.OpController), nil
 }
 
 func uniqueAppendOperator(dst []*operator.Operator, src ...*operator.Operator) []*operator.Operator {
@@ -312,11 +313,11 @@ type evictLeaderStoresConf interface {
 	getBatch() int
 }
 
-func scheduleEvictLeaderBatch(r *rand.Rand, name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator {
+func scheduleEvictLeaderBatch(r *rand.Rand, name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf, opController *operator.Controller) []*operator.Operator {
 	var ops []*operator.Operator
 	batchSize := conf.getBatch()
 	for range batchSize {
-		once := scheduleEvictLeaderOnce(r, name, cluster, conf)
+		once := scheduleEvictLeaderOnce(r, name, cluster, conf, opController)
 		// no more regions
 		if len(once) == 0 {
 			break
@@ -330,7 +331,7 @@ func scheduleEvictLeaderBatch(r *rand.Rand, name string, cluster sche.SchedulerC
 	return ops
 }
 
-func scheduleEvictLeaderOnce(r *rand.Rand, name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator {
+func scheduleEvictLeaderOnce(r *rand.Rand, name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf, opController *operator.Controller) []*operator.Operator {
 	stores := conf.getStores()
 	ops := make([]*operator.Operator, 0, len(stores))
 	for _, storeID := range stores {
@@ -363,19 +364,12 @@ func scheduleEvictLeaderOnce(r *rand.Rand, name string, cluster sche.SchedulerCl
 		filters = append(filters, &filter.StoreStateFilter{ActionScope: name, TransferLeader: true, OperatorLevel: constant.Urgent})
 		candidates := filter.NewCandidates(r, cluster.GetFollowerStores(region)).
 			FilterTarget(cluster.GetSchedulerConfig(), nil, nil, filters...)
-		// Compatible with old TiKV transfer leader logic.
-		target := candidates.RandomPick()
-		targets := candidates.PickAll()
-		// `targets` MUST contains `target`, so only needs to check if `target` is nil here.
-		if target == nil {
+
+		if len(candidates.Stores) == 0 {
 			evictLeaderNoTargetStoreCounter.Inc()
 			continue
 		}
-		targetIDs := make([]uint64, 0, len(targets))
-		for _, t := range targets {
-			targetIDs = append(targetIDs, t.GetID())
-		}
-		op, err := operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), targetIDs, operator.OpLeader)
+		op, err := createOperatorWithSort(name, cluster, candidates, region, opController)
 		if err != nil {
 			log.Debug("fail to create evict leader operator", errs.ZapError(err))
 			continue
@@ -387,6 +381,31 @@ func scheduleEvictLeaderOnce(r *rand.Rand, name string, cluster sche.SchedulerCl
 	return ops
 }
 
+func createOperatorWithSort(name string, cluster sche.SchedulerCluster, candidates *filter.StoreCandidates, region *core.RegionInfo, opController *operator.Controller) (*operator.Operator, error) {
+	// Pick the store with the lowest leader score first.
+	targets := candidates.Stores
+	sort.Slice(targets, func(i, j int) bool {
+		leaderSchedulePolicy := cluster.GetSchedulerConfig().GetLeaderSchedulePolicy()
+		opInfluence := opController.GetOpInfluence(cluster.GetBasicCluster())
+		kind := constant.NewScheduleKind(constant.LeaderKind, leaderSchedulePolicy)
+		iOp := opInfluence.GetStoreInfluence(targets[i].GetID()).ResourceProperty(kind)
+		jOp := opInfluence.GetStoreInfluence(targets[j].GetID()).ResourceProperty(kind)
+		return targets[i].LeaderScore(leaderSchedulePolicy, iOp) <
+			targets[j].LeaderScore(leaderSchedulePolicy, jOp)
+	})
+	var (
+		op  *operator.Operator
+		err error
+	)
+	for _, target := range targets {
+		op, err = operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), operator.OpLeader)
+		if op != nil && err == nil {
+			return op, err
+		}
+	}
+	return op, err
+}
+
 type evictLeaderHandler struct {
 	rd     *render.Render
 	config *evictLeaderSchedulerConfig
diff --git a/pkg/schedule/schedulers/evict_leader_test.go b/pkg/schedule/schedulers/evict_leader_test.go
index 587a48358e9..6dbe976f5c6 100644
--- a/pkg/schedule/schedulers/evict_leader_test.go
+++ b/pkg/schedule/schedulers/evict_leader_test.go
@@ -137,3 +137,33 @@ func TestBatchEvict(t *testing.T) {
 		return len(ops) == 5
 	})
 }
+
+func TestEvictLeaderSelectsLowScoreStore(t *testing.T) {
+	re := require.New(t)
+	cancel, _, tc, oc := prepareSchedulersTest()
+	defer cancel()
+
+	// Add stores with different leader counts.
+	tc.AddLeaderStore(1, 30) // store 1
+	tc.AddLeaderStore(2, 20) // store 2
+	tc.AddLeaderStore(3, 10) // store 3
+
+	// Add region 1 with the leader in store 1 and followers in stores 2 and 3.
+	tc.AddLeaderRegion(1, 1, 2, 3)
+
+	// Create the evict-leader scheduler for store 1.
+	sl, err := CreateScheduler(types.EvictLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.EvictLeaderScheduler, []string{"1"}), func(string) error { return nil })
+	re.NoError(err)
+	re.True(sl.IsScheduleAllowed(tc))
+
+	// Scheduling should pick store 3 as the new leader, because it has the lowest score (10).
+	ops, _ := sl.Schedule(tc, false)
+	re.Len(ops, 1)
+	operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{3})
+
+	// After lowering store 2's score to 5, scheduling should pick store 2 instead.
+	tc.AddLeaderStore(2, 5)
+	ops, _ = sl.Schedule(tc, false)
+	re.Len(ops, 1)
+	operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2})
+}
diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go
index 8d8c014b110..af6ab7db244 100644
--- a/pkg/schedule/schedulers/evict_slow_store.go
+++ b/pkg/schedule/schedulers/evict_slow_store.go
@@ -244,7 +244,7 @@ func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster sche.SchedulerClust
 }
 
 func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster sche.SchedulerCluster) []*operator.Operator {
-	return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf)
+	return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf, s.OpController)
 }
 
 // IsScheduleAllowed implements the Scheduler interface.
diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go
index cf4bf3d3b39..bc61ff2f794 100644
--- a/pkg/schedule/schedulers/evict_slow_trend.go
+++ b/pkg/schedule/schedulers/evict_slow_trend.go
@@ -357,7 +357,7 @@ func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster sche.SchedulerClus
 		return nil
 	}
 	storeSlowTrendEvictedStatusGauge.WithLabelValues(store.GetAddress(), strconv.FormatUint(store.GetID(), 10)).Set(1)
-	return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf)
+	return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf, s.OpController)
 }
 
 // IsScheduleAllowed implements the Scheduler interface.
diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go
index 005e6b4182a..cf9eb4ddb1c 100644
--- a/pkg/schedule/schedulers/grant_hot_region.go
+++ b/pkg/schedule/schedulers/grant_hot_region.go
@@ -316,7 +316,7 @@ func (s *grantHotRegionScheduler) transfer(cluster sche.SchedulerCluster, region
 		dstStore := &metapb.Peer{StoreId: destStoreIDs[i]}
 
 		if isLeader {
-			op, err = operator.CreateTransferLeaderOperator(s.GetName()+"-leader", cluster, srcRegion, dstStore.StoreId, []uint64{}, operator.OpLeader)
+			op, err = operator.CreateTransferLeaderOperator(s.GetName()+"-leader", cluster, srcRegion, dstStore.StoreId, operator.OpLeader)
 		} else {
 			op, err = operator.CreateMovePeerOperator(s.GetName()+"-move", cluster, srcRegion, operator.OpRegion|operator.OpLeader, srcStore.GetID(), dstStore)
 		}
diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go
index 2ba9af782db..ef60336bfaa 100644
--- a/pkg/schedule/schedulers/hot_region.go
+++ b/pkg/schedule/schedulers/hot_region.go
@@ -1475,7 +1475,6 @@ func (bs *balanceSolver) createOperator(region *core.RegionInfo, srcStoreID, dst
 			bs,
 			region,
 			dstStoreID,
-			[]uint64{},
 			operator.OpHotRegion)
 	} else {
 		srcPeer := region.GetStorePeer(srcStoreID) // checked in `filterHotPeers`
diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go
index b37fc00cea1..8cdf01249b5 100644
--- a/pkg/schedule/schedulers/hot_region_test.go
+++ b/pkg/schedule/schedulers/hot_region_test.go
@@ -150,7 +150,7 @@ func checkGCPendingOpInfos(re *require.Assertions, enablePlacementRules bool) {
 			case movePeer:
 				op, err = operator.CreateMovePeerOperator("move-peer-test", tc, region, operator.OpAdmin, 2, &metapb.Peer{Id: region.GetID()*10000 + 1, StoreId: 4})
 			case transferLeader:
-				op, err = operator.CreateTransferLeaderOperator("transfer-leader-test", tc, region, 2, []uint64{}, operator.OpAdmin)
+				op, err = operator.CreateTransferLeaderOperator("transfer-leader-test", tc, region, 2, operator.OpAdmin)
 			}
 			re.NoError(err)
 			re.NotNil(op)
diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go
index cd63b6c0511..1e199d5db90 100644
--- a/pkg/schedule/schedulers/label.go
+++ b/pkg/schedule/schedulers/label.go
@@ -102,7 +102,7 @@ func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*ope
 			continue
 		}
 
-		op, err := operator.CreateTransferLeaderOperator("label-reject-leader", cluster, region, target.GetID(), []uint64{}, operator.OpLeader)
+		op, err := operator.CreateTransferLeaderOperator("label-reject-leader", cluster, region, target.GetID(), operator.OpLeader)
 		if err != nil {
 			log.Debug("fail to create transfer label reject leader operator", errs.ZapError(err))
 			return nil, nil
diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go
index 8fd5b87c1bc..cacf9833ec7 100644
--- a/pkg/schedule/schedulers/shuffle_leader.go
+++ b/pkg/schedule/schedulers/shuffle_leader.go
@@ -89,7 +89,7 @@ func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool)
 		shuffleLeaderNoFollowerCounter.Inc()
 		return nil, nil
 	}
-	op, err := operator.CreateTransferLeaderOperator(s.GetName(), cluster, region, targetStore.GetID(), []uint64{}, operator.OpAdmin)
+	op, err := operator.CreateTransferLeaderOperator(s.GetName(), cluster, region, targetStore.GetID(), operator.OpAdmin)
 	if err != nil {
 		log.Debug("fail to create shuffle leader operator", errs.ZapError(err))
 		return nil, nil
diff --git a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go
index c84f0918884..b1cf21b7247 100644
--- a/pkg/schedule/schedulers/transfer_witness_leader.go
+++ b/pkg/schedule/schedulers/transfer_witness_leader.go
@@ -99,19 +99,22 @@ func scheduleTransferWitnessLeader(r *rand.Rand, name string, cluster sche.Sched
 	}
 	filters = append(filters, filter.NewExcludedFilter(name, nil, unhealthyPeerStores), &filter.StoreStateFilter{ActionScope: name, TransferLeader: true, OperatorLevel: constant.Urgent})
 	candidates := filter.NewCandidates(r, cluster.GetFollowerStores(region)).FilterTarget(cluster.GetSchedulerConfig(), nil, nil, filters...)
-	// Compatible with old TiKV transfer leader logic.
-	target := candidates.RandomPick()
-	targets := candidates.PickAll()
-	// `targets` MUST contains `target`, so only needs to check if `target` is nil here.
-	if target == nil {
+	if len(candidates.Stores) == 0 {
 		transferWitnessLeaderNoTargetStoreCounter.Inc()
 		return nil, errors.New("no target store to schedule")
 	}
-	targetIDs := make([]uint64, 0, len(targets))
-	for _, t := range targets {
-		targetIDs = append(targetIDs, t.GetID())
+	// TODO: also sort the candidates by leader score, as the evict-leader scheduler does.
+	var (
+		op  *operator.Operator
+		err error
+	)
+	for _, target := range candidates.Stores {
+		op, err = operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), operator.OpLeader)
+		if op != nil && err == nil {
+			return op, err
+		}
 	}
-	return operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), targetIDs, operator.OpWitnessLeader)
+	return op, err
 }
 
 // RecvRegionInfo receives a checked region from coordinator
diff --git a/pkg/utils/operatorutil/operator_check.go b/pkg/utils/operatorutil/operator_check.go
index 4e0d4332a45..bd968abccb3 100644
--- a/pkg/utils/operatorutil/operator_check.go
+++ b/pkg/utils/operatorutil/operator_check.go
@@ -53,7 +53,7 @@ func CheckMultiTargetTransferLeader(re *require.Assertions, op *operator.Operato
 	re.Equal(1, op.Len())
 	expectedOps := make([]any, 0, len(targetIDs))
 	for _, targetID := range targetIDs {
-		expectedOps = append(expectedOps, operator.TransferLeader{FromStore: sourceID, ToStore: targetID, ToStores: targetIDs})
+		expectedOps = append(expectedOps, operator.TransferLeader{FromStore: sourceID, ToStore: targetID})
 	}
 	re.Contains(expectedOps, op.Step(0))
 	kind |= operator.OpLeader
diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go
index 4e0e700e8ce..f3046b35b79 100644
--- a/plugin/scheduler_example/evict_leader.go
+++ b/plugin/scheduler_example/evict_leader.go
@@ -230,7 +230,7 @@ func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) (
 		if target == nil {
 			continue
 		}
-		op, err := operator.CreateTransferLeaderOperator(s.GetName(), cluster, region, target.GetID(), []uint64{}, operator.OpLeader)
+		op, err := operator.CreateTransferLeaderOperator(s.GetName(), cluster, region, target.GetID(), operator.OpLeader)
 		if err != nil {
 			log.Debug("fail to create evict leader operator", errs.ZapError(err))
 			continue