@@ -450,59 +450,6 @@ func (s *SystemScheduler) computeJobAllocs() error {
 	return nil
 }
 
-// findNodesForTG runs feasibility checks on nodes. The result includes all nodes for each
-// task group (feasible and infeasible) along with metrics information on the checks.
-func (s *SystemScheduler) findNodesForTG(buckets *reconciler.NodeReconcileResult) (tgNodes map[string]taskGroupNodes, filteredMetrics map[string]*structs.AllocMetric) {
-	tgNodes = make(map[string]taskGroupNodes)
-	filteredMetrics = make(map[string]*structs.AllocMetric)
-
-	nodeByID := make(map[string]*structs.Node, len(s.nodes))
-	for _, node := range s.nodes {
-		nodeByID[node.ID] = node
-	}
-
-	nodes := make([]*structs.Node, 1)
-	for _, a := range slices.Concat(buckets.Place, buckets.Update, buckets.Ignore) {
-		tgName := a.TaskGroup.Name
-		if tgNodes[tgName] == nil {
-			tgNodes[tgName] = taskGroupNodes{}
-		}
-
-		node, ok := nodeByID[a.Alloc.NodeID]
-		if !ok {
-			s.logger.Debug("could not find node", "node", a.Alloc.NodeID)
-			continue
-		}
-
-		// Update the set of placement nodes
-		nodes[0] = node
-		s.stack.SetNodes(nodes)
-
-		if a.Alloc.ID != "" {
-			// temporarily include the old alloc from a destructive update so
-			// that we can account for resources that will be freed by that
-			// allocation. We'll back this change out if we end up needing to
-			// limit placements by max_parallel or canaries.
-			s.plan.AppendStoppedAlloc(a.Alloc, sstructs.StatusAllocUpdating, "", "")
-		}
-
-		// Attempt to match the task group
-		option := s.stack.Select(a.TaskGroup, &feasible.SelectOptions{AllocName: a.Name})
-
-		// Always store the results. Keep the metrics that were generated
-		// for the match attempt so they can be used during placement.
-		tgNodes[tgName] = append(tgNodes[tgName], &taskGroupNode{node.ID, option, s.ctx.Metrics().Copy()})
-
-		if option == nil {
-			// When no match is found, merge the filter metrics for the task
-			// group so proper reporting can be done during placement.
-			filteredMetrics[tgName] = mergeNodeFiltered(filteredMetrics[tgName], s.ctx.Metrics())
-		}
-	}
-
-	return
-}
-
 // computePlacements computes placements for allocations
 func (s *SystemScheduler) computePlacements(
 	reconcilerResult *reconciler.NodeReconcileResult, existingByTaskGroup map[string]bool,
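
A notable detail in the removed helper is how it scopes feasibility checking to a single node per allocation: it allocates a one-element slice once, overwrites `nodes[0]` on each iteration, and passes that same slice to `s.stack.SetNodes`. Below is a minimal, self-contained sketch of that pattern under stated assumptions: the `node` and `nodeStack` types and the `Select` logic are hypothetical stand-ins, not Nomad's scheduler API.

```go
package main

import "fmt"

type node struct{ ID string }

// nodeStack is a hypothetical stand-in for the scheduler's selection stack,
// which evaluates feasibility against whatever node set it was last given.
type nodeStack struct{ nodes []*node }

func (s *nodeStack) SetNodes(ns []*node) { s.nodes = ns }

// Select returns the first "feasible" node; the non-empty-ID check is a
// placeholder for real feasibility logic.
func (s *nodeStack) Select(group string) *node {
	for _, n := range s.nodes {
		if n.ID != "" {
			return n
		}
	}
	return nil
}

func main() {
	stack := &nodeStack{}
	candidates := []*node{{ID: "node-a"}, {ID: "node-b"}}

	// Reuse one backing slice so each Select call sees exactly one node,
	// avoiding a fresh slice allocation per iteration.
	window := make([]*node, 1)
	for _, n := range candidates {
		window[0] = n
		stack.SetNodes(window)
		if opt := stack.Select("web"); opt != nil {
			fmt.Println("feasible on", opt.ID)
		}
	}
}
```

This mirrors the removed code's intent: a system-job allocation is pinned to the node it already lives on, so there is no reason to let the stack iterate the full node set when checking it.
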
@@ -747,8 +694,9 @@ func (s *SystemScheduler) evictAndPlace(reconciled *reconciler.NodeReconcileResu
 	// findFeasibleNodesForTG method marks all old allocs from a destructive
 	// update as stopped in order to get an accurate feasible nodes count
 	// (accounting for resources that would be freed). Now we need to remove all
-	// the allocs that have StatusAllocUpdating from the set we're stopping (NodeUpdate) and
-	// only place and stop the ones the ones that correspond to updates limited by max parallel.
+	// the allocs that have StatusAllocUpdating from the set we're stopping
+	// (NodeUpdate) and only place and stop the ones that correspond to
+	// updates limited by max parallel.
 	for node, allocations := range s.plan.NodeUpdate {
 		n := 0
 		for _, alloc := range allocations {
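
The hunk ends before the loop body, but the `n := 0` counter followed by a range over the slice is the start of Go's standard in-place slice-filtering idiom: kept elements are copied forward and the slice is re-truncated, without allocating a second slice. Here is a minimal sketch of that idiom; the `alloc` type and the keep condition are hypothetical stand-ins for the real StatusAllocUpdating check.

```go
package main

import "fmt"

type alloc struct {
	ID            string
	DesiredStatus string
}

func main() {
	allocations := []*alloc{
		{ID: "a1", DesiredStatus: "stop"},
		{ID: "a2", DesiredStatus: "alloc-updating"}, // filtered out below
		{ID: "a3", DesiredStatus: "stop"},
	}

	// In-place filter: copy kept elements forward over the same backing array.
	n := 0
	for _, a := range allocations {
		if a.DesiredStatus != "alloc-updating" { // hypothetical keep condition
			allocations[n] = a
			n++
		}
	}
	allocations = allocations[:n] // re-truncate to the kept prefix

	for _, a := range allocations {
		fmt.Println("still stopping:", a.ID)
	}
}
```
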