@@ -122,30 +122,6 @@ impl OptimizationTrace {
         }
     }
 
-    pub fn check_convergence_with_optimizer(
-        &mut self,
-        iteration: usize,
-        function_value: f64,
-        _optimizer: &dyn Optimizer,
-        parameters: &[f64],
-        gradient: &[f64],
-        step_size: f64,
-        timestamp: Duration,
-        total_function_evaluations: usize,
-        total_gradient_evaluations: usize,
-    ) {
-        self.iterations.push(IterationData {
-            iteration,
-            function_value,
-            gradient_norm: gradient.iter().map(|g| g * g).sum::<f64>().sqrt(),
-            step_size,
-            parameters: parameters.to_vec(),
-            timestamp: timestamp.into(),
-            total_function_evaluations,
-            total_gradient_evaluations,
-        });
-    }
-
     pub fn final_value(&self) -> Option<f64> {
         if self.iterations.is_empty() {
             None
@@ -553,17 +529,19 @@ impl BenchmarkRunner {
         };
         *gradient_evaluations += 1;
         // Record initial state (iteration 0)
-        trace.check_convergence_with_optimizer(
-            0,
-            initial_f_val,
-            optimizer,
-            input_floats,
-            &initial_gradient,
-            0.0, // No step size for initial evaluation
-            start_time.elapsed(),
-            *function_evaluations,
-            *gradient_evaluations,
-        );
+        let timestamp = start_time.elapsed();
+        let total_function_evaluations = *function_evaluations;
+        let total_gradient_evaluations = *gradient_evaluations;
+        trace.iterations.push(IterationData {
+            iteration: 0,
+            function_value: initial_f_val,
+            gradient_norm: initial_gradient.iter().map(|g| g * g).sum::<f64>().sqrt(),
+            step_size: 0.0,
+            parameters: input_floats.to_vec(),
+            timestamp: timestamp.into(),
+            total_function_evaluations,
+            total_gradient_evaluations,
+        });
         let mut best_f_val = initial_f_val;
 
         while *iteration < self.config.max_iterations {
@@ -668,17 +646,16 @@ impl BenchmarkRunner {
             if f_val < optimal_value {
                 info!("Converged by function tolerance at iteration {iteration}");
                 // Record final iteration data before returning
-                trace.check_convergence_with_optimizer(
-                    *iteration,
-                    f_val,
-                    optimizer,
-                    input_floats,
-                    &gradient,
-                    0.0,
-                    start_time.elapsed(),
-                    *function_evaluations,
-                    *gradient_evaluations,
-                );
+                trace.iterations.push(IterationData {
+                    iteration: *iteration,
+                    function_value: f_val,
+                    gradient_norm: gradient.iter().map(|g| g * g).sum::<f64>().sqrt(),
+                    step_size: 0.0,
+                    parameters: input_floats.to_vec(),
+                    timestamp: start_time.elapsed().into(),
+                    total_function_evaluations: *function_evaluations,
+                    total_gradient_evaluations: *gradient_evaluations,
+                });
                 return Ok(ConvergenceReason::FunctionTolerance);
             }
         }
@@ -707,17 +684,21 @@ impl BenchmarkRunner {
                 self.config.maximum_function_calls
             );
             // Record final iteration data before returning
-            trace.check_convergence_with_optimizer(
-                *iteration,
-                f_val,
-                optimizer,
-                input_floats,
-                &gradient,
-                step_result.step_size,
-                start_time.elapsed(),
-                *function_evaluations,
-                *gradient_evaluations,
-            );
+            let iteration1 = *iteration;
+            let step_size = step_result.step_size;
+            let timestamp = start_time.elapsed();
+            let total_function_evaluations = *function_evaluations;
+            let total_gradient_evaluations = *gradient_evaluations;
+            trace.iterations.push(IterationData {
+                iteration: iteration1,
+                function_value: f_val,
+                gradient_norm: gradient.iter().map(|g| g * g).sum::<f64>().sqrt(),
+                step_size,
+                parameters: input_floats.to_vec(),
+                timestamp: timestamp.into(),
+                total_function_evaluations,
+                total_gradient_evaluations,
+            });
             return Ok(ConvergenceReason::MaxFunctionEvaluations);
         }
 
@@ -729,17 +710,21 @@ impl BenchmarkRunner {
                 iteration, step_result.step_size
             );
             // Record final iteration data before returning
-            trace.check_convergence_with_optimizer(
-                *iteration - 1, // Use previous iteration number since we already incremented
-                f_val,
-                optimizer,
-                input_floats,
-                &gradient,
-                step_result.step_size,
-                start_time.elapsed(),
-                *function_evaluations,
-                *gradient_evaluations,
-            );
+            let iteration1 = *iteration - 1;
+            let step_size = step_result.step_size;
+            let timestamp = start_time.elapsed();
+            let total_function_evaluations = *function_evaluations;
+            let total_gradient_evaluations = *gradient_evaluations;
+            trace.iterations.push(IterationData {
+                iteration: iteration1,
+                function_value: f_val,
+                gradient_norm: gradient.iter().map(|g| g * g).sum::<f64>().sqrt(),
+                step_size,
+                parameters: input_floats.to_vec(),
+                timestamp: timestamp.into(),
+                total_function_evaluations,
+                total_gradient_evaluations,
+            });
             return Ok(ConvergenceReason::GradientTolerance);
         }
 
@@ -769,17 +754,21 @@ impl BenchmarkRunner {
             }
 
             // Record iteration data only after successful step
-            trace.check_convergence_with_optimizer(
-                *iteration - 1, // Use previous iteration number since we already incremented
-                f_val,
-                optimizer,
-                input_floats,
-                &gradient,
-                step_result.step_size,
-                start_time.elapsed(),
-                *function_evaluations,
-                *gradient_evaluations,
-            );
+            let iteration1 = *iteration - 1;
+            let step_size = step_result.step_size;
+            let timestamp = start_time.elapsed();
+            let total_function_evaluations = *function_evaluations;
+            let total_gradient_evaluations = *gradient_evaluations;
+            trace.iterations.push(IterationData {
+                iteration: iteration1,
+                function_value: f_val,
+                gradient_norm: gradient.iter().map(|g| g * g).sum::<f64>().sqrt(),
+                step_size,
+                parameters: input_floats.to_vec(),
+                timestamp: timestamp.into(),
+                total_function_evaluations,
+                total_gradient_evaluations,
+            });
 
             // Check for numerical errors
             if input_floats.iter().any(|&xi| !xi.is_finite()) {
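For reference, the removed helper and the new inline push sites share an identical body; the commit trades the shared method (and its unused `_optimizer` parameter) for repetition at each call site. Below is a minimal, self-contained sketch of what a de-duplicated helper could look like after this change. The struct definitions are assumptions reconstructed from the `IterationData` literals in the diff (the real code's `timestamp: timestamp.into()` suggests the actual field is not a plain `Duration`), and `record_iteration` is a hypothetical name, not part of this commit.

use std::time::Duration;

// Stand-ins for the real types; field sets are assumptions taken from
// the IterationData literals in the diff above.
#[derive(Debug, Clone)]
struct IterationData {
    iteration: usize,
    function_value: f64,
    gradient_norm: f64,
    step_size: f64,
    parameters: Vec<f64>,
    timestamp: Duration,
    total_function_evaluations: usize,
    total_gradient_evaluations: usize,
}

#[derive(Debug, Default)]
struct OptimizationTrace {
    iterations: Vec<IterationData>,
}

impl OptimizationTrace {
    /// Hypothetical replacement for the removed method, without the
    /// unused `_optimizer` parameter.
    #[allow(clippy::too_many_arguments)]
    fn record_iteration(
        &mut self,
        iteration: usize,
        function_value: f64,
        parameters: &[f64],
        gradient: &[f64],
        step_size: f64,
        timestamp: Duration,
        total_function_evaluations: usize,
        total_gradient_evaluations: usize,
    ) {
        self.iterations.push(IterationData {
            iteration,
            function_value,
            // Euclidean (L2) norm of the gradient, computed exactly as
            // at each inline push site in the diff.
            gradient_norm: gradient.iter().map(|g| g * g).sum::<f64>().sqrt(),
            step_size,
            parameters: parameters.to_vec(),
            timestamp,
            total_function_evaluations,
            total_gradient_evaluations,
        });
    }
}

fn main() {
    let mut trace = OptimizationTrace::default();
    trace.record_iteration(0, 1.5, &[0.0, 0.0], &[3.0, 4.0], 0.0, Duration::ZERO, 1, 1);
    assert_eq!(trace.iterations[0].gradient_norm, 5.0); // sqrt(3^2 + 4^2)
}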