@@ -10,8 +10,7 @@ use anyhow::Result;
 use candle_core::{Device, Tensor};
 use qqn_optimizer::utils::math::DifferentiableFunction;
 use qqn_optimizer::{
-    LBFGSConfig, LBFGSOptimizer, OptimizationProblem, Optimizer,
-    QQNConfig, QQNOptimizer,
+    LBFGSConfig, LBFGSOptimizer, OptimizationProblem, Optimizer, QQNConfig, QQNOptimizer,
 };
 use std::sync::Arc;

@@ -20,11 +19,11 @@ use std::sync::Arc;
 pub struct QuadraticProblem {
     name: String,
     dimension: usize,
-    matrix_a: Vec<Vec<f64>>,        // Positive definite matrix
-    vector_b: Vec<f64>,             // Linear term
-    constant_c: f64,                // Constant term
-    optimal_point: Vec<f64>,        // Known optimal point: x* = -A^(-1) * b
-    optimal_value: f64,             // Known optimal value
+    matrix_a: Vec<Vec<f64>>,  // Positive definite matrix
+    vector_b: Vec<f64>,       // Linear term
+    constant_c: f64,          // Constant term
+    optimal_point: Vec<f64>,  // Known optimal point: x* = -A^(-1) * b
+    optimal_value: f64,       // Known optimal value
 }

 impl QuadraticProblem {
@@ -40,15 +39,14 @@ impl QuadraticProblem {
         }

         // Create a random linear term
-        let vector_b: Vec<f64> = (0..dimension)
-            .map(|i| (i as f64 + 1.0) * 0.1)
-            .collect();
+        let vector_b: Vec<f64> = (0..dimension).map(|i| (i as f64 + 1.0) * 0.1).collect();

         let constant_c = 5.0;

         // Compute optimal point: x* = -A^(-1) * b
         // For diagonal A, this is simple: x*[i] = -b[i] / A[i][i]
-        let optimal_point: Vec<f64> = vector_b.iter()
+        let optimal_point: Vec<f64> = vector_b
+            .iter()
             .enumerate()
             .map(|(i, &bi)| -bi / matrix_a[i][i])
             .collect();
@@ -137,23 +135,21 @@ impl OptimizationProblem for QuadraticProblem {
 impl DifferentiableFunction for QuadraticProblem {
     fn evaluate(&self, params: &[Tensor]) -> candle_core::Result<f64> {
         // Convert tensors to f64 vector
-        let x: Result<Vec<f64>, _> = params.iter()
-            .map(|t| t.to_scalar::<f64>())
-            .collect();
+        let x: Result<Vec<f64>, _> = params.iter().map(|t| t.to_scalar::<f64>()).collect();
         let x = x?;
         // Evaluate using f64 implementation
-        let result = self.evaluate_f64(&x)
+        let result = self
+            .evaluate_f64(&x)
             .map_err(|e| candle_core::Error::Msg(format!("Evaluation error: {}", e)))?;
         Ok(result)
     }
     fn gradient(&self, params: &[Tensor]) -> candle_core::Result<Vec<Tensor>> {
         // Convert tensors to f64 vector
-        let x: Result<Vec<f64>, _> = params.iter()
-            .map(|t| t.to_scalar::<f64>())
-            .collect();
+        let x: Result<Vec<f64>, _> = params.iter().map(|t| t.to_scalar::<f64>()).collect();
         let x = x?;
         // Compute gradient using f64 implementation
-        let grad = self.gradient_f64(&x)
+        let grad = self
+            .gradient_f64(&x)
             .map_err(|e| candle_core::Error::Msg(format!("Gradient error: {}", e)))?;
         // Convert back to tensors
         grad.iter()
@@ -162,7 +158,6 @@ impl DifferentiableFunction for QuadraticProblem {
     }
 }

-
 fn main() -> Result<()> {
     println!("Custom Optimization Problem Example");
     println!("===================================");
@@ -191,8 +186,14 @@ fn main() -> Result<()> {
     )?;
     // Compare results
     println!("\n --- Comparison ---");
-    println!("QQN: {} iterations, final value: {:.6}", qqn_result.0, qqn_result.1);
-    println!("L-BFGS: {} iterations, final value: {:.6}", lbfgs_result.0, lbfgs_result.1);
+    println!(
+        "QQN: {} iterations, final value: {:.6}",
+        qqn_result.0, qqn_result.1
+    );
+    println!(
+        "L-BFGS: {} iterations, final value: {:.6}",
+        lbfgs_result.0, lbfgs_result.1
+    );
     let qqn_error = (qqn_result.1 - problem.optimal_value().unwrap()).abs();
     let lbfgs_error = (lbfgs_result.1 - problem.optimal_value().unwrap()).abs();
     println!("QQN error: {:.2e}", qqn_error);
@@ -214,7 +215,8 @@ fn run_optimizer(
     let initial_point = problem.initial_point();
     let device = Device::Cpu;
     // Convert initial point to tensors
-    let mut params: Vec<Tensor> = initial_point.iter()
+    let mut params: Vec<Tensor> = initial_point
+        .iter()
         .map(|&val| Tensor::from_slice(&[val], (1,), &device))
         .collect::<candle_core::Result<Vec<_>>>()
         .map_err(|e| anyhow::anyhow!("Failed to create tensors: {}", e))?;
@@ -223,31 +225,38 @@ fn run_optimizer(
     println!("Starting {} optimization...", name);
     while iteration < max_iterations {
         // Convert tensors back to f64 for convergence checking
-        let x: Vec<f64> = params.iter()
+        let x: Vec<f64> = params
+            .iter()
             .map(|t| t.to_scalar::<f64>())
             .collect::<candle_core::Result<Vec<_>>>()
             .map_err(|e| anyhow::anyhow!("Failed to extract values: {}", e))?;
         let gradient = problem.gradient_f64(&x)?;
         let grad_norm = gradient.iter().map(|g| g * g).sum::<f64>().sqrt();
         // Perform optimization step
-        let _step_result = optimizer.step(&mut params, problem.clone())
+        let _step_result = optimizer
+            .step(&mut params, problem.clone())
             .map_err(|e| anyhow::anyhow!("Optimizer step failed: {}", e))?;
         iteration += 1;
         // Print progress occasionally
         if iteration % 50 == 0 {
-            let x: Vec<f64> = params.iter()
+            let x: Vec<f64> = params
+                .iter()
                 .map(|t| t.to_scalar::<f64>())
                 .collect::<candle_core::Result<Vec<_>>>()
                 .map_err(|e| anyhow::anyhow!("Failed to extract values: {}", e))?;
             let f_val = problem.evaluate_f64(&x)?;
-            println!(" Iteration {}: f = {:.6}, ||∇f|| = {:.2e}", iteration, f_val, grad_norm);
+            println!(
+                " Iteration {}: f = {:.6}, ||∇f|| = {:.2e}",
+                iteration, f_val, grad_norm
+            );
         }
     }
     // Convert final parameters back to f64 for evaluation
-    let final_x: Vec<f64> = params.iter()
+    let final_x: Vec<f64> = params
+        .iter()
         .map(|t| t.to_scalar::<f64>())
         .collect::<candle_core::Result<Vec<_>>>()
         .map_err(|e| anyhow::anyhow!("Failed to extract final values: {}", e))?;
     let final_value = problem.evaluate_f64(&final_x)?;
     Ok((iteration, final_value))
-}
+}
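
Aside from the formatting changes above, the only math this example relies on is the closed-form optimum referenced in the `QuadraticProblem` comments: for f(x) = 1/2 xᵀAx + bᵀx + c with a diagonal positive definite A, the minimizer is x* = -A⁻¹b, i.e. x*[i] = -b[i] / A[i][i]. The standalone sketch below (not part of this commit; the diagonal entries of A are illustrative assumptions) checks that the gradient Ax + b vanishes at that point:

```rust
// Standalone sketch: verify x*[i] = -b[i] / A[i][i] for a diagonal quadratic.
// The matrix and linear term here are illustrative, not the crate's own values.
fn main() {
    let dimension = 5;

    // Diagonal positive definite A with entries 1.0..=5.0 (assumed for the demo).
    let matrix_a: Vec<Vec<f64>> = (0..dimension)
        .map(|i| {
            let mut row = vec![0.0; dimension];
            row[i] = i as f64 + 1.0;
            row
        })
        .collect();
    // Linear term b, mirroring the example's (i + 1) * 0.1 construction.
    let vector_b: Vec<f64> = (0..dimension).map(|i| (i as f64 + 1.0) * 0.1).collect();

    // For diagonal A the inverse is elementwise, so x*[i] = -b[i] / A[i][i].
    let optimal_point: Vec<f64> = vector_b
        .iter()
        .enumerate()
        .map(|(i, &bi)| -bi / matrix_a[i][i])
        .collect();

    // Gradient of f(x) = 1/2 x^T A x + b^T x + c is A x + b; it should vanish at x*.
    let gradient: Vec<f64> = (0..dimension)
        .map(|i| {
            let ax_i: f64 = (0..dimension).map(|j| matrix_a[i][j] * optimal_point[j]).sum();
            ax_i + vector_b[i]
        })
        .collect();

    let grad_norm = gradient.iter().map(|g| g * g).sum::<f64>().sqrt();
    println!("||∇f(x*)|| = {:.2e}", grad_norm); // expected to be ~0
}
```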