
Commit 5e60afe

Copilot and acharneski committed
Fix clippy warnings and rustfmt issues - CI build now passing
Co-authored-by: acharneski <[email protected]>
1 parent fddb039 · commit 5e60afe


41 files changed: +878 −1217 lines
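The commit message refers to the repository's CI lints. A minimal way to reproduce them locally (a sketch using the standard cargo commands; the actual CI workflow may pass different flags):

    cargo fmt --all -- --check                  # rustfmt: fail if any file is unformatted
    cargo clippy --all-targets -- -D warnings   # clippy: promote warnings to errors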

examples/basic_usage.rs

Lines changed: 31 additions & 18 deletions
@@ -8,19 +8,16 @@
 
 use anyhow::Result;
 use candle_core::{Device, Tensor};
-use qqn_optimizer::utils::math::SeparateFunctions;
-use qqn_optimizer::{
-    OptimizationProblem, Optimizer, QQNConfig,
-    QQNOptimizer,
-};
-use std::sync::Arc;
 use qqn_optimizer::benchmarks::analytic_functions::RosenbrockFunction;
 use qqn_optimizer::line_search::{LineSearchConfig, LineSearchMethod};
+use qqn_optimizer::utils::math::SeparateFunctions;
+use qqn_optimizer::{OptimizationProblem, Optimizer, QQNConfig, QQNOptimizer};
+use std::sync::Arc;
 
 fn main() -> Result<()> {
     // Configure the QQN optimizer
     let config = QQNConfig {
-        lbfgs_history: 10,       // L-BFGS history length
+        lbfgs_history: 10, // L-BFGS history length
         min_lbfgs_iterations: 2,
         line_search: LineSearchConfig {
             method: LineSearchMethod::StrongWolfe,
@@ -30,11 +27,11 @@ fn main() -> Result<()> {
             initial_step: 1.0,
             min_step: 1e-16,
             max_step: 1e16,
-            verbose: false,          // Enable verbose output for line search
+            verbose: false, // Enable verbose output for line search
             line_bracket_method: 1, // 1: gradient-based bracketing, 2: function-value-based bracketing
         },
-        epsilon: 1e-8,           // Numerical stability constant
-        verbose: false,          // Enable verbose output
+        epsilon: 1e-8, // Numerical stability constant
+        verbose: false, // Enable verbose output
         min_step_persist: 0.0,
         min_step_size: 0.0,
         gradient_scale_factor: 1.0,
@@ -49,7 +46,10 @@ fn main() -> Result<()> {
 
     println!("Starting optimization of 2D Rosenbrock function");
     println!("Initial point: {:?}", initial_point);
-    println!("Initial value: {:.6}", problem.evaluate_f64(&initial_point)?);
+    println!(
+        "Initial value: {:.6}",
+        problem.evaluate_f64(&initial_point)?
+    );
 
     // Optimization loop
     let mut iteration = 0;
@@ -63,7 +63,10 @@ fn main() -> Result<()> {
         // Print progress
         if iteration % 10 == 0 {
             let f_val = problem.evaluate_f64(&initial_point)?;
-            println!("Iteration {}: f = {:.6}, ||∇f|| = {:.6}", iteration, f_val, grad_norm);
+            println!(
+                "Iteration {}: f = {:.6}, ||∇f|| = {:.6}",
+                iteration, f_val, grad_norm
+            );
         }
 
         // Check convergence
@@ -78,22 +81,31 @@ fn main() -> Result<()> {
                 let problem = problem.clone();
                 move |params: &[Tensor]| -> candle_core::Result<f64> {
                     let x_vec = params[0].to_vec1::<f64>()?;
-                    problem.evaluate_f64(&x_vec).map_err(|e| candle_core::Error::Msg(e.to_string()))
+                    problem
+                        .evaluate_f64(&x_vec)
+                        .map_err(|e| candle_core::Error::Msg(e.to_string()))
                 }
             },
             {
                 let problem = problem.clone();
                 let device = device.clone();
                 move |params: &[Tensor]| -> candle_core::Result<Vec<Tensor>> {
                     let x_vec = params[0].to_vec1::<f64>()?;
-                    let grad = problem.gradient_f64(&x_vec).map_err(|e| candle_core::Error::Msg(e.to_string()))?;
-                    Ok(vec![Tensor::from_slice(&grad, grad.len(), &device).map_err(|e| candle_core::Error::Msg(e.to_string()))?])
+                    let grad = problem
+                        .gradient_f64(&x_vec)
+                        .map_err(|e| candle_core::Error::Msg(e.to_string()))?;
+                    Ok(vec![Tensor::from_slice(&grad, grad.len(), &device)
+                        .map_err(|e| candle_core::Error::Msg(e.to_string()))?])
                 }
             },
         ));
 
         // Convert Vec<f64> to Tensor for optimizer
-        let mut x_tensor = vec![Tensor::from_slice(&initial_point, initial_point.len(), &device)?];
+        let mut x_tensor = vec![Tensor::from_slice(
+            &initial_point,
+            initial_point.len(),
+            &device,
+        )?];
 
         // Perform optimization step
         let _step_result = optimizer.step(&mut x_tensor, function.clone())?;
@@ -122,7 +134,8 @@ fn main() -> Result<()> {
 
     // Compare with known optimum
     let optimum = vec![1.0, 1.0];
-    let distance_to_optimum = initial_point.iter()
+    let distance_to_optimum = initial_point
+        .iter()
         .zip(&optimum)
         .map(|(xi, opt)| (xi - opt).powi(2))
         .sum::<f64>()
@@ -137,4 +150,4 @@ fn main() -> Result<()> {
     }
 
     Ok(())
-}
\ No newline at end of file
+}
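Aside: the `optimum = vec![1.0, 1.0]` this example checks against is the known minimizer of the 2-D Rosenbrock function imported above. A self-contained sketch of that function (illustrative only, not the crate's RosenbrockFunction):

    // 2-D Rosenbrock: f(x, y) = (1 - x)^2 + 100 * (y - x^2)^2.
    // Its global minimum is f = 0 at (x, y) = (1, 1), hence optimum = vec![1.0, 1.0].
    fn rosenbrock(x: f64, y: f64) -> f64 {
        (1.0 - x).powi(2) + 100.0 * (y - x * x).powi(2)
    }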

examples/custom_problem.rs

Lines changed: 38 additions & 29 deletions
@@ -10,8 +10,7 @@ use anyhow::Result;
 use candle_core::{Device, Tensor};
 use qqn_optimizer::utils::math::DifferentiableFunction;
 use qqn_optimizer::{
-    LBFGSConfig, LBFGSOptimizer, OptimizationProblem, Optimizer,
-    QQNConfig, QQNOptimizer,
+    LBFGSConfig, LBFGSOptimizer, OptimizationProblem, Optimizer, QQNConfig, QQNOptimizer,
 };
 use std::sync::Arc;
 
@@ -20,11 +19,11 @@ use std::sync::Arc;
 pub struct QuadraticProblem {
     name: String,
     dimension: usize,
-    matrix_a: Vec<Vec<f64>>,     // Positive definite matrix
-    vector_b: Vec<f64>,          // Linear term
-    constant_c: f64,             // Constant term
-    optimal_point: Vec<f64>,     // Known optimal point: x* = -A^(-1) * b
-    optimal_value: f64,          // Known optimal value
+    matrix_a: Vec<Vec<f64>>, // Positive definite matrix
+    vector_b: Vec<f64>, // Linear term
+    constant_c: f64, // Constant term
+    optimal_point: Vec<f64>, // Known optimal point: x* = -A^(-1) * b
+    optimal_value: f64, // Known optimal value
 }
 
 impl QuadraticProblem {
@@ -40,15 +39,14 @@ impl QuadraticProblem {
         }
 
         // Create a random linear term
-        let vector_b: Vec<f64> = (0..dimension)
-            .map(|i| (i as f64 + 1.0) * 0.1)
-            .collect();
+        let vector_b: Vec<f64> = (0..dimension).map(|i| (i as f64 + 1.0) * 0.1).collect();
 
         let constant_c = 5.0;
 
         // Compute optimal point: x* = -A^(-1) * b
         // For diagonal A, this is simple: x*[i] = -b[i] / A[i][i]
-        let optimal_point: Vec<f64> = vector_b.iter()
+        let optimal_point: Vec<f64> = vector_b
+            .iter()
             .enumerate()
             .map(|(i, &bi)| -bi / matrix_a[i][i])
             .collect();
@@ -137,23 +135,21 @@ impl OptimizationProblem for QuadraticProblem {
 impl DifferentiableFunction for QuadraticProblem {
     fn evaluate(&self, params: &[Tensor]) -> candle_core::Result<f64> {
         // Convert tensors to f64 vector
-        let x: Result<Vec<f64>, _> = params.iter()
-            .map(|t| t.to_scalar::<f64>())
-            .collect();
+        let x: Result<Vec<f64>, _> = params.iter().map(|t| t.to_scalar::<f64>()).collect();
         let x = x?;
         // Evaluate using f64 implementation
-        let result = self.evaluate_f64(&x)
+        let result = self
+            .evaluate_f64(&x)
             .map_err(|e| candle_core::Error::Msg(format!("Evaluation error: {}", e)))?;
         Ok(result)
     }
     fn gradient(&self, params: &[Tensor]) -> candle_core::Result<Vec<Tensor>> {
         // Convert tensors to f64 vector
-        let x: Result<Vec<f64>, _> = params.iter()
-            .map(|t| t.to_scalar::<f64>())
-            .collect();
+        let x: Result<Vec<f64>, _> = params.iter().map(|t| t.to_scalar::<f64>()).collect();
         let x = x?;
         // Compute gradient using f64 implementation
-        let grad = self.gradient_f64(&x)
+        let grad = self
+            .gradient_f64(&x)
             .map_err(|e| candle_core::Error::Msg(format!("Gradient error: {}", e)))?;
         // Convert back to tensors
         grad.iter()
@@ -162,7 +158,6 @@ impl DifferentiableFunction for QuadraticProblem {
     }
 }
 
-
 fn main() -> Result<()> {
     println!("Custom Optimization Problem Example");
     println!("===================================");
@@ -191,8 +186,14 @@ fn main() -> Result<()> {
     )?;
     // Compare results
     println!("\n--- Comparison ---");
-    println!("QQN: {} iterations, final value: {:.6}", qqn_result.0, qqn_result.1);
-    println!("L-BFGS: {} iterations, final value: {:.6}", lbfgs_result.0, lbfgs_result.1);
+    println!(
+        "QQN: {} iterations, final value: {:.6}",
+        qqn_result.0, qqn_result.1
+    );
+    println!(
+        "L-BFGS: {} iterations, final value: {:.6}",
+        lbfgs_result.0, lbfgs_result.1
+    );
     let qqn_error = (qqn_result.1 - problem.optimal_value().unwrap()).abs();
     let lbfgs_error = (lbfgs_result.1 - problem.optimal_value().unwrap()).abs();
     println!("QQN error: {:.2e}", qqn_error);
@@ -214,7 +215,8 @@ fn run_optimizer(
     let initial_point = problem.initial_point();
     let device = Device::Cpu;
     // Convert initial point to tensors
-    let mut params: Vec<Tensor> = initial_point.iter()
+    let mut params: Vec<Tensor> = initial_point
+        .iter()
         .map(|&val| Tensor::from_slice(&[val], (1,), &device))
         .collect::<candle_core::Result<Vec<_>>>()
         .map_err(|e| anyhow::anyhow!("Failed to create tensors: {}", e))?;
@@ -223,31 +225,38 @@ fn run_optimizer(
     println!("Starting {} optimization...", name);
     while iteration < max_iterations {
         // Convert tensors back to f64 for convergence checking
-        let x: Vec<f64> = params.iter()
+        let x: Vec<f64> = params
+            .iter()
             .map(|t| t.to_scalar::<f64>())
             .collect::<candle_core::Result<Vec<_>>>()
             .map_err(|e| anyhow::anyhow!("Failed to extract values: {}", e))?;
         let gradient = problem.gradient_f64(&x)?;
         let grad_norm = gradient.iter().map(|g| g * g).sum::<f64>().sqrt();
         // Perform optimization step
-        let _step_result = optimizer.step(&mut params, problem.clone())
+        let _step_result = optimizer
+            .step(&mut params, problem.clone())
             .map_err(|e| anyhow::anyhow!("Optimizer step failed: {}", e))?;
         iteration += 1;
         // Print progress occasionally
         if iteration % 50 == 0 {
-            let x: Vec<f64> = params.iter()
+            let x: Vec<f64> = params
+                .iter()
                 .map(|t| t.to_scalar::<f64>())
                .collect::<candle_core::Result<Vec<_>>>()
                .map_err(|e| anyhow::anyhow!("Failed to extract values: {}", e))?;
            let f_val = problem.evaluate_f64(&x)?;
-            println!(" Iteration {}: f = {:.6}, ||∇f|| = {:.2e}", iteration, f_val, grad_norm);
+            println!(
+                " Iteration {}: f = {:.6}, ||∇f|| = {:.2e}",
+                iteration, f_val, grad_norm
+            );
         }
     }
     // Convert final parameters back to f64 for evaluation
-    let final_x: Vec<f64> = params.iter()
+    let final_x: Vec<f64> = params
+        .iter()
         .map(|t| t.to_scalar::<f64>())
         .collect::<candle_core::Result<Vec<_>>>()
         .map_err(|e| anyhow::anyhow!("Failed to extract final values: {}", e))?;
     let final_value = problem.evaluate_f64(&final_x)?;
     Ok((iteration, final_value))
-}
\ No newline at end of file
+}
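Aside: the `optimal_point` computed in this file follows the usual quadratic-form convention f(x) = ½·xᵀAx + bᵀx + c (an assumption consistent with the comments above), whose gradient Ax + b vanishes at x* = -A⁻¹b; for the diagonal A used here this reduces to x*[i] = -b[i] / A[i][i]. A minimal standalone sketch of that closed form (illustrative, not the crate's code):

    // Minimizer of f(x) = 0.5 * x^T A x + b^T x + c for diagonal positive-definite A:
    // setting the gradient A x + b to zero gives x*[i] = -b[i] / A[i][i].
    fn diagonal_optimum(diag_a: &[f64], b: &[f64]) -> Vec<f64> {
        diag_a.iter().zip(b).map(|(&aii, &bi)| -bi / aii).collect()
    }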
