
Commit 472c237

broken opus enhancements
1 parent b864df1


47 files changed: +4268 -1987 lines

examples/custom_problem.rs

Lines changed: 1 addition & 1 deletion
@@ -120,7 +120,7 @@ impl OptimizationProblem for QuadraticProblem {
         Some(self.optimal_value)
     }
 
-    fn clone_problem(&self) -> Box<dyn OptimizationProblem> {
+    fn clone_boxed(&self) -> Box<dyn OptimizationProblem> {
         Box::new(QuadraticProblem {
             name: self.name.clone(),
             dimension: self.dimension,
src/analysis/mod.rs

Lines changed: 78 additions & 19 deletions
@@ -15,6 +15,8 @@ pub mod statistics;
 
 use crate::benchmarks::evaluation::BenchmarkResults;
 use crate::optimizers::OptResult;
+use std::path::Path;
+
 #[cfg(feature = "plotting")]
 pub use plotting::{ExtendedOptimizationTrace, PlotConfig, PlottingEngine};
 pub use reporting::{AcademicReport, CSVExporter, LaTeXExporter};
@@ -24,18 +26,21 @@ pub use statistics::{
 };
 
 /// Generate comprehensive analysis report
+///
+/// # Arguments
+/// * `results` - Benchmark results to analyze
+///
+/// # Returns
+/// Complete analysis report with statistical tests and comparisons
 pub fn generate_full_analysis(results: &BenchmarkResults) -> OptResult<AnalysisReport> {
-    let stats = StatisticalAnalysis::new(results);
-    let convergence = stats.convergence_comparison().clone();
-    let performance = stats.performance_profiles().clone();
-    let robustness = stats.robustness_analysis().clone();
+    let stats = StatisticalAnalysis::from_results(results)?;
 
     Ok(AnalysisReport {
-        convergence_comparison: convergence,
-        performance_profiles: performance,
-        robustness_analysis: robustness,
-        statistical_tests: stats.significance_tests().clone(),
-        effect_sizes: stats.effect_sizes(),
+        convergence_comparison: stats.analyze_convergence()?,
+        performance_profiles: stats.compute_performance_profiles()?,
+        robustness_analysis: stats.analyze_robustness()?,
+        statistical_tests: stats.run_significance_tests()?,
+        effect_sizes: stats.compute_effect_sizes()?,
     })
 }
 
@@ -57,7 +62,7 @@ impl AnalysisReport {
     }
 
     /// Export to CSV for further analysis
-    pub fn to_csv(&self, output_dir: &std::path::Path) -> OptResult<()> {
+    pub fn to_csv(&self, output_dir: &Path) -> OptResult<()> {
         let exporter = CSVExporter::new();
         exporter.export_report(self, output_dir)
     }
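
Note: the generate_full_analysis refactor above swaps an infallible constructor plus clone-returning getters for a fallible from_results constructor and per-analysis methods that return owned values behind the ? operator. A minimal sketch of that shape, using stand-in types throughout (the real StatisticalAnalysis, OptResult, and BenchmarkResults live elsewhere in this crate):

type OptResult<T> = Result<T, String>; // stand-in error alias

struct ConvergenceComparison;

struct StatisticalAnalysis {
    samples: Vec<f64>, // placeholder for parsed benchmark results
}

impl StatisticalAnalysis {
    // Construction is now fallible, so bad input surfaces here once,
    // not inside every accessor.
    fn from_results(samples: Vec<f64>) -> OptResult<Self> {
        if samples.is_empty() {
            return Err("no benchmark results to analyze".to_string());
        }
        Ok(Self { samples })
    }

    // Analyses compute owned values on demand, removing the .clone()
    // calls the old getter-based API forced on callers.
    fn analyze_convergence(&self) -> OptResult<ConvergenceComparison> {
        let _n = self.samples.len(); // a real analysis would go here
        Ok(ConvergenceComparison)
    }
}

fn main() -> OptResult<()> {
    let stats = StatisticalAnalysis::from_results(vec![1.0, 2.0])?;
    let _convergence = stats.analyze_convergence()?;
    Ok(())
}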
@@ -69,25 +74,79 @@ impl AnalysisReport {
             - Problems analyzed: {}\n\
             - Optimizers compared: {}\n\
             - Significant improvements: {}\n\
-            - Average effect size: {:.3}",
-            self.convergence_comparison.num_problems(),
-            self.convergence_comparison.num_optimizers(),
+            - Average effect size: {:.3}\n\
+            - Best performing optimizer: {}",
+            self.performance_profiles.num_problems(),
+            self.performance_profiles.num_optimizers(),
             self.statistical_tests
                 .iter()
                 .filter(|t| t.is_significant())
                 .count(),
-            self.effect_sizes.iter().map(|e| e.magnitude()).sum::<f64>()
-                / self.effect_sizes.len() as f64
+            self.average_effect_size(),
+            self.best_optimizer()
         )
     }
+    /// Calculate average effect size across all comparisons
+    pub fn average_effect_size(&self) -> f64 {
+        if self.effect_sizes.is_empty() {
+            return 0.0;
+        }
+        self.effect_sizes
+            .iter()
+            .map(|e| e.magnitude())
+            .sum::<f64>()
+            / self.effect_sizes.len() as f64
+    }
+    /// Identify the best performing optimizer
+    pub fn best_optimizer(&self) -> &str {
+        self.performance_profiles
+            .best_optimizer()
+            .unwrap_or("Unknown")
+    }
+    /// Check if the analysis contains significant results
+    pub fn has_significant_results(&self) -> bool {
+        self.statistical_tests
+            .iter()
+            .any(|test| test.is_significant())
+    }
 }
 
 #[cfg(test)]
 mod tests {
     #[test]
     fn test_analysis_report_creation() {
-        // This would require mock data in a real implementation
-        // For now, just test that the types compile
-        assert!(true);
+        // Test that the module structure is correct
+        use super::*;
+
+        // Verify that all submodules are accessible
+        let _ = statistics::StatisticalAnalysis;
+        let _ = reporting::AcademicReport;
+
+        #[cfg(feature = "plotting")]
+        {
+            let _ = plotting::PlotConfig;
+        }
     }
-}
+
+    #[test]
+    fn test_analysis_report_methods() {
+        use super::*;
+
+        // Create a mock report for testing
+        let report = AnalysisReport {
+            convergence_comparison: ConvergenceComparison::default(),
+            performance_profiles: PerformanceProfiles::default(),
+            robustness_analysis: RobustnessAnalysis::default(),
+            statistical_tests: vec![],
+            effect_sizes: vec![],
+        };
+
+        // Test basic methods
+        assert_eq!(report.average_effect_size(), 0.0);
+        assert!(!report.has_significant_results());
+
+        // Test summary generation
+        let summary = report.summary();
+        assert!(summary.contains("Analysis Summary"));
+    }
+}
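
Note: a usage sketch tying the new AnalysisReport helpers together. The crate path my_crate is a placeholder for the real crate name; only the item names and signatures come from this diff:

use std::path::Path;

use my_crate::analysis::generate_full_analysis;
use my_crate::benchmarks::evaluation::BenchmarkResults;
use my_crate::optimizers::OptResult;

fn report_findings(results: &BenchmarkResults) -> OptResult<()> {
    // Construction propagates analysis errors via ? rather than panicking.
    let report = generate_full_analysis(results)?;

    // summary() now folds in average_effect_size() and best_optimizer().
    println!("{}", report.summary());

    // Gate the CSV export on at least one significant test result.
    if report.has_significant_results() {
        report.to_csv(Path::new("analysis_out"))?;
    }
    Ok(())
}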
