 
 #[cfg(feature = "plotting")]
 pub mod plotting;
-pub mod reporting;
-pub mod statistics;
-
-// Re-export commonly used types
-
-use crate::benchmarks::evaluation::BenchmarkResults;
-use crate::optimizers::OptResult;
-#[cfg(feature = "plotting")]
-pub use plotting::{ExtendedOptimizationTrace, PlotConfig, PlottingEngine};
-pub use reporting::{AcademicReport, CSVExporter, LaTeXExporter};
-pub use statistics::{
-    ConvergenceComparison, EffectSize, PerformanceProfiles, RobustnessAnalysis, SignificanceTest,
-    StatisticalAnalysis,
-};
-
-/// Generate comprehensive analysis report
-pub fn generate_full_analysis(results: &BenchmarkResults) -> OptResult<AnalysisReport> {
-    let stats = StatisticalAnalysis::new(results);
-    let convergence = stats.convergence_comparison().clone();
-    let performance = stats.performance_profiles().clone();
-    let robustness = stats.robustness_analysis().clone();
-
-    Ok(AnalysisReport {
-        convergence_comparison: convergence,
-        performance_profiles: performance,
-        robustness_analysis: robustness,
-        statistical_tests: stats.significance_tests().clone(),
-        effect_sizes: stats.effect_sizes(),
-    })
-}
-
-/// Complete analysis report structure
-#[derive(Debug, Clone)]
-pub struct AnalysisReport {
-    pub convergence_comparison: ConvergenceComparison,
-    pub performance_profiles: PerformanceProfiles,
-    pub robustness_analysis: RobustnessAnalysis,
-    pub statistical_tests: Vec<SignificanceTest>,
-    pub effect_sizes: Vec<EffectSize>,
-}
-
-impl AnalysisReport {
-    /// Export to LaTeX format for academic papers
-    pub fn to_latex(&self) -> OptResult<String> {
-        let exporter = LaTeXExporter::new();
-        exporter.export_report(self)
-    }
-
-    /// Export to CSV for further analysis
-    pub fn to_csv(&self, output_dir: &std::path::Path) -> OptResult<()> {
-        let exporter = CSVExporter::new();
-        exporter.export_report(self, output_dir)
-    }
-
-    /// Generate summary statistics
-    pub fn summary(&self) -> String {
-        format!(
-            "Analysis Summary:\n\
-             - Problems analyzed: {}\n\
-             - Optimizers compared: {}\n\
-             - Significant improvements: {}\n\
-             - Average effect size: {:.3}",
-            self.convergence_comparison.num_problems(),
-            self.convergence_comparison.num_optimizers(),
-            self.statistical_tests
-                .iter()
-                .filter(|t| t.is_significant())
-                .count(),
-            self.effect_sizes.iter().map(|e| e.magnitude()).sum::<f64>()
-                / self.effect_sizes.len() as f64
-        )
-    }
-}
 
 #[cfg(test)]
 mod tests {
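For context, the removed block exposed a small public reporting API (`generate_full_analysis` plus the `AnalysisReport` helpers). Below is a minimal sketch of how a caller might have driven it before this removal, based only on the signatures visible in the diff; the `crate::analysis` module path and the `export_analysis` helper are assumptions, not part of this commit, and `OptResult` is assumed to behave like a standard `Result`.

```rust
use crate::analysis::generate_full_analysis;
use crate::benchmarks::evaluation::BenchmarkResults;
use crate::optimizers::OptResult;

/// Hypothetical caller of the removed API.
fn export_analysis(results: &BenchmarkResults, out_dir: &std::path::Path) -> OptResult<()> {
    // Build the combined report from benchmark results.
    let report = generate_full_analysis(results)?;

    // Print the plain-text summary.
    println!("{}", report.summary());

    // Write the LaTeX report; I/O errors are ignored in this sketch because
    // the error type behind OptResult is not shown in the diff.
    let latex = report.to_latex()?;
    let _ = std::fs::write(out_dir.join("analysis.tex"), latex);

    // Emit the CSV tables alongside it.
    report.to_csv(out_dir)
}
```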
|