From 3ccf3da2971259d42bd99d7747e97d81d48d52df Mon Sep 17 00:00:00 2001
From: relf
Date: Sat, 14 Oct 2023 18:15:16 +0200
Subject: [PATCH] Refactor nlopt feature use

---
 ego/src/egor.rs        |   2 +-
 ego/src/egor_solver.rs | 135 +++------------------------------
 ego/src/lib.rs         |   4 +-
 ego/src/optimizer.rs   | 165 ++++++++++++++++++++++++++++-------------
 4 files changed, 125 insertions(+), 181 deletions(-)

diff --git a/ego/src/egor.rs b/ego/src/egor.rs
index 296fc926..ce7a5493 100644
--- a/ego/src/egor.rs
+++ b/ego/src/egor.rs
@@ -558,7 +558,7 @@ mod tests {
             .q_points(2)
             .qei_strategy(QEiStrategy::KrigingBeliever)
             .doe(&doe)
-            .target(-5.508013)
+            .target(-5.5030)
             .n_iter(30)
             .run()
             .expect("Egor minimization");
diff --git a/ego/src/egor_solver.rs b/ego/src/egor_solver.rs
index c36fe8e2..0489671d 100644
--- a/ego/src/egor_solver.rs
+++ b/ego/src/egor_solver.rs
@@ -103,12 +103,8 @@
 use crate::criteria::*;
 use crate::egor_state::{find_best_result_index, EgorState, MAX_POINT_ADDITION_RETRY};
 use crate::errors::{EgoError, Result};
-#[cfg(feature = "nlopt")]
-use crate::lhs_optimizer::LhsOptimizer;
-
 use crate::mixint::*;
-#[cfg(not(feature = "nlopt"))]
 use crate::optimizer::*;
 use crate::types::*;
@@ -121,10 +117,9 @@
 use finitediff::FiniteDiff;
 use linfa::ParamGuard;
 use log::{debug, info, warn};
 use ndarray::{
-    arr1, concatenate, s, Array, Array1, Array2, ArrayBase, ArrayView2, Axis, Data, Ix1, Ix2, Zip,
+    concatenate, s, Array, Array1, Array2, ArrayBase, ArrayView2, Axis, Data, Ix1, Ix2, Zip,
 };
 use ndarray_npy::{read_npy, write_npy};
-use ndarray_rand::rand::SeedableRng;
 use ndarray_stats::QuantileExt;
 use rand_xoshiro::Xoshiro256Plus;
@@ -932,7 +927,6 @@ where
         let (scale_infill_obj, scale_cstr, scale_wb2) =
             self.compute_scaling(sampling, obj_model, cstr_models, *f_min);
-        #[cfg(not(feature = "nlopt"))]
         let algorithm = match self.infill_optimizer {
             InfillOptimizer::Slsqp => crate::optimizer::Algorithm::Slsqp,
             InfillOptimizer::Cobyla => crate::optimizer::Algorithm::Cobyla,
         };
@@ -1017,41 +1011,20 @@ where
                 }
             })
             .collect();
+        let cstr_refs: Vec<_> = cstrs.iter().map(|c| c.as_ref()).collect();
 
         info!("Optimize infill criterion...");
+        let obj_data = ObjData {
+            scale_infill_obj,
+            scale_cstr: scale_cstr.to_owned(),
+            scale_wb2,
+        };
         while !success && n_optim <= n_max_optim {
             let x_start = sampling.sample(self.n_start);
 
             if let Some(seed) = lhs_optim_seed {
-                let obj_data = ObjData {
-                    scale_infill_obj,
-                    scale_cstr: scale_cstr.to_owned(),
-                    scale_wb2,
-                };
-                // let cstr_refs: Vec<&(dyn crate::types::ObjFn<ObjData<f64>> + Sync)> =
-                //     cstrs.iter().map(|c| c.as_ref()).collect();
-                // let (x_opt, _) = LhsOptimizer::new(&self.xlimits, &obj, cstr_refs, &obj_data)
-                //     .with_rng(Xoshiro256Plus::seed_from_u64(seed))
-                //     .minimize()
-                //     .unwrap();
-                // let (_, x_opt) =
-                //     Optimizer::new(Algorithm::Lhs, &obj, cstr_refs, &obj_data, &self.xlimits)
-                //         .seed(seed)
-                //         .minimize();
-                #[cfg(feature = "nlopt")]
-                let cstr_refs: Vec<_> = cstrs.iter().map(|c| c.as_ref()).collect();
-                #[cfg(feature = "nlopt")]
-                let (_, x_opt) = LhsOptimizer::new(&self.xlimits, &obj, &cstr_refs, &obj_data)
-                    .with_rng(Xoshiro256Plus::seed_from_u64(seed))
-                    .minimize();
-
-                #[cfg(not(feature = "nlopt"))]
-                let cstr_refs: Vec<
-                    &(dyn crate::types::ObjFn<ObjData<f64>> + Sync),
-                > = cstrs.iter().map(|c| c.as_ref()).collect();
-                #[cfg(not(feature = "nlopt"))]
                 let (_, x_opt) =
-                    Optimizer::new(Algorithm::Lhs, &obj, cstr_refs, &obj_data, &self.xlimits)
+                    Optimizer::new(Algorithm::Lhs, &obj, &cstr_refs, &obj_data, &self.xlimits)
                         .seed(seed)
                         .minimize();
@@ -1060,85 +1033,15 @@ where
                 success = true;
             } else {
                 let dim = x_data.ncols();
-
                 let res = (0..self.n_start)
                     .into_par_iter()
                     .map(|i| {
-                        #[cfg(feature = "nlopt")]
-                        {
-                            use nlopt::*;
-                            let algorithm = match self.infill_optimizer {
-                                InfillOptimizer::Slsqp => nlopt::Algorithm::Slsqp,
-                                InfillOptimizer::Cobyla => nlopt::Algorithm::Cobyla,
-                            };
-                            let mut optimizer = Nlopt::new(
-                                algorithm,
-                                dim,
-                                obj,
-                                Target::Minimize,
-                                ObjData {
-                                    scale_infill_obj,
-                                    scale_cstr: scale_cstr.to_owned(),
-                                    scale_wb2,
-                                },
-                            );
-                            let lower = self.xlimits.column(0).to_owned();
-                            optimizer
-                                .set_lower_bounds(lower.as_slice().unwrap())
-                                .unwrap();
-                            let upper = self.xlimits.column(1).to_owned();
-                            optimizer
-                                .set_upper_bounds(upper.as_slice().unwrap())
-                                .unwrap();
-                            optimizer.set_maxeval(200).unwrap();
-                            optimizer.set_ftol_rel(1e-4).unwrap();
-                            optimizer.set_ftol_abs(1e-4).unwrap();
-                            cstrs.iter().enumerate().for_each(|(i, cstr)| {
-                                optimizer
-                                    .add_inequality_constraint(
-                                        cstr,
-                                        ObjData {
-                                            scale_infill_obj,
-                                            scale_wb2,
-                                            scale_cstr: scale_cstr.to_owned(),
-                                        },
-                                        self.cstr_tol / scale_cstr[i],
-                                    )
-                                    .unwrap();
-                            });
-
-                            let mut x_opt = x_start.row(i).to_vec();
-                            match optimizer.optimize(&mut x_opt) {
-                                Ok((_, opt)) => (opt, arr1(&x_opt)),
-                                Err((err, code)) => {
-                                    debug!("Nlopt Err: {:?} (y_opt={})", err, code);
-                                    (f64::INFINITY, arr1(&x_opt))
-                                }
-                            }
-                        }
-
-                        #[cfg(not(feature = "nlopt"))]
-                        {
-                            let cstr_refs: Vec<&(dyn crate::types::ObjFn<ObjData<f64>> + Sync)> =
-                                cstrs.iter().map(|c| c.as_ref()).collect();
-
-                            Optimizer::new(
-                                algorithm,
-                                &obj,
-                                cstr_refs,
-                                &ObjData {
-                                    scale_infill_obj,
-                                    scale_cstr: scale_cstr.to_owned(),
-                                    scale_wb2,
-                                },
-                                &self.xlimits,
-                            )
+                        Optimizer::new(algorithm, &obj, &cstr_refs, &obj_data, &self.xlimits)
                             .xinit(&x_start.row(i))
                             .max_eval(200)
                             .ftol_rel(1e-4)
                             .ftol_abs(1e-4)
                             .minimize()
-                        }
                     })
                     .reduce(
                         || (f64::INFINITY, Array::ones((dim,))),
@@ -1155,26 +1058,8 @@ where
         if n_optim == n_max_optim && best_x.is_none() {
             info!("All optimizations fail => Trigger LHS optimization");
-            let obj_data = ObjData {
-                scale_infill_obj,
-                scale_cstr: scale_cstr.to_owned(),
-                scale_wb2,
-            };
-
-            #[cfg(feature = "nlopt")]
-            let cstr_refs: Vec<_> = cstrs.iter().map(|c| c.as_ref()).collect();
-            #[cfg(feature = "nlopt")]
-            let (_, x_opt) = LhsOptimizer::new(&self.xlimits, &obj, &cstr_refs, &obj_data)
-                .with_rng(Xoshiro256Plus::from_entropy())
-                .minimize();
-
-            #[cfg(not(feature = "nlopt"))]
-            let cstr_refs: Vec<
-                &(dyn crate::types::ObjFn<ObjData<f64>> + Sync),
-            > = cstrs.iter().map(|c| c.as_ref()).collect();
-            #[cfg(not(feature = "nlopt"))]
             let (_, x_opt) =
-                Optimizer::new(Algorithm::Lhs, &obj, cstr_refs, &obj_data, &self.xlimits)
+                Optimizer::new(Algorithm::Lhs, &obj, &cstr_refs, &obj_data, &self.xlimits)
                     .minimize();
 
             info!("LHS optimization best_x {}", x_opt);
diff --git a/ego/src/lib.rs b/ego/src/lib.rs
index 6603bf88..96ccad75 100644
--- a/ego/src/lib.rs
+++ b/ego/src/lib.rs
@@ -194,10 +194,8 @@
 mod errors;
 mod mixint;
 mod types;
 
-#[cfg(not(feature = "nlopt"))]
-mod optimizer;
-
 mod lhs_optimizer;
+mod optimizer;
 mod sort_axis;
 mod utils;
diff --git a/ego/src/optimizer.rs b/ego/src/optimizer.rs
index 1d995a2c..8265be3b 100644
--- a/ego/src/optimizer.rs
+++ b/ego/src/optimizer.rs
@@ -38,14 +38,14 @@ impl<'a> Optimizer<'a> {
     pub fn new(
         algo: Algorithm,
         fun: &'a (dyn ObjFn<ObjData<f64>> + Sync),
-        cons: Vec<&'a (dyn ObjFn<ObjData<f64>> + Sync)>,
+        cons: &[&'a (dyn ObjFn<ObjData<f64>> + Sync)],
         user_data: &'a ObjData<f64>,
         bounds: &Array2<f64>,
     ) -> Self {
         Optimizer {
             algo,
             fun,
-            cons,
+            cons: cons.to_vec(),
             bounds: bounds.clone(),
             user_data,
             max_eval: 200,
@@ -81,62 +81,123 @@ impl<'a> Optimizer<'a> {
         self
     }
 
+    #[cfg(feature = "nlopt")]
+    fn nlopt_minimize(&self, algo: nlopt::Algorithm) -> (f64, Array1<f64>) {
+        use nlopt::*;
+        let mut optimizer = Nlopt::new(
+            algo,
+            self.bounds.nrows(),
+            self.fun,
+            Target::Minimize,
+            self.user_data.clone(),
+        );
+        let lower = self.bounds.column(0).to_owned();
+        optimizer
+            .set_lower_bounds(lower.as_slice().unwrap())
+            .unwrap();
+        let upper = self.bounds.column(1).to_owned();
+        optimizer
+            .set_upper_bounds(upper.as_slice().unwrap())
+            .unwrap();
+        optimizer.set_maxeval(self.max_eval as u32).unwrap();
+        optimizer
+            .set_ftol_rel(self.ftol_rel.unwrap_or(0.0))
+            .unwrap();
+        optimizer
+            .set_ftol_abs(self.ftol_abs.unwrap_or(0.0))
+            .unwrap();
+        self.cons.iter().enumerate().for_each(|(i, cstr)| {
+            optimizer
+                .add_inequality_constraint(
+                    cstr,
+                    self.user_data.clone(),
+                    // self.cstr_tol / self.user_data.scale_cstr[i],
+                    2e-4 / self.user_data.scale_cstr[i],
+                )
+                .unwrap();
+        });
+
+        let mut x_opt = self.xinit.clone().unwrap().to_vec();
+        match optimizer.optimize(&mut x_opt) {
+            Ok((_, opt)) => (opt, arr1(&x_opt)),
+            Err((_err, _code)) => {
+                // debug!("Nlopt Err: {:?} (y_opt={})", err, code);
+                (f64::INFINITY, arr1(&x_opt))
+            }
+        }
+    }
+
     pub fn minimize(&self) -> (f64, Array1<f64>) {
         match self.algo {
             Algorithm::Cobyla => {
-                let xinit = self.xinit.clone().unwrap().to_vec();
-                let bounds: Vec<_> = self
-                    .bounds
-                    .outer_iter()
-                    .map(|row| (row[0], row[1]))
-                    .collect();
-                let cstrs: Vec<_> = self
-                    .cons
-                    .iter()
-                    .map(|f| |x: &[f64], u: &mut ObjData<f64>| (*f)(x, None, u))
-                    .collect();
-                let res = cobyla::minimize(
-                    |x: &[f64], u: &mut ObjData<f64>| (self.fun)(x, None, u),
-                    &xinit,
-                    &bounds,
-                    &cstrs,
-                    self.user_data.clone(),
-                    self.max_eval,
-                    RhoBeg::All(0.5),
-                    Some(cobyla::StopTols {
-                        ftol_rel: self.ftol_rel.unwrap_or(0.0),
-                        ftol_abs: self.ftol_abs.unwrap_or(0.0),
-                        ..cobyla::StopTols::default()
-                    }),
-                );
-                match res {
-                    Ok((_, x_opt, y_opt)) => (y_opt, arr1(&x_opt)),
-                    Err((_, x_opt, _)) => (f64::INFINITY, arr1(&x_opt)),
+                #[cfg(feature = "nlopt")]
+                {
+                    self.nlopt_minimize(nlopt::Algorithm::Cobyla)
+                }
+
+                #[cfg(not(feature = "nlopt"))]
+                {
+                    let xinit = self.xinit.clone().unwrap().to_vec();
+                    let bounds: Vec<_> = self
+                        .bounds
+                        .outer_iter()
+                        .map(|row| (row[0], row[1]))
+                        .collect();
+                    let cstrs: Vec<_> = self
+                        .cons
+                        .iter()
+                        .map(|f| |x: &[f64], u: &mut ObjData<f64>| -(*f)(x, None, u))
+                        .collect();
+                    let res = cobyla::minimize(
+                        |x: &[f64], u: &mut ObjData<f64>| (self.fun)(x, None, u),
+                        &xinit,
+                        &bounds,
+                        &cstrs,
+                        self.user_data.clone(),
+                        self.max_eval,
+                        RhoBeg::All(0.5),
+                        Some(cobyla::StopTols {
+                            ftol_rel: self.ftol_rel.unwrap_or(0.0),
+                            ftol_abs: self.ftol_abs.unwrap_or(0.0),
+                            ..cobyla::StopTols::default()
+                        }),
+                    );
+                    match res {
+                        Ok((_, x_opt, y_opt)) => (y_opt, arr1(&x_opt)),
+                        Err((_, x_opt, _)) => (f64::INFINITY, arr1(&x_opt)),
+                    }
                 }
             }
             Algorithm::Slsqp => {
-                let xinit = self.xinit.clone().unwrap().to_vec();
-                let bounds: Vec<_> = self
-                    .bounds
-                    .outer_iter()
-                    .map(|row| (row[0], row[1]))
-                    .collect();
-                let res = slsqp::minimize(
-                    self.fun,
-                    &xinit,
-                    &bounds,
-                    &self.cons,
-                    self.user_data.clone(),
-                    self.max_eval,
-                    Some(slsqp::StopTols {
-                        ftol_rel: self.ftol_rel.unwrap_or(0.0),
-                        ftol_abs: self.ftol_abs.unwrap_or(0.0),
-                        ..slsqp::StopTols::default()
-                    }),
-                );
-                match res {
-                    Ok((_, x_opt, y_opt)) => (y_opt, arr1(&x_opt)),
-                    Err((_, x_opt, _)) => (f64::INFINITY, arr1(&x_opt)),
+                #[cfg(feature = "nlopt")]
+                {
+                    self.nlopt_minimize(nlopt::Algorithm::Slsqp)
+                }
+
+                #[cfg(not(feature = "nlopt"))]
+                {
+                    let xinit = self.xinit.clone().unwrap().to_vec();
+                    let bounds: Vec<_> = self
+                        .bounds
+                        .outer_iter()
+                        .map(|row| (row[0], row[1]))
+                        .collect();
+                    let res = slsqp::minimize(
+                        self.fun,
+                        &xinit,
+                        &bounds,
+                        &self.cons,
+                        self.user_data.clone(),
+                        self.max_eval,
+                        Some(slsqp::StopTols {
+                            ftol_rel: self.ftol_rel.unwrap_or(0.0),
+                            ftol_abs: self.ftol_abs.unwrap_or(0.0),
+                            ..slsqp::StopTols::default()
+                        }),
+                    );
+                    match res {
+                        Ok((_, x_opt, y_opt)) => (y_opt, arr1(&x_opt)),
+                        Err((_, x_opt, _)) => (f64::INFINITY, arr1(&x_opt)),
+                    }
                 }
             }
             Algorithm::Lhs => {
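
For illustration, a minimal standalone Rust sketch of the pattern this commit applies: Optimizer::new borrows the constraints as a slice, and minimize alone hides the feature switch, so call sites in egor_solver.rs no longer carry any cfg branching. This is not the egobox-ego implementation; every name below (ObjData, the closure signatures, the placeholder backends) is a hypothetical stand-in, and "nlopt" is used only as an example feature name.

// Sketch only: hypothetical stand-ins for the real egobox-ego types.
pub struct ObjData {
    pub scale_cstr: Vec<f64>,
}

pub struct Optimizer<'a> {
    fun: &'a (dyn Fn(&[f64], &ObjData) -> f64 + Sync),
    cons: Vec<&'a (dyn Fn(&[f64], &ObjData) -> f64 + Sync)>,
    user_data: &'a ObjData,
}

impl<'a> Optimizer<'a> {
    // Constraints arrive as a borrowed slice and the references are copied,
    // mirroring the `cons: &[...]` / `cons.to_vec()` change in the patch.
    pub fn new(
        fun: &'a (dyn Fn(&[f64], &ObjData) -> f64 + Sync),
        cons: &[&'a (dyn Fn(&[f64], &ObjData) -> f64 + Sync)],
        user_data: &'a ObjData,
    ) -> Self {
        Optimizer {
            fun,
            cons: cons.to_vec(),
            user_data,
        }
    }

    // Backend-specific code is compiled only when the feature is enabled.
    #[cfg(feature = "nlopt")]
    fn nlopt_minimize(&self, x0: &[f64]) -> (f64, Vec<f64>) {
        // An NLopt-backed implementation would live here; placeholder only.
        ((self.fun)(x0, self.user_data), x0.to_vec())
    }

    pub fn minimize(&self, x0: &[f64]) -> (f64, Vec<f64>) {
        // The cfg dispatch is confined to this single method.
        #[cfg(feature = "nlopt")]
        {
            self.nlopt_minimize(x0)
        }
        #[cfg(not(feature = "nlopt"))]
        {
            // Pure-Rust placeholder: evaluate the objective at x0 and reject
            // the point if any scaled constraint is violated (echoing the
            // scale_cstr[i] division in the patch).
            let feasible = self
                .cons
                .iter()
                .enumerate()
                .all(|(i, c)| c(x0, self.user_data) / self.user_data.scale_cstr[i] <= 0.0);
            let y = if feasible {
                (self.fun)(x0, self.user_data)
            } else {
                f64::INFINITY
            };
            (y, x0.to_vec())
        }
    }
}

fn main() {
    let obj = |x: &[f64], _u: &ObjData| x.iter().map(|v| v * v).sum::<f64>();
    let cstr = |x: &[f64], _u: &ObjData| 1.0 - x[0]; // feasible when x[0] >= 1
    let cons: Vec<&(dyn Fn(&[f64], &ObjData) -> f64 + Sync)> = vec![&cstr];
    let data = ObjData {
        scale_cstr: vec![1.0],
    };
    // The call shape mirrors the patched call sites: the constraint refs are
    // built once and passed as a slice.
    let (y, x) = Optimizer::new(&obj, &cons, &data).minimize(&[1.0, 0.5]);
    println!("y = {y}, x = {x:?}");
}

Passing constraints as a slice of trait-object references rather than a Vec, as the patched Optimizer::new does, lets a call site build cstr_refs once and hand the same slice to the seeded LHS branch, the multistart loop, and the LHS fallback, which is what the new egor_solver.rs code does with its hoisted cstr_refs and obj_data.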