Skip to content

Commit

Permalink
Make nlopt optional using cobyla and slsqp
Browse files Browse the repository at this point in the history
  • Loading branch information
relf committed Oct 13, 2023
1 parent 12f36fb commit 2c59e05
Show file tree
Hide file tree
Showing 7 changed files with 226 additions and 128 deletions.
7 changes: 5 additions & 2 deletions ego/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,13 @@ ndarray-rand = "0.14"
ndarray-npy = "0.8"
rayon = "1"

cobyla = { version = "0.5.0" }
slsqp = { version = "0.1.0" }
nlopt = { version = "0.6.0", optional = true }

rand_xoshiro = { version = "0.6", features = ["serde1"] }
argmin = { version = "0.8.0", features = ["serde1", "ctrlc"] }
instant = "0.1"
nlopt = "0.6.0"
rand_xoshiro = { version = "0.6", features = ["serde1"] }
libm = "0.2.6"
finitediff = { version = "0.1", features = ["ndarray"] }
# sort-axis
Expand Down
204 changes: 130 additions & 74 deletions ego/src/egor_solver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -102,9 +102,15 @@
use crate::criteria::*;
use crate::egor_state::{find_best_result_index, EgorState, MAX_POINT_ADDITION_RETRY};
use crate::errors::{EgoError, Result};

#[cfg(feature = "nlopt")]
use crate::lhs_optimizer::LhsOptimizer;

use crate::mixint::*;

#[cfg(not(feature = "nlopt"))]
use crate::optimizer::*;

use crate::types::*;
use crate::utils::{compute_cstr_scales, no_discrete, update_data};

Expand All @@ -115,14 +121,12 @@ use finitediff::FiniteDiff;
use linfa::ParamGuard;
use log::{debug, info, warn};
use ndarray::{
concatenate, s, Array, Array1, Array2, ArrayBase, ArrayView2, Axis, Data, Ix1, Ix2, Zip,
arr1, concatenate, s, Array, Array1, Array2, ArrayBase, ArrayView2, Axis, Data, Ix1, Ix2, Zip,
};
use ndarray_npy::{read_npy, write_npy};
use ndarray_rand::rand::SeedableRng;
use ndarray_stats::QuantileExt;

use nlopt::*;

use rand_xoshiro::Xoshiro256Plus;

use argmin::argmin_error_closure;
Expand Down Expand Up @@ -928,9 +932,10 @@ where
let (scale_infill_obj, scale_cstr, scale_wb2) =
self.compute_scaling(sampling, obj_model, cstr_models, *f_min);

#[cfg(not(feature = "nlopt"))]
let algorithm = match self.infill_optimizer {
InfillOptimizer::Slsqp => nlopt::Algorithm::Slsqp,
InfillOptimizer::Cobyla => nlopt::Algorithm::Cobyla,
InfillOptimizer::Slsqp => crate::optimizer::Algorithm::Slsqp,
InfillOptimizer::Cobyla => crate::optimizer::Algorithm::Cobyla,
};

let obj = |x: &[f64], gradient: Option<&mut [f64]>, params: &mut ObjData<f64>| -> f64 {
Expand Down Expand Up @@ -959,7 +964,7 @@ where
self.eval_infill_obj(x, obj_model, *f_min, *scale_infill_obj, *scale_wb2)
};

let cstrs: Vec<Box<dyn crate::types::ObjFn<ObjData<f64>> + Sync>> = (0..self.n_cstr)
let cstrs: Vec<_> = (0..self.n_cstr)
.map(|i| {
let index = i;
let cstr = move |x: &[f64],
Expand Down Expand Up @@ -1002,7 +1007,14 @@ where
.unwrap()[[0, 0]]
/ params.scale_cstr[index]
};
Box::new(cstr) as Box<dyn crate::types::ObjFn<ObjData<f64>> + Sync>
#[cfg(feature = "nlopt")]
{
Box::new(cstr) as Box<dyn nlopt::ObjFn<ObjData<f64>> + Sync>
}
#[cfg(not(feature = "nlopt"))]
{
Box::new(cstr) as Box<dyn crate::types::ObjFn<ObjData<f64>> + Sync>
}
})
.collect();

Expand All @@ -1016,22 +1028,32 @@ where
scale_cstr: scale_cstr.to_owned(),
scale_wb2,
};
let cstr_refs: Vec<&(dyn crate::types::ObjFn<ObjData<f64>> + Sync)> =
cstrs.iter().map(|c| c.as_ref()).collect();
// let cstr_refs: Vec<&(dyn crate::types::ObjFn<ObjData<f64>> + Sync)> =
// cstrs.iter().map(|c| c.as_ref()).collect();
// let (x_opt, _) = LhsOptimizer::new(&self.xlimits, &obj, cstr_refs, &obj_data)
// .with_rng(Xoshiro256Plus::seed_from_u64(seed))
// .minimize()
// .unwrap();
let (x_opt, _) = OptimizerBuilder::new(
crate::types::Algorithm::Lhs,
&obj,
cstr_refs,
&obj_data,
&self.xlimits,
)
.seed(seed)
.minimize()
.unwrap();
// let (_, x_opt) =
// Optimizer::new(Algorithm::Lhs, &obj, cstr_refs, &obj_data, &self.xlimits)
// .seed(seed)
// .minimize();
#[cfg(feature = "nlopt")]
let cstr_refs: Vec<_> = cstrs.iter().map(|c| c.as_ref()).collect();
#[cfg(feature = "nlopt")]
let (_, x_opt) = LhsOptimizer::new(&self.xlimits, &obj, &cstr_refs, &obj_data)
.with_rng(Xoshiro256Plus::seed_from_u64(seed))
.minimize();

#[cfg(not(feature = "nlopt"))]
let cstr_refs: Vec<
&(dyn crate::types::ObjFn<ObjData<f64>> + Sync),
> = cstrs.iter().map(|c| c.as_ref()).collect();
#[cfg(not(feature = "nlopt"))]
let (_, x_opt) =
Optimizer::new(Algorithm::Lhs, &obj, cstr_refs, &obj_data, &self.xlimits)
.seed(seed)
.minimize();

info!("LHS optimization best_x {}", x_opt);
best_x = Some(x_opt);
Expand All @@ -1042,53 +1064,84 @@ where
let res = (0..self.n_start)
.into_par_iter()
.map(|i| {
let mut optimizer = Nlopt::new(
algorithm,
dim,
obj,
Target::Minimize,
ObjData {
scale_infill_obj,
scale_cstr: scale_cstr.to_owned(),
scale_wb2,
},
);
let lower = self.xlimits.column(0).to_owned();
optimizer
.set_lower_bounds(lower.as_slice().unwrap())
.unwrap();
let upper = self.xlimits.column(1).to_owned();
optimizer
.set_upper_bounds(upper.as_slice().unwrap())
.unwrap();
optimizer.set_maxeval(200).unwrap();
optimizer.set_ftol_rel(1e-4).unwrap();
optimizer.set_ftol_abs(1e-4).unwrap();
cstrs.iter().enumerate().for_each(|(i, cstr)| {
#[cfg(feature = "nlopt")]
{
use nlopt::*;
let algorithm = match self.infill_optimizer {
InfillOptimizer::Slsqp => nlopt::Algorithm::Slsqp,
InfillOptimizer::Cobyla => nlopt::Algorithm::Cobyla,
};
let mut optimizer = Nlopt::new(
algorithm,
dim,
obj,
Target::Minimize,
ObjData {
scale_infill_obj,
scale_cstr: scale_cstr.to_owned(),
scale_wb2,
},
);
let lower = self.xlimits.column(0).to_owned();
optimizer
.add_inequality_constraint(
cstr,
ObjData {
scale_infill_obj,
scale_wb2,
scale_cstr: scale_cstr.to_owned(),
},
self.cstr_tol / scale_cstr[i],
)
.set_lower_bounds(lower.as_slice().unwrap())
.unwrap();
});

let mut x_opt = x_start.row(i).to_vec();
match optimizer.optimize(&mut x_opt) {
Ok((_, opt)) => (opt, x_opt),
Err((err, code)) => {
debug!("Nlopt Err: {:?} (y_opt={})", err, code);
(f64::INFINITY, x_opt)
let upper = self.xlimits.column(1).to_owned();
optimizer
.set_upper_bounds(upper.as_slice().unwrap())
.unwrap();
optimizer.set_maxeval(200).unwrap();
optimizer.set_ftol_rel(1e-4).unwrap();
optimizer.set_ftol_abs(1e-4).unwrap();
cstrs.iter().enumerate().for_each(|(i, cstr)| {
optimizer
.add_inequality_constraint(
cstr,
ObjData {
scale_infill_obj,
scale_wb2,
scale_cstr: scale_cstr.to_owned(),
},
self.cstr_tol / scale_cstr[i],
)
.unwrap();
});

let mut x_opt = x_start.row(i).to_vec();
match optimizer.optimize(&mut x_opt) {
Ok((_, opt)) => (opt, arr1(&x_opt)),
Err((err, code)) => {
debug!("Nlopt Err: {:?} (y_opt={})", err, code);
(f64::INFINITY, arr1(&x_opt))
}
}
}

#[cfg(not(feature = "nlopt"))]
{
let cstr_refs: Vec<&(dyn crate::types::ObjFn<ObjData<f64>> + Sync)> =
cstrs.iter().map(|c| c.as_ref()).collect();

Optimizer::new(
algorithm,
&obj,
cstr_refs,
&ObjData {
scale_infill_obj,
scale_cstr: scale_cstr.to_owned(),
scale_wb2,
},
&self.xlimits,
)
.xinit(&x_start.row(i))
.max_eval(200)
.ftol_rel(1e-4)
.ftol_abs(1e-4)
.minimize()
}
})
.reduce(
|| (f64::INFINITY, vec![1.0; dim]),
|| (f64::INFINITY, Array::ones((dim,))),
|a, b| if b.0 < a.0 { b } else { a },
);

Expand All @@ -1107,20 +1160,23 @@ where
scale_cstr: scale_cstr.to_owned(),
scale_wb2,
};
let cstr_refs = cstrs.iter().map(|c| c.as_ref()).collect();
// let (x_opt, _) = LhsOptimizer::new(&self.xlimits, &obj, cstr_refs, &obj_data)
// .with_rng(Xoshiro256Plus::from_entropy())
// .minimize()
// .unwrap();
let (x_opt, _) = OptimizerBuilder::new(
crate::types::Algorithm::Lhs,
&obj,
cstr_refs,
&obj_data,
&self.xlimits,
)
.minimize()
.unwrap();

#[cfg(feature = "nlopt")]
let cstr_refs: Vec<_> = cstrs.iter().map(|c| c.as_ref()).collect();
#[cfg(feature = "nlopt")]
let (_, x_opt) = LhsOptimizer::new(&self.xlimits, &obj, &cstr_refs, &obj_data)
.with_rng(Xoshiro256Plus::from_entropy())
.minimize();

#[cfg(not(feature = "nlopt"))]
let cstr_refs: Vec<
&(dyn crate::types::ObjFn<ObjData<f64>> + Sync),
> = cstrs.iter().map(|c| c.as_ref()).collect();
#[cfg(not(feature = "nlopt"))]
let (_, x_opt) =
Optimizer::new(Algorithm::Lhs, &obj, cstr_refs, &obj_data, &self.xlimits)
.minimize();

info!("LHS optimization best_x {}", x_opt);
best_x = Some(x_opt);
success = true;
Expand Down
10 changes: 0 additions & 10 deletions ego/src/errors.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
use nlopt::FailState;
use thiserror::Error;

/// A result type for EGO errors
Expand All @@ -16,9 +15,6 @@ pub enum EgoError {
/// When an invalid value is encountered
#[error("Value error: {0}")]
InvalidValue(String),
/// When `NLOpt` fails
#[error("NLOpt optimizer error")]
NloptFailure,
/// When Moe error occurs
#[error("MOE error")]
MoeError(#[from] egobox_moe::MoeError),
Expand All @@ -38,9 +34,3 @@ pub enum EgoError {
#[error(transparent)]
ArgminError(#[from] argmin::core::Error),
}

impl From<FailState> for EgoError {
fn from(_error: FailState) -> EgoError {
EgoError::NloptFailure
}
}
19 changes: 9 additions & 10 deletions ego/src/lhs_optimizer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,11 @@ use linfa_linalg::norm::*;
#[cfg(feature = "blas")]
use ndarray_linalg::Norm;

#[cfg(not(feature = "nlopt"))]
use crate::types::ObjFn;
use ndarray_stats::QuantileExt;
// use nlopt::ObjFn;
use crate::types::{ObjFn, Optimizer};
#[cfg(feature = "nlopt")]
use nlopt::ObjFn;

pub(crate) struct LhsOptimizer<'a, R: Rng + Clone + Sync + Send> {
xlimits: Array2<f64>,
Expand Down Expand Up @@ -132,10 +134,8 @@ impl<'a, R: Rng + Clone + Sync + Send> LhsOptimizer<'a, R> {
)
}
}
}

impl<'a, R: Rng + Clone + Sync + Send> Optimizer for LhsOptimizer<'a, R> {
fn minimize(&self) -> std::result::Result<(Array1<f64>, f64), ()> {
pub fn minimize(&self) -> (f64, Array1<f64>) {
let lhs = Lhs::new(&self.xlimits)
.kind(LhsKind::Classic)
.with_rng(self.rng.clone());
Expand All @@ -155,11 +155,11 @@ impl<'a, R: Rng + Clone + Sync + Send> Optimizer for LhsOptimizer<'a, R> {
.collect();
let yvals: Array1<_> = values.iter().map(|val| val.1).collect();
let index_min = yvals.argmin().unwrap();
Ok((values[index_min].0.to_owned(), yvals[index_min]))
(yvals[index_min], values[index_min].0.to_owned())
} else {
let l1_norms: Array1<_> = x_optims.iter().map(|opt| opt.3.norm_l1()).collect();
let index_min = l1_norms.argmin().unwrap();
Ok((x_optims[index_min].1.to_owned(), l1_norms[index_min]))
(l1_norms[index_min], x_optims[index_min].1.to_owned())
}
}
}
Expand All @@ -184,10 +184,9 @@ mod tests {
scale_wb2: 1.,
};

let (res, _) = LhsOptimizer::new(&xlimits, &obj, &cstrs, &obj_data)
let (_, res) = LhsOptimizer::new(&xlimits, &obj, &cstrs, &obj_data)
.with_rng(Xoshiro256Plus::seed_from_u64(42))
.minimize()
.unwrap();
.minimize();
assert_abs_diff_eq!(res, array![0.], epsilon = 1e-1)
}
}
4 changes: 3 additions & 1 deletion ego/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -194,8 +194,10 @@ mod errors;
mod mixint;
mod types;

mod lhs_optimizer;
#[cfg(not(feature = "nlopt"))]
mod optimizer;

mod lhs_optimizer;
mod sort_axis;
mod utils;

Expand Down
Loading

0 comments on commit 2c59e05

Please sign in to comment.