diff --git a/src/nemos/glm.py b/src/nemos/glm.py
index c4de380c..c38c2216 100644
--- a/src/nemos/glm.py
+++ b/src/nemos/glm.py
@@ -1017,6 +1017,7 @@ def update(
         return opt_step

     def get_optimal_solver_params_config(self):
+        """Return the functions for computing default step and batch size for the solver."""
         return glm_compute_optimal_stepsize_configs(self)


diff --git a/tests/conftest.py b/tests/conftest.py
index 8a36fadf..a235f7b3 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -82,6 +82,9 @@ def initialize_params(self, *args, **kwargs):
     def _predict_and_compute_loss(self, params, X, y):
         pass

+    def get_optimal_solver_params_config(self):
+        return None, None, None
+

 class MockRegressorNested(MockRegressor):
     def __init__(self, other_param: int, std_param: int = 0):
diff --git a/tests/test_base_class.py b/tests/test_base_class.py
index 4af81c30..1f89f7d6 100644
--- a/tests/test_base_class.py
+++ b/tests/test_base_class.py
@@ -24,6 +24,8 @@ def predict(self, X: Union[NDArray, jnp.ndarray]) -> jnp.ndarray:
     def score(self, X, y, score_type="pseudo-r2-McFadden"):
         pass

+    def get_optimal_solver_params_config(self):
+        return None, None, None

 class BadEstimator(Base):
     def __init__(self, param1, *args):
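
For context, a minimal sketch (not taken from the nemos codebase) of how an estimator stand-in can satisfy the new hook. It assumes, as the mocks patched above suggest, that the config is a three-element tuple whose entries are either callables that compute solver defaults (such as step size and batch size, per the docstring added to `GLM`) or `None` to defer to the solver; the class name below is hypothetical.

```python
# Hypothetical example mirroring the mocks in the diff above; the class
# name and the interpretation of the tuple slots are assumptions, not
# documented nemos API.


class MinimalMockRegressor:
    """Bare-bones stand-in that opts out of computing solver defaults."""

    def get_optimal_solver_params_config(self):
        # Three slots, matching the mock classes above: returning None in
        # each position signals that no custom default (e.g. step size or
        # batch size) is computed for this estimator.
        return None, None, None
```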