diff --git a/atom/basemodel.py b/atom/basemodel.py
index d62ec9156..0c05ee702 100644
--- a/atom/basemodel.py
+++ b/atom/basemodel.py
@@ -3014,7 +3014,15 @@ def _prediction(
called.
"""
- Xt, yt = self.transform(X, y, verbose=verbose)
+ if y is not None or X is not None:
+ if isinstance(out := self.transform(X, y, verbose=verbose), tuple):
+ Xt, yt = out
+ elif X is not None:
+ Xt, yt = out, y
+ else:
+ Xt, yt = X, out
+ else:
+ Xt, yt = X, y
if method != "score":
fh = kwargs.get("fh")
@@ -3022,9 +3030,9 @@ def _prediction(
kwargs["fh"] = self.branch._get_rows(fh).index
if "y" in sign(func := getattr(self.estimator, method)):
- return self.memory.cache(func)(fh=fh, y=yt, X=Xt, **kwargs)
+ return self.memory.cache(func)(y=yt, X=Xt, **kwargs)
else:
- return self.memory.cache(func)(fh=fh, X=Xt, **kwargs)
+ return self.memory.cache(func)(X=Xt, **kwargs)
else:
if metric is None:
scorer = self._metric[0]
diff --git a/atom/baserunner.py b/atom/baserunner.py
index 13ff9e2fa..0c83088f7 100644
--- a/atom/baserunner.py
+++ b/atom/baserunner.py
@@ -40,7 +40,8 @@
Bool, DataFrame, FloatZeroToOneExc, HarmonicsSelector, Int, IntLargerOne,
MetricConstructor, Model, ModelSelector, ModelsSelector, Pandas,
RowSelector, Scalar, Seasonality, Segment, Sequence, Series,
- TargetSelector, YSelector, dataframe_t, int_t, segment_t, sequence_t,
+ TargetSelector, YSelector, bool_t, dataframe_t, int_t, segment_t,
+ sequence_t,
)
from atom.utils.utils import (
ClassMap, DataContainer, Goal, SeasonalPeriod, Task, bk, check_is_fitted,
@@ -888,9 +889,18 @@ def _delete_models(self, models: str | Model | Sequence[str | Model]):
self._metric = ClassMap()
@crash
- def available_models(self) -> pd.DataFrame:
+ def available_models(self, **kwargs) -> pd.DataFrame:
"""Give an overview of the available predefined models.
+ Parameters
+ ----------
+ **kwargs
+ Filter the returned models providing any of the column as
+ keyword arguments, where the value is the desired filter,
+ e.g., `accepts_sparse=True`, to get all models that accept
+ sparse input or `supports_engines="cuml"` to get all models
+ that support the [cuML][] engine.
+
Returns
-------
pd.DataFrame
@@ -902,8 +912,8 @@ def available_models(self) -> pd.DataFrame:
- **estimator:** Name of the model's underlying estimator.
- **module:** The estimator's module.
- **handles_missing:** Whether the model can handle missing
- (`NaN`) values without preprocessing. If False, consider using
- the [Imputer][] class before training the models.
+ values without preprocessing. If False, consider using the
+ [Imputer][] class before training the models.
- **needs_scaling:** Whether the model requires feature scaling.
If True, [automated feature scaling][] is applied.
- **accepts_sparse:** Whether the model accepts [sparse input][sparse-datasets].
@@ -922,7 +932,16 @@ def available_models(self) -> pd.DataFrame:
for model in MODELS:
m = model(goal=self._goal, branches=self._branches)
if self._goal.name in m._estimators:
- rows.append(m.get_tags())
+ tags = m.get_tags()
+
+ for key, value in kwargs.items():
+ k = tags.get(key)
+ if isinstance(value, bool_t) and value is not bool(k):
+ break
+ elif isinstance(value, str) and not re.search(value, k, re.I):
+ break
+ else:
+ rows.append(tags)
return pd.DataFrame(rows)
diff --git a/atom/models/ts.py b/atom/models/ts.py
index 410e5b804..df2df3e0b 100644
--- a/atom/models/ts.py
+++ b/atom/models/ts.py
@@ -21,7 +21,7 @@
class ARIMA(ForecastModel):
- """Autoregressive Integrated Moving Average Model.
+ """Autoregressive Integrated Moving Average.
Seasonal ARIMA models and exogenous input is supported, hence this
estimator is capable of fitting SARIMA, ARIMAX, and SARIMAX.
@@ -178,7 +178,7 @@ def _get_distributions(self) -> dict[str, BaseDistribution]:
class AutoARIMA(ForecastModel):
- """Automatic Autoregressive Integrated Moving Average Model.
+ """Automatic Autoregressive Integrated Moving Average.
[ARIMA][] implementation that includes automated fitting of
(S)ARIMA(X) hyperparameters (p, d, q, P, D, Q). The AutoARIMA
@@ -649,7 +649,7 @@ def _get_distributions(self) -> dict[str, BaseDistribution]:
class MSTL(ForecastModel):
- """Multiple Seasonal-Trend decomposition using LOESS model.
+ """Multiple Seasonal-Trend decomposition using LOESS.
The MSTL decomposes the time series in multiple seasonalities using
LOESS. Then forecasts the trend using a custom non-seasonal model
@@ -956,7 +956,7 @@ def _get_distributions() -> dict[str, BaseDistribution]:
class SARIMAX(ForecastModel):
- """Seasonal Autoregressive Integrated Moving Average with eXogenous factors.
+ """Seasonal Autoregressive Integrated Moving Average.
SARIMAX stands for Seasonal Autoregressive Integrated Moving Average
with eXogenous factors. It extends [ARIMA][] by incorporating seasonal
@@ -1106,7 +1106,7 @@ def _get_distributions(self) -> dict[str, BaseDistribution]:
class STL(ForecastModel):
- """Seasonal-Trend decomposition using Loess.
+ """Seasonal-Trend decomposition using LOESS.
STL is a technique commonly used for decomposing time series data
into components like trend, seasonality, and residuals.
@@ -1381,7 +1381,7 @@ def _get_distributions() -> dict[str, BaseDistribution]:
class VARMAX(ForecastModel):
- """Vector Autoregressive Moving-Average with exogenous variables.
+ """Vector Autoregressive Moving-Average.
VARMAX is an extension of the [VAR][] model that incorporates not
only lagged values of the endogenous variables, but also includes
diff --git a/atom/pipeline.py b/atom/pipeline.py
index 4e3179e5c..d2877f156 100644
--- a/atom/pipeline.py
+++ b/atom/pipeline.py
@@ -477,6 +477,9 @@ def transform(
Transformed target column. Only returned if provided.
"""
+ if X is None and y is None:
+ raise ValueError("X and y cannot be both None.")
+
for _, _, transformer in self._iter(**kwargs):
with adjust_verbosity(transformer, self.verbose):
X, y = self._mem_transform(transformer, X, y)
@@ -520,6 +523,9 @@ def inverse_transform(
Transformed target column. Only returned if provided.
"""
+ if X is None and y is None:
+ raise ValueError("X and y cannot be both None.")
+
for _, _, transformer in reversed(list(self._iter())):
with adjust_verbosity(transformer, self.verbose):
X, y = self._mem_transform(transformer, X, y, method="inverse_transform")
diff --git a/atom/plots/predictionplot.py b/atom/plots/predictionplot.py
index 4bc8b2f37..719529a26 100644
--- a/atom/plots/predictionplot.py
+++ b/atom/plots/predictionplot.py
@@ -967,10 +967,11 @@ def plot_feature_importance(
def plot_forecast(
self,
models: ModelsSelector = None,
- fh: RowSelector | ForecastingHorizon = "test",
+ fh: RowSelector | ForecastingHorizon = "dataset",
X: XSelector | None = None,
target: TargetSelector = 0,
*,
+ plot_insample: Bool = False,
plot_interval: Bool = True,
title: str | dict[str, Any] | None = None,
legend: Legend | dict[str, Any] | None = "upper left",
@@ -988,7 +989,7 @@ def plot_forecast(
models: int, str, Model, segment, sequence or None, default=None
Models to plot. If None, all models are selected.
- fh: hashable, segment, sequence, dataframe or [ForecastingHorizon][], default="test"
+ fh: hashable, segment, sequence, dataframe or [ForecastingHorizon][], default="dataset"
The [forecasting horizon][row-and-column-selection] for
which to plot the predictions.
@@ -999,6 +1000,10 @@ def plot_forecast(
target: int or str, default=0
Target column to look at. Only for [multivariate][] tasks.
+ plot_insample: bool, default=False
+ Whether to draw in-sample predictions (predictions on the training
+ set). Models that do not support this feature are silently skipped.
+
plot_interval: bool, default=True
Whether to plot prediction intervals together with the exact
        predicted values. Models without a `predict_interval` method
@@ -1040,7 +1045,7 @@ def plot_forecast(
--------
atom.plots:DataPlot.plot_distribution
atom.plots:DataPlot.plot_series
- atom.plots:PredictionPlot.plot_roc
+ atom.plots:PredictionPlot.plot_errors
Examples
--------
@@ -1070,7 +1075,7 @@ def plot_forecast(
fh = self.branch._get_rows(fh).index
if X is None:
- X = self.branch.X.loc[fh]
+ X = self.branch._all.loc[fh]
else:
X = self.transform(X)
@@ -1083,9 +1088,12 @@ def plot_forecast(
if self.task.is_multioutput:
y_pred = y_pred[target_c]
+ if not plot_insample:
+ y_pred.loc[m.branch.train.index] = np.NaN
+
fig.add_trace(
self._draw_line(
- x=self._get_plot_index(y_pred),
+ x=(x := self._get_plot_index(y_pred)),
y=y_pred,
mode="lines+markers",
parent=m.name,
@@ -1098,7 +1106,7 @@ def plot_forecast(
if plot_interval:
try:
y_pred = m.predict_interval(fh=fh, X=X)
- except NotImplementedError:
+ except (AttributeError, NotImplementedError):
continue # Fails for some models like ES
if self.task.is_multioutput:
@@ -1107,10 +1115,13 @@ def plot_forecast(
else:
y = y_pred # Univariate
+ if not plot_insample:
+ y_pred.loc[m.branch.train.index] = np.NaN
+
fig.add_traces(
[
go.Scatter(
- x=self._get_plot_index(y_pred),
+ x=x,
y=y.iloc[:, 1],
mode="lines",
line={"width": 1, "color": BasePlot._fig.get_elem(m.name)},
@@ -1121,7 +1132,7 @@ def plot_forecast(
yaxis=yaxis,
),
go.Scatter(
- x=self._get_plot_index(y_pred),
+ x=x,
y=y.iloc[:, 0],
mode="lines",
line={"width": 1, "color": BasePlot._fig.get_elem(m.name)},
@@ -1139,12 +1150,11 @@ def plot_forecast(
# Draw original time series
fig.add_trace(
go.Scatter(
- x=y_pred.index,
- y=self.branch.dataset.loc[y_pred.index, target_c],
+ x=x,
+ y=self.branch._all.loc[y_pred.index, target_c],
mode="lines+markers",
line={"width": 1, "color": "black", "dash": "dash"},
opacity=0.6,
- layer="below",
showlegend=False,
xaxis=xaxis,
yaxis=yaxis,
diff --git a/docs_sources/about.md b/docs_sources/about.md
index 019037720..79342b49e 100644
--- a/docs_sources/about.md
+++ b/docs_sources/about.md
@@ -215,6 +215,13 @@ core project contributors with a set of developer tools free of charge.
+
+
+
-
+
diff --git a/docs_sources/api/ATOM/atomforecaster.md b/docs_sources/api/ATOM/atomforecaster.md
index 18a4da805..8911be7df 100644
--- a/docs_sources/api/ATOM/atomforecaster.md
+++ b/docs_sources/api/ATOM/atomforecaster.md
@@ -145,8 +145,8 @@ of utility methods to handle the data and manage the pipeline.
- eda
- evaluate
- export_pipeline
- - get_class_weight
- get_sample_weight
+ - get_seasonal_period
- inverse_transform
- load
- merge
diff --git a/docs_sources/api/ATOM/atomregressor.md b/docs_sources/api/ATOM/atomregressor.md
index 21c551391..e886f717f 100644
--- a/docs_sources/api/ATOM/atomregressor.md
+++ b/docs_sources/api/ATOM/atomregressor.md
@@ -145,7 +145,6 @@ of utility methods to handle the data and manage the pipeline.
- eda
- evaluate
- export_pipeline
- - get_class_weight
- get_sample_weight
- inverse_transform
- load
diff --git a/docs_sources/img/logos/prophet.png b/docs_sources/img/logos/prophet.png
new file mode 100644
index 000000000..a7c137650
Binary files /dev/null and b/docs_sources/img/logos/prophet.png differ
diff --git a/docs_sources/img/logos/sktime.png b/docs_sources/img/logos/sktime.png
new file mode 100644
index 000000000..2ddf800fe
Binary files /dev/null and b/docs_sources/img/logos/sktime.png differ
diff --git a/docs_sources/license.md b/docs_sources/license.md
index 8e6adb388..6993d99cc 100644
--- a/docs_sources/license.md
+++ b/docs_sources/license.md
@@ -1,7 +1,7 @@
# MIT License
-------------
-Copyright © 2023 Mavs
+Copyright © 2019-2024 Mavs
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/docs_sources/user_guide/data_management.md b/docs_sources/user_guide/data_management.md
index f798b44bf..0ee92f9bc 100644
--- a/docs_sources/user_guide/data_management.md
+++ b/docs_sources/user_guide/data_management.md
@@ -396,7 +396,12 @@ The check is performed in the order described hereunder:
Additionally, the forecast horizon (parameter `fh`) in [forecasting tasks][time-series]
can be selected much in the same way as `rows`, where the horizon is inferred
-as the index of the row selection.
+as the index of the row selection. Note that, contrary to sktime's API but for
+consistency with the rest of ATOM's API, atom's fh starts with the training set,
+i.e., selecting `#!python atom.nf.predict(fh=range(5))` forecasts the first 5
+rows of the training set, not the test set. To get the same result as sktime, use
+`#!python atom.nf.predict(fh=range(len(atom.train), len(atom.train) + 5))` or
+`#!python atom.nf.predict(fh=atom.test.index[:5])` instead.
!!! info
diff --git a/docs_sources/user_guide/models.md b/docs_sources/user_guide/models.md
index 75670bddf..70bc0ddc1 100644
--- a/docs_sources/user_guide/models.md
+++ b/docs_sources/user_guide/models.md
@@ -40,7 +40,7 @@ per task, but can include:
- **fullname:** Name of the model's class.
- **estimator:** Name of the model's underlying estimator.
- **module:** The estimator's module.
-- **handles_missing:** Whether the model can handle missing (`NaN`) values
+- **handles_missing:** Whether the model can handle missing values
without preprocessing. If False, consider using the [Imputer][] class
before training the models.
- **needs_scaling:** Whether the model requires feature scaling. If True,
@@ -54,6 +54,11 @@ per task, but can include:
- **validation:** Whether the model has [in-training validation][].
- **supports_engines:** [Engines][estimator-acceleration] supported by the model.
+To filter for specific tags, specify the column name with the desired value
+in the arguments of `available_models`, e.g., `#!python atom.available_models(accepts_sparse=True)`
+to get all models that accept sparse input or `#!python atom.available_models(supports_engines="cuml")`
+to get all models that support the [cuML][] engine.
+
diff --git a/docs_sources/user_guide/time_series.md b/docs_sources/user_guide/time_series.md
index 90b93d674..fe526277e 100644
--- a/docs_sources/user_guide/time_series.md
+++ b/docs_sources/user_guide/time_series.md
@@ -1,23 +1,90 @@
# Time series
-------------
-Introduction
+Time series applies machine learning techniques to sequential data, where
+observations are ordered over time. This approach is crucial for predicting
+future values or events and finds applications in finance, healthcare, weather
+forecasting, and more. ATOM supports two time series tasks: univariate forecast
+and multivariate forecast.
+
+
## Exogenous variables
+Exogenous variables are external factors that can influence the target variable
+and, unlike endogenous variables, are not part of the time series being analyzed.
+
+Incorporating exogenous variables into time series models helps capture additional
+information that may impact the observed patterns. This inclusion allows for a
+more comprehensive understanding of the underlying dynamics and can lead to more
+accurate predictions.
+
+Exogenous variables are added to atom with the `X` variable. When no exogenous
+variables are provided, `atom.X` returns an empty dataframe. Note that not all
+models make use of exogenous variables. Read more [here][model-selection] about
+how to check specific model characteristics.
## Seasonality
- It refers to the regular and repeating pattern of variation in data
- that occurs at specific intervals of time. It's associated with seasonal
- effects, which are patterns that tend to recur at consistent intervals.
-
-The same period is used for all columns in a [multivariate][] setting.
+Seasonality refers to the recurring patterns that repeat at regular intervals
+over time, often corresponding to specific time periods, such as days, weeks,
+or months, and can significantly influence the observed data.
+
+Add seasonality to atom using the [`sp`][atomforecaster-sp] parameter. You can
+add a single value for single seasonality or a sequence of values for multiple
+seasonalities. If you don't know the seasonality a priori, you can use the
+[`get_seasonal_period`][atomforecaster-get_seasonal_period] method to
+automatically detect the seasonality, e.g. `#!python atom.sp = atom.get_seasonal_period()`.
+
+The majority of models only support one seasonal period. If more than one period
+is defined, such models only use the first one. Read [here][model-selection] how
+to check which models support multiple seasonality.
+
+!!! info
+ In a [multivariate][] setting, the same period is used for all target columns.
## Forecasting with regressors
-No in-sample predictions, so scores on training set are `NaN`.
+All of ATOM's [regressors][predefined-models] can also be used in forecasting
+tasks. Simply select the regressor like any other model, e.g.,
+`#!python atom.run(models="RF")` to use a [RandomForest][] model.
+
+The regressor is automatically converted to a forecaster, based on reduction
+to tabular or time-series regression. During fitting, a sliding-window approach
+is used to first transform the time series into tabular or panel data, which is
+then used to fit a tabular or time-series regression estimator. During prediction,
+the last available data is used as input to the fitted regression estimator to
+generate forecasts.
+
+See below a graphical representation of the reduction logic using the following
+symbols:
+
+- y: forecast target.
+- x: past values of y that are used as features (X) to forecast y.
+- *: observations, past or future, neither part of the window nor forecast.
+
+Assume we have the following training data (15 observations):
+
+|------------------------------|
+| * * * * * * * * * * * * * * *|
+|------------------------------|
+
+The reducer targets the first data point after the window, irrespective of the
+forecasting horizons requested. In the example, the following five windows are
+created:
+
+|------------------------------|
+| x x x x x x x x x x y * * * *|
+| * x x x x x x x x x x y * * *|
+| * * x x x x x x x x x x y * *|
+| * * * x x x x x x x x x x y *|
+| * * * * x x x x x x x x x x y|
+|------------------------------|
+
+!!! warning
+ Regressor forecasters do not support in-sample predictions. Scores on the
+ training set return `NaN`.
diff --git a/mkdocs.yml b/mkdocs.yml
index 16aa00332..737aa492a 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -6,7 +6,7 @@ repo_url: https://github.com/tvdboom/ATOM
edit_uri: ""
docs_dir: docs_sources/
site_dir: docs/
-copyright: Copyright © 2023, by Mavs.
+copyright: Copyright © 2019-2024, by Mavs.
theme:
name: material
diff --git a/tests/conftest.py b/tests/conftest.py
index 7f9c2b28a..d1056b8e2 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -333,7 +333,7 @@ def get_train_test(
# Time series datasets
y_fc = load_airline()
-y_ex, X_ex = load_longley()
+y_multivariate = load_longley()
# Train and test sets per task
bin_train, bin_test = get_train_test(X_bin, y_bin)
diff --git a/tests/test_baserunner.py b/tests/test_baserunner.py
index ff1e12bf8..f7b03b5f6 100644
--- a/tests/test_baserunner.py
+++ b/tests/test_baserunner.py
@@ -788,10 +788,12 @@ def test_get_models_remove_duplicates():
def test_available_models():
"""Assert that the available_models method shows the models per task."""
atom = ATOMClassifier(X_bin, y_bin, random_state=1)
- models = atom.available_models()
+ models = atom.available_models(native_multioutput=True, supports_engines="cuml")
assert isinstance(models, pd.DataFrame)
- assert "LR" in models["acronym"].unique()
+ assert "RF" in models["acronym"].unique()
assert "BR" not in models["acronym"].unique() # Is not a classifier
+ assert "MLP" not in models["acronym"].unique() # Is not native multioutput
+ assert models["supports_engines"].str.contains("cuml").all()
def test_clear():
@@ -881,6 +883,37 @@ def test_get_sample_weight_multioutput():
assert len(atom.get_sample_weight()) == len(atom.train)
+def test_get_seasonal_period_no_harmonics():
+ """Assert that the get_seasonal_period returns a list of periods."""
+ atom = ATOMForecaster(y_fc, random_state=1)
+ assert atom.get_seasonal_period(harmonics=None) == [12, 24, 36, 11, 48]
+
+
+def test_get_seasonal_period_drop_harmonics():
+ """Assert that the harmonics are dropped from the seasonal periods."""
+ atom = ATOMForecaster(y_fc, random_state=1)
+ assert atom.get_seasonal_period(harmonics="drop") == [12, 11]
+
+
+def test_get_seasonal_period_raw_strength_harmonics():
+ """Assert that the strongest harmonics are kept in the seasonal periods."""
+ atom = ATOMForecaster(y_fc, random_state=1)
+ assert atom.get_seasonal_period(harmonics="raw_strength") == [11, 48]
+
+
+def test_get_seasonal_period_harmonic_strength_harmonics():
+ """Assert that the strongest harmonics are pushed forward."""
+ atom = ATOMForecaster(y_fc, random_state=1)
+ assert atom.get_seasonal_period(harmonics="harmonic_strength") == [48, 11]
+
+
+def test_get_seasonal_period_no_periods():
+ """Assert that an error is raised when no periods are detected."""
+ atom = ATOMForecaster(y_fc, random_state=1)
+ with pytest.raises(ValueError, match=".*No seasonal periods.*"):
+ atom.get_seasonal_period(max_sp=2)
+
+
def test_merge_invalid_class():
"""Assert that an error is raised when the class is not a trainer."""
atom = ATOMClassifier(X_bin, y_bin, random_state=1)
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index 038a9d67f..748f2b33f 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -81,6 +81,13 @@ def test_fit_transform(pipeline):
assert isinstance(pl.fit_transform(X_bin, y_bin), tuple) # Returns X, y
+def test_transform_both_None(pipeline):
+ """Assert that an error is raised when both X and y are None."""
+ pl = pipeline(model=False)
+ with pytest.raises(ValueError, match=".*X and y cannot be both None.*"):
+ pl.transform()
+
+
def test_transform_train_only(pipeline):
"""Assert that the pipeline ignores train_only during predicting."""
pl = pipeline(model=False)
@@ -124,6 +131,13 @@ def test_transform(pipeline):
assert isinstance(pl.transform(X_bin, y_bin), tuple)
+def test_inverse_transform_both_None(pipeline):
+ """Assert that an error is raised when both X and y are None."""
+ pl = pipeline(model=False)
+ with pytest.raises(ValueError, match=".*X and y cannot be both None.*"):
+ pl.inverse_transform()
+
+
def test_inverse_transform():
"""Assert that the pipeline uses inverse_transform normally."""
pl = Pipeline([("scaler", StandardScaler())]).fit(X_bin)
diff --git a/tests/test_plots.py b/tests/test_plots.py
index f26191c04..a48e79bcf 100644
--- a/tests/test_plots.py
+++ b/tests/test_plots.py
@@ -22,8 +22,8 @@
from atom.utils.utils import NotFittedError
from .conftest import (
- X10, X10_str, X_bin, X_class, X_ex, X_label, X_reg, X_sparse, X_text, y10,
- y_bin, y_class, y_ex, y_fc, y_label, y_multiclass, y_reg,
+ X10, X10_str, X_bin, X_class, X_label, X_reg, X_sparse, X_text, y10, y_bin,
+ y_class, y_fc, y_label, y_multiclass, y_multivariate, y_reg,
)
@@ -552,7 +552,7 @@ def test_plot_feature_importance():
def test_plot_forecast():
"""Assert that the plot_forecast method works."""
- atom = ATOMForecaster(X_ex, y=y_ex, holdout_size=0.1, random_state=1)
+ atom = ATOMForecaster(y_multivariate, y=(-2, -1), holdout_size=0.1, random_state=1)
atom.run(models=["NF", "ES"])
atom.plot_forecast(display=False)
atom.plot_forecast(fh=atom.holdout.index, X=atom.holdout, display=False)