diff --git a/atom/api.py b/atom/api.py
index 55537c62e..ec9bf477f 100644
--- a/atom/api.py
+++ b/atom/api.py
@@ -17,9 +17,8 @@
from atom.atom import ATOM
from atom.utils.types import (
- Backend, Bool, ColumnSelector, Engine, IndexSelector, Int,
- IntLargerEqualZero, NJobs, Predictor, Scalar, Sequence, Verbose, Warnings,
- YSelector,
+ Backend, Bool, ColumnSelector, Engine, IndexSelector, IntLargerEqualZero,
+ NJobs, Predictor, Scalar, Seasonality, Verbose, Warnings, YSelector,
)
from atom.utils.utils import Goal
@@ -611,7 +610,7 @@ def __init__(
*arrays,
y: YSelector = -1,
ignore: ColumnSelector | None = None,
- sp: Int | str | Sequence[Int | str] | None = None,
+ sp: Seasonality = None,
n_rows: Scalar = 1,
test_size: Scalar = 0.2,
holdout_size: Scalar | None = None,
diff --git a/atom/basemodel.py b/atom/basemodel.py
index 71e89db65..5f1152e24 100644
--- a/atom/basemodel.py
+++ b/atom/basemodel.py
@@ -2901,6 +2901,7 @@ def get_tags(self) -> dict[str, Any]:
"module": self._est_class.__module__.split(".")[0] + self._module,
"handles_missing": self.handles_missing,
"in_sample_prediction": self.in_sample_prediction,
+ "multiple_seasonality": self.multiple_seasonality,
"native_multivariate": self.native_multivariate,
"supports_engines": ", ".join(self.supports_engines),
}
diff --git a/atom/baserunner.py b/atom/baserunner.py
index 7019bac81..867b84cb1 100644
--- a/atom/baserunner.py
+++ b/atom/baserunner.py
@@ -37,10 +37,10 @@
from atom.pipeline import Pipeline
from atom.utils.constants import DF_ATTRS
from atom.utils.types import (
- Bool, DataFrame, FloatZeroToOneExc, HarmonicsSelector, Int,
+ Bool, DataFrame, FloatZeroToOneExc, HarmonicsSelector, Int, IntLargerOne,
MetricConstructor, Model, ModelSelector, ModelsSelector, Pandas,
- RowSelector, Scalar, Segment, Sequence, Series, YSelector, dataframe_t,
- int_t, segment_t, sequence_t,
+ RowSelector, Scalar, Seasonality, Segment, Sequence, Series,
+ TargetSelector, YSelector, dataframe_t, int_t, segment_t, sequence_t,
)
from atom.utils.utils import (
ClassMap, DataContainer, SeasonalPeriod, Task, bk, check_is_fitted,
@@ -166,7 +166,7 @@ def sp(self) -> int | list[int] | None:
return self._sp
@sp.setter
- def sp(self, sp: Int | str | Sequence[Int | str] | None):
+ def sp(self, sp: Seasonality):
"""Convert seasonal period to integer value."""
if sp is None:
self._sp = None
@@ -177,7 +177,7 @@ def sp(self, sp: Int | str | Sequence[Int | str] | None):
f"The dataset's index has no attribute freqstr."
)
else:
- self._sp = self.dataset.index.freqstr
+ self._sp = self._get_sp(self.dataset.index.freqstr)
elif sp == "infer":
self._sp = self.get_seasonal_period()
else:
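
For context, a minimal sketch of the values the setter now accepts through the `Seasonality` alias (the `atom` object is illustrative, and the `"M" -> 12` mapping is assumed to come from the `SeasonalPeriod` enum imported above):

```python
# Hedged sketch of the accepted `sp` inputs, not the canonical reference.
atom.sp = 12           # a fixed seasonal period
atom.sp = "M"          # a frequency string, resolved to its period (assumed M -> 12)
atom.sp = "index"      # derive the period from the dataset's index.freqstr
atom.sp = "infer"      # estimate it via get_seasonal_period()
atom.sp = [4, 12]      # multiple periods, for models with multiple_seasonality=True
atom.sp = None         # no seasonality
```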
@@ -908,6 +908,8 @@ def available_models(self) -> pd.DataFrame:
- **uses_exogenous:** Whether the model uses exogenous variables.
- **in_sample_prediction:** Whether the model can do predictions
on the training set.
+ - **multiple_seasonality:** Whether the model can handle more than
+          one [seasonal period][seasonality].
- **native_multilabel:** Whether the model has native support
for [multilabel][] tasks.
- **native_multioutput:** Whether the model has native support
@@ -1124,9 +1126,10 @@ def get_sample_weight(self, rows: RowSelector = "train") -> Series:
@composed(crash, beartype)
def get_seasonal_period(
self,
- max_sp: Int | None = None,
+ max_sp: IntLargerOne | None = None,
harmonics: HarmonicsSelector | None = None,
- ) -> int:
+ target: TargetSelector = 0,
+ ) -> int | list[int]:
"""Get the seasonal periods of the time series.
Use the data in the training set to calculate the seasonal
@@ -1161,13 +1164,16 @@ def get_seasonal_period(
- If "raw_strength", result=[3, 7, 8]
- If "harmonic_strength", result=[8, 3, 7]
+ target: int or str, default=0
+ Target column to look at. Only for [multivariate][] tasks.
+
Returns
-------
- list of int
+ int or list of int
Seasonal periods, ordered by significance.
"""
- yt = self.y_train.copy()
+ yt = self.dataset[self.branch._get_target(target, only_columns=True)]
max_sp = max_sp or (len(yt) - 1) // 2
for _ in np.arange(ndiffs(yt)):
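
A hypothetical call against the extended signature (the column name and the returned periods are made up):

```python
# 'sales' is an illustrative target column of a multivariate dataset.
sp = atom.get_seasonal_period(max_sp=24, harmonics="drop", target="sales")
print(sp)  # e.g., [12, 6]: seasonal periods ordered by significance
```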
diff --git a/atom/basetrainer.py b/atom/basetrainer.py
index 0a9f63110..efd8ef1b6 100644
--- a/atom/basetrainer.py
+++ b/atom/basetrainer.py
@@ -194,8 +194,6 @@ def _prepare_parameters(self):
# Check if libraries for non-sklearn models are available
dependencies = {
- "ARIMA": "pmdarima",
- "AutoARIMA": "pmdarima",
"BATS": "tbats",
"CatB": "catboost",
"LGB": "lightgbm",
diff --git a/atom/models/classreg.py b/atom/models/classreg.py
index 994ce35f6..760e2ab77 100644
--- a/atom/models/classreg.py
+++ b/atom/models/classreg.py
@@ -472,17 +472,17 @@ def _get_est(self, params: dict[str, Any]) -> Predictor:
if getattr(self, "_metric", None) and not self._gpu:
eval_metric = CatBMetric(self._metric[0], task=self.task)
- return self._est_class(
- eval_metric=params.pop("eval_metric", eval_metric),
- train_dir=params.pop("train_dir", ""),
- allow_writing_files=params.pop("allow_writing_files", False),
- thread_count=params.pop("n_jobs", self.n_jobs),
- task_type=params.pop("task_type", "GPU" if self._gpu else "CPU"),
- devices=str(self._device_id),
- verbose=params.pop("verbose", False),
- random_state=params.pop("random_state", self.random_state),
- **params,
- )
+ default = {
+ "eval_metric": eval_metric,
+ "train_dir": "",
+ "allow_writing_files": False,
+ "thread_count": self.n_jobs,
+ "task_type": "GPU" if self._gpu else "CPU",
+ "devices": str(self._device_id),
+ "verbose": False,
+ }
+
+ return super()._get_est(default | params)
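
The refactored `_get_est` methods rely on PEP 584 dict union, where keys in the right operand take precedence, so user-supplied hyperparameters still override the model defaults. A standalone illustration with made-up values:

```python
# PEP 584: in `default | params`, keys from `params` win on collision.
default = {"verbose": False, "thread_count": 8}
params = {"verbose": True}  # user-supplied hyperparameters
print(default | params)     # {'verbose': True, 'thread_count': 8}
```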
def _fit_estimator(
self,
@@ -1672,14 +1672,13 @@ def _get_est(self, params: dict[str, Any]) -> Predictor:
# PYTHONWarnings doesn't work since they go from C/C++ code to stdout
warns = {"always": 2, "default": 1, "once": 0, "error": 0, "ignore": -1}
- return self._est_class(
- verbose=params.pop("verbose", warns.get(self.warnings, -1)),
- n_jobs=params.pop("n_jobs", self.n_jobs),
- device=params.pop("device", "gpu" if self._gpu else "cpu"),
- gpu_device_id=params.pop("gpu_device_id", self._device_id or -1),
- random_state=params.pop("random_state", self.random_state),
- **params,
- )
+ default = {
+ "verbose": warns.get(self.warnings, -1),
+ "device": "gpu" if self._gpu else "cpu",
+ "gpu_device_id": self._device_id or -1,
+ }
+
+ return super()._get_est(default | params)
def _fit_estimator(
self,
@@ -1960,7 +1959,7 @@ def _get_est(self, params: dict[str, Any]) -> Predictor:
"""
if self.engine.get("estimator") == "cuml" and self._goal is Goal.classification:
- return self._est_class(probability=params.pop("probability", True), **params)
+ return super()._get_est({"probability": True} | params)
else:
return super()._get_est(params)
@@ -3010,11 +3009,7 @@ def _get_est(self, params: dict[str, Any]) -> Predictor:
"""
if self.engine.get("estimator") == "cuml" and self._goal is Goal.classification:
- return self._est_class(
- probability=params.pop("probability", True),
- random_state=params.pop("random_state", self.random_state),
- **params,
- )
+ return super()._get_est({"probability": True} | params)
else:
return super()._get_est(params)
@@ -3142,14 +3137,8 @@ def _get_est(self, params: dict[str, Any]) -> Predictor:
if getattr(self, "_metric", None):
eval_metric = XGBMetric(self._metric[0], task=self.task)
- return self._est_class(
- eval_metric=params.pop("eval_metric", eval_metric),
- n_jobs=params.pop("n_jobs", self.n_jobs),
- device=params.pop("device", self.device),
- verbosity=params.pop("verbosity", 0),
- random_state=params.pop("random_state", self.random_state),
- **params,
- )
+ default = {"eval_metric": eval_metric, "device": self.device, "verbosity": 0}
+ return super()._get_est(default | params)
def _fit_estimator(
self,
diff --git a/atom/models/ensembles.py b/atom/models/ensembles.py
index 29224d57b..184761956 100644
--- a/atom/models/ensembles.py
+++ b/atom/models/ensembles.py
@@ -63,7 +63,8 @@ def _get_est(self, params: dict[str, Any]) -> Predictor:
"""
return self._est_class(
estimators=[
- (m.name, m.export_pipeline() if m.scaler else m.estimator) for m in self._models
+ (m.name, m.export_pipeline() if m.scaler else m.estimator)
+ for m in self._models
],
n_jobs=params.pop("n_jobs", self.n_jobs),
**params,
@@ -128,7 +129,8 @@ def _get_est(self, params: dict[str, Any]) -> Predictor:
"""
return self._est_class(
estimators=[
- (m.name, m.export_pipeline() if m.scaler else m.estimator) for m in self._models
+ (m.name, m.export_pipeline() if m.scaler else m.estimator)
+ for m in self._models
],
n_jobs=params.pop("n_jobs", self.n_jobs),
**params,
diff --git a/atom/models/ts.py b/atom/models/ts.py
index b10fe99cc..1c4732a34 100644
--- a/atom/models/ts.py
+++ b/atom/models/ts.py
@@ -77,6 +77,7 @@ class ARIMA(ForecastModel):
handles_missing = True
uses_exogenous = True
in_sample_prediction = True
+ multiple_seasonality = False
native_multivariate = False
supports_engines = ("sktime",)
@@ -84,7 +85,7 @@ class ARIMA(ForecastModel):
_estimators: ClassVar[dict[str, str]] = {"forecast": "ARIMA"}
_order = ("p", "d", "q")
- _seasonal_order = ("P", "D", "Q", "S")
+ _s_order = ("P", "D", "Q")
def _get_parameters(self, trial: Trial) -> dict[str, BaseDistribution]:
"""Get the trial's hyperparameters.
@@ -103,8 +104,8 @@ def _get_parameters(self, trial: Trial) -> dict[str, BaseDistribution]:
params = super()._get_parameters(trial)
# If no seasonal periodicity, set seasonal components to zero
- if self._get_param("S", params) == 0:
- for p in self._seasonal_order:
+ if not self._config.sp:
+ for p in self._s_order:
if p in params:
params[p] = 0
@@ -127,10 +128,21 @@ def _trial_to_est(self, params: dict[str, Any]) -> dict[str, Any]:
params = super()._trial_to_est(params)
# Convert params to hyperparameters 'order' and 'seasonal_order'
- if all(p in params for p in self._order):
- params["order"] = tuple(params.pop(p) for p in self._order)
- if all(p in params for p in self._seasonal_order):
- params["seasonal_order"] = tuple(params.pop(p) for p in self._seasonal_order)
+ if all(p in params for p in self._order) and "order" not in params:
+ params["order"] = [params.pop(p) for p in self._order]
+ else:
+ for p in self._order:
+ params.pop(p, None)
+
+ if (
+ all(p in params for p in self._s_order)
+ and self._config.sp
+ and "seasonal_order" not in params
+ ):
+ params["seasonal_order"] = [params.pop(p) for p in self._s_order] + [self._config.sp]
+ else:
+ for p in self._s_order:
+ params.pop(p, None)
return params
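
A sketch of the conversion this hunk performs, assuming `self._config.sp == 12` (the trial values are illustrative):

```python
# Illustrative trial parameters before conversion.
params = {"p": 2, "d": 1, "q": 1, "P": 1, "D": 0, "Q": 1, "maxiter": 100}
# _trial_to_est would then return roughly:
# {"maxiter": 100, "order": [2, 1, 1], "seasonal_order": [1, 0, 1, 12]}
```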
@@ -143,8 +155,6 @@ def _get_distributions(self) -> dict[str, BaseDistribution]:
Hyperparameter distributions.
"""
- methods = ["newton", "nm", "bfgs", "lbfgs", "powell", "cg", "ncg", "basinhopping"]
-
dist = {
"p": Int(0, 2),
"d": Int(0, 1),
@@ -152,8 +162,9 @@ def _get_distributions(self) -> dict[str, BaseDistribution]:
"P": Int(0, 2),
"D": Int(0, 1),
"Q": Int(0, 2),
- "S": Cat([0, 4, 6, 7, 12]),
- "method": Cat(methods),
+ "method": Cat(
+ ["newton", "nm", "bfgs", "lbfgs", "powell", "cg", "ncg", "basinhopping"]
+ ),
"maxiter": Int(50, 200, step=10),
"with_intercept": Cat([True, False]),
}
@@ -163,7 +174,7 @@ def _get_distributions(self) -> dict[str, BaseDistribution]:
for p in self._order:
dist.pop(p)
if "seasonal_order" in self._est_params:
- for p in self._seasonal_order:
+ for p in self._s_order:
dist.pop(p)
return dist
@@ -218,6 +229,7 @@ class AutoARIMA(ForecastModel):
handles_missing = True
uses_exogenous = True
in_sample_prediction = True
+ multiple_seasonality = False
native_multivariate = False
supports_engines = ("sktime",)
@@ -293,6 +305,7 @@ class BATS(ForecastModel):
handles_missing = False
uses_exogenous = False
in_sample_prediction = True
+ multiple_seasonality = False
native_multivariate = False
supports_engines = ("sktime",)
@@ -313,11 +326,7 @@ def _get_est(self, params: dict[str, Any]) -> Predictor:
Estimator instance.
"""
- return self._est_class(
- show_warnings=params.pop("show_warnings", self.warnings in ("always", "default")),
- n_jobs=params.pop("n_jobs", self.n_jobs),
- **params,
- )
+ return super()._get_est({"show_warnings": self.warnings != "ignore"} | params)
@staticmethod
def _get_distributions() -> dict[str, BaseDistribution]:
@@ -377,6 +386,7 @@ class Croston(ForecastModel):
handles_missing = False
uses_exogenous = True
in_sample_prediction = True
+ multiple_seasonality = False
native_multivariate = False
supports_engines = ("sktime",)
@@ -427,6 +437,7 @@ class ExponentialSmoothing(ForecastModel):
handles_missing = False
uses_exogenous = False
in_sample_prediction = True
+ multiple_seasonality = False
native_multivariate = False
supports_engines = ("sktime",)
@@ -468,7 +479,6 @@ def _get_distributions() -> dict[str, BaseDistribution]:
"trend": Cat(["add", "mul", None]),
"damped_trend": Cat([True, False]),
"seasonal": Cat(["add", "mul", None]),
- "sp": Cat([4, 6, 7, 12, None]),
"use_boxcox": Cat([True, False]),
"initialization_method": Cat(["estimated", "heuristic"]),
"method": Cat(["L-BFGS-B", "TNC", "SLSQP", "Powell", "trust-constr", "bh", "ls"]),
@@ -513,6 +523,7 @@ class ETS(ForecastModel):
handles_missing = True
uses_exogenous = False
in_sample_prediction = True
+ multiple_seasonality = False
native_multivariate = False
supports_engines = ("sktime",)
@@ -555,7 +566,6 @@ def _get_distributions() -> dict[str, BaseDistribution]:
"trend": Cat(["add", "mul", None]),
"damped_trend": Cat([True, False]),
"seasonal": Cat(["add", "mul", None]),
- "sp": Cat([1, 4, 6, 7, 12]),
"initialization_method": Cat(["estimated", "heuristic"]),
"maxiter": Int(500, 2000, step=100),
"auto": Cat([True, False]),
@@ -604,6 +614,7 @@ class NaiveForecaster(ForecastModel):
handles_missing = True
uses_exogenous = False
in_sample_prediction = True
+ multiple_seasonality = False
native_multivariate = False
supports_engines = ("sktime",)
@@ -658,6 +669,7 @@ class PolynomialTrend(ForecastModel):
handles_missing = False
uses_exogenous = False
in_sample_prediction = True
+ multiple_seasonality = False
native_multivariate = False
supports_engines = ("sktime",)
@@ -714,6 +726,7 @@ class STL(ForecastModel):
handles_missing = False
uses_exogenous = False
in_sample_prediction = True
+ multiple_seasonality = False
native_multivariate = False
supports_engines = ("sktime",)
@@ -791,6 +804,7 @@ class TBATS(ForecastModel):
handles_missing = False
uses_exogenous = False
in_sample_prediction = True
+ multiple_seasonality = True
native_multivariate = False
supports_engines = ("sktime",)
@@ -811,11 +825,7 @@ def _get_est(self, params: dict[str, Any]) -> Predictor:
Estimator instance.
"""
- return self._est_class(
- show_warnings=params.pop("show_warnings", self.warnings in ("always", "default")),
- n_jobs=params.pop("n_jobs", self.n_jobs),
- **params,
- )
+ return super()._get_est({"show_warnings": self.warnings != "ignore"} | params)
@staticmethod
def _get_distributions() -> dict[str, BaseDistribution]:
@@ -879,6 +889,7 @@ class Theta(ForecastModel):
handles_missing = False
uses_exogenous = False
in_sample_prediction = True
+ multiple_seasonality = False
native_multivariate = False
supports_engines = ("sktime",)
@@ -896,3 +907,171 @@ def _get_distributions() -> dict[str, BaseDistribution]:
"""
return {"deseasonalize": Cat([False, True])}
+
+
+class VAR(ForecastModel):
+ """Vector Autoregressive.
+
+    A VAR model is a generalization of the univariate autoregressive
+    model for forecasting multiple time series.
+
+ Corresponding estimators are:
+
+ - [VAR][varclass] for forecasting tasks.
+
+ See Also
+ --------
+ atom.models:MSTL
+ atom.models:Prophet
+ atom.models:VARMAX
+
+ Examples
+ --------
+ ```pycon
+ from atom import ATOMForecaster
+    from sktime.datasets import load_longley
+
+    _, X = load_longley()
+
+    atom = ATOMForecaster(X, random_state=1)
+ atom.run(models="VAR", verbose=2)
+ ```
+
+ """
+
+ acronym = "VAR"
+ handles_missing = False
+ uses_exogenous = False
+ in_sample_prediction = True
+ multiple_seasonality = False
+ native_multivariate = True
+ supports_engines = ("sktime",)
+
+ _module = "sktime.forecasting.var"
+ _estimators: ClassVar[dict[str, str]] = {"forecast": "VAR"}
+
+ @staticmethod
+ def _get_distributions() -> dict[str, BaseDistribution]:
+ """Get the predefined hyperparameter distributions.
+
+ Returns
+ -------
+ dict
+ Hyperparameter distributions.
+
+ """
+ return {
+ "trend": Cat(["c", "ct", "ctt", "n"]),
+ "ic": Cat(["aic", "fpe", "hqic", "bic"]),
+ }
+
+
+class VARMAX(ForecastModel):
+ """Vector Autoregressive Moving Average.
+
+    A variation on the [VAR][] model that makes use of exogenous variables.
+
+ Corresponding estimators are:
+
+ - [VARMAX][varmaxclass] for forecasting tasks.
+
+ See Also
+ --------
+ atom.models:MSTL
+ atom.models:Prophet
+ atom.models:VAR
+
+ Examples
+ --------
+ ```pycon
+ from atom import ATOMForecaster
+    from sktime.datasets import load_longley
+
+    _, X = load_longley()
+
+    atom = ATOMForecaster(X, random_state=1)
+ atom.run(models="VARMAX", verbose=2)
+ ```
+
+ """
+
+ acronym = "VARMAX"
+ handles_missing = False
+ uses_exogenous = True
+ in_sample_prediction = True
+ multiple_seasonality = False
+ native_multivariate = True
+ supports_engines = ("sktime",)
+
+ _module = "sktime.forecasting.var"
+ _estimators: ClassVar[dict[str, str]] = {"forecast": "VARMAX"}
+
+ _order = ("p", "q")
+
+ def _trial_to_est(self, params: dict[str, Any]) -> dict[str, Any]:
+ """Convert trial's hyperparameters to parameters for the estimator.
+
+ Parameters
+ ----------
+ params: dict
+ Trial's hyperparameters.
+
+ Returns
+ -------
+ dict
+ Estimator's hyperparameters.
+
+ """
+ params = super()._trial_to_est(params)
+
+ # Convert params to hyperparameter 'order'
+ if all(p in params for p in self._order) and "order" not in params:
+ params["order"] = [params.pop(p) for p in self._order]
+ else:
+ for p in self._order:
+ params.pop(p, None)
+
+ return params
+
+ def _get_est(self, params: dict[str, Any]) -> Predictor:
+ """Get the model's estimator with unpacked parameters.
+
+ Parameters
+ ----------
+ params: dict
+ Hyperparameters for the estimator.
+
+ Returns
+ -------
+ Predictor
+ Estimator instance.
+
+ """
+ return super()._get_est({"suppress_warnings": self.warnings == "ignore"} | params)
+
+ @staticmethod
+ def _get_distributions() -> dict[str, BaseDistribution]:
+ """Get the predefined hyperparameter distributions.
+
+ Returns
+ -------
+ dict
+ Hyperparameter distributions.
+
+ """
+ return {
+ "p": Int(0, 2),
+ "q": Int(0, 2),
+ "trend": Cat(["c", "ct", "ctt", "n"]),
+ "error_cov_type": Cat(["diagonal", "unstructured"]),
+ "measurement_error": Cat([True, False]),
+ "enforce_stationarity": Cat([True, False]),
+ "enforce_invertibility": Cat([True, False]),
+ "cov_type": Cat(["opg", "oim", "approx", "robust", "robust_approx"]),
+ "method": Cat(
+ ["newton", "nm", "bfgs", "lbfgs", "powell", "cg", "ncg", "basinhopping"]
+ ),
+ "maxiter": Int(50, 200, step=10),
+ "optim_score": Cat(["harvey", "approx", None]),
+ "optim_complex_step": Cat([True, False]),
+ "optim_hessian": Cat(["opg", "oim", "approx"]),
+ }
diff --git a/atom/utils/types.py b/atom/utils/types.py
index a606fb7c1..5400f7cdc 100644
--- a/atom/utils/types.py
+++ b/atom/utils/types.py
@@ -248,14 +248,6 @@ def predict(self, *args, **kwargs) -> Pandas: ...
| None
)
-# Runner parameters
-NItems: TypeAlias = (
- IntLargerEqualZero
- | dict[str, IntLargerEqualZero]
- | Sequence[IntLargerEqualZero]
-)
-HarmonicsSelector: TypeAlias = Literal["drop", "raw_strength", "harmonic_strength"]
-
# Allowed values for method selection
PredictionMethods: TypeAlias = Literal[
"decision_function", "predict", "predict_log_proba", "predict_proba", "score"
@@ -289,8 +281,15 @@ def predict(self, *args, **kwargs) -> Pandas: ...
"out",
]
-# Mlflow stages
+# Others
+Seasonality: TypeAlias = IntLargerOne | str | Sequence[IntLargerOne | str] | None
+HarmonicsSelector: TypeAlias = Literal["drop", "raw_strength", "harmonic_strength"]
Stages: TypeAlias = Literal["None", "Staging", "Production", "Archived"]
+NItems: TypeAlias = (
+ IntLargerEqualZero
+ | dict[str, IntLargerEqualZero]
+ | Sequence[IntLargerEqualZero]
+)
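
For reference, a hedged sketch of values that satisfy the relocated aliases (plain ints stand in for ATOM's annotated `IntLargerOne`/`IntLargerEqualZero` types):

```python
# Seasonality = IntLargerOne | str | Sequence[IntLargerOne | str] | None
ok_sp = [12, "M", (4, "W"), None]

# NItems = IntLargerEqualZero | dict[str, ...] | Sequence[...]
ok_n_items = [5, {"train": 3, "test": 2}, [1, 2, 3]]
```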
# Variable types for isinstance ================================== >>
diff --git a/docs_sources/api/models/var.md b/docs_sources/api/models/var.md
new file mode 100644
index 000000000..f83d97bc4
--- /dev/null
+++ b/docs_sources/api/models/var.md
@@ -0,0 +1,80 @@
+# VAR
+-----
+
+:: atom.models:VAR
+ :: tags
+ :: description
+ :: see also
+
+
+
+## Example
+
+:: examples
+
+
+
+## Hyperparameters
+
+:: hyperparameters
+
+
+
+## Attributes
+
+### Data attributes
+
+:: table:
+ - attributes:
+ from_docstring: False
+ include:
+ - pipeline
+ - atom.branch:Branch.mapping
+ - dataset
+ - train
+ - test
+ - X
+ - y
+ - X_train
+ - y_train
+ - X_test
+ - atom.branch:Branch.y_test
+ - X_holdout
+ - y_holdout
+ - shape
+ - columns
+ - n_columns
+ - features
+ - n_features
+ - atom.branch:Branch.target
+
+
+
+### Utility attributes
+
+:: table:
+ - attributes:
+ from_docstring: False
+ include:
+ - name
+ - run
+ - study
+ - trials
+ - best_trial
+ - best_params
+ - estimator
+ - bootstrap
+ - results
+ - feature_importance
+
+
+
+## Methods
+
+The [plots][available-plots] can be called directly from the model.
+The remaining utility methods can be found hereunder.
+
+:: methods:
+ toc_only: False
+ exclude:
+ - plot_.*
diff --git a/docs_sources/api/models/varmax.md b/docs_sources/api/models/varmax.md
new file mode 100644
index 000000000..641eb6609
--- /dev/null
+++ b/docs_sources/api/models/varmax.md
@@ -0,0 +1,80 @@
+# VARMAX
+--------
+
+:: atom.models:VARMAX
+ :: tags
+ :: description
+ :: see also
+
+
+
+## Example
+
+:: examples
+
+
+
+## Hyperparameters
+
+:: hyperparameters
+
+
+
+## Attributes
+
+### Data attributes
+
+:: table:
+ - attributes:
+ from_docstring: False
+ include:
+ - pipeline
+ - atom.branch:Branch.mapping
+ - dataset
+ - train
+ - test
+ - X
+ - y
+ - X_train
+ - y_train
+ - X_test
+ - atom.branch:Branch.y_test
+ - X_holdout
+ - y_holdout
+ - shape
+ - columns
+ - n_columns
+ - features
+ - n_features
+ - atom.branch:Branch.target
+
+
+
+### Utility attributes
+
+:: table:
+ - attributes:
+ from_docstring: False
+ include:
+ - name
+ - run
+ - study
+ - trials
+ - best_trial
+ - best_params
+ - estimator
+ - bootstrap
+ - results
+ - feature_importance
+
+
+
+## Methods
+
+The [plots][available-plots] can be called directly from the model.
+The remaining utility methods can be found hereunder.
+
+:: methods:
+ toc_only: False
+ exclude:
+ - plot_.*
diff --git a/docs_sources/dependencies.md b/docs_sources/dependencies.md
index 2a4b46020..a994dea1c 100644
--- a/docs_sources/dependencies.md
+++ b/docs_sources/dependencies.md
@@ -41,6 +41,7 @@ packages are necessary for its correct functioning.
* **[numpy](https://numpy.org/)** (>=1.23.0)
* **[optuna](https://optuna.org/)** (>=3.4.0)
* **[pandas[parquet]](https://pandas.pydata.org/)** (>=2.1.2)
* **[plotly](https://plotly.com/python/)** (>=5.15.0)
+* **[pmdarima](http://alkaline-ml.com/pmdarima/)** (>=2.0.3)
* **[ray[serve]](https://docs.ray.io/en/latest/)** (>=2.7.1)
* **[requests](https://requests.readthedocs.io/en/latest/)** (>=2.31.0)
@@ -63,7 +64,6 @@ additional libraries. You can install all the optional dependencies using
* **[explainerdashboard](https://explainerdashboard.readthedocs.io/en/latest/)** (>=0.4.3)
* **[gradio](https://github.com/gradio-app/gradio)** (>=3.44.4)
* **[lightgbm](https://lightgbm.readthedocs.io/en/latest/)** (>=4.1.0)
-* **[pmdarima](http://alkaline-ml.com/pmdarima/)** (>=2.0.3)
* **[schemdraw](https://schemdraw.readthedocs.io/en/latest/index.html)** (>=0.16)
* **[sweetviz](https://github.com/fbdesignpro/sweetviz)** (>=2.3.1)
* **[tbats](https://github.com/intive-DataScience/tbats)** (>=1.1.3)
diff --git a/docs_sources/scripts/autodocs.py b/docs_sources/scripts/autodocs.py
index 54fa85024..db2f3187b 100644
--- a/docs_sources/scripts/autodocs.py
+++ b/docs_sources/scripts/autodocs.py
@@ -226,6 +226,8 @@
xgbregressor="https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBRegressor",
xgbdocs="https://xgboost.readthedocs.io/en/latest/index.html",
naiveforecasterclass="https://www.sktime.net/en/stable/api_reference/auto_generated/sktime.forecasting.naive.NaiveForecaster.html",
+ varclass="https://www.sktime.net/en/latest/api_reference/auto_generated/sktime.forecasting.var.VAR.html",
+ varmaxclass="https://www.sktime.net/en/latest/api_reference/auto_generated/sktime.forecasting.varmax.VARMAX.html",
# NLP
snowballstemmer="https://www.nltk.org/api/nltk.stem.snowball.html#nltk.stem.snowball.SnowballStemmer",
bow="https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html",
@@ -417,6 +419,8 @@ def get_tags(self) -> str:
text += " [needs scaling][automated-feature-scaling]{ .md-tag }"
if getattr(self.obj, "accepts_sparse", False):
text += " [accept sparse][sparse-datasets]{ .md-tag }"
+ if getattr(self.obj, "multiple_seasonality", False):
+ text += " [multiple seasonality][seasonality]{ .md-tag }"
if getattr(self.obj, "native_multilabel", False):
text += " [native multilabel][multilabel]{ .md-tag }"
if getattr(self.obj, "native_multioutput", False):
@@ -424,7 +428,7 @@ def get_tags(self) -> str:
if getattr(self.obj, "native_multivariate", False):
text += " [native multivariate][multivariate]{ .md-tag }"
if getattr(self.obj, "validation", None):
- text += " [allows validation][in-training-validation]{ .md-tag }"
+ text += " [in-training validation][]{ .md-tag }"
if any(engine not in ("sklearn", "sktime") for engine in self.obj.supports_engines):
text += " [supports acceleration][estimator-acceleration]{ .md-tag }"
diff --git a/docs_sources/user_guide/time_series.md b/docs_sources/user_guide/time_series.md
index 15381f02c..25f5897e3 100644
--- a/docs_sources/user_guide/time_series.md
+++ b/docs_sources/user_guide/time_series.md
@@ -23,3 +23,4 @@
that occurs at specific intervals of time. It's associated with seasonal
effects, which are patterns that tend to recur at consistent intervals.
+The same period is used for all columns in a [multivariate][] setting.
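
A short pycon illustration of fixing one shared period up front (the dataset and the period are purely illustrative; `sp` accepts the same values as atom's `sp` property):

```pycon
from atom import ATOMForecaster
from sktime.datasets import load_longley

_, X = load_longley()

# One seasonal period, applied to every column of the multivariate set.
atom = ATOMForecaster(X, sp=4, random_state=1)
```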
diff --git a/mkdocs.yml b/mkdocs.yml
index 0027ecb3b..9e0348c9b 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -222,6 +222,8 @@ nav:
- SupportVectorMachine: API/models/svm.md
- TBATS: API/models/tbats.md
- Theta: API/models/theta.md
+ - VAR: API/models/var.md
+ - VARMAX: API/models/varmax.md
- XGBoost: API/models/xgb.md
- Pipeline:
- Pipeline: API/pipeline/pipeline.md
diff --git a/pyproject.toml b/pyproject.toml
index 643cec65e..07cc20578 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,6 +36,7 @@ dependencies = [
"numpy>=1.23.0",
"optuna>=3.4.0",
"pandas[parquet]>=2.1.2",
+ "pmdarima>=2.0.3",
"plotly>=5.15.0",
"ray[serve]>=2.7.1",
"requests>=2.31.0",
@@ -54,7 +55,6 @@ full = [
"explainerdashboard>=0.4.3",
"gradio>=3.44.4",
"lightgbm>=4.1.0",
- "pmdarima>=2.0.3",
"schemdraw>=0.16",
"sweetviz>=2.3.1",
"tbats>=1.1.3",