Commit
add some coverage for performance metrics (#2226)
TonyBagnall authored Oct 24, 2024
1 parent e07ba96 commit 8281d25
Showing 2 changed files with 64 additions and 1 deletion.
2 changes: 1 addition & 1 deletion aeon/performance_metrics/forecasting/tests/test_metrics.py
@@ -37,7 +37,7 @@ def test_gmse_function():


def test_linex_function():
-    """Doctest from mean_linex_error."""
+    """Test from mean_linex_error."""
    y_true = np.array([3, -0.5, 2, 7, 2])
    y_pred = np.array([2.5, 0.0, 2, 8, 1.25])
    assert np.allclose(mean_linex_error(y_true, y_pred), 0.19802627763937575)
63 changes: 63 additions & 0 deletions
@@ -2,18 +2,23 @@

__maintainer__ = []

import inspect

import numpy as np
import pandas as pd
import pytest
from pandas.api.types import is_numeric_dtype

from aeon.performance_metrics.forecasting import (
    geometric_mean_absolute_error,
    geometric_mean_relative_absolute_error,
    geometric_mean_relative_squared_error,
    geometric_mean_squared_error,
    mean_absolute_error,
    mean_absolute_percentage_error,
    mean_absolute_scaled_error,
    mean_asymmetric_error,
    mean_linex_error,
    mean_relative_absolute_error,
    mean_squared_error,
    mean_squared_percentage_error,
@@ -27,6 +32,7 @@
    median_squared_scaled_error,
    relative_loss,
)
from aeon.performance_metrics.forecasting._functions import _get_kwarg
from aeon.testing.data_generation._legacy import make_series

RANDOM_SEED = 42
@@ -493,3 +499,60 @@ def test_y_true_y_pred_inconsistent_n_variables_raises_error(metric_func_name):
        ValueError, match="y_true and y_pred have different number of output"
    ):
        metric_func(y_true, y_pred, y_train=y_train, y_pred_benchmark=y_pred_benchmark)


def test_kwargs():
    """Test get_kwarg with None."""
    with pytest.raises(ValueError):
        _get_kwarg(None)


functions = [
    median_squared_scaled_error,
    mean_squared_error,
    # geometric_mean_relative_absolute_error,
    geometric_mean_relative_squared_error,
    mean_absolute_error,
    mean_absolute_percentage_error,
    mean_absolute_scaled_error,
    mean_asymmetric_error,
    mean_relative_absolute_error,
    mean_squared_percentage_error,
    mean_squared_scaled_error,
    median_absolute_error,
    median_absolute_percentage_error,
    median_absolute_scaled_error,
    median_relative_absolute_error,
    median_squared_error,
    median_squared_percentage_error,
    relative_loss,
    mean_linex_error,
    geometric_mean_absolute_error,
    geometric_mean_squared_error,
]


@pytest.mark.parametrize("function", functions)
def test_check_inputs(function):
    """Test check_consistent_lengths function in metrics."""
    kwargs = {
        "y_train": np.array([3, -0.5, 2, 7, 2]),
        "y_pred_benchmark": np.array([2.5, 0.0, 2, 8, 1.25]),
    }
    y_true = np.array([3, -0.5, 2, 7, 2])
    y_pred = np.array([2.5, 0.0, 2, 8, 1.25])
    function(y_true, y_pred, **kwargs)
    if "horizon_weight" in inspect.signature(function).parameters:
        with pytest.raises(ValueError):
            function(y_true, y_pred, horizon_weight=[0.1, 0.2], **kwargs)
    y_pred = np.array([[2.5, 0.0, 2, 8, 1.25], [2.5, 0.0, 2, 8, 1.25]])
    with pytest.raises(ValueError):
        function(y_true, y_pred, **kwargs)
    y_true = np.array([[3, -0.5, 2, 7], [3, -0.5, 2, 7]])
    with pytest.raises(ValueError):
        function(y_true, y_pred, **kwargs)
    y_true = np.array(
        [[2.5, 0.0, 2, 8, 1.25], [2.5, 0.0, 2, 8, 1.25], [2.5, 0.0, 2, 8, 1.25]]
    )
    with pytest.raises(ValueError):
        function(y_true, y_pred, **kwargs)
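
For readers skimming the diff, here is a minimal standalone sketch, assuming an environment with aeon and pytest installed, of the input-validation behaviour the new test_check_inputs test asserts: consistent y_true/y_pred evaluate normally, while mismatched shapes raise ValueError. The mean_absolute_error call below stands in for any metric in the parametrized functions list.

import numpy as np
import pytest

from aeon.performance_metrics.forecasting import mean_absolute_error

# Consistent 1D inputs: the metric evaluates as usual.
y_true = np.array([3, -0.5, 2, 7, 2])
y_pred = np.array([2.5, 0.0, 2, 8, 1.25])
print(mean_absolute_error(y_true, y_pred))  # 0.55 for these values

# Mismatched shapes: the input checks are expected to raise ValueError,
# which is what test_check_inputs asserts for every metric in `functions`.
y_pred_bad = np.array([[2.5, 0.0, 2, 8, 1.25], [2.5, 0.0, 2, 8, 1.25]])
with pytest.raises(ValueError):
    mean_absolute_error(y_true, y_pred_bad)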
