From ee89f46fb90dbd19e574ff3bd4ddb5fd67d41afb Mon Sep 17 00:00:00 2001 From: Mavs Date: Tue, 5 Mar 2024 21:20:30 +0100 Subject: [PATCH] increase coverage 2 --- README.md | 1 + atom/basemodel.py | 2 + atom/data_cleaning.py | 16 +- atom/nlp.py | 4 - atom/pipeline.py | 5 - atom/training.py | 8 + atom/utils/utils.py | 17 +- docs_sources/dependencies.md | 2 +- .../examples/automated_feature_scaling.ipynb | 330 +- .../examples/hyperparameter_tuning.ipynb | 325 +- .../examples/in_training_validation.ipynb | 718 +- .../examples/multiclass_classification.ipynb | 4 +- .../examples/multilabel_classification.ipynb | 328 +- .../examples/multioutput_regression.ipynb | 14771 ++++++++-------- docs_sources/examples/utilities.ipynb | 318 +- docs_sources/overrides/home.html | 7 +- examples/automated_feature_scaling.ipynb | 330 +- examples/hyperparameter_tuning.ipynb | 325 +- examples/in_training_validation.ipynb | 718 +- examples/multiclass_classification.ipynb | 4 +- examples/multilabel_classification.ipynb | 328 +- examples/multioutput_regression.ipynb | 14771 ++++++++-------- pyproject.toml | 2 +- tests/test_basemodel.py | 19 +- tests/test_basetransformer.py | 19 +- tests/test_data_cleaning.py | 9 +- tests/test_pipeline.py | 17 + tests/test_training.py | 15 +- tests/test_utils.py | 50 +- 29 files changed, 16581 insertions(+), 16882 deletions(-) diff --git a/README.md b/README.md index 582878acc..66412f3a4 100644 --- a/README.md +++ b/README.md @@ -101,6 +101,7 @@ Example steps taken by ATOM's pipeline: * [50+ plots to analyze the data and model performance](https://tvdboom.github.io/ATOM/latest/user_guide/plots/#available-plots) * [Avoid refactoring to test new pipelines](https://tvdboom.github.io/ATOM/latest/user_guide/data_management/#branches) * [Native support for GPU training](https://tvdboom.github.io/ATOM/latest/user_guide/accelerating/#gpu-acceleration) +* [Integration with polars, pyspark and pyarrow](https://tvdboom.github.io/ATOM/latest/user_guide/data_management/#data-engines) * [25+ example notebooks to get you started](https://tvdboom.github.io/ATOM/latest/examples/accelerating_cuml/) * [Full integration with multilabel and multioutput datasets](https://tvdboom.github.io/ATOM/latest/user_guide/data_management/#multioutput-tasks) * [Native support for sparse datasets](https://tvdboom.github.io/ATOM/latest/user_guide/data_management/#sparse-datasets) diff --git a/atom/basemodel.py b/atom/basemodel.py index d0a6c7786..9f55efa11 100644 --- a/atom/basemodel.py +++ b/atom/basemodel.py @@ -779,6 +779,8 @@ def _score_from_est( data=_check_response_method(estimator, scorer._response_method)(X), index=y.index, ) + if isinstance(y_pred, pd.DataFrame) and self.task is Task.binary_classification: + y_pred = y_pred.iloc[:, 1] # Return probability of the positive class return self._score_from_pred(scorer, y, y_pred, **kwargs) diff --git a/atom/data_cleaning.py b/atom/data_cleaning.py index f5619da07..6e4fc865b 100644 --- a/atom/data_cleaning.py +++ b/atom/data_cleaning.py @@ -2621,11 +2621,6 @@ def fit(self, X: XConstructor, y: YConstructor | None = None) -> Self: random_state=kwargs.pop("random_state", self.random_state), **kwargs, ) - else: - raise ValueError( - f"Invalid value for the strategy parameter, got {self.strategy}. " - f"Choose from: {', '.join(strategies)}." - ) num_cols = Xt.select_dtypes(include="number") @@ -2889,11 +2884,6 @@ def transform( } for strat in lst(self.strategy): - if strat not in ["zscore", *strategies]: - raise ValueError( - "Invalid value for the strategy parameter. 
" - f"Choose from: zscore, {', '.join(strategies)}." - ) if strat != "zscore" and str(self.method) != "drop": raise ValueError( "Invalid value for the method parameter. Only the zscore " @@ -2986,12 +2976,8 @@ def transform( yt = yt[outlier_rows] else: - # Replace the columns in X and y with the new values from objective + # Replace the columns in X with the new values from objective Xt.update(objective) - if isinstance(yt, pd.Series) and yt.name in objective: - yt.update(objective[str(yt.name)]) - elif isinstance(yt, pd.DataFrame): - yt.update(objective) return variable_return(self._convert(Xt), self._convert(yt)) diff --git a/atom/nlp.py b/atom/nlp.py index 392124eb9..634101d56 100644 --- a/atom/nlp.py +++ b/atom/nlp.py @@ -955,10 +955,6 @@ def fit(self, X: XConstructor, y: YConstructor | None = None) -> Self: ) self._estimator = estimator(**self.kwargs) - if hasattr(self._estimator, "set_output"): - # transform="pandas" fails for sparse output - self._estimator.set_output(transform="default") - self._log("Fitting Vectorizer...", 1) self._estimator.fit(Xt[self._corpus]) diff --git a/atom/pipeline.py b/atom/pipeline.py index 9507450d7..6ec604d6f 100644 --- a/atom/pipeline.py +++ b/atom/pipeline.py @@ -354,11 +354,6 @@ def _fit( else: cloned = clone(transformer) - # Attach internal attrs otherwise wiped by clone - for attr in ("_cols", "_train_only"): - if hasattr(transformer, attr): - setattr(cloned, attr, getattr(transformer, attr)) - with adjust(cloned, verbose=self._verbose): # Fit or load the current estimator from cache # Type ignore because routed_params is never None but diff --git a/atom/training.py b/atom/training.py index 0d993f2c8..7103d23ad 100644 --- a/atom/training.py +++ b/atom/training.py @@ -29,6 +29,14 @@ ) +__all__ = [ + "DirectClassifier", "DirectForecaster", "DirectRegressor", + "SuccessiveHalvingClassifier", "SuccessiveHalvingForecaster", + "SuccessiveHalvingRegressor", "TrainSizingClassifier", + "TrainSizingForecaster", "TrainSizingRegressor", +] + + class Direct(BaseEstimator, BaseTrainer): """Direct training approach. diff --git a/atom/utils/utils.py b/atom/utils/utils.py index 30b50bf50..43e22096e 100644 --- a/atom/utils/utils.py +++ b/atom/utils/utils.py @@ -2023,17 +2023,14 @@ def check_is_fitted( Whether the estimator is fitted. """ - if not _is_fitted(obj, attributes): - if exception: - raise NotFittedError( - f"This {type(obj).__name__} instance is not yet fitted. " - f"Call {'run' if hasattr(obj, 'run') else 'fit'} with " - "appropriate arguments before using this object." - ) - else: - return False + if not (is_fitted := _is_fitted(obj, attributes)) and exception: + raise NotFittedError( + f"This {type(obj).__name__} instance is not yet fitted. " + f"Call {'run' if hasattr(obj, 'run') else 'fit'} with " + "appropriate arguments before using this object." + ) - return True + return is_fitted def get_custom_scorer(metric: str | MetricFunction | Scorer) -> Scorer: diff --git a/docs_sources/dependencies.md b/docs_sources/dependencies.md index a9d45b453..b39ee8103 100644 --- a/docs_sources/dependencies.md +++ b/docs_sources/dependencies.md @@ -40,7 +40,7 @@ packages are necessary for its correct functioning. 
* **[optuna](https://optuna.org/)** (>=3.4.0) * **[pandas](https://pandas.pydata.org/)** (>=2.1.2) * **[plotly](https://plotly.com/python/)** (>=5.18.0) -* **[scikit-learn](https://scikit-learn.org/stable/)** (>=1.4.0) +* **[scikit-learn](https://scikit-learn.org/stable/)** (>=1.4.1.post1) * **[scipy](https://www.scipy.org/)** (>=1.10.1) * **[shap](https://github.com/slundberg/shap/)** (>=0.43.0) * **[sktime[forecasting]](http://www.sktime.net/en/latest/)** (>=0.26.0) diff --git a/docs_sources/examples/automated_feature_scaling.ipynb b/docs_sources/examples/automated_feature_scaling.ipynb index 411a5fec0..c15989dbc 100644 --- a/docs_sources/examples/automated_feature_scaling.ipynb +++ b/docs_sources/examples/automated_feature_scaling.ipynb @@ -105,226 +105,264 @@ " \n", " acronym\n", " fullname\n", + " estimator\n", + " module\n", + " handles_missing\n", " needs_scaling\n", + " accepts_sparse\n", + " native_multilabel\n", + " native_multioutput\n", + " validation\n", + " supports_engines\n", " \n", " \n", " \n", " \n", " 0\n", - " AdaB\n", - " AdaBoost\n", - " False\n", - " \n", - " \n", - " 1\n", - " Bag\n", - " Bagging\n", - " False\n", - " \n", - " \n", - " 2\n", - " BNB\n", - " BernoulliNB\n", - " False\n", - " \n", - " \n", - " 3\n", " CatB\n", " CatBoost\n", + " CatBoostClassifier\n", + " catboost.core\n", + " True\n", + " True\n", " True\n", - " \n", - " \n", - " 4\n", - " CatNB\n", - " CategoricalNB\n", - " False\n", - " \n", - " \n", - " 5\n", - " CNB\n", - " ComplementNB\n", - " False\n", - " \n", - " \n", - " 6\n", - " Tree\n", - " DecisionTree\n", - " False\n", - " \n", - " \n", - " 7\n", - " Dummy\n", - " Dummy\n", - " False\n", - " \n", - " \n", - " 8\n", - " ETree\n", - " ExtraTree\n", - " False\n", - " \n", - " \n", - " 9\n", - " ET\n", - " ExtraTrees\n", - " False\n", - " \n", - " \n", - " 10\n", - " GNB\n", - " GaussianNB\n", - " False\n", - " \n", - " \n", - " 11\n", - " GP\n", - " GaussianProcess\n", - " False\n", - " \n", - " \n", - " 12\n", - " GBM\n", - " GradientBoostingMachine\n", " False\n", - " \n", - " \n", - " 13\n", - " hGBM\n", - " HistGradientBoosting\n", " False\n", + " n_estimators\n", + " catboost\n", " \n", " \n", - " 14\n", + " 1\n", " KNN\n", " KNearestNeighbors\n", + " KNeighborsClassifier\n", + " sklearn.neighbors._classification\n", + " False\n", + " True\n", " True\n", + " True\n", + " True\n", + " None\n", + " sklearn, sklearnex, cuml\n", " \n", " \n", - " 15\n", + " 2\n", " LGB\n", " LightGBM\n", + " LGBMClassifier\n", + " lightgbm.sklearn\n", + " True\n", + " True\n", " True\n", - " \n", - " \n", - " 16\n", - " LDA\n", - " LinearDiscriminantAnalysis\n", " False\n", + " False\n", + " n_estimators\n", + " lightgbm\n", " \n", " \n", - " 17\n", + " 3\n", " lSVM\n", " LinearSVM\n", + " LinearSVC\n", + " sklearn.svm._classes\n", + " False\n", + " True\n", " True\n", + " False\n", + " False\n", + " None\n", + " sklearn, cuml\n", " \n", " \n", - " 18\n", + " 4\n", " LR\n", " LogisticRegression\n", + " LogisticRegression\n", + " sklearn.linear_model._logistic\n", + " False\n", + " True\n", " True\n", + " False\n", + " False\n", + " None\n", + " sklearn, sklearnex, cuml\n", " \n", " \n", - " 19\n", + " 5\n", " MLP\n", " MultiLayerPerceptron\n", + " MLPClassifier\n", + " sklearn.neural_network._multilayer_perceptron\n", + " False\n", + " True\n", + " True\n", " True\n", - " \n", - " \n", - " 20\n", - " MNB\n", - " MultinomialNB\n", " False\n", + " max_iter\n", + " sklearn\n", " \n", " \n", - " 21\n", + " 6\n", " PA\n", " PassiveAggressive\n", + " 
PassiveAggressiveClassifier\n", + " sklearn.linear_model._passive_aggressive\n", + " False\n", " True\n", + " True\n", + " False\n", + " False\n", + " max_iter\n", + " sklearn\n", " \n", " \n", - " 22\n", + " 7\n", " Perc\n", " Perceptron\n", + " Perceptron\n", + " sklearn.linear_model._perceptron\n", + " False\n", " True\n", - " \n", - " \n", - " 23\n", - " QDA\n", - " QuadraticDiscriminantAnalysis\n", " False\n", + " False\n", + " False\n", + " max_iter\n", + " sklearn\n", " \n", " \n", - " 24\n", + " 8\n", " RNN\n", " RadiusNearestNeighbors\n", - " True\n", - " \n", - " \n", - " 25\n", - " RF\n", - " RandomForest\n", + " RadiusNeighborsClassifier\n", + " sklearn.neighbors._classification\n", " False\n", + " True\n", + " True\n", + " True\n", + " True\n", + " None\n", + " sklearn\n", " \n", " \n", - " 26\n", + " 9\n", " Ridge\n", " Ridge\n", + " RidgeClassifier\n", + " sklearn.linear_model._ridge\n", + " False\n", + " True\n", " True\n", + " True\n", + " False\n", + " None\n", + " sklearn, sklearnex, cuml\n", " \n", " \n", - " 27\n", + " 10\n", " SGD\n", " StochasticGradientDescent\n", + " SGDClassifier\n", + " sklearn.linear_model._stochastic_gradient\n", + " False\n", + " True\n", " True\n", + " False\n", + " False\n", + " max_iter\n", + " sklearn\n", " \n", " \n", - " 28\n", + " 11\n", " SVM\n", " SupportVectorMachine\n", + " SVC\n", + " sklearn.svm._classes\n", + " False\n", " True\n", + " True\n", + " False\n", + " False\n", + " None\n", + " sklearn, sklearnex, cuml\n", " \n", " \n", - " 29\n", + " 12\n", " XGB\n", " XGBoost\n", + " XGBClassifier\n", + " xgboost.sklearn\n", " True\n", + " True\n", + " True\n", + " False\n", + " False\n", + " n_estimators\n", + " xgboost\n", " \n", " \n", "\n", "" ], "text/plain": [ - " acronym fullname needs_scaling\n", - "0 AdaB AdaBoost False\n", - "1 Bag Bagging False\n", - "2 BNB BernoulliNB False\n", - "3 CatB CatBoost True\n", - "4 CatNB CategoricalNB False\n", - "5 CNB ComplementNB False\n", - "6 Tree DecisionTree False\n", - "7 Dummy Dummy False\n", - "8 ETree ExtraTree False\n", - "9 ET ExtraTrees False\n", - "10 GNB GaussianNB False\n", - "11 GP GaussianProcess False\n", - "12 GBM GradientBoostingMachine False\n", - "13 hGBM HistGradientBoosting False\n", - "14 KNN KNearestNeighbors True\n", - "15 LGB LightGBM True\n", - "16 LDA LinearDiscriminantAnalysis False\n", - "17 lSVM LinearSVM True\n", - "18 LR LogisticRegression True\n", - "19 MLP MultiLayerPerceptron True\n", - "20 MNB MultinomialNB False\n", - "21 PA PassiveAggressive True\n", - "22 Perc Perceptron True\n", - "23 QDA QuadraticDiscriminantAnalysis False\n", - "24 RNN RadiusNearestNeighbors True\n", - "25 RF RandomForest False\n", - "26 Ridge Ridge True\n", - "27 SGD StochasticGradientDescent True\n", - "28 SVM SupportVectorMachine True\n", - "29 XGB XGBoost True" + " acronym fullname estimator \\\n", + "0 CatB CatBoost CatBoostClassifier \n", + "1 KNN KNearestNeighbors KNeighborsClassifier \n", + "2 LGB LightGBM LGBMClassifier \n", + "3 lSVM LinearSVM LinearSVC \n", + "4 LR LogisticRegression LogisticRegression \n", + "5 MLP MultiLayerPerceptron MLPClassifier \n", + "6 PA PassiveAggressive PassiveAggressiveClassifier \n", + "7 Perc Perceptron Perceptron \n", + "8 RNN RadiusNearestNeighbors RadiusNeighborsClassifier \n", + "9 Ridge Ridge RidgeClassifier \n", + "10 SGD StochasticGradientDescent SGDClassifier \n", + "11 SVM SupportVectorMachine SVC \n", + "12 XGB XGBoost XGBClassifier \n", + "\n", + " module handles_missing \\\n", + "0 catboost.core True \n", + "1 
sklearn.neighbors._classification False \n", + "2 lightgbm.sklearn True \n", + "3 sklearn.svm._classes False \n", + "4 sklearn.linear_model._logistic False \n", + "5 sklearn.neural_network._multilayer_perceptron False \n", + "6 sklearn.linear_model._passive_aggressive False \n", + "7 sklearn.linear_model._perceptron False \n", + "8 sklearn.neighbors._classification False \n", + "9 sklearn.linear_model._ridge False \n", + "10 sklearn.linear_model._stochastic_gradient False \n", + "11 sklearn.svm._classes False \n", + "12 xgboost.sklearn True \n", + "\n", + " needs_scaling accepts_sparse native_multilabel native_multioutput \\\n", + "0 True True False False \n", + "1 True True True True \n", + "2 True True False False \n", + "3 True True False False \n", + "4 True True False False \n", + "5 True True True False \n", + "6 True True False False \n", + "7 True False False False \n", + "8 True True True True \n", + "9 True True True False \n", + "10 True True False False \n", + "11 True True False False \n", + "12 True True False False \n", + "\n", + " validation supports_engines \n", + "0 n_estimators catboost \n", + "1 None sklearn, sklearnex, cuml \n", + "2 n_estimators lightgbm \n", + "3 None sklearn, cuml \n", + "4 None sklearn, sklearnex, cuml \n", + "5 max_iter sklearn \n", + "6 max_iter sklearn \n", + "7 max_iter sklearn \n", + "8 None sklearn \n", + "9 None sklearn, sklearnex, cuml \n", + "10 max_iter sklearn \n", + "11 None sklearn, sklearnex, cuml \n", + "12 n_estimators xgboost " ] }, "execution_count": 4, @@ -334,7 +372,7 @@ ], "source": [ "# Check which models require feature scaling\n", - "atom.available_models()[[\"acronym\", \"fullname\", \"needs_scaling\"]]" + "atom.available_models(needs_scaling=True)" ] }, { @@ -356,22 +394,22 @@ "Fit ---------------------------------------------\n", "Train evaluation --> f1: 0.9913\n", "Test evaluation --> f1: 0.9861\n", - "Time elapsed: 0.124s\n", + "Time elapsed: 0.120s\n", "-------------------------------------------------\n", - "Time: 0.124s\n", + "Time: 0.120s\n", "\n", "\n", "Results for Bagging:\n", "Fit ---------------------------------------------\n", "Train evaluation --> f1: 0.9982\n", "Test evaluation --> f1: 0.9444\n", - "Time elapsed: 0.070s\n", + "Time elapsed: 0.067s\n", "-------------------------------------------------\n", - "Time: 0.070s\n", + "Time: 0.067s\n", "\n", "\n", "Final results ==================== >>\n", - "Total time: 0.200s\n", + "Total time: 0.194s\n", "-------------------------------------\n", "LogisticRegression --> f1: 0.9861 !\n", "Bagging --> f1: 0.9444\n" diff --git a/docs_sources/examples/hyperparameter_tuning.ipynb b/docs_sources/examples/hyperparameter_tuning.ipynb index 18c936fbc..acb242341 100644 --- a/docs_sources/examples/hyperparameter_tuning.ipynb +++ b/docs_sources/examples/hyperparameter_tuning.ipynb @@ -98,34 +98,34 @@ "Running hyperparameter tuning for MultiLayerPerceptron...\n", "| trial | hidden_layer_1 | hidden_layer_2 | hidden_layer_3 | hidden_layer_4 | f1 | best_f1 | ap | best_ap | time_trial | time_ht | state |\n", "| ----- | -------------- | -------------- | -------------- | -------------- | ------- | ------- | ------- | ------- | ---------- | ------- | -------- |\n", - "| 0 | 3 | 17 | 10 | 2 | 0.9464 | 0.9464 | 0.9512 | 0.9512 | 17.580s | 17.580s | COMPLETE |\n", - "| 1 | 2 | 11 | 12 | 3 | 0.9744 | 0.9744 | 0.9603 | 0.9603 | 17.440s | 35.021s | COMPLETE |\n", - "| 2 | 3 | 15 | 14 | 4 | 0.9915 | 0.9915 | 0.9831 | 0.9831 | 13.108s | 48.129s | COMPLETE |\n", - "| 3 | 2 | 19 | 10 | 4 | 
0.9655 | 0.9915 | 0.954 | 0.9831 | 12.274s | 01m:00s | COMPLETE |\n", - "| 4 | 3 | 16 | 11 | 2 | 0.9661 | 0.9915 | 0.9445 | 0.9831 | 0.856s | 01m:01s | COMPLETE |\n", - "| 5 | 4 | 20 | 13 | 4 | 0.9739 | 0.9915 | 0.9703 | 0.9831 | 0.905s | 01m:02s | COMPLETE |\n", - "| 6 | 4 | 19 | 10 | 2 | 0.9828 | 0.9915 | 0.9767 | 0.9831 | 1.051s | 01m:03s | COMPLETE |\n", - "| 7 | 2 | 19 | 11 | 3 | 0.7733 | 0.9915 | 0.6304 | 0.9831 | 1.028s | 01m:04s | COMPLETE |\n", - "| 8 | 4 | 15 | 17 | 2 | 0.9915 | 0.9915 | 0.9831 | 0.9831 | 0.896s | 01m:05s | COMPLETE |\n", - "| 9 | 4 | 19 | 10 | 4 | 0.9828 | 0.9915 | 0.9767 | 0.9831 | 0.893s | 01m:06s | COMPLETE |\n", + "| 0 | 3 | 17 | 10 | 2 | 0.9464 | 0.9464 | 0.9844 | 0.9844 | 9.522s | 9.522s | COMPLETE |\n", + "| 1 | 2 | 11 | 12 | 3 | 0.9744 | 0.9744 | 0.9991 | 0.9991 | 9.369s | 18.891s | COMPLETE |\n", + "| 2 | 3 | 15 | 14 | 4 | 0.9915 | 0.9915 | 0.9978 | 0.9991 | 11.460s | 30.351s | COMPLETE |\n", + "| 3 | 2 | 19 | 10 | 4 | 0.9655 | 0.9915 | 0.9878 | 0.9991 | 11.359s | 41.709s | COMPLETE |\n", + "| 4 | 3 | 16 | 11 | 2 | 0.9661 | 0.9915 | 0.9981 | 0.9991 | 0.653s | 42.362s | COMPLETE |\n", + "| 5 | 4 | 20 | 13 | 4 | 0.9739 | 0.9915 | 0.9989 | 0.9991 | 0.610s | 42.972s | COMPLETE |\n", + "| 6 | 4 | 19 | 10 | 2 | 0.9828 | 0.9915 | 0.9907 | 0.9991 | 0.606s | 43.578s | COMPLETE |\n", + "| 7 | 2 | 19 | 11 | 3 | 0.7733 | 0.9915 | 0.9997 | 0.9997 | 0.601s | 44.179s | COMPLETE |\n", + "| 8 | 4 | 15 | 17 | 2 | 0.9915 | 0.9915 | 0.9997 | 0.9997 | 0.606s | 44.785s | COMPLETE |\n", + "| 9 | 4 | 19 | 10 | 4 | 0.9828 | 0.9915 | 0.9822 | 0.9997 | 0.610s | 45.395s | COMPLETE |\n", "Hyperparameter tuning ---------------------------\n", - "Best trial --> 2\n", + "Best trial --> 8\n", "Best parameters:\n", - " --> hidden_layer_sizes: (3, 15, 14, 4)\n", - "Best evaluation --> f1: 0.9915 ap: 0.9831\n", - "Time elapsed: 01m:06s\n", + " --> hidden_layer_sizes: (4, 15, 17, 2)\n", + "Best evaluation --> f1: 0.9915 ap: 0.9997\n", + "Time elapsed: 45.395s\n", "Fit ---------------------------------------------\n", - "Train evaluation --> f1: 0.993 ap: 0.998\n", - "Test evaluation --> f1: 0.9861 ap: 0.995\n", - "Time elapsed: 2.396s\n", + "Train evaluation --> f1: 0.9965 ap: 0.9991\n", + "Test evaluation --> f1: 0.9718 ap: 0.9938\n", + "Time elapsed: 1.740s\n", "-------------------------------------------------\n", - "Time: 01m:08s\n", + "Time: 47.135s\n", "\n", "\n", "Final results ==================== >>\n", - "Total time: 01m:09s\n", + "Total time: 47.340s\n", "-------------------------------------\n", - "MultiLayerPerceptron --> f1: 0.9861 ap: 0.995\n" + "MultiLayerPerceptron --> f1: 0.9718 ap: 0.9938\n" ] } ], @@ -144,7 +144,8 @@ " \"hidden_layer_3\": IntDistribution(10, 20),\n", " \"hidden_layer_4\": IntDistribution(2, 4),\n", " }\n", - " }\n", + " },\n", + " errors='raise'\n", ")" ] }, @@ -156,7 +157,7 @@ { "data": { "text/plain": [ - "FrozenTrial(number=2, state=1, values=[0.9914529914529915, 0.9830508474576272], datetime_start=datetime.datetime(2024, 3, 3, 13, 58, 16, 682005), datetime_complete=datetime.datetime(2024, 3, 3, 13, 58, 29, 790092), params={'hidden_layer_1': 3, 'hidden_layer_2': 15, 'hidden_layer_3': 14, 'hidden_layer_4': 4}, user_attrs={'estimator': MLPClassifier(hidden_layer_sizes=(3, 15, 14, 4), random_state=1)}, system_attrs={'nsga2:generation': 0}, intermediate_values={}, distributions={'hidden_layer_1': IntDistribution(high=4, log=False, low=2, step=1), 'hidden_layer_2': IntDistribution(high=20, log=False, low=10, step=1), 'hidden_layer_3': 
IntDistribution(high=20, log=False, low=10, step=1), 'hidden_layer_4': IntDistribution(high=4, log=False, low=2, step=1)}, trial_id=2, value=None)" + "FrozenTrial(number=8, state=1, values=[0.9914529914529915, 0.9997077732320282], datetime_start=datetime.datetime(2024, 3, 5, 12, 9, 40, 773486), datetime_complete=datetime.datetime(2024, 3, 5, 12, 9, 41, 379399), params={'hidden_layer_1': 4, 'hidden_layer_2': 15, 'hidden_layer_3': 17, 'hidden_layer_4': 2}, user_attrs={'estimator': MLPClassifier(hidden_layer_sizes=(4, 15, 17, 2), random_state=1)}, system_attrs={'nsga2:generation': 0}, intermediate_values={}, distributions={'hidden_layer_1': IntDistribution(high=4, log=False, low=2, step=1), 'hidden_layer_2': IntDistribution(high=20, log=False, low=10, step=1), 'hidden_layer_3': IntDistribution(high=20, log=False, low=10, step=1), 'hidden_layer_4': IntDistribution(high=4, log=False, low=2, step=1)}, trial_id=8, value=None)" ] }, "execution_count": 5, @@ -289,16 +290,16 @@ ], "xaxis": "x", "y": [ - 0.951218835026931, - 0.9603143343582446, - 0.9830508474576272, - 0.9539626738354962, - 0.9444902548725637, - 0.9703174728425261, - 0.9766840717572248, - 0.6304347826086957, - 0.9830508474576272, - 0.9766840717572248 + 0.9844020103837696, + 0.9991280221957989, + 0.9978420724898686, + 0.9878052289964802, + 0.998086355529132, + 0.998854798152083, + 0.9907480263309556, + 0.9997077732320282, + 0.9997077732320282, + 0.9821684616842536 ], "yaxis": "y" } @@ -1162,8 +1163,8 @@ 1 ], "range": [ - 0.7586983832766965, - 1.0060879415096282 + 0.758660163586355, + 1.0061261611999697 ], "title": { "font": { @@ -1182,8 +1183,8 @@ 1 ], "range": [ - 0.6065253140439666, - 1.0069603160223564 + 0.9809791919376295, + 1.0008970429786523 ], "title": { "font": { @@ -1196,9 +1197,9 @@ } }, "text/html": [ - "
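
Note: a minimal, self-contained sketch of the binary-classification scoring fix added to `atom/basemodel.py` in this patch — when the scorer's response method yields a per-class probability DataFrame, only the positive-class column is kept before scoring. The `if` branch reproduces the added lines; the toy data, the `roc_auc_score` scorer, and the plain `isinstance` guard (the real code additionally checks `self.task is Task.binary_classification`) are illustrative assumptions.

```python
import pandas as pd
from sklearn.metrics import roc_auc_score

y_true = pd.Series([0, 1, 1, 0])

# Response methods like predict_proba return one column per class.
y_pred = pd.DataFrame({0: [0.8, 0.3, 0.1, 0.6], 1: [0.2, 0.7, 0.9, 0.4]})

# The patched branch: for binary tasks, reduce the two-column probability
# frame to the probability of the positive class (second column) so that
# threshold-free metrics receive a 1d score array.
if isinstance(y_pred, pd.DataFrame):
    y_pred = y_pred.iloc[:, 1]  # Return probability of the positive class

print(roc_auc_score(y_true, y_pred))  # 1.0 on this toy example
```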
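Likewise, the `check_is_fitted` refactor in `atom/utils/utils.py` collapses the nested `if/else` into a single walrus-bound check with unchanged behavior. A standalone sketch under stated assumptions — `_is_fitted` here is a stub standing in for ATOM's real fitted-attribute inspection:

```python
from sklearn.exceptions import NotFittedError


def _is_fitted(obj, attributes=None) -> bool:
    # Stub (assumption): consider an object fitted if it exposes any
    # attribute ending in a single underscore, per sklearn convention.
    return any(k.endswith("_") and not k.endswith("__") for k in vars(obj))


def check_is_fitted(obj, exception: bool = True, attributes=None) -> bool:
    """Return whether `obj` is fitted, optionally raising instead."""
    # The walrus operator binds the check result once, replacing the
    # previous nested if/else while returning the same values and
    # raising the same NotFittedError message.
    if not (is_fitted := _is_fitted(obj, attributes)) and exception:
        raise NotFittedError(
            f"This {type(obj).__name__} instance is not yet fitted. "
            f"Call {'run' if hasattr(obj, 'run') else 'fit'} with "
            "appropriate arguments before using this object."
        )
    return is_fitted
```

For example, `check_is_fitted(model, exception=False)` now returns `False` for an unfitted object instead of raising, exactly as before the refactor.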