18 | 18 |     assert_almost_equal,
19 | 19 |     assert_array_almost_equal,
20 | 20 |     assert_array_equal,
21 | | -     assert_raises,
22 | 21 | )
23 | 22 | from sklearn.utils.validation import check_is_fitted, check_random_state
24 | 23 |
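Every hunk below applies the same mechanical transformation: the call-style `assert_raises(Error, func, *args, **kwargs)` helper from `numpy.testing` is replaced with pytest's context-manager idiom, so the expected-failure call is written as a normal Python statement. A minimal sketch of the before/after pattern (the `divide` function is a hypothetical stand-in for the estimator methods under test):

import pytest


def divide(a, b):
    return a / b


def test_divide_by_zero():
    # Old style: assert_raises(ZeroDivisionError, divide, 1, 0)
    # New style: the context manager scopes the expectation to exactly the
    # statements in its body, so an exception raised by setup code cannot
    # satisfy the assertion by accident.
    with pytest.raises(ZeroDivisionError):
        divide(1, 0)

One practical payoff, visible in the multi-line cases below: keyword arguments no longer have to be threaded through the helper, and the call reads exactly as it would in production code.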
@@ -264,7 +263,8 @@ def check_predict_quantiles_toy(name):
264 | 263 |         weighted_leaves=False,
265 | 264 |         oob_score=oob_score,
266 | 265 |     )
267 | | -     assert_raises(AssertionError, assert_allclose, y_pred1, y_pred2)
| 266 | +     with pytest.raises(AssertionError):
| 267 | +         assert_allclose(y_pred1, y_pred2)
268 | 268 |
269 | 269 |     # Check that leaf weighting without weighted quantiles does nothing.
270 | 270 |     y_pred1 = est.predict(
@@ -579,8 +579,10 @@ def check_predict_quantiles(
579 | 579 |     assert np.any(y_pred_1 != y_pred_2)
580 | 580 |
581 | 581 |     # Check error if invalid quantiles.
582 | | -     assert_raises(ValueError, est.predict, X_test, -0.01)
583 | | -     assert_raises(ValueError, est.predict, X_test, 1.01)
| 582 | +     with pytest.raises(ValueError):
| 583 | +         est.predict(X_test, -0.01)
| 584 | +     with pytest.raises(ValueError):
| 585 | +         est.predict(X_test, 1.01)
584 | 586 |
585 | 587 |
586 | 588 | @pytest.mark.parametrize("name", FOREST_REGRESSORS)
@@ -720,7 +722,8 @@ def check_quantile_ranks(name):
720 | 722 |
721 | 723 |     # Check error if training and test number of targets are not equal.
722 | 724 |     est.fit(X_train, y_train[:, 0])  # training target size = 1
723 | | -     assert_raises(ValueError, est.quantile_ranks, X_test, y_test[:, :2])  # test target size = 2
| 725 | +     with pytest.raises(ValueError):
| 726 | +         est.quantile_ranks(X_test, y_test[:, :2])  # test target size = 2
724 | 727 |
725 | 728 |
726 | 729 | @pytest.mark.parametrize("name", FOREST_REGRESSORS)
@@ -773,10 +776,12 @@ def check_proximity_counts(name):
773 | 776 |     assert_array_equal([len(p) for p in proximities], [len(e) for e in expected])
774 | 777 |
775 | 778 |     # Check error if `max_proximities` < 1.
776 | | -     assert_raises(ValueError, est.proximity_counts, X, max_proximities=0)
| 779 | +     with pytest.raises(ValueError):
| 780 | +         est.proximity_counts(X, max_proximities=0)
777 | 781 |
778 | 782 |     # Check error if `max_proximities` is a float.
779 | | -     assert_raises(ValueError, est.proximity_counts, X, max_proximities=1.5)
| 783 | +     with pytest.raises(ValueError):
| 784 | +         est.proximity_counts(X, max_proximities=1.5)
780 | 785 |
781 | 786 |     # Check that proximity counts match expected counts without splits.
782 | 787 |     est = ForestRegressor(
@@ -869,14 +874,25 @@ def check_max_samples_leaf(name):
869 | 874 |     for param_validation in [True, False]:
870 | 875 |         est = ForestRegressor(n_estimators=1, max_samples_leaf=max_samples_leaf)
871 | 876 |         est.param_validation = param_validation
872 | | -         assert_raises(ValueError, est.fit, X, y)
| 877 | +         with pytest.raises(ValueError):
| 878 | +             est.fit(X, y)
873 | 879 |         est.max_samples_leaf = max_samples_leaf
874 | | -         assert_raises(ValueError, est._get_y_train_leaves, X, y)
| 880 | +         with pytest.raises(ValueError):
| 881 | +             est._get_y_train_leaves(X, y)
875 | 882 |
876 | 883 |
877 | 884 | @pytest.mark.parametrize("name", FOREST_REGRESSORS)
878 | 885 | def test_max_samples_leaf(name):
| 886 | +     """
| 887 | +     Test that `max_samples_leaf` is correctly passed to the `fit` method,
| 888 | +     and that it results in the correct maximum leaf size.
| 889 | +
| 890 | +     Parameters
| 891 | +     ----------
| 892 | +     name : str
| 893 | +         The name of the forest regressor to test.
| 894 | +     """
879 | 895 |     check_max_samples_leaf(name)
880 | 896 |
881 | 897 |
882 | 898 | def check_oob_samples(name):
@@ -1065,16 +1081,16 @@ def check_predict_oob(
1065 | 1081 |     assert_allclose(y_pred_oob1, y_pred_oob2)
1066 | 1082 |
1067 | 1083 |     # Check error if OOB score without `indices` does not match training count.
1068 | | -     assert_raises(ValueError, est.predict, X[:1], oob_score=True)
| 1084 | +     with pytest.raises(ValueError):
| 1085 | +         est.predict(X[:1], oob_score=True)
1069 | 1086 |
1070 | 1087 |     # Check error if OOB score with `indices` does not match samples count.
1071 | | -     assert_raises(
1072 | | -         ValueError,
1073 | | -         est.predict,
1074 | | -         X,
1075 | | -         oob_score=True,
1076 | | -         indices=-np.ones(len(X) - 1),
1077 | | -     )
| 1088 | +     with pytest.raises(ValueError):
| 1089 | +         est.predict(
| 1090 | +             X,
| 1091 | +             oob_score=True,
| 1092 | +             indices=-np.ones(len(X) - 1),
| 1093 | +         )
1078 | 1094 |
1079 | 1095 |     # Check warning if not enough estimators.
1080 | 1096 |     with np.errstate(divide="ignore", invalid="ignore"):
@@ -1106,30 +1122,28 @@ def check_predict_oob(
1106 | 1122 |     # Check error if no bootstrapping.
1107 | 1123 |     est = ForestRegressor(n_estimators=1, bootstrap=False)
1108 | 1124 |     est.fit(X, y)
1109 | | -     assert_raises(
1110 | | -         ValueError,
1111 | | -         est.predict,
1112 | | -         X,
1113 | | -         weighted_quantile=weighted_quantile,
1114 | | -         aggregate_leaves_first=aggregate_leaves_first,
1115 | | -         oob_score=True,
1116 | | -     )
| 1125 | +     with pytest.raises(ValueError):
| 1126 | +         est.predict(
| 1127 | +             X,
| 1128 | +             weighted_quantile=weighted_quantile,
| 1129 | +             aggregate_leaves_first=aggregate_leaves_first,
| 1130 | +             oob_score=True,
| 1131 | +         )
1117 | 1132 |     with warnings.catch_warnings():
1118 | 1133 |         warnings.simplefilter("ignore", UserWarning)
1119 | 1134 |         assert np.all(est._get_unsampled_indices(est.estimators_[0]) == np.array([]))
1120 | 1135 |
1121 | 1136 |     # Check error if number of scoring and training samples are different.
1122 | 1137 |     est = ForestRegressor(n_estimators=1, bootstrap=True)
1123 | 1138 |     est.fit(X, y)
1124 | | -     assert_raises(
1125 | | -         ValueError,
1126 | | -         est.predict,
1127 | | -         X[:1],
1128 | | -         y[:1],
1129 | | -         weighted_quantile=weighted_quantile,
1130 | | -         aggregate_leaves_first=aggregate_leaves_first,
1131 | | -         oob_score=True,
1132 | | -     )
| 1139 | +     with pytest.raises(ValueError):
| 1140 | +         est.predict(
| 1141 | +             X[:1],
| 1142 | +             y[:1],
| 1143 | +             weighted_quantile=weighted_quantile,
| 1144 | +             aggregate_leaves_first=aggregate_leaves_first,
| 1145 | +             oob_score=True,
| 1146 | +         )
1133 | 1147 |
1134 | 1148 |
1135 | 1149 | @pytest.mark.parametrize("name", FOREST_REGRESSORS)
@@ -1200,12 +1214,14 @@ def check_quantile_ranks_oob(name):
1200 | 1214 |     # Check error if no bootstrapping.
1201 | 1215 |     est = ForestRegressor(n_estimators=1, bootstrap=False)
1202 | 1216 |     est.fit(X, y)
1203 | | -     assert_raises(ValueError, est.quantile_ranks, X, y, oob_score=True)
| 1217 | +     with pytest.raises(ValueError):
| 1218 | +         est.quantile_ranks(X, y, oob_score=True)
1204 | 1219 |
1205 | 1220 |     # Check error if number of scoring and training samples are different.
1206 | 1221 |     est = ForestRegressor(n_estimators=1, bootstrap=True)
1207 | 1222 |     est.fit(X, y)
1208 | | -     assert_raises(ValueError, est.quantile_ranks, X[:1], y[:1], oob_score=True)
| 1223 | +     with pytest.raises(ValueError):
| 1224 | +         est.quantile_ranks(X[:1], y[:1], oob_score=True)
1209 | 1225 |
1210 | 1226 |
1211 | 1227 | @pytest.mark.parametrize("name", FOREST_REGRESSORS)
@@ -1284,7 +1300,8 @@ def check_proximity_counts_oob(name):
1284 | 1300 |     # Check error if no bootstrapping.
1285 | 1301 |     est = ForestRegressor(n_estimators=1, max_samples_leaf=None, bootstrap=False)
1286 | 1302 |     est.fit(X, y)
1287 | | -     assert_raises(ValueError, est.proximity_counts, X, oob_score=True)
| 1303 | +     with pytest.raises(ValueError):
| 1304 | +         est.proximity_counts(X, oob_score=True)
1288 | 1305 |
1289 | 1306 |
1290 | 1307 | @pytest.mark.parametrize("name", FOREST_REGRESSORS)
@@ -1357,7 +1374,8 @@ def check_monotonic_constraints(name, max_samples_leaf):
1357 | 1374 |         max_leaf_nodes=n_samples_train,
1358 | 1375 |         bootstrap=True,
1359 | 1376 |     )
1360 | | -     assert_raises(ValueError, est.fit, X_train, y_train)
| 1377 | +     with pytest.raises(ValueError):
| 1378 | +         est.fit(X_train, y_train)
1361 | 1379 |
1362 | 1380 |
1363 | 1381 | @pytest.mark.parametrize("name", FOREST_REGRESSORS)
@@ -1466,8 +1484,10 @@ def test_calc_quantile():
1466 | 1484 |     assert actual1 != actual2
1467 | 1485 |
1468 | 1486 |     # Check error if invalid parameters.
1469 | | -     assert_raises(TypeError, calc_quantile, [1, 2], 0.5)
1470 | | -     assert_raises(TypeError, calc_quantile, [1, 2], [0.5], interpolation=None)
| 1487 | +     with pytest.raises(TypeError):
| 1488 | +         calc_quantile([1, 2], 0.5)
| 1489 | +     with pytest.raises(TypeError):
| 1490 | +         calc_quantile([1, 2], [0.5], interpolation=None)
1471 | 1491 |
1472 | 1492 |
1473 | 1493 | def test_calc_weighted_quantile():
@@ -1585,8 +1605,10 @@ def _dicts_to_input_pairs(input_dicts):
1585 | 1605 |     assert actual1 != actual2
1586 | 1606 |
1587 | 1607 |     # Check error if invalid parameters.
1588 | | -     assert_raises(TypeError, calc_weighted_quantile, [1, 2], [1, 1], 0.5)
1589 | | -     assert_raises(TypeError, calc_weighted_quantile, [1, 2], [1, 1], [0.5], interpolation=None)
| 1608 | +     with pytest.raises(TypeError):
| 1609 | +         calc_weighted_quantile([1, 2], [1, 1], 0.5)
| 1610 | +     with pytest.raises(TypeError):
| 1611 | +         calc_weighted_quantile([1, 2], [1, 1], [0.5], interpolation=None)
1590 | 1612 |
1591 | 1613 |
1592 | 1614 | def test_calc_quantile_rank():
@@ -1635,5 +1657,7 @@ def test_calc_quantile_rank():
1635 | 1657 |     assert actual1 != actual2
1636 | 1658 |
1637 | 1659 |     # Check error if invalid parameters.
1638 | | -     assert_raises(TypeError, calc_quantile_rank, [1, 2], [1])
1639 | | -     assert_raises(TypeError, calc_quantile_rank, [1, 2], float(1), kind=None)
| 1660 | +     with pytest.raises(TypeError):
| 1661 | +         calc_quantile_rank([1, 2], [1])
| 1662 | +     with pytest.raises(TypeError):
| 1663 | +         calc_quantile_rank([1, 2], float(1), kind=None)
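A possible follow-up refinement, not part of this diff: `pytest.raises` also accepts a `match` argument, applied as `re.search` against the string form of the raised exception, which would let these tests pin down which `ValueError` or `TypeError` fired. A small sketch, using a hypothetical validator rather than the library's real functions:

import pytest


def set_rate(rate):
    # Hypothetical validator, used only to illustrate the API.
    if not 0.0 <= rate <= 1.0:
        raise ValueError(f"rate must be in [0, 1], got {rate}")
    return rate


def test_set_rate_message():
    # `match` checks the message; `excinfo.value` is the exception instance.
    with pytest.raises(ValueError, match=r"must be in \[0, 1\]") as excinfo:
        set_rate(1.01)
    assert "1.01" in str(excinfo.value)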