diff --git a/doc/whats_new/v0.11.rst b/doc/whats_new/v0.11.rst
index 9c7feefea..7588c71a1 100644
--- a/doc/whats_new/v0.11.rst
+++ b/doc/whats_new/v0.11.rst
@@ -9,6 +9,10 @@ Changelog
 Bug fixes
 .........
 
+- Fix a bug in :func:`~imblearn.metrics.classification_report_imbalanced` where the
+  parameter `target_names` was not taken into account when `output_dict=True`.
+  :pr:`989` by :user:`AYY7 <AYY7>`.
+
 - :class:`~imblearn.over_sampling.SMOTENC` now handles mix types of data type
   such as `bool` and `pd.category` by delegating the conversion to scikit-learn encoder.
   :pr:`1002` by :user:`Guillaume Lemaitre <glemaitre>`.
diff --git a/imblearn/metrics/_classification.py b/imblearn/metrics/_classification.py
index a5f896316..489066d98 100644
--- a/imblearn/metrics/_classification.py
+++ b/imblearn/metrics/_classification.py
@@ -1038,7 +1038,7 @@ class 2 1.00 0.67 1.00 0.80 0.82 0.64\
             report_dict_label[headers[-1]] = support[i]
             report += fmt % tuple(values)
-            report_dict[label] = report_dict_label
+            report_dict[target_names[i]] = report_dict_label
 
         report += "\n"
 
 
diff --git a/imblearn/metrics/tests/test_classification.py b/imblearn/metrics/tests/test_classification.py
index cb9d25309..8169cee81 100644
--- a/imblearn/metrics/tests/test_classification.py
+++ b/imblearn/metrics/tests/test_classification.py
@@ -459,7 +459,7 @@ def test_iba_error_y_score_prob_error(score_loss):
         aps(y_true, y_pred)
 
 
-def test_classification_report_imbalanced_dict():
+def test_classification_report_imbalanced_dict_with_target_names():
     iris = datasets.load_iris()
 
     y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
@@ -471,12 +471,42 @@
         output_dict=True,
     )
     outer_keys = set(report.keys())
-    inner_keys = set(report[0].keys())
+    inner_keys = set(report["setosa"].keys())
 
     expected_outer_keys = {
-        0,
-        1,
-        2,
+        "setosa",
+        "versicolor",
+        "virginica",
+        "avg_pre",
+        "avg_rec",
+        "avg_spe",
+        "avg_f1",
+        "avg_geo",
+        "avg_iba",
+        "total_support",
+    }
+    expected_inner_keys = {"spe", "f1", "sup", "rec", "geo", "iba", "pre"}
+
+    assert outer_keys == expected_outer_keys
+    assert inner_keys == expected_inner_keys
+
+
+def test_classification_report_imbalanced_dict_without_target_names():
+    iris = datasets.load_iris()
+    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
+    report = classification_report_imbalanced(
+        y_true,
+        y_pred,
+        labels=np.arange(len(iris.target_names)),
+        output_dict=True,
+    )
+    outer_keys = set(report.keys())
+    inner_keys = set(report["0"].keys())
+
+    expected_outer_keys = {
+        "0",
+        "1",
+        "2",
         "avg_pre",
         "avg_rec",
         "avg_spe",