diff --git a/test/test/conftest.py b/test/test/conftest.py
index 8313c9693..74655d526 100644
--- a/test/test/conftest.py
+++ b/test/test/conftest.py
@@ -1,7 +1,7 @@
 import functools
 import logging
 import operator
-from typing import List, Set
+from typing import Any, List, Mapping, Optional, Sequence, Set
 
 import numpy as np
 import pandas as pd
@@ -28,7 +28,6 @@
 )
 
 import facet
-from .facet import STEP_IMPUTE, STEP_ONE_HOT_ENCODE
 from facet.crossfit import LearnerCrossfit
 from facet.data import Sample
 from facet.inspection import LearnerInspector, TreeExplainerFactory
@@ -51,6 +50,9 @@
 K_FOLDS = 5
 N_BOOTSTRAPS = 30
 
+STEP_IMPUTE = "impute"
+STEP_ONE_HOT_ENCODE = "one-hot-encode"
+
 
 @pytest.fixture
 def boston_target() -> str:
@@ -299,3 +301,46 @@ def iris_sample_binary_dual_target(
         iris_sample_binary.features.join(target).join(target.rename(iris_target_2)),
         target_name=[iris_sample_binary.target_name, iris_target_2],
     )
+
+
+def check_ranking(
+    ranking: List[LearnerEvaluation],
+    expected_scores: Sequence[float],
+    expected_learners: Optional[Sequence[type]],
+    expected_parameters: Optional[Mapping[int, Mapping[str, Any]]],
+) -> None:
+    """
+    Test helper to check rankings produced by learner rankers
+
+    :param ranking: a list of LearnerEvaluations
+    :param expected_scores: expected ranking scores, rounded to 3 decimal places
+    :param expected_learners: expected learner classes
+    :param expected_parameters: expected learner parameters
+    :return: None
+    """
+
+    if expected_learners is None:
+        expected_learners = [None] * len(ranking)
+
+    for rank, (learner_eval, score_expected, learner_expected) in enumerate(
+        zip(ranking, expected_scores, expected_learners)
+    ):
+        score_actual = round(learner_eval.ranking_score, 3)
+        assert score_actual == pytest.approx(score_expected, abs=0.1), (
+            f"unexpected score for learner at rank #{rank + 1}: "
+            f"got {score_actual} but expected {score_expected}"
+        )
+        if learner_expected is not None:
+            learner_actual = learner_eval.pipeline.final_estimator
+            assert type(learner_actual) == learner_expected, (
+                f"unexpected class for learner at rank #{rank}: "
+                f"got {type(learner_actual)} but expected {learner_expected}"
+            )
+
+    if expected_parameters is not None:
+        for rank, parameters_expected in expected_parameters.items():
+            parameters_actual = ranking[rank].parameters
+            assert parameters_actual == parameters_expected, (
+                f"unexpected parameters for learner at rank #{rank}: "
+                f"got {parameters_actual} but expected {parameters_expected}"
+            )
diff --git a/test/test/facet/__init__.py b/test/test/facet/__init__.py
index 8e19d48b0..e69de29bb 100644
--- a/test/test/facet/__init__.py
+++ b/test/test/facet/__init__.py
@@ -1,51 +0,0 @@
-from typing import Any, List, Mapping, Optional, Sequence
-
-import pytest
-
-from facet.selection import LearnerEvaluation
-
-STEP_IMPUTE = "impute"
-STEP_ONE_HOT_ENCODE = "one-hot-encode"
-
-
-def check_ranking(
-    ranking: List[LearnerEvaluation],
-    expected_scores: Sequence[float],
-    expected_learners: Optional[Sequence[type]],
-    expected_parameters: Optional[Mapping[int, Mapping[str, Any]]],
-) -> None:
-    """
-    Test helper to check rankings produced by learner rankers
-
-    :param ranking: a list of LearnerEvaluations
-    :param expected_scores: expected ranking scores, rounded to 3 decimal places
-    :param expected_learners: expected learner classes
-    :param expected_parameters: expected learner parameters
-    :return: None
-    """
-
-    if expected_learners is None:
-        expected_learners = [None] * len(ranking)
-
-    for rank, (learner_eval, score_expected, learner_expected) in enumerate(
-        zip(ranking, expected_scores, expected_learners)
-    ):
-        score_actual = round(learner_eval.ranking_score, 3)
-        assert score_actual == pytest.approx(score_expected, abs=0.1), (
-            f"unexpected score for learner at rank #{rank + 1}: "
-            f"got {score_actual} but expected {score_expected}"
-        )
-        if learner_expected is not None:
-            learner_actual = learner_eval.pipeline.final_estimator
-            assert type(learner_actual) == learner_expected, (
-                f"unexpected class for learner at rank #{rank}: "
-                f"got {type(learner_actual)} but expected {learner_expected}"
-            )
-
-    if expected_parameters is not None:
-        for rank, parameters_expected in expected_parameters.items():
-            parameters_actual = ranking[rank].parameters
-            assert parameters_actual == parameters_expected, (
-                f"unexpected parameters for learner at rank #{rank}: "
-                f"got {parameters_actual} but expected {parameters_expected}"
-            )
diff --git a/test/test/facet/test_crossfit.py b/test/test/facet/test_crossfit.py
index 108a3b5cd..81c96dc8e 100644
--- a/test/test/facet/test_crossfit.py
+++ b/test/test/facet/test_crossfit.py
@@ -6,7 +6,7 @@
 from sklearndf.pipeline import ClassifierPipelineDF, RegressorPipelineDF
 from sklearndf.regression import RandomForestRegressorDF
 
-from . import check_ranking
+from ..conftest import check_ranking
 from facet.data import Sample
 from facet.selection import LearnerGrid, LearnerRanker
 from facet.validation import StratifiedBootstrapCV
diff --git a/test/test/facet/test_inspection.py b/test/test/facet/test_inspection.py
index afa6c8389..908532124 100644
--- a/test/test/facet/test_inspection.py
+++ b/test/test/facet/test_inspection.py
@@ -20,7 +20,7 @@
 )
 from sklearndf.pipeline import ClassifierPipelineDF, RegressorPipelineDF
 
-from . import check_ranking
+from ..conftest import check_ranking
 from facet.crossfit import LearnerCrossfit
 from facet.data import Sample
 from facet.inspection import (
diff --git a/test/test/facet/test_selection.py b/test/test/facet/test_selection.py
index 3630e7667..94584d32e 100644
--- a/test/test/facet/test_selection.py
+++ b/test/test/facet/test_selection.py
@@ -19,7 +19,7 @@
 )
 from sklearndf.regression.extra import LGBMRegressorDF
 
-from . import check_ranking
+from ..conftest import check_ranking
 from facet.crossfit import LearnerCrossfit
 from facet.data import Sample
 from facet.selection import LearnerEvaluation, LearnerGrid, LearnerRanker
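
Note: with check_ranking relocated to test/test/conftest.py, test modules import it via "from ..conftest import check_ranking", as the three test-file hunks above show. The sketch below is a rough illustration of how a caller would use the helper after this move; the regressor_ranker fixture, the ranking_ attribute, and all expected values are assumptions for illustration, not part of this diff.

    # Minimal sketch of calling the relocated helper from a test module,
    # e.g. test/test/facet/test_selection.py.
    # Hypothetical: fixture name, ranker attribute, and expected values.
    from ..conftest import check_ranking


    def test_regressor_ranking(regressor_ranker) -> None:
        # assumed: the fixture yields a fitted ranker whose ranking is a
        # list of LearnerEvaluation objects, as check_ranking expects
        ranking = regressor_ranker.ranking_
        check_ranking(
            ranking=ranking,
            expected_scores=[0.745, 0.742, 0.735],  # illustrative values only
            expected_learners=None,  # pass None to skip the learner-class check
            expected_parameters={0: {"n_estimators": 80}},  # only rank 0 checked
        )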