move definitions from test/facet/__init__ to test/conftest (#121)
j-ittner authored Oct 20, 2020
1 parent de356d7 commit ba58c8e
Showing 5 changed files with 50 additions and 56 deletions.
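For context on the mechanism this commit relies on: pytest treats conftest.py as a per-directory plugin, so fixtures defined there are injected into tests by parameter name with no import at all, while plain helpers such as check_ranking still need an explicit import (hence the ..conftest imports in the test modules below). A minimal sketch of the fixture side, using the boston_target fixture visible in the first diff; the test module and its assertion are hypothetical:

# test/test/facet/test_example.py -- hypothetical module
def test_boston_target(boston_target: str) -> None:
    # pytest resolves `boston_target` against the fixture of the same name
    # in test/test/conftest.py; conftest is never imported for fixtures
    assert isinstance(boston_target, str)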
49 changes: 47 additions & 2 deletions test/test/conftest.py
@@ -1,7 +1,7 @@
import functools
import logging
import operator
-from typing import List, Set
+from typing import Any, List, Mapping, Optional, Sequence, Set

import numpy as np
import pandas as pd
@@ -28,7 +28,6 @@
)

import facet
-from .facet import STEP_IMPUTE, STEP_ONE_HOT_ENCODE
from facet.crossfit import LearnerCrossfit
from facet.data import Sample
from facet.inspection import LearnerInspector, TreeExplainerFactory
@@ -51,6 +50,9 @@
K_FOLDS = 5
N_BOOTSTRAPS = 30

+STEP_IMPUTE = "impute"
+STEP_ONE_HOT_ENCODE = "one-hot-encode"
+

@pytest.fixture
def boston_target() -> str:
@@ -299,3 +301,46 @@ def iris_sample_binary_dual_target(
        iris_sample_binary.features.join(target).join(target.rename(iris_target_2)),
        target_name=[iris_sample_binary.target_name, iris_target_2],
    )
+
+
+def check_ranking(
+    ranking: List[LearnerEvaluation],
+    expected_scores: Sequence[float],
+    expected_learners: Optional[Sequence[type]],
+    expected_parameters: Optional[Mapping[int, Mapping[str, Any]]],
+) -> None:
+    """
+    Test helper to check rankings produced by learner rankers
+    :param ranking: a list of LearnerEvaluations
+    :param expected_scores: expected ranking scores, rounded to 3 decimal places
+    :param expected_learners: expected learner classes
+    :param expected_parameters: expected learner parameters
+    :return: None
+    """
+
+    if expected_learners is None:
+        expected_learners = [None] * len(ranking)
+
+    for rank, (learner_eval, score_expected, learner_expected) in enumerate(
+        zip(ranking, expected_scores, expected_learners)
+    ):
+        score_actual = round(learner_eval.ranking_score, 3)
+        assert score_actual == pytest.approx(score_expected, abs=0.1), (
+            f"unexpected score for learner at rank #{rank + 1}: "
+            f"got {score_actual} but expected {score_expected}"
+        )
+        if learner_expected is not None:
+            learner_actual = learner_eval.pipeline.final_estimator
+            assert type(learner_actual) == learner_expected, (
+                f"unexpected class for learner at rank #{rank}: "
+                f"got {type(learner_actual)} but expected {learner_expected}"
+            )
+
+    if expected_parameters is not None:
+        for rank, parameters_expected in expected_parameters.items():
+            parameters_actual = ranking[rank].parameters
+            assert parameters_actual == parameters_expected, (
+                f"unexpected parameters for learner at rank #{rank}: "
+                f"got {parameters_actual} but expected {parameters_expected}"
+            )
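To illustrate how the relocated helper is meant to be called, here is a minimal sketch of an invocation; the fixture name and all expected values are hypothetical stand-ins for what a LearnerRanker run would produce:

# hypothetical test module under test/test/facet/
from ..conftest import check_ranking

def test_ranking_smoke(regressor_ranking) -> None:
    # `regressor_ranking` stands in for a fixture yielding the
    # List[LearnerEvaluation] produced by a LearnerRanker run
    check_ranking(
        ranking=regressor_ranking,
        expected_scores=(0.745, 0.742, 0.735),  # illustrative scores
        expected_learners=None,  # None skips the learner-class checks
        expected_parameters={0: {"n_estimators": 80}},  # illustrative params
    )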
51 changes: 0 additions & 51 deletions test/test/facet/__init__.py
@@ -1,51 +0,0 @@
-from typing import Any, List, Mapping, Optional, Sequence
-
-import pytest
-
-from facet.selection import LearnerEvaluation
-
-STEP_IMPUTE = "impute"
-STEP_ONE_HOT_ENCODE = "one-hot-encode"
-
-
-def check_ranking(
-    ranking: List[LearnerEvaluation],
-    expected_scores: Sequence[float],
-    expected_learners: Optional[Sequence[type]],
-    expected_parameters: Optional[Mapping[int, Mapping[str, Any]]],
-) -> None:
-    """
-    Test helper to check rankings produced by learner rankers
-    :param ranking: a list of LearnerEvaluations
-    :param expected_scores: expected ranking scores, rounded to 3 decimal places
-    :param expected_learners: expected learner classes
-    :param expected_parameters: expected learner parameters
-    :return: None
-    """
-
-    if expected_learners is None:
-        expected_learners = [None] * len(ranking)
-
-    for rank, (learner_eval, score_expected, learner_expected) in enumerate(
-        zip(ranking, expected_scores, expected_learners)
-    ):
-        score_actual = round(learner_eval.ranking_score, 3)
-        assert score_actual == pytest.approx(score_expected, abs=0.1), (
-            f"unexpected score for learner at rank #{rank + 1}: "
-            f"got {score_actual} but expected {score_expected}"
-        )
-        if learner_expected is not None:
-            learner_actual = learner_eval.pipeline.final_estimator
-            assert type(learner_actual) == learner_expected, (
-                f"unexpected class for learner at rank #{rank}: "
-                f"got {type(learner_actual)} but expected {learner_expected}"
-            )
-
-    if expected_parameters is not None:
-        for rank, parameters_expected in expected_parameters.items():
-            parameters_actual = ranking[rank].parameters
-            assert parameters_actual == parameters_expected, (
-                f"unexpected parameters for learner at rank #{rank}: "
-                f"got {parameters_actual} but expected {parameters_expected}"
-            )
2 changes: 1 addition & 1 deletion test/test/facet/test_crossfit.py
@@ -6,7 +6,7 @@
from sklearndf.pipeline import ClassifierPipelineDF, RegressorPipelineDF
from sklearndf.regression import RandomForestRegressorDF

-from . import check_ranking
+from ..conftest import check_ranking
from facet.data import Sample
from facet.selection import LearnerGrid, LearnerRanker
from facet.validation import StratifiedBootstrapCV
2 changes: 1 addition & 1 deletion test/test/facet/test_inspection.py
@@ -20,7 +20,7 @@
)
from sklearndf.pipeline import ClassifierPipelineDF, RegressorPipelineDF

-from . import check_ranking
+from ..conftest import check_ranking
from facet.crossfit import LearnerCrossfit
from facet.data import Sample
from facet.inspection import (
2 changes: 1 addition & 1 deletion test/test/facet/test_selection.py
@@ -19,7 +19,7 @@
)
from sklearndf.regression.extra import LGBMRegressorDF

-from . import check_ranking
+from ..conftest import check_ranking
from facet.crossfit import LearnerCrossfit
from facet.data import Sample
from facet.selection import LearnerEvaluation, LearnerGrid, LearnerRanker
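Note that all three test modules change in the same one-line way: check_ranking is an ordinary function rather than a fixture, so moving it into conftest.py does not make it ambient, and each module must import it explicitly via the relative path ..conftest. A degenerate sketch of both mechanisms side by side (the test itself is hypothetical; an empty ranking makes every check vacuous):

from ..conftest import check_ranking  # plain helper: explicit import required

def test_call_shapes(iris_sample_binary) -> None:  # fixture: injected by pytest
    check_ranking(
        ranking=[],
        expected_scores=(),
        expected_learners=None,
        expected_parameters=None,
    )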
