Dependent features #274
```diff
@@ -17,6 +17,7 @@ def __init__(
         strategies: list,
         variants: Optional[Variants] = None,
         impression_data: bool = False,
+        dependencies: list = None,
     ) -> None:
         """
         A representation of a feature object
```
```diff
@@ -44,6 +45,12 @@ def __init__(
         # Whether the feature exists only for tracking metrics or not.
         self.only_for_metrics = False

+        # Prerequisite state of other features that this feature depends on
+        self.dependencies = [
+            dict(dependency, enabled=dependency.get("enabled", True))
+            for dependency in dependencies or []
+        ]
+
     def reset_stats(self) -> None:
         """
         Resets stats after metrics reporting
```
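The list comprehension above normalizes each dependency so that the `enabled` key always has a value, defaulting to `True` when the payload omits it, while preserving any other keys. A quick illustration of that behavior; the `"feature"` key is only an assumed example of an extra key being carried through:

```python
dependencies = [
    {"feature": "parentFlag"},                   # no "enabled" given
    {"feature": "otherFlag", "enabled": False},  # explicit value is kept
]

normalized = [
    dict(dependency, enabled=dependency.get("enabled", True))
    for dependency in dependencies or []
]

print(normalized)
# [{'feature': 'parentFlag', 'enabled': True},
#  {'feature': 'otherFlag', 'enabled': False}]
```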
```diff
@@ -75,20 +82,20 @@ def _count_variant(self, variant_name: str) -> None:
         """
         self.variant_counts[variant_name] = self.variant_counts.get(variant_name, 0) + 1

-    def is_enabled(self, context: dict = None) -> bool:
+    def is_enabled(self, context: dict = None, skip_stats: bool = False) -> bool:
         """
         Checks if feature is enabled.

         :param context: Context information
         :return:
         """
-        evaluation_result = self._get_evaluation_result(context)
+        evaluation_result = self._get_evaluation_result(context, skip_stats)

         flag_value = evaluation_result.enabled

         return flag_value

-    def get_variant(self, context: dict = None) -> dict:
+    def get_variant(self, context: dict = None, skip_stats: bool = False) -> dict:
         """
         Checks if feature is enabled and, if so, get the variant.

```

Review discussion on the new `skip_stats` parameter:

"Is there a more elegant way to do it? Without passing"

"Can't think of a way without rearchitecting this a little. It should be possible to move the count-metrics methods to public ones and only call those in `is_enabled` in the `UnleashClient`, but I'm not sure that's worth it. This is all internal at the moment, and when Yggdrasil arrives here this will go away anyway."

"I'm okay with this as it stands. I think we can do better, but I also don't think it's worth it to do so. I think this is a reasonable tradeoff."
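For context, a rough sketch of how a caller could combine `self.dependencies` with the new `skip_stats` flag to check prerequisites without inflating the parents' metrics. This helper is hypothetical and not part of the diff: the function name, the `features` registry, and the `"feature"` lookup key are assumptions; only the `"enabled"` key and `is_enabled(..., skip_stats=...)` come from the changes shown here.

```python
def dependencies_are_satisfied(features: dict, feature) -> bool:
    """Hypothetical helper: evaluate every prerequisite without recording stats."""
    for dependency in feature.dependencies:
        parent = features.get(dependency["feature"])  # "feature" key is an assumption
        if parent is None:
            return False
        # skip_stats=True leaves the parent's metrics counters untouched
        if parent.is_enabled(skip_stats=True) != dependency["enabled"]:
            return False
    return True
```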
```diff
@@ -109,11 +116,13 @@ def get_variant(self, context: dict = None) -> dict:

             except Exception as variant_exception:
                 LOGGER.warning("Error selecting variant: %s", variant_exception)

-        self._count_variant(cast(str, variant["name"]))
+        if not skip_stats:
+            self._count_variant(cast(str, variant["name"]))
         return variant

-    def _get_evaluation_result(self, context: dict = None) -> EvaluationResult:
+    def _get_evaluation_result(
+        self, context: dict = None, skip_stats: bool = False
+    ) -> EvaluationResult:
         strategy_result = EvaluationResult(False, None)
         if self.enabled:
             try:
```
```diff
@@ -131,7 +140,8 @@ def _get_evaluation_result(self, context: dict = None) -> EvaluationResult:
             except Exception as evaluation_except:
                 LOGGER.warning("Error getting evaluation result: %s", evaluation_except)

-        self.increment_stats(strategy_result.enabled)
+        if not skip_stats:
+            self.increment_stats(strategy_result.enabled)
         LOGGER.info("%s evaluation result: %s", self.name, strategy_result)
         return strategy_result
```
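Taken together, the two guards mean a caller can evaluate a flag without it contributing to the metrics payload. A small usage sketch, assuming `feature` is an already-constructed `Feature` instance; `variant_counts` is the attribute visible earlier in this diff:

```python
# Evaluation only: increment_stats and _count_variant are both skipped.
feature.is_enabled(context={}, skip_stats=True)
feature.get_variant(context={}, skip_stats=True)
print(feature.variant_counts)  # expected to be unchanged

# Normal call paths still record metrics as before.
feature.get_variant(context={})
print(feature.variant_counts)  # now expected to include a count for the selected variant
```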
Review discussion on the `LOGGER.info` call:

"In Node we have a mechanism to only log this message once. I'm assuming we don't do the same thing in the Python SDK."

"Not a thing in the Python SDK at the moment, unfortunately. Not against adding it, but that should probably be a different PR."
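For reference, a minimal sketch of what a "log once" guard could look like if it were added in a follow-up PR; nothing like this exists in the Python SDK today, and all names here are made up:

```python
import logging

LOGGER = logging.getLogger(__name__)
_seen_log_keys: set = set()


def log_info_once(key: str, message: str, *args) -> None:
    """Hypothetical helper: emit an info-level log only the first time a key is seen."""
    if key not in _seen_log_keys:
        _seen_log_keys.add(key)
        LOGGER.info(message, *args)
```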