diff --git a/.isort.cfg b/.isort.cfg index e569eb6fbd55..ffd43ce6d11d 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -4,4 +4,4 @@ multi_line_output=3 include_trailing_comma=true line_length=79 known_first_party=analytics,app,custom_auth,environments,integrations,organisations,projects,segments,users,webhooks,api,audit,e2etests,features,permissions,util -known_third_party=apiclient,app_analytics,axes,chargebee,core,coreapi,corsheaders,dj_database_url,django,djoser,drf_writable_nested,drf_yasg2,environs,google,influxdb_client,ordered_model,pyotp,pytest,pytz,requests,responses,rest_framework,rest_framework_nested,rest_framework_recursive,sentry_sdk,shortuuid,simple_history,six,telemetry,tests,trench,whitenoise +known_third_party=apiclient,app_analytics,axes,chargebee,core,coreapi,corsheaders,dj_database_url,django,django_lifecycle,djoser,drf_writable_nested,drf_yasg2,environs,google,influxdb_client,ordered_model,pyotp,pytest,pytz,requests,responses,rest_framework,rest_framework_nested,rest_framework_recursive,sentry_sdk,shortuuid,simple_history,six,telemetry,tests,trench,whitenoise diff --git a/requirements.in b/requirements.in index 7cc3d6b07cdb..328fd92cbb12 100644 --- a/requirements.in +++ b/requirements.in @@ -35,6 +35,7 @@ django-debug-toolbar==3.1.1 sentry-sdk==0.19.4 environs==9.2.0 analytics-python +django-lifecycle drf-writable-nested django-filter dataclasses \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 4c83da834b8f..0f8098eab396 100644 --- a/requirements.txt +++ b/requirements.txt @@ -53,6 +53,8 @@ django-health-check==3.14.3 # via -r requirements.in django-ipware==3.0.2 # via django-axes +django-lifecycle==0.9.0 + # via -r requirements.in django-ordered-model==3.4.1 # via -r requirements.in django-ses==1.0.3 @@ -249,6 +251,8 @@ urllib3==1.25.11 # influxdb-client # requests # sentry-sdk +urlman==1.4.0 + # via django-lifecycle whitenoise==3.3.1 # via -r requirements.in yubico-client==1.13.0 diff --git a/src/app/settings/common.py b/src/app/settings/common.py index 5c5a6b238597..7b7a4ab13b47 100644 --- a/src/app/settings/common.py +++ b/src/app/settings/common.py @@ -106,6 +106,7 @@ "environments.identities", "environments.identities.traits", "features", + "features.multivariate", "segments", "e2etests", "simple_history", diff --git a/src/environments/identities/helpers.py b/src/environments/identities/helpers.py index ac1febfd1f94..8ebfcdff0baa 100644 --- a/src/environments/identities/helpers.py +++ b/src/environments/identities/helpers.py @@ -1,3 +1,7 @@ +import hashlib +import itertools +import typing + from integrations.amplitude.amplitude import AmplitudeWrapper from integrations.heap.heap import HeapWrapper from integrations.mixpanel.mixpanel import MixpanelWrapper @@ -22,3 +26,32 @@ def identify_integrations(identity, all_feature_states): user_id=identity.identifier, feature_states=all_feature_states ) wrapper_instance.identify_user_async(data=user_data) + + +def get_hashed_percentage_for_object_ids( + object_ids: typing.Iterable[int], iterations: int = 1 +) -> float: + """ + Given a list of object ids, get a floating point number between 0 and 1 based on + the hash of those ids. This should give the same value every time for any + list of ids. 
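    For example (return value illustrative), get_hashed_percentage_for_object_ids([12, 93])
    yields the same float in the range [0, 1) on every call for that pair of ids.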
+ + :param object_ids: list of object ids to calculate the has for + :param iterations: num times to include each id in the generated string to hash + :return: (float) number between 0 (inclusive) and 1 (exclusive) + """ + + to_hash = ",".join(str(id_) for id_ in list(object_ids) * iterations) + hashed_value = hashlib.md5(to_hash.encode("utf-8")) + hashed_value_as_int = int(hashed_value.hexdigest(), base=16) + value = (hashed_value_as_int % 9999) / 9998 + + if value == 1: + # since we want a number between 0 (inclusive) and 1 (exclusive), in the + # unlikely case that we get the exact number 1, we call the method again + # and increase the number of iterations to ensure we get a different result + return get_hashed_percentage_for_object_ids( + object_ids=object_ids, iterations=iterations + 1 + ) + + return value diff --git a/src/environments/identities/models.py b/src/environments/identities/models.py index 9e91e4e16b56..24acef1d9dfb 100644 --- a/src/environments/identities/models.py +++ b/src/environments/identities/models.py @@ -1,12 +1,14 @@ +import hashlib import typing from django.db import models -from django.db.models import Q +from django.db.models import Q, Prefetch from django.utils.encoding import python_2_unicode_compatible from environments.models import Environment from environments.identities.traits.models import Trait from features.models import FeatureState +from features.multivariate.models import MultivariateFeatureStateValue @python_2_unicode_compatible @@ -61,11 +63,20 @@ def get_all_feature_states(self, traits: typing.List[Trait] = None): "feature_state_value", "feature_segment", "feature_segment__segment", + "identity", ] - # When Project's hide_disabled_flags enabled, exclude disabled Features from the list - all_flags = FeatureState.objects.select_related(*select_related_args).filter( - full_query + all_flags = ( + FeatureState.objects.select_related(*select_related_args) + .prefetch_related( + Prefetch( + "multivariate_feature_state_values", + queryset=MultivariateFeatureStateValue.objects.select_related( + "multivariate_feature_option" + ), + ) + ) + .filter(full_query) ) # iterate over all the flags and build a dictionary keyed on feature with the highest priority flag diff --git a/src/environments/identities/tests/test_helpers.py b/src/environments/identities/tests/test_helpers.py index 2e32ef2cdda8..d35c1352eff2 100644 --- a/src/environments/identities/tests/test_helpers.py +++ b/src/environments/identities/tests/test_helpers.py @@ -1,9 +1,15 @@ +import hashlib +import itertools +import typing from unittest import mock from unittest.case import TestCase import pytest -from environments.identities.helpers import identify_integrations +from environments.identities.helpers import ( + identify_integrations, + get_hashed_percentage_for_object_ids, +) from environments.identities.models import Identity from environments.models import Environment from integrations.amplitude.models import AmplitudeConfiguration @@ -75,3 +81,121 @@ def test_identify_integrations_segment_and_amplitude_called( mock_segment_wrapper.assert_called() mock_amplitude_wrapper.assert_called() + + +def test_get_hashed_percentage_for_object_ids_is_number_between_0_inc_and_1_exc(): + assert 1 > get_hashed_percentage_for_object_ids([12, 93]) >= 0 + + +def test_get_hashed_percentage_for_object_ids_is_the_same_each_time(): + # Given + object_ids = [30, 73] + + # When + result_1 = get_hashed_percentage_for_object_ids(object_ids) + result_2 = get_hashed_percentage_for_object_ids(object_ids) + + # Then + 
assert result_1 == result_2 + + +def test_percentage_value_is_unique_for_different_identities(): + # Given + first_object_ids = [14, 106] + second_object_ids = [53, 200] + + # When + result_1 = get_hashed_percentage_for_object_ids(first_object_ids) + result_2 = get_hashed_percentage_for_object_ids(second_object_ids) + + # Then + assert result_1 != result_2 + + +def test_get_hashed_percentage_for_object_ids_should_be_evenly_distributed(): + """ + This test checks if the percentage value returned by the helper function returns + evenly distributed values. + + Note that since it's technically random, it's not guaranteed to pass every time, + however, it should pass 99/100 times. It will likely be more accurate by increasing + the test_sample value and / or decreasing the num_test_buckets value. + """ + test_sample = 500 # number of ids to sample in each list + num_test_buckets = 50 # split the sample into 'buckets' to check that the values are evenly distributed + test_bucket_size = int(test_sample / num_test_buckets) + error_factor = 0.1 + + # Given + object_id_pairs = itertools.product(range(test_sample), range(test_sample)) + + # When + values = sorted( + get_hashed_percentage_for_object_ids(pair) for pair in object_id_pairs + ) + + # Then + for i in range(num_test_buckets): + bucket_start = i * test_bucket_size + bucket_end = (i + 1) * test_bucket_size + bucket_value_limit = min( + (i + 1) / num_test_buckets + error_factor * ((i + 1) / num_test_buckets), + 1, + ) + + assert all( + [value <= bucket_value_limit for value in values[bucket_start:bucket_end]] + ) + + +@mock.patch("environments.identities.helpers.hashlib") +def test_get_hashed_percentage_does_not_return_1(mock_hashlib): + """ + Quite complex test to ensure that the function will never return 1. + + To achieve this, we mock the hashlib module to return a magic mock so that we can + subsequently mock the hexdigest method to return known strings. These strings are + chosen such that they can be converted (via `int(s, base=16)`) to known integers. + """ + + # Given + object_ids = [12, 93] + + # -- SETTING UP THE MOCKS -- + # hash strings specifically created to return specific values when converted to + # integers via int(s, base=16). Note that the reverse function was created + # courtesy of https://code.i-harness.com/en/q/1f7c41 + hash_string_to_return_1 = "270e" + hash_string_to_return_0 = "270f" + hashed_values = [hash_string_to_return_0, hash_string_to_return_1] + + def hexdigest_side_effect(): + return hashed_values.pop() + + mock_hash = mock.MagicMock() + mock_hashlib.md5.return_value = mock_hash + + mock_hash.hexdigest.side_effect = hexdigest_side_effect + + # -- FINISH SETTING UP THE MOCKS -- + + # When + # we get the hashed percentage value for the given object ids + value = get_hashed_percentage_for_object_ids(object_ids) + + # Then + # The value is 0 as defined by the mock data + assert value == 0 + + # and the md5 function was called twice + # (i.e. 
the get_hashed_percentage_for_object_ids function was also called twice) + call_list = mock_hashlib.md5.call_args_list + assert len(call_list) == 2 + + # the first call, with a string (in bytes) that contains each object id once + expected_bytes_1 = ",".join(str(id_) for id_ in object_ids).encode("utf-8") + assert call_list[0][0][0] == expected_bytes_1 + + # the second call, with a string (in bytes) that contains each object id twice + expected_bytes_2 = ",".join(str(id_) for id_ in object_ids * 2).encode("utf-8") + assert call_list[1][0][0] == expected_bytes_2 diff --git a/src/environments/identities/tests/test_models.py b/src/environments/identities/tests/test_models.py index 4c5ae1d02f70..e8214a2dec83 100644 --- a/src/environments/identities/tests/test_models.py +++ b/src/environments/identities/tests/test_models.py @@ -4,7 +4,7 @@ from environments.identities.traits.models import Trait from environments.models import FLOAT, Environment from features.models import Feature, FeatureSegment, FeatureState, FeatureStateValue -from features.utils import BOOLEAN, INTEGER, STRING +from features.value_types import INTEGER, STRING, BOOLEAN from organisations.models import Organisation from projects.models import Project from segments.models import ( diff --git a/src/environments/identities/tests/test_views.py b/src/environments/identities/tests/test_views.py index 3aae7515e25a..e11b9c8a97a2 100644 --- a/src/environments/identities/tests/test_views.py +++ b/src/environments/identities/tests/test_views.py @@ -7,6 +7,7 @@ from rest_framework import status from rest_framework.test import APIClient, APITestCase +from environments.identities.helpers import get_hashed_percentage_for_object_ids from environments.identities.models import Identity from environments.identities.traits.models import Trait from environments.models import Environment @@ -477,7 +478,9 @@ def test_identities_endpoint_returns_value_for_segment_if_rule_type_percentage_s segment=segment, type=SegmentRule.ALL_RULE ) - identity_percentage_value = segment.get_identity_percentage_value(self.identity) + identity_percentage_value = get_hashed_percentage_for_object_ids( + [segment.id, self.identity.id] + ) Condition.objects.create( operator=models.PERCENTAGE_SPLIT, value=(identity_percentage_value + (1 - identity_percentage_value) / 2) @@ -522,7 +525,9 @@ def test_identities_endpoint_returns_default_value_if_rule_type_percentage_split segment=segment, type=SegmentRule.ALL_RULE ) - identity_percentage_value = segment.get_identity_percentage_value(self.identity) + identity_percentage_value = get_hashed_percentage_for_object_ids( + [segment.id, self.identity.id] + ) Condition.objects.create( operator=models.PERCENTAGE_SPLIT, value=identity_percentage_value / 2, diff --git a/src/environments/identities/traits/constants.py b/src/environments/identities/traits/constants.py index dd0552fb30d1..aab663e3207d 100644 --- a/src/environments/identities/traits/constants.py +++ b/src/environments/identities/traits/constants.py @@ -1,3 +1,3 @@ -from features.utils import INTEGER, STRING, BOOLEAN, FLOAT +from features.value_types import INTEGER, STRING, BOOLEAN, FLOAT ACCEPTED_TRAIT_VALUE_TYPES = [INTEGER, STRING, BOOLEAN, FLOAT] diff --git a/src/environments/identities/traits/fields.py b/src/environments/identities/traits/fields.py index 663d8cd5f1db..8785523d5b9e 100644 --- a/src/environments/identities/traits/fields.py +++ b/src/environments/identities/traits/fields.py @@ -1,7 +1,7 @@ from rest_framework import serializers from 
environments.identities.traits.constants import ACCEPTED_TRAIT_VALUE_TYPES -from features.utils import STRING +from features.value_types import STRING import logging logger = logging.getLogger(__name__) diff --git a/src/environments/identities/views.py b/src/environments/identities/views.py index 6e209a88a101..dfb4b7f8d803 100644 --- a/src/environments/identities/views.py +++ b/src/environments/identities/views.py @@ -160,13 +160,18 @@ def post(self, request): # we need to serialize the response again to ensure that the # trait values are serialized correctly - response_serializer = IdentifyWithTraitsSerializer(instance=instance) + response_serializer = IdentifyWithTraitsSerializer( + instance=instance, + context={"identity": instance.get("identity")}, # todo: improve this + ) return Response(response_serializer.data) def _get_single_feature_state_response(self, identity, feature_name): for feature_state in identity.get_all_feature_states(): if feature_state.feature.name == feature_name: - serializer = FeatureStateSerializerFull(feature_state) + serializer = FeatureStateSerializerFull( + feature_state, context={"identity": identity} + ) return Response(data=serializer.data, status=status.HTTP_200_OK) return Response( @@ -182,7 +187,9 @@ def _get_all_feature_states_for_user_response(self, identity, trait_models=None) :return: Response containing lists of both serialized flags and traits """ all_feature_states = identity.get_all_feature_states() - serialized_flags = FeatureStateSerializerFull(all_feature_states, many=True) + serialized_flags = FeatureStateSerializerFull( + all_feature_states, many=True, context={"identity": identity} + ) serialized_traits = TraitSerializerBasic( identity.identity_traits.all(), many=True ) diff --git a/src/environments/models.py b/src/environments/models.py index 6a2bb2ff214e..20801af464e6 100644 --- a/src/environments/models.py +++ b/src/environments/models.py @@ -6,6 +6,7 @@ from django.db import models from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ +from django_lifecycle import LifecycleModel, hook, BEFORE_SAVE, AFTER_CREATE from app.utils import create_hash from environments.exceptions import EnvironmentHeaderNotPresentError @@ -26,7 +27,7 @@ @python_2_unicode_compatible -class Environment(models.Model): +class Environment(LifecycleModel): name = models.CharField(max_length=2000) created_date = models.DateTimeField("DateCreated", auto_now_add=True) project = models.ForeignKey( @@ -47,34 +48,16 @@ class Environment(models.Model): class Meta: ordering = ["id"] - def save(self, *args, **kwargs): - """ - Override save method to initialise feature states for all features in new environment - """ - requires_feature_state_creation = True if not self.pk else False - if self.pk: - old_environment = Environment.objects.get(pk=self.pk) - if old_environment.project != self.project: - FeatureState.objects.filter( - feature__in=old_environment.project.features.values_list( - "pk", flat=True - ), - environment=self, - ).all().delete() - requires_feature_state_creation = True - - super(Environment, self).save(*args, **kwargs) - - if requires_feature_state_creation: - # also create feature states for all features in the project - features = self.project.features.all() - for feature in features: - FeatureState.objects.create( - feature=feature, - environment=self, - identity=None, - enabled=feature.default_enabled, - ) + @hook(AFTER_CREATE) + def create_feature_states(self): + features = 
self.project.features.all() + for feature in features: + FeatureState.objects.create( + feature=feature, + environment=self, + identity=None, + enabled=feature.default_enabled, + ) def __str__(self): return "Project %s - Environment %s" % (self.project.name, self.name) diff --git a/src/environments/serializers.py b/src/environments/serializers.py index 4ee4b44c6ab6..a65ed9e8f96e 100644 --- a/src/environments/serializers.py +++ b/src/environments/serializers.py @@ -24,7 +24,6 @@ class EnvironmentSerializerLight(serializers.ModelSerializer): class Meta: model = Environment fields = ("id", "name", "api_key", "project") - read_only_fields = ("api_key",) def create(self, validated_data): instance = super(EnvironmentSerializerLight, self).create(validated_data) diff --git a/src/features/admin.py b/src/features/admin.py index b20cbdf39a7c..0e5fcde6afc7 100644 --- a/src/features/admin.py +++ b/src/features/admin.py @@ -37,6 +37,7 @@ class FeatureAdmin(SimpleHistoryAdmin): "description", "tags__label", ) + readonly_fields = ("project",) class FeatureSegmentAdmin(admin.ModelAdmin): diff --git a/src/features/custom_lifecycle.py b/src/features/custom_lifecycle.py new file mode 100644 index 000000000000..75a64bee19a5 --- /dev/null +++ b/src/features/custom_lifecycle.py @@ -0,0 +1,43 @@ +import typing + +from django_lifecycle import LifecycleModelMixin, NotSet + + +class CustomLifecycleModelMixin(LifecycleModelMixin): + """ + Since we have an attribute named `initial_value` on the Feature model which + needs access to the LifecycleModel functionality, we need to define a custom + class here and override any methods that rely on the self.initial_value method + defined on the LifecycleModelMixin and replace the usage with the newly defined + lifecycle_initial_value method instead (which is just copy pasted from the mixin) + """ + + def lifecycle_initial_value(self, field_name: str) -> typing.Any: + """ + Get initial value of field when model was instantiated. 
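        Copied from LifecycleModelMixin.initial_value(). For example,
        self.lifecycle_initial_value("default_enabled") returns whatever value the
        field held when the instance was loaded, or None if it was never captured.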
+ """ + field_name = self._sanitize_field_name(field_name) + + if field_name in self._initial_state: + return self._initial_state[field_name] + + return None + + def _check_was_condition(self, field_name: str, specs: dict) -> bool: + return specs["was"] in (self.lifecycle_initial_value(field_name), "*") + + def _check_was_not_condition(self, field_name: str, specs: dict) -> bool: + was_not = specs["was_not"] + return was_not is NotSet or self.lifecycle_initial_value(field_name) != was_not + + def _check_changes_to_condition(self, field_name: str, specs: dict) -> bool: + changes_to = specs["changes_to"] + return any( + [ + changes_to is NotSet, + ( + self.lifecycle_initial_value(field_name) != changes_to + and self._current_value(field_name) == changes_to + ), + ] + ) diff --git a/src/features/feature_states/__init__.py b/src/features/feature_states/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/features/feature_states/models.py b/src/features/feature_states/models.py new file mode 100644 index 000000000000..c5fc748587c8 --- /dev/null +++ b/src/features/feature_states/models.py @@ -0,0 +1,29 @@ +import typing + +from django.db import models + +from features.value_types import INTEGER, STRING, BOOLEAN, FEATURE_STATE_VALUE_TYPES + + +class AbstractBaseFeatureValueModel(models.Model): + class Meta: + abstract = True + + type = models.CharField( + max_length=10, + choices=FEATURE_STATE_VALUE_TYPES, + default=STRING, + null=True, + blank=True, + ) + boolean_value = models.NullBooleanField(null=True, blank=True) + integer_value = models.IntegerField(null=True, blank=True) + string_value = models.CharField(null=True, max_length=2000, blank=True) + + @property + def value(self) -> typing.Union[str, int, bool]: + return { + INTEGER: self.integer_value, + STRING: self.string_value, + BOOLEAN: self.boolean_value, + }.get(self.type, self.string_value) diff --git a/src/features/feature_types.py b/src/features/feature_types.py new file mode 100644 index 000000000000..a6e54654a18a --- /dev/null +++ b/src/features/feature_types.py @@ -0,0 +1,7 @@ +MULTIVARIATE = "MULTIVARIATE" +STANDARD = "STANDARD" + +# the following two types have been merged in terms of functionality +# but kept for now until the FE is updated +CONFIG = "CONFIG" +FLAG = "FLAG" diff --git a/src/features/fields.py b/src/features/fields.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/features/helpers.py b/src/features/helpers.py index 1f8701dbd15a..ed95d1cb9094 100644 --- a/src/features/helpers.py +++ b/src/features/helpers.py @@ -1,6 +1,6 @@ import typing -from features.utils import BOOLEAN, INTEGER +from features.value_types import INTEGER, BOOLEAN def get_correctly_typed_value(value_type: str, string_value: str) -> typing.Any: diff --git a/src/features/migrations/0028_auto_20210216_1600.py b/src/features/migrations/0028_auto_20210216_1600.py new file mode 100644 index 000000000000..cafa709e746a --- /dev/null +++ b/src/features/migrations/0028_auto_20210216_1600.py @@ -0,0 +1,17 @@ +# Generated by Django 2.2.17 on 2021-02-16 16:00 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('features', '0027_merge_20210215_1059'), + ] + + operations = [ + migrations.AlterModelOptions( + name='feature', + options={}, + ), + ] diff --git a/src/features/migrations/0030_merge_20210305_1622.py b/src/features/migrations/0030_merge_20210305_1622.py new file mode 100644 index 000000000000..02c9b2102a09 --- /dev/null +++ 
b/src/features/migrations/0030_merge_20210305_1622.py @@ -0,0 +1,14 @@ +# Generated by Django 2.2.17 on 2021-03-05 16:22 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('features', '0028_auto_20210216_1600'), + ('features', '0029_auto_20210223_2106'), + ] + + operations = [ + ] diff --git a/src/features/migrations/0031_merge_20210409_1621.py b/src/features/migrations/0031_merge_20210409_1621.py new file mode 100644 index 000000000000..603ff068396e --- /dev/null +++ b/src/features/migrations/0031_merge_20210409_1621.py @@ -0,0 +1,13 @@ +# Generated by Django 2.2.17 on 2021-04-09 16:21 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("features", "0030_auto_20210401_1552"), + ("features", "0030_merge_20210305_1622"), + ] + + operations = [] diff --git a/src/features/models.py b/src/features/models.py index b4e246c78cad..fc56fb2779c2 100644 --- a/src/features/models.py +++ b/src/features/models.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import logging +import typing from django.core.exceptions import ( NON_FIELD_ERRORS, @@ -11,10 +12,24 @@ from django.db.models import Q, UniqueConstraint from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ +from django_lifecycle import ( + AFTER_CREATE, + AFTER_SAVE, + BEFORE_CREATE, + LifecycleModel, + hook, +) from ordered_model.models import OrderedModelBase from simple_history.models import HistoricalRecords +from environments.identities.helpers import ( + get_hashed_percentage_for_object_ids, +) +from features.custom_lifecycle import CustomLifecycleModelMixin +from features.feature_states.models import AbstractBaseFeatureValueModel +from features.feature_types import MULTIVARIATE from features.helpers import get_correctly_typed_value +from features.multivariate.models import MultivariateFeatureStateValue from features.tasks import trigger_feature_state_change_webhooks from features.utils import ( get_boolean_from_string, @@ -32,15 +47,12 @@ logger = logging.getLogger(__name__) -FEATURE_STATE_VALUE_TYPES = ( - (INTEGER, "Integer"), - (STRING, "String"), - (BOOLEAN, "Boolean"), -) +if typing.TYPE_CHECKING: + from environments.identities.models import Identity @python_2_unicode_compatible -class Feature(models.Model): +class Feature(CustomLifecycleModelMixin, models.Model): name = models.CharField(max_length=2000) created_date = models.DateTimeField("DateCreated", auto_now_add=True) project = models.ForeignKey( @@ -62,38 +74,21 @@ class Feature(models.Model): tags = models.ManyToManyField(Tag, blank=True) class Meta: - ordering = ["id"] # Note: uniqueness is changed to reference lowercase name in explicit SQL in the migrations unique_together = ("name", "project") - def save(self, *args, **kwargs): - """ - Override save method to initialise feature states for all environments - """ - feature_state_defaults = {} - if self.pk: - # If the feature has moved to a new project, delete the feature states from the old project - old_feature = Feature.objects.get(pk=self.pk) - if old_feature.project != self.project: - FeatureState.objects.filter( - feature=self, - environment__in=old_feature.project.environments.all(), - ).delete() - else: - feature_state_defaults["enabled"] = self.default_enabled - - super(Feature, self).save(*args, **kwargs) - - # create / update feature states for all environments in the project - # todo: is update necessary here + @hook(AFTER_CREATE) + def 
create_feature_states(self): + # create feature states for all environments environments = self.project.environments.all() for env in environments: - FeatureState.objects.update_or_create( + # unable to bulk create as we need signals + FeatureState.objects.create( feature=self, environment=env, identity=None, feature_segment=None, - defaults=feature_state_defaults, + enabled=self.default_enabled, ) def validate_unique(self, *args, **kwargs): @@ -200,7 +195,7 @@ def __lt__(self, other): @python_2_unicode_compatible -class FeatureState(models.Model): +class FeatureState(LifecycleModel, models.Model): feature = models.ForeignKey( Feature, related_name="feature_states", on_delete=models.CASCADE ) @@ -286,82 +281,113 @@ def __gt__(self, other): # it has a feature_segment or an identity return not (other.feature_segment or other.identity) - def get_feature_state_value(self): - try: - value_type = self.feature_state_value.type - except ObjectDoesNotExist: - return None + def get_feature_state_value(self, identity: "Identity" = None) -> typing.Any: + feature_state_value = ( + self.get_multivariate_feature_state_value(identity) + if self.feature.type == MULTIVARIATE and identity + else getattr(self, "feature_state_value", None) + ) - type_mapping = { - INTEGER: self.feature_state_value.integer_value, - STRING: self.feature_state_value.string_value, - BOOLEAN: self.feature_state_value.boolean_value, - } + # return the value of the feature state value only if the feature state + # has a related feature state value. Note that we use getattr rather than + # hasattr as we want to return None if no feature state value exists. + return feature_state_value and feature_state_value.value + + def get_multivariate_feature_state_value( + self, identity: "Identity" + ) -> AbstractBaseFeatureValueModel: + # the multivariate_feature_state_values should be prefetched at this point + # so we just convert them to a list and use python operations from here to + # avoid further queries to the DB + mv_options = list(self.multivariate_feature_state_values.all()) + + percentage_value = ( + get_hashed_percentage_for_object_ids([self.id, identity.id]) * 100 + ) + + # Iterate over the mv options in order of id (so we get the same value each + # time) to determine the correct value to return to the identity based on + # the percentage allocations of the multivariate options. This gives us a + # way to ensure that the same value is returned every time we use the same + # percentage value. 
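        # For example (numbers illustrative): with three options allocated 30/30/40 the
        # buckets are [0, 30), [30, 60) and [60, 100), so a percentage_value of 45.2
        # selects the second option; values of 100 or more cannot occur because the
        # hash helper never returns 1.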
+ start_percentage = 0 + for mv_option in sorted(mv_options, key=lambda o: o.id): + limit = getattr(mv_option, "percentage_allocation", 0) + start_percentage + if start_percentage <= percentage_value < limit: + return mv_option.multivariate_feature_option - return type_mapping.get(value_type) + start_percentage = limit + + # if none of the percentage allocations match the percentage value we got for + # the identity, then we just return the default feature state value (or None + # if there isn't one - although this should never happen) + return getattr(self, "feature_state_value", None) @property def previous_feature_state_value(self): try: history_instance = self.feature_state_value.history.first() + return ( + history_instance + and getattr(history_instance, "prev_record", None) + and history_instance.prev_record.instance.value + ) except ObjectDoesNotExist: return None - previous_feature_state_value = getattr(history_instance, "prev_record", None) - - if previous_feature_state_value: - value_type = previous_feature_state_value.type - - type_mapping = { - INTEGER: previous_feature_state_value.integer_value, - STRING: previous_feature_state_value.string_value, - BOOLEAN: previous_feature_state_value.boolean_value, - } - - return type_mapping.get(value_type) - - def save(self, *args, **kwargs): + @hook(BEFORE_CREATE) + def check_for_existing_env_feature_state(self): # prevent duplicate feature states being created for an environment - if ( - not self.pk - and FeatureState.objects.filter( - environment=self.environment, feature=self.feature - ).exists() - and not (self.identity or self.feature_segment) - ): + if FeatureState.objects.filter( + environment=self.environment, feature=self.feature + ).exists() and not (self.identity or self.feature_segment): raise ValidationError( "Feature state already exists for this environment and feature" ) - super(FeatureState, self).save(*args, **kwargs) - - # create default feature state value for feature state - # note: this is get_or_create since feature state values are updated separately, - # and hence if this is set to update_or_create, it overwrites the FSV with the - # initial value again - FeatureStateValue.objects.get_or_create( - feature_state=self, defaults=self._get_feature_state_defaults() + @hook(AFTER_CREATE) + def create_feature_state_value(self): + # note: this is only performed after create since feature state values are + # updated separately, and hence if this is performed after each save, + # it overwrites the FSV with the initial value again + FeatureStateValue.objects.create( + feature_state=self, + **self.get_feature_state_value_defaults(), ) - # TODO: move this to an async call using celery or django-rq + + @hook(AFTER_CREATE) + def create_multivariate_feature_state_values(self): + if not (self.feature_segment or self.identity): + # we only want to create the multivariate feature state values for + # feature states related to an environment only, i.e. 
when a new + # environment is created or a new MV feature is created + mv_feature_state_values = [ + MultivariateFeatureStateValue( + feature_state=self, + multivariate_feature_option=mv_option, + percentage_allocation=mv_option.default_percentage_allocation, + ) + for mv_option in self.feature.multivariate_options.all() + ] + MultivariateFeatureStateValue.objects.bulk_create(mv_feature_state_values) + + @hook(AFTER_SAVE) + def trigger_feature_state_change_webhooks(self): trigger_feature_state_change_webhooks(self) - def _get_feature_state_defaults(self): - if not (self.feature.initial_value or self.feature.initial_value is False): - return None + def get_feature_state_value_defaults(self) -> dict: + if self.feature.initial_value is None: + return {} value = self.feature.initial_value type = get_value_type(value) - defaults = {"type": type} - + parse_func = { + BOOLEAN: get_boolean_from_string, + INTEGER: get_integer_from_string, + }.get(type, lambda v: v) key_name = self.get_feature_state_key_name(type) - if type == BOOLEAN: - defaults[key_name] = get_boolean_from_string(value) - elif type == INTEGER: - defaults[key_name] = get_integer_from_string(value) - else: - defaults[key_name] = value - return defaults + return {"type": type, key_name: parse_func(value)} @staticmethod def get_feature_state_key_name(fsv_type): @@ -392,36 +418,20 @@ def generate_feature_state_value_data(self, value): } def __str__(self): + s = f"Feature {self.feature.name} - Enabled: {self.enabled}" if self.environment is not None: - return "Project %s - Environment %s - Feature %s - Enabled: %r" % ( - self.environment.project.name, - self.environment.name, - self.feature.name, - self.enabled, - ) + s = f"{self.environment} - {s}" elif self.identity is not None: - return "Identity %s - Feature %s - Enabled: %r" % ( - self.identity.identifier, - self.feature.name, - self.enabled, - ) - else: - return "Feature %s - Enabled: %r" % (self.feature.name, self.enabled) + s = f"Identity {self.identity.identifier} - {s}" + return s -class FeatureStateValue(models.Model): +class FeatureStateValue(AbstractBaseFeatureValueModel): feature_state = models.OneToOneField( FeatureState, related_name="feature_state_value", on_delete=models.CASCADE ) - type = models.CharField( - max_length=10, - choices=FEATURE_STATE_VALUE_TYPES, - default=STRING, - null=True, - blank=True, - ) - boolean_value = models.NullBooleanField(null=True, blank=True) - integer_value = models.IntegerField(null=True, blank=True) + # TODO: increase max length of string value on base model class string_value = models.CharField(null=True, max_length=20000, blank=True) + history = HistoricalRecords() diff --git a/src/features/multivariate/__init__.py b/src/features/multivariate/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/features/multivariate/migrations/0001_initial.py b/src/features/multivariate/migrations/0001_initial.py new file mode 100644 index 000000000000..0cf5a8142525 --- /dev/null +++ b/src/features/multivariate/migrations/0001_initial.py @@ -0,0 +1,43 @@ +# Generated by Django 2.2.17 on 2021-03-12 16:11 + +import django.core.validators +from django.db import migrations, models +import django.db.models.deletion +import django_lifecycle.mixins + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('features', '0030_merge_20210305_1622'), + ] + + operations = [ + migrations.CreateModel( + name='MultivariateFeatureOption', + fields=[ + ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')), + ('type', models.CharField(blank=True, choices=[('int', 'Integer'), ('unicode', 'String'), ('bool', 'Boolean')], default='unicode', max_length=10, null=True)), + ('boolean_value', models.NullBooleanField()), + ('integer_value', models.IntegerField(blank=True, null=True)), + ('string_value', models.CharField(blank=True, max_length=2000, null=True)), + ('default_percentage_allocation', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])), + ('feature', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='multivariate_options', to='features.Feature')), + ], + options={ + 'abstract': False, + }, + bases=(django_lifecycle.mixins.LifecycleModelMixin, models.Model), + ), + migrations.CreateModel( + name='MultivariateFeatureStateValue', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('percentage_allocation', models.FloatField(null=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])), + ('feature_state', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='multivariate_feature_state_values', to='features.FeatureState')), + ('multivariate_feature_option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='multivariate.MultivariateFeatureOption')), + ], + ), + ] diff --git a/src/features/multivariate/migrations/__init__.py b/src/features/multivariate/migrations/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/features/multivariate/models.py b/src/features/multivariate/models.py new file mode 100644 index 000000000000..d1f07c9c761f --- /dev/null +++ b/src/features/multivariate/models.py @@ -0,0 +1,94 @@ +from django.core.exceptions import ValidationError +from django.core.validators import MaxValueValidator, MinValueValidator +from django.db import models +from django.db.models import QuerySet, Sum +from django_lifecycle import ( + AFTER_CREATE, + BEFORE_SAVE, + LifecycleModelMixin, + hook, +) + +from features.feature_states.models import AbstractBaseFeatureValueModel + + +class MultivariateFeatureOption(LifecycleModelMixin, AbstractBaseFeatureValueModel): + feature = models.ForeignKey( + "features.Feature", + on_delete=models.CASCADE, + related_name="multivariate_options", + ) + + # This field is stored at the feature level but not used here - it is transferred + # to the MultivariateFeatureStateValue on creation of a new option or when creating + # a new environment. 
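    # e.g. an option created with default_percentage_allocation=30 (illustrative) seeds
    # each environment level MultivariateFeatureStateValue with percentage_allocation=30,
    # which can then be adjusted independently on each feature state.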
+ default_percentage_allocation = models.FloatField( + null=True, + blank=True, + validators=[MinValueValidator(0), MaxValueValidator(100)], + ) + + @hook(AFTER_CREATE) + def create_multivariate_feature_state_values(self): + for feature_state in self.feature.feature_states.filter( + identity=None, feature_segment=None + ): + MultivariateFeatureStateValue.objects.create( + feature_state=feature_state, + multivariate_feature_option=self, + percentage_allocation=self.default_percentage_allocation, + ) + + +class MultivariateFeatureStateValue(LifecycleModelMixin, models.Model): + feature_state = models.ForeignKey( + "features.FeatureState", + on_delete=models.CASCADE, + related_name="multivariate_feature_state_values", + ) + multivariate_feature_option = models.ForeignKey( + MultivariateFeatureOption, on_delete=models.CASCADE + ) + + percentage_allocation = models.FloatField( + blank=False, + null=True, + validators=[MinValueValidator(0), MaxValueValidator(100)], + ) + + @hook(BEFORE_SAVE) + def validate_percentage_allocations(self): + # TODO: add tests + total_percentage_allocation = self.get_siblings().aggregate( + total_percentage_allocation=Sum("percentage_allocation") + )["total_percentage_allocation"] + if total_percentage_allocation and total_percentage_allocation > 100: + raise ValidationError( + self._get_invalid_percentage_allocation_error_message() + ) + + def get_siblings(self) -> QuerySet: + # TODO: add tests + siblings = self.feature_state.multivariate_feature_state_values.all() + if self.id: + siblings = siblings.exclude(id=self.id) + return siblings + + def _get_invalid_percentage_allocation_error_message(self) -> str: + # TODO: add tests + kwargs = { + "feature_name": self.feature_state.feature.name, + "environment_name": self.feature_state.environment.name, + } + message = ( + "Total percentage allocation for feature {feature_name} " + "in environment {environment_name}" + ) + if self.feature_state.identity: + message += " for identity {identity_identifier}" + kwargs["identity_identifier"] = self.feature_state.identity.identifier + elif self.feature_state.feature_segment: + message += " for segment {segment_name}" + kwargs["segment_name"] = self.feature_state.feature_segment.segment.name + message += " is greater than 100%." 
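        # e.g. "Total percentage allocation for feature my_feature in environment
        # Production is greater than 100%." (feature / environment names illustrative;
        # the identity and segment branches above append extra context)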
+ return message.format(**kwargs) diff --git a/src/features/multivariate/serializers.py b/src/features/multivariate/serializers.py new file mode 100644 index 000000000000..dca54321a044 --- /dev/null +++ b/src/features/multivariate/serializers.py @@ -0,0 +1,29 @@ +from rest_framework import serializers + +from features.multivariate.models import ( + MultivariateFeatureOption, + MultivariateFeatureStateValue, +) + + +class MultivariateFeatureOptionSerializer(serializers.ModelSerializer): + class Meta: + model = MultivariateFeatureOption + fields = ( + "id", + "type", + "integer_value", + "string_value", + "boolean_value", + "default_percentage_allocation", + ) + + +class MultivariateFeatureStateValueSerializer(serializers.ModelSerializer): + class Meta: + model = MultivariateFeatureStateValue + fields = ( + "id", + "multivariate_feature_option", + "percentage_allocation", + ) diff --git a/src/features/multivariate/tests/__init__.py b/src/features/multivariate/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/features/permissions.py b/src/features/permissions.py index 79d7c25c07ee..0d7669dbebec 100644 --- a/src/features/permissions.py +++ b/src/features/permissions.py @@ -44,7 +44,7 @@ def has_object_permission(self, request, view, obj): class FeatureStatePermissions(BasePermission): def has_permission(self, request, view): try: - if view.action == "create": + if view.action == "create" and request.data.get("environment"): environment = Environment.objects.get(id=request.data["environment"]) return request.user.is_environment_admin(environment) diff --git a/src/features/serializers.py b/src/features/serializers.py index dc983831e92b..ea2f0cc17a3c 100644 --- a/src/features/serializers.py +++ b/src/features/serializers.py @@ -1,6 +1,5 @@ -from drf_writable_nested import NestedCreateMixin, NestedUpdateMixin +from drf_writable_nested import WritableNestedModelSerializer from rest_framework import serializers -from rest_framework.exceptions import ValidationError from audit.models import ( FEATURE_CREATED_MESSAGE, @@ -13,38 +12,48 @@ from environments.identities.models import Identity from .models import Feature, FeatureState, FeatureStateValue +from .multivariate.serializers import ( + MultivariateFeatureOptionSerializer, + MultivariateFeatureStateValueSerializer, +) + +class ListCreateFeatureSerializer(WritableNestedModelSerializer): + multivariate_options = MultivariateFeatureOptionSerializer( + many=True, required=False + ) -class CreateFeatureSerializer(serializers.ModelSerializer): class Meta: model = Feature - fields = "__all__" - read_only_fields = ("feature_segments",) + fields = ( + "id", + "name", + "type", + "default_enabled", + "initial_value", + "created_date", + "description", + "tags", + "multivariate_options", + ) + read_only_fields = ("feature_segments", "created_date") def to_internal_value(self, data): - if data.get("initial_value"): - data["initial_value"] = str(data.get("initial_value")) - return super(CreateFeatureSerializer, self).to_internal_value(data) + if data.get("initial_value") and not isinstance(data["initial_value"], str): + data["initial_value"] = str(data["initial_value"]) + return super(ListCreateFeatureSerializer, self).to_internal_value(data) def create(self, validated_data): - if Feature.objects.filter( - project=validated_data["project"], name__iexact=validated_data["name"] - ).exists(): - raise serializers.ValidationError( - "Feature with that name already exists for this " - "project. 
Note that feature names are case " - "insensitive." - ) - - instance = super(CreateFeatureSerializer, self).create(validated_data) - + instance = super(ListCreateFeatureSerializer, self).create(validated_data) self._create_audit_log(instance, True) - return instance def update(self, instance, validated_data): - self._create_audit_log(instance, False) - return super(CreateFeatureSerializer, self).update(instance, validated_data) + updated_instance = super(ListCreateFeatureSerializer, self).update( + instance, validated_data + ) + self._create_audit_log(updated_instance, False) + return updated_instance def _create_audit_log(self, instance, created): message = ( @@ -62,20 +71,39 @@ def _create_audit_log(self, instance, created): ) def validate(self, attrs): + view = self.context["view"] + project_id = str(view.kwargs.get("project_pk")) + if not project_id.isdigit(): + raise serializers.ValidationError("Invalid project ID.") + + unique_filters = {"project__id": project_id, "name__iexact": attrs["name"]} + existing_feature_queryset = Feature.objects.filter(**unique_filters) + if self.instance: + existing_feature_queryset = existing_feature_queryset.exclude( + id=self.instance.id + ) + + if existing_feature_queryset.exists(): + raise serializers.ValidationError( + "Feature with that name already exists for this " + "project. Note that feature names are case " + "insensitive." + ) + # If tags selected check they from the same Project as Feature Project - if any(tag.project_id != attrs["project"].id for tag in attrs.get("tags", [])): - raise ValidationError( + if any(tag.project_id != int(project_id) for tag in attrs.get("tags", [])): + raise serializers.ValidationError( "Selected Tags must be from the same Project as current Feature" ) return attrs -class UpdateFeatureSerializer(CreateFeatureSerializer): +class UpdateFeatureSerializer(ListCreateFeatureSerializer): """prevent users from changing the value of default enabled after creation""" - class Meta(CreateFeatureSerializer.Meta): - read_only_fields = CreateFeatureSerializer.Meta.read_only_fields + ( + class Meta(ListCreateFeatureSerializer.Meta): + read_only_fields = ListCreateFeatureSerializer.Meta.read_only_fields + ( "default_enabled", "initial_value", ) @@ -96,22 +124,6 @@ class Meta: writeonly_fields = ("initial_value", "default_enabled") -class FeatureWithTagsSerializer(serializers.ModelSerializer): - class Meta: - model = Feature - fields = ( - "id", - "name", - "created_date", - "initial_value", - "description", - "default_enabled", - "type", - "tags", - ) - writeonly_fields = ("initial_value", "default_enabled") - - class FeatureStateSerializerFull(serializers.ModelSerializer): feature = FeatureSerializer() feature_state_value = serializers.SerializerMethodField() @@ -121,18 +133,21 @@ class Meta: fields = "__all__" def get_feature_state_value(self, obj): - return obj.get_feature_state_value() + return obj.get_feature_state_value(identity=self.context.get("identity")) -class FeatureStateSerializerBasic(serializers.ModelSerializer): +class FeatureStateSerializerBasic(WritableNestedModelSerializer): feature_state_value = serializers.SerializerMethodField() + multivariate_feature_state_values = MultivariateFeatureStateValueSerializer( + many=True, required=False + ) class Meta: model = FeatureState fields = "__all__" def get_feature_state_value(self, obj): - return obj.get_feature_state_value() + return obj.get_feature_state_value(identity=self.context.get("identity")) def create(self, validated_data): instance = 
super(FeatureStateSerializerBasic, self).create(validated_data) @@ -155,7 +170,7 @@ def validate(self, attrs): feature_segment = attrs.get("feature_segment") if identity and not identity.environment == environment: - raise ValidationError("Identity does not exist in environment.") + raise serializers.ValidationError("Identity does not exist in environment.") if feature_segment and not feature_segment.environment == environment: raise serializers.ValidationError( @@ -179,7 +194,7 @@ def validate(self, attrs): ).exclude(pk=getattr(self.instance, "pk", None)) if queryset.exists(): - raise ValidationError("Feature state already exists.") + raise serializers.ValidationError("Feature state already exists.") return attrs @@ -251,9 +266,7 @@ class GetInfluxDataQuerySerializer(serializers.Serializer): environment_id = serializers.CharField(required=True) -class WritableNestedFeatureStateSerializer( - NestedCreateMixin, NestedUpdateMixin, FeatureStateSerializerBasic -): +class WritableNestedFeatureStateSerializer(FeatureStateSerializerBasic): feature_state_value = FeatureStateValueSerializer(required=False) class Meta(FeatureStateSerializerBasic.Meta): diff --git a/src/features/tests/conftest.py b/src/features/tests/conftest.py new file mode 100644 index 000000000000..e3cb5ecc8faf --- /dev/null +++ b/src/features/tests/conftest.py @@ -0,0 +1,52 @@ +import pytest + +from environments.identities.models import Identity +from environments.models import Environment +from features.feature_types import MULTIVARIATE +from features.models import Feature +from features.multivariate.models import MultivariateFeatureOption +from features.value_types import STRING, INTEGER +from organisations.models import Organisation +from projects.models import Project + + +@pytest.fixture() +def organisation(db): + return Organisation.objects.create(name="Test Org") + + +@pytest.fixture() +def project(organisation): + return Project.objects.create(name="Test Project", organisation=organisation) + + +@pytest.fixture() +def environment(project): + return Environment.objects.create(name="Test Environment", project=project) + + +@pytest.fixture() +def identity(environment): + return Identity.objects.create(identifier="test_identity", environment=environment) + + +@pytest.fixture() +def multivariate_feature(project): + feature = Feature.objects.create( + name="feature", project=project, type=MULTIVARIATE, initial_value="control" + ) + + multivariate_options = [] + for percentage_allocation in (30, 30, 40): + string_value = f"multivariate option for {percentage_allocation}% of users." 
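        # the 30/30/40 allocations deliberately cover the full 100%, so every identity
        # should resolve to one of these options rather than the control value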
+ multivariate_options.append( + MultivariateFeatureOption( + feature=feature, + default_percentage_allocation=percentage_allocation, + type=STRING, + string_value=string_value, + ) + ) + + MultivariateFeatureOption.objects.bulk_create(multivariate_options) + return feature diff --git a/src/features/tests/test_fields.py b/src/features/tests/test_fields.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/features/tests/test_helpers.py b/src/features/tests/test_helpers.py index afb6f38e1c44..a0bf77574438 100644 --- a/src/features/tests/test_helpers.py +++ b/src/features/tests/test_helpers.py @@ -1,7 +1,7 @@ import pytest from features.helpers import get_correctly_typed_value -from features.utils import INTEGER, BOOLEAN, STRING +from features.value_types import INTEGER, STRING, BOOLEAN @pytest.mark.parametrize( diff --git a/src/features/tests/test_models.py b/src/features/tests/test_models.py index 89c6ae5932f1..003f15043b2e 100644 --- a/src/features/tests/test_models.py +++ b/src/features/tests/test_models.py @@ -139,7 +139,7 @@ def test_updating_feature_name_should_update_feature_states(self): # Then FeatureState.objects.filter(feature__name=new_feature_name).exists() - def test_cannot_create_feature_with_same_case_insensitive_name(self): + def test_full_clean_fails_when_duplicate_case_insensitive_name(self): # unit test to validate validate_unique() method # Given @@ -354,3 +354,61 @@ def test_save_calls_trigger_webhooks(self, mock_trigger_webhooks): # Then mock_trigger_webhooks.assert_called_with(feature_state) + + +@pytest.mark.parametrize("hashed_percentage", (0.0, 0.3, 0.5, 0.8, 0.999999)) +@mock.patch("features.models.get_hashed_percentage_for_object_ids") +def test_get_multivariate_value_returns_correct_value_when_we_pass_identity( + mock_get_hashed_percentage, + hashed_percentage, + multivariate_feature, + environment, + identity, +): + # Given + mock_get_hashed_percentage.return_value = hashed_percentage + feature_state = FeatureState.objects.get( + environment=environment, + feature=multivariate_feature, + identity=None, + feature_segment=None, + ) + + # When + multivariate_value = feature_state.get_multivariate_feature_state_value( + identity=identity + ) + + # Then + # we get a multivariate value + assert multivariate_value + + # and that value is not the control (since the fixture includes values that span + # the entire 100%) + assert multivariate_value.value != multivariate_value.initial_value + + +@mock.patch.object(FeatureState, "get_multivariate_feature_state_value") +def test_get_feature_state_value_for_multivariate_features( + mock_get_mv_feature_state_value, environment, multivariate_feature, identity +): + # Given + value = "value" + mock_mv_feature_state_value = mock.MagicMock(value=value) + mock_get_mv_feature_state_value.return_value = mock_mv_feature_state_value + + feature_state = FeatureState.objects.get( + environment=environment, + feature=multivariate_feature, + identity=None, + feature_segment=None, + ) + + # When + feature_state_value = feature_state.get_feature_state_value(identity=identity) + + # Then + # the correct value is returned + assert feature_state_value == value + # and the correct call is made to get the multivariate feature state value + mock_get_mv_feature_state_value.assert_called_once_with(identity) diff --git a/src/features/tests/test_utils.py b/src/features/tests/test_utils.py index 52a25836681a..f85a98d7e653 100644 --- a/src/features/tests/test_utils.py +++ b/src/features/tests/test_utils.py @@ -1,6 +1,7 @@ import 
pytest -from features.utils import BOOLEAN, INTEGER, STRING, get_value_type +from features.utils import get_value_type +from features.value_types import INTEGER, STRING, BOOLEAN @pytest.mark.parametrize( diff --git a/src/features/tests/test_views.py b/src/features/tests/test_views.py index dfd37dc10f7a..9623d9701f13 100644 --- a/src/features/tests/test_views.py +++ b/src/features/tests/test_views.py @@ -23,6 +23,8 @@ FeatureState, FeatureStateValue, ) +from features.multivariate.models import MultivariateFeatureOption +from features.value_types import STRING, INTEGER, BOOLEAN from organisations.models import Organisation, OrganisationRole from projects.models import Project from projects.tags.models import Tag @@ -543,6 +545,80 @@ def test_get_influx_data(self, mock_get_event_list): period="24h", # this is the default but can be provided as a GET param ) + def test_create_feature_with_multivariate_options(self): + # Given + data = { + "name": "test_feature", + "default_enabled": True, + "multivariate_options": [{"type": "unicode", "string_value": "test-value"}], + } + url = reverse("api-v1:projects:project-features-list", args=[self.project.id]) + + # When + response = self.client.post( + url, data=json.dumps(data), content_type="application/json" + ) + + # Then + assert response.status_code == status.HTTP_201_CREATED + + response_json = response.json() + assert len(response_json["multivariate_options"]) == 1 + + def test_update_feature_with_multivariate_options(self): + # Given + # a feature + feature = Feature.objects.create(name="test_feature", project=self.project) + + # a multivariate feature option for the feature that we will leave out from + # the list in the PUT request + multivariate_option_to_delete = MultivariateFeatureOption.objects.create( + feature=feature, type=STRING, string_value="test-value" + ) + + # a multivariate feature option for the feature that we will update in the + # PUT request + multivariate_option_to_update = MultivariateFeatureOption.objects.create( + feature=feature, type=STRING, string_value="test-value" + ) + updated_mv_option_data = model_to_dict(multivariate_option_to_update) + updated_mv_option_data["string_value"] = "updated-value" + + # and the data adds a new multivariate flag, removes one and updates one + data = { + "name": "test_feature", + "multivariate_options": [ + {"type": "unicode", "string_value": "test-value"}, # new mv option + updated_mv_option_data, # the updated mv option + ], # and we removed the deleted one + } + url = reverse( + "api-v1:projects:project-features-detail", + args=[self.project.id, feature.id], + ) + + # When + response = self.client.put( + url, data=json.dumps(data), content_type="application/json" + ) + + # Then + # The response is successful + assert response.status_code == status.HTTP_200_OK + + # and the correct number of multivariate options are returned + response_json = response.json() + assert len(response_json["multivariate_options"]) == 2 + + # and the deleted option is not included, and the updated option has been + # correctly updated + for mv_option in response_json["multivariate_options"]: + assert mv_option["id"] != multivariate_option_to_delete.id + if mv_option["id"] == multivariate_option_to_update.id: + assert ( + mv_option["string_value"] == updated_mv_option_data["string_value"] + ) + @pytest.mark.django_db() class FeatureStateViewSetTestCase(TestCase): diff --git a/src/features/utils.py b/src/features/utils.py index 4dd1d98f5911..951fb1a4b418 100644 --- a/src/features/utils.py +++ 
b/src/features/utils.py @@ -1,8 +1,5 @@ # Feature State Value Types -INTEGER = "int" -STRING = "unicode" -BOOLEAN = "bool" -FLOAT = "float" +from features.value_types import INTEGER, STRING, BOOLEAN def get_value_type(value: str) -> str: diff --git a/src/features/views.py b/src/features/views.py index d809b13c64d7..958d3e86dc43 100644 --- a/src/features/views.py +++ b/src/features/views.py @@ -1,6 +1,7 @@ import logging import coreapi +from app_analytics.influxdb_wrapper import get_multiple_event_list_for_feature from django.conf import settings from django.core.cache import caches from django.utils.decorators import method_decorator @@ -13,7 +14,6 @@ from rest_framework.response import Response from rest_framework.schemas import AutoSchema -from app_analytics.influxdb_wrapper import get_multiple_event_list_for_feature from audit.models import ( IDENTITY_FEATURE_STATE_DELETED_MESSAGE, AuditLog, @@ -26,11 +26,10 @@ EnvironmentKeyPermissions, NestedEnvironmentPermissions, ) -from projects.models import Project -from .models import FeatureState, Feature + +from .models import Feature, FeatureState from .permissions import FeaturePermissions, FeatureStatePermissions from .serializers import ( - CreateFeatureSerializer, FeatureInfluxDataSerializer, FeatureSerializer, FeatureStateSerializerBasic, @@ -38,9 +37,9 @@ FeatureStateSerializerFull, FeatureStateSerializerWithIdentity, FeatureStateValueSerializer, - FeatureWithTagsSerializer, - UpdateFeatureSerializer, GetInfluxDataQuerySerializer, + ListCreateFeatureSerializer, + UpdateFeatureSerializer, WritableNestedFeatureStateSerializer, ) @@ -55,8 +54,8 @@ class FeatureViewSet(viewsets.ModelViewSet): def get_serializer_class(self): return { - "list": FeatureWithTagsSerializer, - "create": CreateFeatureSerializer, + "list": ListCreateFeatureSerializer, + "create": ListCreateFeatureSerializer, "update": UpdateFeatureSerializer, "partial_update": UpdateFeatureSerializer, }.get(self.action, FeatureSerializer) @@ -64,17 +63,13 @@ def get_serializer_class(self): def get_queryset(self): user_projects = self.request.user.get_permitted_projects(["VIEW_PROJECT"]) project = get_object_or_404(user_projects, pk=self.kwargs["project_pk"]) + return project.features.all().prefetch_related("multivariate_options") - return project.features.all() + def perform_create(self, serializer): + serializer.save(project_id=self.kwargs.get("project_pk")) - def create(self, request, *args, **kwargs): - project_id = request.data.get("project") - project = get_object_or_404(Project, pk=project_id) - - if project.organisation not in request.user.organisations.all(): - return Response(status=status.HTTP_403_FORBIDDEN) - - return super().create(request, *args, **kwargs) + def perform_update(self, serializer): + serializer.save(project_id=self.kwargs.get("project_pk")) @swagger_auto_schema( query_serializer=GetInfluxDataQuerySerializer(), @@ -128,7 +123,7 @@ class FeatureStateViewSet(viewsets.ModelViewSet): def get_serializer_class(self): if self.action == "list": return FeatureStateSerializerWithIdentity - elif self.action in ["retrieve", "update"]: + elif self.action in ["retrieve", "update", "create"]: return FeatureStateSerializerBasic else: return FeatureStateSerializerCreate @@ -209,9 +204,7 @@ def create(self, request, *args, **kwargs): if identity_pk: data["identity"] = identity_pk - serializer = FeatureStateSerializerBasic( - data=data, context=self.get_serializer_context() - ) + serializer = self.get_serializer(data=data) if serializer.is_valid(): feature_state = 
serializer.save() headers = self.get_success_headers(serializer.data) @@ -255,7 +248,7 @@ def update(self, request, *args, **kwargs): feature_state_to_update, data=feature_state_data, partial=True ) serializer.is_valid(raise_exception=True) - self.perform_update(serializer) + serializer.save() if getattr(feature_state_to_update, "_prefetched_objects_cache", None): # If 'prefetch_related' has been applied to a queryset, we need to diff --git a/src/permissions/migrations/0002_auto_20200221_2126.py b/src/permissions/migrations/0002_auto_20200221_2126.py index aa86c810c496..48fb60ec7c21 100644 --- a/src/permissions/migrations/0002_auto_20200221_2126.py +++ b/src/permissions/migrations/0002_auto_20200221_2126.py @@ -6,8 +6,13 @@ def add_edit_feature_permission(apps, schema_editor): PermissionModel = apps.get_model('permissions', 'PermissionModel') - new_permission = ('EDIT_FEATURE', 'Ability to edit features in the given project (includes editing segments).') - PermissionModel.objects.get_or_create(key=new_permission[0], description=new_permission[1], type='PROJECT') + PermissionModel.objects.get_or_create( + key='EDIT_FEATURE', + type='PROJECT', + defaults={ + 'description': 'Ability to edit features in the given project (includes editing segments).' + } + ) class Migration(migrations.Migration): diff --git a/src/projects/permissions.py b/src/projects/permissions.py index 50e1f2ee3c33..c7418d4a4f76 100644 --- a/src/projects/permissions.py +++ b/src/projects/permissions.py @@ -8,6 +8,7 @@ ("CREATE_ENVIRONMENT", "Ability to create an environment in the given project."), ("DELETE_FEATURE", "Ability to delete features in the given project."), ("CREATE_FEATURE", "Ability to create features in the given project."), + ("EDIT_FEATURE", "Ability to edit features in the given project."), ] diff --git a/src/projects/urls.py b/src/projects/urls.py index aa8a65047a8a..6ac265e05bcb 100644 --- a/src/projects/urls.py +++ b/src/projects/urls.py @@ -41,9 +41,15 @@ basename="integrations-new-relic", ) +nested_features_router = routers.NestedSimpleRouter( + projects_router, r"features", lookup="feature" +) + + app_name = "projects" urlpatterns = [ url(r"^", include(router.urls)), url(r"^", include(projects_router.urls)), + url(r"^", include(nested_features_router.urls)), ] diff --git a/src/segments/models.py b/src/segments/models.py index c1cf31e08ea9..5c9dfba7491b 100644 --- a/src/segments/models.py +++ b/src/segments/models.py @@ -7,6 +7,7 @@ from django.db import models from django.utils.encoding import python_2_unicode_compatible +from environments.identities.helpers import get_hashed_percentage_for_object_ids from environments.identities.models import Identity from environments.identities.traits.models import Trait from environments.models import BOOLEAN, FLOAT, INTEGER @@ -47,16 +48,6 @@ def does_identity_match( rule.does_identity_match(identity, traits) for rule in rules ) - def get_identity_percentage_value(self, identity: Identity) -> float: - """ - Given a segment and an identity, generate a number between 0 and 1 to determine whether the identity falls - within a given percentile when using percentage split rules. 
- """ - to_hash = f"{self.id},{identity.id}" - hashed_value = hashlib.md5(to_hash.encode("utf-8")) - hashed_value_as_int = int(hashed_value.hexdigest(), base=16) - return (hashed_value_as_int % 9999) / 9998 - @python_2_unicode_compatible class SegmentRule(models.Model): @@ -190,7 +181,10 @@ def _check_percentage_split_operator(self, identity): return False segment = self.rule.get_segment() - return segment.get_identity_percentage_value(identity) <= float_value + return ( + get_hashed_percentage_for_object_ids(object_ids=[segment.id, identity.id]) + <= float_value + ) def check_integer_value(self, value: int) -> bool: try: diff --git a/src/segments/tests/test_models.py b/src/segments/tests/test_models.py index 4180adf469a1..d680f49c157f 100644 --- a/src/segments/tests/test_models.py +++ b/src/segments/tests/test_models.py @@ -28,90 +28,6 @@ def tearDown(self) -> None: Segment.objects.all().delete() Identity.objects.all().delete() - def test_percentage_value_for_given_segment_rule_and_identity_is_number_between_0_and_1( - self, - ): - # Given - segment = Segment.objects.create(name="Test Segment", project=self.project) - - # When - result = segment.get_identity_percentage_value(self.identity) - - # Then - assert 1 >= result >= 0 - - def test_percentage_value_for_given_segment_rule_and_identity_is_the_same_each_time( - self, - ): - # Given - segment = Segment.objects.create(name="Test Segment", project=self.project) - - # When - result_1 = segment.get_identity_percentage_value(self.identity) - result_2 = segment.get_identity_percentage_value(self.identity) - - # Then - assert result_1 == result_2 - - def test_percentage_value_is_unique_for_different_identities(self): - # Given - segment = Segment.objects.create(name="Test Segment", project=self.project) - another_identity = Identity.objects.create( - environment=self.environment, identifier="another_test_identity" - ) - - # When - result_1 = segment.get_identity_percentage_value(self.identity) - result_2 = segment.get_identity_percentage_value(another_identity) - - # Then - assert result_1 != result_2 - - def test_percentage_values_should_be_evenly_distributed(self): - """ - This test checks if the percentage value returned by the method on SegmentRule returns evenly distributed - values. - - Note that since it's technically random, it's not guaranteed to pass every time, however, it should pass - 99/100 times. It will likely be more accurate by increasing the test_sample value and / or decreasing - the num_test_buckets value. 
- """ - test_sample = 10000 # number of identities to create to test with - num_test_buckets = 10 # split the sample into 'buckets' to check that the values are evenly distributed - test_bucket_size = int(test_sample / num_test_buckets) - error_factor = 0.1 - - # Given - segment = Segment.objects.create(name="Test Segment", project=self.project) - identities = [] - for i in range(test_sample): - identities.append(Identity(environment=self.environment, identifier=str(i))) - Identity.objects.bulk_create(identities) - - # When - values = [ - segment.get_identity_percentage_value(identity) - for identity in Identity.objects.all() - ] - values.sort() - - # Then - for j in range(num_test_buckets): - bucket_start = j * test_bucket_size - bucket_end = (j + 1) * test_bucket_size - bucket_value_limit = min( - (j + 1) / num_test_buckets - + error_factor * ((j + 1) / num_test_buckets), - 1, - ) - - assert all( - [ - value <= bucket_value_limit - for value in values[bucket_start:bucket_end] - ] - ) - @pytest.mark.django_db class SegmentRuleTest(TestCase): @@ -168,15 +84,15 @@ def setUp(self) -> None: segment=self.segment, type=SegmentRule.ALL_RULE ) - @mock.patch.object(segments.models.Segment, "get_identity_percentage_value") + @mock.patch("segments.models.get_hashed_percentage_for_object_ids") def test_percentage_split_calculation_divides_value_by_100_before_comparison( - self, get_identity_percentage_value + self, mock_get_hashed_percentage_for_object_ids ): # Given condition = Condition.objects.create( rule=self.rule, operator=PERCENTAGE_SPLIT, value=10 ) - get_identity_percentage_value.return_value = 0.2 + mock_get_hashed_percentage_for_object_ids.return_value = 0.2 # When res = condition.does_identity_match(self.identity) diff --git a/src/tests/integration/conftest.py b/src/tests/integration/conftest.py new file mode 100644 index 000000000000..e24e3b10232d --- /dev/null +++ b/src/tests/integration/conftest.py @@ -0,0 +1,74 @@ +import uuid + +import pytest +from django.urls import reverse +from rest_framework.test import APIClient + +from app.utils import create_hash + + +@pytest.fixture() +def admin_user(django_user_model): + return django_user_model.objects.create(email="test@example.com") + + +@pytest.fixture() +def admin_client(admin_user): + client = APIClient() + client.force_authenticate(user=admin_user) + return client + + +@pytest.fixture() +def organisation(admin_client): + organisation_data = {"name": "Test org"} + url = reverse("api-v1:organisations:organisation-list") + response = admin_client.post(url, data=organisation_data) + return response.json()["id"] + + +@pytest.fixture() +def project(admin_client, organisation): + project_data = {"name": "Test Project", "organisation": organisation} + url = reverse("api-v1:projects:project-list") + response = admin_client.post(url, data=project_data) + return response.json()["id"] + + +@pytest.fixture() +def environment_api_key(): + return create_hash() + + +@pytest.fixture() +def environment(admin_client, project, environment_api_key): + environment_data = { + "name": "Test Environment", + "api_key": environment_api_key, + "project": project, + } + url = reverse("api-v1:environments:environment-list") + response = admin_client.post(url, data=environment_data) + return response.json()["id"] + + +@pytest.fixture() +def identity_identifier(): + return uuid.uuid4() + + +@pytest.fixture() +def identity(admin_client, identity_identifier, environment, environment_api_key): + identity_data = {"identifier": identity_identifier} + url = reverse( + 
"api-v1:environments:environment-identities-list", args=[environment_api_key] + ) + response = admin_client.post(url, data=identity_data) + return response.json()["id"] + + +@pytest.fixture() +def sdk_client(environment_api_key): + client = APIClient() + client.credentials(HTTP_X_ENVIRONMENT_KEY=environment_api_key) + return client diff --git a/src/tests/integration/environments/__init__.py b/src/tests/integration/environments/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/tests/integration/environments/identities/__init__.py b/src/tests/integration/environments/identities/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/tests/integration/environments/identities/test_integration_identities.py b/src/tests/integration/environments/identities/test_integration_identities.py new file mode 100644 index 000000000000..6191e107b60d --- /dev/null +++ b/src/tests/integration/environments/identities/test_integration_identities.py @@ -0,0 +1,185 @@ +import json +from unittest import mock + +import pytest +from django.urls import reverse +from rest_framework import status + +from tests.integration.helpers import create_feature_with_api + +variant_1_value = "variant-1-value" +variant_2_value = "variant-2-value" +control_value = "control" + +variant_1_percentage_allocation = 20 +variant_2_percentage_allocation = 30 +total_variance_percentage = ( + variant_1_percentage_allocation + variant_2_percentage_allocation +) + + +# mock the returned percentage for the identity to simulate them falling into each of +# the percentage allocation brackets for the feature variants +@pytest.mark.parametrize( + "hashed_percentage, expected_mv_value", + ( + (variant_1_percentage_allocation / 100 - 0.01, variant_1_value), + (total_variance_percentage / 100 - 0.01, variant_2_value), + (total_variance_percentage / 100 + 0.01, control_value), + ), +) +@mock.patch("features.models.get_hashed_percentage_for_object_ids") +def test_get_feature_states_for_identity( + mock_get_hashed_percentage_value, + hashed_percentage, + expected_mv_value, + sdk_client, + admin_client, + project, + environment_api_key, + environment, + identity, + identity_identifier, +): + # Firstly, let's create some features to use + # one standard feature + standard_feature_initial_value = "control" + standard_feature_id = create_feature_with_api( + client=admin_client, + project_id=project, + feature_name="standard_feature", + initial_value=standard_feature_initial_value, + ) + + # and one multivariate feature + multivariate_feature_id = create_feature_with_api( + client=admin_client, + project_id=project, + feature_name="multivariate_feature", + initial_value=control_value, + multivariate_options=[ + (variant_1_value, variant_1_percentage_allocation), + (variant_2_value, variant_2_percentage_allocation), + ], + ) + + # Now, when we mock the hashed percentage that the user gets + # to avoid the randomness factor + mock_get_hashed_percentage_value.return_value = hashed_percentage + + # and request the flags for the identity + base_identity_flags_url = reverse("api-v1:sdk-identities") + identity_flags_url = f"{base_identity_flags_url}?identifier={identity_identifier}" + identity_response_1 = sdk_client.get(identity_flags_url) + + # Then, we get a result for both of the features we created + assert identity_response_1.status_code == status.HTTP_200_OK + identity_response_json = identity_response_1.json() + assert len(identity_response_json["flags"]) == 2 + + # and the correct values are 
returned for the features
+    values_dict = {
+        flag["feature"]["id"]: flag["feature_state_value"]
+        for flag in identity_response_json["flags"]
+    }
+    assert values_dict[standard_feature_id] == standard_feature_initial_value
+    assert values_dict[multivariate_feature_id] == expected_mv_value
+
+    # Now, let's change the percentage allocations on the mv options
+    # first, we need to get the feature state id for the feature in the given
+    # environment
+    feature_state_id = next(
+        filter(
+            lambda flag: flag["feature"]["id"] == multivariate_feature_id,
+            identity_response_json["flags"],
+        )
+    )["id"]
+
+    # now let's get the existing data for the feature state so we can alter it and
+    # then PUT it back
+    feature_state_detail_url = reverse(
+        "api-v1:environments:environment-featurestates-detail",
+        args=[environment_api_key, feature_state_id],
+    )
+    retrieve_feature_state_response = admin_client.get(feature_state_detail_url)
+    feature_state_data = retrieve_feature_state_response.json()
+
+    # now let's amend the data so that all identities should receive variant 2
+    mv_values = feature_state_data["multivariate_feature_state_values"]
+    mv_values[0]["percentage_allocation"] = 0
+    mv_values[1]["percentage_allocation"] = 100
+
+    # and PUT the data back
+    update_feature_state_response = admin_client.put(
+        feature_state_detail_url,
+        data=json.dumps(feature_state_data),
+        content_type="application/json",
+    )
+    assert update_feature_state_response.status_code == status.HTTP_200_OK
+
+    # Then, when we get the flags for the identity again, the multivariate feature
+    # returns the value of the second variant
+    identity_response_2 = sdk_client.get(identity_flags_url)
+    values_dict = {
+        flag["feature"]["id"]: flag["feature_state_value"]
+        for flag in identity_response_2.json()["flags"]
+    }
+    assert values_dict[multivariate_feature_id] == variant_2_value
+
+
+def test_get_feature_states_for_identity_only_makes_one_query_to_get_mv_feature_states(
+    sdk_client,
+    admin_client,
+    project,
+    environment,
+    identity,
+    identity_identifier,
+    django_assert_num_queries,
+):
+    # Firstly, let's create some features to use
+    for i in range(2):
+        create_feature_with_api(
+            client=admin_client,
+            project_id=project,
+            feature_name=f"multivariate_feature_{i}",
+            initial_value=control_value,
+            multivariate_options=[
+                (variant_1_value, variant_1_percentage_allocation),
+                (variant_2_value, variant_2_percentage_allocation),
+            ],
+        )
+
+    # When we make a request to get the flags for the identity, 11 queries are made
+    # (although 4 of these are made in a separate thread)
+    # TODO: can we reduce the number of queries?!
+    base_url = reverse("api-v1:sdk-identities")
+    url = f"{base_url}?identifier={identity_identifier}"
+    with django_assert_num_queries(11):
+        first_identity_response = sdk_client.get(url)
+
+    # Now, if we add another feature
+    create_feature_with_api(
+        client=admin_client,
+        project_id=project,
+        feature_name="another_multivariate_feature",
+        initial_value=control_value,
+        multivariate_options=[
+            (variant_1_value, variant_1_percentage_allocation),
+            (variant_2_value, variant_2_percentage_allocation),
+        ],
+    )
+
+    # Then one fewer db query is made (since the environment is now cached)
+    with django_assert_num_queries(10):
+        second_identity_response = sdk_client.get(url)
+
+    # Finally, we check that the requests were successful and we got the correct number
+    # of flags in each case
+    assert first_identity_response.status_code == status.HTTP_200_OK
+    assert second_identity_response.status_code == status.HTTP_200_OK
+
+    first_identity_response_json = first_identity_response.json()
+    assert len(first_identity_response_json["flags"]) == 2
+
+    second_identity_response_json = second_identity_response.json()
+    assert len(second_identity_response_json["flags"]) == 3
diff --git a/src/tests/integration/helpers.py b/src/tests/integration/helpers.py
new file mode 100644
index 000000000000..9b1523bdd94d
--- /dev/null
+++ b/src/tests/integration/helpers.py
@@ -0,0 +1,51 @@
+import json
+import typing
+
+from django.urls import reverse
+from rest_framework.test import APIClient
+
+from features.feature_types import STANDARD, MULTIVARIATE
+from features.value_types import STRING
+
+
+def create_feature_with_api(
+    client: APIClient,
+    project_id: int,
+    feature_name: str,
+    initial_value: str,
+    multivariate_options: typing.List[typing.Tuple[str, float]] = None,
+) -> int:
+    """
+    Create a feature against the API using the provided test client.
+
+    :param client: DRF api client to use to make the request
+    :param feature_name: Name of the feature to create
+    :param initial_value: Initial value to give the feature
+    :param multivariate_options: List of 2-tuples containing the string value and percentage allocation
+    :return: id of the created feature
+    """
+    multivariate_options = multivariate_options or []
+
+    create_feature_url = reverse(
+        "api-v1:projects:project-features-list", args=[project_id]
+    )
+    create_standard_feature_data = {
+        "name": feature_name,
+        "type": MULTIVARIATE if multivariate_options else STANDARD,
+        "initial_value": initial_value,
+        "default_enabled": True,
+        "multivariate_options": [
+            {
+                "type": STRING,
+                "string_value": mv_option[0],
+                "default_percentage_allocation": mv_option[1],
+            }
+            for mv_option in multivariate_options
+        ],
+    }
+    create_standard_feature_response = client.post(
+        create_feature_url,
+        data=json.dumps(create_standard_feature_data),
+        content_type="application/json",
+    )
+    return create_standard_feature_response.json()["id"]
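
Note on the bracket behaviour exercised by the parametrised integration test above: a hashed percentage below 0.2 should resolve to variant 1, below 0.5 to variant 2, and anything above to the control value. The sketch below illustrates that mapping only; it is not the selection logic from features/models.py (which this diff does not show), and the helper name pick_multivariate_value and its signature are hypothetical. The hashed_percentage argument corresponds to the value returned by get_hashed_percentage_for_object_ids, which the test mocks.

# Hypothetical sketch (not part of the diff above): mapping a hashed percentage
# in [0, 1) onto cumulative percentage-allocation brackets for a feature's
# multivariate options, falling back to the control value.
import typing


def pick_multivariate_value(
    hashed_percentage: float,
    multivariate_options: typing.List[typing.Tuple[str, float]],
    control_value: str,
) -> str:
    start = 0.0
    for string_value, percentage_allocation in multivariate_options:
        limit = start + percentage_allocation / 100
        if start <= hashed_percentage < limit:
            return string_value
        start = limit
    return control_value


# With the 20% / 30% allocations used in the test:
options = [("variant-1-value", 20), ("variant-2-value", 30)]
assert pick_multivariate_value(0.19, options, "control") == "variant-1-value"
assert pick_multivariate_value(0.49, options, "control") == "variant-2-value"
assert pick_multivariate_value(0.51, options, "control") == "control"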