diff --git a/setup.cfg b/setup.cfg
index 7fe43b9..cae5cef 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = ep-stats
-version = 1.3.1
+version = 1.3.2
 description = Statistical package to evaluate ab tests in experimentation platform.
 long_description = file: README.md
 long_description_content_type = text/markdown
diff --git a/src/epstats/toolkit/statistics.py b/src/epstats/toolkit/statistics.py
index 56eff37..b408363 100644
--- a/src/epstats/toolkit/statistics.py
+++ b/src/epstats/toolkit/statistics.py
@@ -111,7 +111,7 @@ def ttest_evaluation(cls, stats: np.array, control_variant: str) -> pd.DataFrame
         # We fill in zeros, when goal data are missing for some variant.
         # There could be division by zero here which is expected as we return
         # nan or inf values to the caller.
-        rel_diff = (mean_treat - mean_cont) / mean_cont
+        rel_diff = (mean_treat - mean_cont) / np.abs(mean_cont)
         # standard error for relative difference
         rel_se = (
             np.sqrt(