diff --git a/orangecontrib/fairness/evaluation/scoring.py b/orangecontrib/fairness/evaluation/scoring.py
index 93b4d00..278eaa6 100644
--- a/orangecontrib/fairness/evaluation/scoring.py
+++ b/orangecontrib/fairness/evaluation/scoring.py
@@ -99,8 +99,8 @@ class StatisticalParityDifference(FairnessScorer):
name = "SPD"
long_name = str(
- "
Statistical Parity Difference (SPD): Measures the difference in ratios of "
- "favorable outcomes. An ideal value is 0.0.
"
+ "Statistical Parity Difference (SPD): The difference in favorable "
+ "outcomes proportions between groups. An ideal value is 0.0.
"
""
"- SPD < 0: The privileged group has a higher rate of favorable outcomes.
"
"- SPD > 0: The privileged group has a lower rate of favorable outcomes.
"
@@ -118,9 +118,8 @@ class EqualOpportunityDifference(FairnessScorer):
name = "EOD"
long_name = str(
- "Equal Opportunity Difference (EOD): It measures the difference in "
- "true positive rates. An ideal value is 0.0, indicating the difference "
- "in true positive rates is the same for both groups.
"
+ "Equal Opportunity Difference (EOD): The difference in true positive rates between "
+ "groups. An ideal value is 0.0 meaning both groups have the same true positive rate.
"
""
"- EOD < 0: The privileged group has a higher true positive rate.
"
"- EOD > 0: The privileged group has a lower true positive rate.
"
@@ -138,11 +137,9 @@ class AverageOddsDifference(FairnessScorer):
name = "AOD"
long_name = str(
- "Average Odds Difference (AOD): This metric calculates the average difference "
- "between the true positive rates (correctly predicting a positive outcome) and false "
- "positive rates (incorrectly predicting a positive outcome) for both the privileged "
- "and unprivileged groups. A value of 0.0 indicates equal rates for both groups, "
- "signifying fairness.
"
+ "Average Odds Difference (AOD): The average of the differences in true "
+ "and false positive rates between privileged and unprivileged groups. "
+ "A value of 0.0 indicates equal rates for both groups.
"
""
"- AOD < 0: Indicates bias in favor of the privileged group.
"
"- AOD > 0: Indicates bias against the privileged group.
"
@@ -160,20 +157,17 @@ class DisparateImpact(FairnessScorer):
name = "DI"
long_name = str(
- "Disparate Impact (DI): The ratio of ratios of favorable outcomes for an unprivileged "
- "group to that of the privileged group. An ideal value of 1.0 means the ratio is "
- "the same for both groups.
"
+ "Disparate Impact (DI) is the ratio of favorable outcome "
+ "proportions between an unprivileged and privileged group. "
+ "Value of 1.0 indicates that the ratio is equal for both groups.
"
""
- "- DI < 1.0: The privileged group receives favorable outcomes at a higher rate "
- "than the unprivileged group.
"
- "- DI > 1.0: The privileged group receives favorable outcomes at a lower rate "
- "than the unprivileged group.
"
+ "- DI < 1.0: The privileged group has a higher rate of favorable outcomes.
"
+ "- DI > 1.0: The privileged group has a lower rate of favorable outcomes.
"
"
"
)
# TODO: When using randomize, models sometimes predict the same class for all instances
# This can lead to division by zero in the Disparate Impact score
- # (and untrue results for the other scores)
- # What is the best way to handle this?
+ # and untrue results for the other scores.
def metric(self, classification_metric):
return classification_metric.disparate_impact()
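The TODO kept above is still open. One possible mitigation (a sketch only, not part of this patch; the wrapper name is hypothetical) is to report an undefined score instead of propagating the division by zero:

```python
import numpy as np

def metric_or_nan(classification_metric):
    # Hypothetical guard, not part of this patch: AIF360's disparate_impact()
    # divides the unprivileged group's favorable-outcome rate by the
    # privileged group's rate, so a model that predicts a single class for
    # every instance can make the denominator zero (inf) or both terms zero (NaN).
    di = classification_metric.disparate_impact()
    return di if np.isfinite(di) else np.nan  # NaN marks "undefined"
```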
diff --git a/orangecontrib/fairness/modeling/adversarial.py b/orangecontrib/fairness/modeling/adversarial.py
index 637e83c..409bf99 100644
--- a/orangecontrib/fairness/modeling/adversarial.py
+++ b/orangecontrib/fairness/modeling/adversarial.py
@@ -154,7 +154,6 @@ def _fit_model(self, data):
# Fit storage and fit functions were modified to use a Table/Storage object
# This is because it's the easiest way to get the domain, and meta attributes
- # TODO: Should I use the X,Y,W format instead of the table format ? (Same for the model)
def fit(self, data: Table) -> AdversarialDebiasingModel:
(
standard_dataset,
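For context on the comment retained above (the same pattern appears in postprocessing.py below): a Table carries the domain and meta attributes along with the data, which the bare X, Y, W arrays do not. A minimal illustration, using the bundled iris dataset as a stand-in:

```python
from Orange.data import Table

data = Table("iris")              # any Orange dataset works as a stand-in
print(data.domain)                # variable descriptors, incl. the class var
print(data.domain.metas)          # meta attributes travel with the Table
X, Y, W = data.X, data.Y, data.W  # bare arrays: the domain is lost here
```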
diff --git a/orangecontrib/fairness/modeling/postprocessing.py b/orangecontrib/fairness/modeling/postprocessing.py
index afc99c4..2848b1f 100644
--- a/orangecontrib/fairness/modeling/postprocessing.py
+++ b/orangecontrib/fairness/modeling/postprocessing.py
@@ -104,7 +104,6 @@ def incompatibility_reason(self, domain):
# Fit storage and fit functions were modified to use a Table/Storage object
# This is because it's the easiest way to get the domain, and meta attributes
- # TODO: Should I use the X,Y,W format instead of the table format ? (Same for the model)
def fit_storage(self, data):
if isinstance(data, Table):
self.fit(data)
diff --git a/orangecontrib/fairness/widgets/owadversarialdebiasing.py b/orangecontrib/fairness/widgets/owadversarialdebiasing.py
index e3e9b31..fbf50a7 100644
--- a/orangecontrib/fairness/widgets/owadversarialdebiasing.py
+++ b/orangecontrib/fairness/widgets/owadversarialdebiasing.py
@@ -72,6 +72,8 @@ class OWAdversarialDebiasing(ConcurrentWidgetMixin, OWBaseLearner):
icon = "icons/adversarial_debiasing.svg"
priority = 30
+ resizing_enabled = True
+
class Inputs(OWBaseLearner.Inputs):
"""Inputs for the widgets, which are the same as for the super class (Data, Preprocessor)"""
@@ -112,7 +114,6 @@ class Information(OWBaseLearner.Information):
)
# We define the settings we want to use
- # TODO: Should i use context/domain settings?
hidden_layers_neurons = Setting(100)
number_of_epochs = Setting(50)
batch_size = Setting(128)
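Regarding the question removed above: a plain Setting stores one value that is reused for every dataset, while a ContextSetting is persisted per input domain. A minimal sketch of the distinction (the class and target_variable attribute are hypothetical; only number_of_epochs mirrors this widget):

```python
from Orange.widgets.settings import ContextSetting, DomainContextHandler, Setting

class ExampleSettings:
    # Plain Setting: a single persisted value, independent of the input data.
    number_of_epochs = Setting(50)
    # ContextSetting: persisted per domain via a context handler, suitable
    # for values that only make sense for a particular dataset, such as a
    # selected target variable.
    settingsHandler = DomainContextHandler()
    target_variable = ContextSetting(None)
```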
diff --git a/orangecontrib/fairness/widgets/owdatasetbias.py b/orangecontrib/fairness/widgets/owdatasetbias.py
index 3596b83..0fd7630 100644
--- a/orangecontrib/fairness/widgets/owdatasetbias.py
+++ b/orangecontrib/fairness/widgets/owdatasetbias.py
@@ -37,6 +37,6 @@ class OWDatasetBias(OWWidget):
     want_control_area = False
-    resizing_enabled = False
+    resizing_enabled = True
class Inputs:
"""Input for the widget - dataset."""
@@ -74,26 +75,25 @@ def set_data(self, data: Optional[Table]) -> None:
disparate_impact = dataset_metric.disparate_impact()
statistical_parity_difference = dataset_metric.statistical_parity_difference()
self.disparate_impact_label.setText(
- f"Disparate Impact (ideal = 1): {round(disparate_impact, 3)}"
+ f"Disparate Impact (ideal = 1): {round(disparate_impact, 3):.3f}"
)
         self.disparate_impact_label.setToolTip(
-            "Disparate Impact (DI): Measures the ratio of the ratios of favorable class "
-            "values for an unprivileged group to that of the privileged group. An ideal value "
-            "of 1.0 means the ratio of favorable class values is the same for both groups.\n"
+            "Disparate Impact (DI): The ratio of favorable outcome "
+            "proportions between the unprivileged and privileged groups. "
+            "A value of 1.0 means both groups have the same rate of favorable outcomes.\n"
             ""
-            "- DI < 1.0: The privileged group has a higher percentage of favorable class values.\n"
-            "- DI > 1.0: The privileged group has a lower percentage of favorable class values.\n"
+            "- DI < 1.0: The privileged group has a higher rate of favorable outcomes.\n"
+            "- DI > 1.0: The privileged group has a lower rate of favorable outcomes.\n"
             "\n"
         )
self.statistical_parity_difference_label.setText(
- f"Statistical Parity Difference (ideal = 0): {round(statistical_parity_difference, 3)}"
+ f"Statistical Parity Difference (ideal = 0): {round(statistical_parity_difference, 3):.3f}"
)
         self.statistical_parity_difference_label.setToolTip(
-            "Statistical Parity Difference (SPD): Measures the difference in ratios of "
-            "favorable class values between the unprivileged and the privileged groups. An "
-            "ideal value for this metric is 0.\n"
+            "Statistical Parity Difference (SPD): The difference in favorable "
+            "outcome proportions between groups. An ideal value is 0.0.\n"
             ""
-            "- SPD < 0: The privileged group has a higher percentage of favorable class values.\n"
-            "- SPD > 0: The privileged group has a lower percentage of favorable class values.\n"
+            "- SPD < 0: The privileged group has a higher rate of favorable outcomes.\n"
+            "- SPD > 0: The privileged group has a lower rate of favorable outcomes.\n"
             "\n"
         )
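A side note on the two label changes above: the .3f format spec rounds to three decimals on its own and keeps trailing zeros, so no separate round() call is needed. A quick illustration:

```python
value = 0.8  # hypothetical metric value
print(f"ideal = 1: {value:.3f}")        # -> "ideal = 1: 0.800" (fixed width)
print(f"ideal = 1: {round(value, 3)}")  # -> "ideal = 1: 0.8" (zeros dropped)
```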
diff --git a/orangecontrib/fairness/widgets/owreweighing.py b/orangecontrib/fairness/widgets/owreweighing.py
index bafd96f..ef927c2 100644
--- a/orangecontrib/fairness/widgets/owreweighing.py
+++ b/orangecontrib/fairness/widgets/owreweighing.py
@@ -41,7 +41,6 @@ def __call__(self, data):
data = self.model.transform(data)
return data.instance_weights
- # TODO: Check if this is ok
InheritEq = True
diff --git a/orangecontrib/fairness/widgets/utils.py b/orangecontrib/fairness/widgets/utils.py
index 72a6f93..6f144f4 100644
--- a/orangecontrib/fairness/widgets/utils.py
+++ b/orangecontrib/fairness/widgets/utils.py
@@ -309,7 +309,7 @@ def table_to_standard_dataset(data) -> None:
data = Impute()(data)
xdf, ydf, mdf = data.to_pandas_dfs()
- # Merge xdf and ydf TODO: Check if I need to merge mdf
+    # Merge xdf and ydf; mdf is left out because the meta attributes are not used here
# This dataframe consists of all the data, the categorical variables values are
# represented with the index of the value in domain[attribute].values
df = ydf.merge(xdf, left_index=True, right_index=True)
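For reference, the merge above is a plain index join: to_pandas_dfs() splits a Table into attribute, class, and meta dataframes that share one row index. A self-contained sketch with stand-in dataframes (column names hypothetical):

```python
import pandas as pd

# Stand-ins for the xdf/ydf that Orange's to_pandas_dfs() would return;
# because the frames share a row index, an index join reassembles the rows.
xdf = pd.DataFrame({"age": [25, 47, 32]})
ydf = pd.DataFrame({"income": [0, 1, 1]})
df = ydf.merge(xdf, left_index=True, right_index=True)
print(df)  # class column first, then the attributes, as in the code above
```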