Metric tooltips and widget resizing changes
ZanMervic committed Aug 19, 2024
1 parent f129f16 commit 6085d07
Showing 7 changed files with 28 additions and 36 deletions.
orangecontrib/fairness/evaluation/scoring.py: 13 additions & 19 deletions
@@ -99,8 +99,8 @@ class StatisticalParityDifference(FairnessScorer):

     name = "SPD"
     long_name = str(
-        "<p>Statistical Parity Difference (SPD): Measures the difference in ratios of "
-        "favorable outcomes. An ideal value is 0.0.</p>"
+        "<p>Statistical Parity Difference (SPD): The difference in favorable "
+        "outcome proportions between groups. An ideal value is 0.0.</p>"
         "<ul>"
         "<li>SPD &lt; 0: The privileged group has a higher rate of favorable outcomes.</li>"
         "<li>SPD &gt; 0: The privileged group has a lower rate of favorable outcomes.</li>"
@@ -118,9 +118,8 @@ class EqualOpportunityDifference(FairnessScorer):

     name = "EOD"
     long_name = str(
-        "<p>Equal Opportunity Difference (EOD): It measures the difference in "
-        "true positive rates. An ideal value is 0.0, indicating the difference "
-        "in true positive rates is the same for both groups.</p>"
+        "<p>Equal Opportunity Difference (EOD): The difference in true positive rates between "
+        "groups. An ideal value is 0.0, meaning both groups have the same true positive rate.</p>"
         "<ul>"
         "<li>EOD &lt; 0: The privileged group has a higher true positive rate.</li>"
         "<li>EOD &gt; 0: The privileged group has a lower true positive rate.</li>"
@@ -138,11 +137,9 @@ class AverageOddsDifference(FairnessScorer):

     name = "AOD"
     long_name = str(
-        "<p>Average Odds Difference (AOD): This metric calculates the average difference "
-        "between the true positive rates (correctly predicting a positive outcome) and false "
-        "positive rates (incorrectly predicting a positive outcome) for both the privileged "
-        "and unprivileged groups. A value of 0.0 indicates equal rates for both groups, "
-        "signifying fairness.</p>"
+        "<p>Average Odds Difference (AOD): The average of the differences in true "
+        "and false positive rates between privileged and unprivileged groups. "
+        "A value of 0.0 indicates equal rates for both groups.</p>"
         "<ul>"
         "<li>AOD &lt; 0: Indicates bias in favor of the privileged group.</li>"
         "<li>AOD &gt; 0: Indicates bias against the privileged group.</li>"
@@ -160,20 +157,17 @@ class DisparateImpact(FairnessScorer):

     name = "DI"
     long_name = str(
-        "<p>Disparate Impact (DI): The ratio of ratios of favorable outcomes for an unprivileged "
-        "group to that of the privileged group. An ideal value of 1.0 means the ratio is "
-        "the same for both groups.</p>"
+        "<p>Disparate Impact (DI) is the ratio of favorable outcome "
+        "proportions between an unprivileged and privileged group. "
+        "A value of 1.0 means the proportion is the same for both groups.</p>"
         "<ul>"
-        "<li>DI &lt; 1.0: The privileged group receives favorable outcomes at a higher rate "
-        "than the unprivileged group.</li>"
-        "<li>DI &gt; 1.0: The privileged group receives favorable outcomes at a lower rate "
-        "than the unprivileged group.</li>"
+        "<li>DI &lt; 1.0: The privileged group has a higher rate of favorable outcomes.</li>"
+        "<li>DI &gt; 1.0: The privileged group has a lower rate of favorable outcomes.</li>"
         "</ul>"
     )

     # TODO: When using randomize, models sometimes predict the same class for all instances
     # This can lead to division by zero in the Disparate Impact score
-    # (and untrue results for the other scores)
-    # What is the best way to handle this?
+    # and untrue results for the other scores.
     def metric(self, classification_metric):
         return classification_metric.disparate_impact()
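The division-by-zero risk in the comment above is inherent to DI being a ratio: a degenerate model that never predicts the favorable class for the privileged group zeroes the denominator. A sketch with an explicit guard (one possible handling, not the add-on's actual behavior):

    import numpy as np

    def disparate_impact(y_pred, privileged):
        # DI = P(favorable | unprivileged) / P(favorable | privileged)
        y_pred = np.asarray(y_pred)
        privileged = np.asarray(privileged, dtype=bool)
        rate_priv = y_pred[privileged].mean()
        if rate_priv == 0:
            return np.nan  # undefined instead of raising ZeroDivisionError
        return y_pred[~privileged].mean() / rate_priv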
orangecontrib/fairness/modeling/adversarial.py: 0 additions & 1 deletion
@@ -154,7 +154,6 @@ def _fit_model(self, data):

     # Fit storage and fit functions were modified to use a Table/Storage object
     # This is because it's the easiest way to get the domain, and meta attributes
-    # TODO: Should I use the X,Y,W format instead of the table format ? (Same for the model)
     def fit(self, data: Table) -> AdversarialDebiasingModel:
         (
             standard_dataset,
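The rationale in the kept comment, that a Table bundles the domain and meta attributes a bare X, y, W triple would lose, can be seen with any bundled Orange dataset (iris is used here purely as an illustration):

    from Orange.data import Table

    data = Table("iris")
    print(data.domain)            # attributes plus class_var in one object
    print(data.domain.class_var)  # not recoverable from plain X, y arrays
    print(data.metas.shape)       # meta columns travel with the table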
orangecontrib/fairness/modeling/postprocessing.py: 0 additions & 1 deletion
@@ -104,7 +104,6 @@ def incompatibility_reason(self, domain):

     # Fit storage and fit functions were modified to use a Table/Storage object
     # This is because it's the easiest way to get the domain, and meta attributes
-    # TODO: Should I use the X,Y,W format instead of the table format ? (Same for the model)
     def fit_storage(self, data):
         if isinstance(data, Table):
             self.fit(data)
orangecontrib/fairness/widgets/owadversarialdebiasing.py: 2 additions & 1 deletion
@@ -72,6 +72,8 @@ class OWAdversarialDebiasing(ConcurrentWidgetMixin, OWBaseLearner):
     icon = "icons/adversarial_debiasing.svg"
     priority = 30

+    resizing_enabled = True
+
     class Inputs(OWBaseLearner.Inputs):
         """Inputs for the widgets, which are the same as for the super class (Data, Preprocessor)"""

@@ -112,7 +114,6 @@ class Information(OWBaseLearner.Information):
     )

     # We define the settings we want to use
-    # TODO: Should i use context/domain settings?
     hidden_layers_neurons = Setting(100)
     number_of_epochs = Setting(50)
     batch_size = Setting(128)
orangecontrib/fairness/widgets/owdatasetbias.py: 12 additions & 12 deletions
@@ -37,6 +37,7 @@ class OWDatasetBias(OWWidget):

     want_control_area = False
     resizing_enabled = False
+    resizing_enabled = True

     class Inputs:
         """Input for the widget - dataset."""
@@ -74,26 +75,25 @@ def set_data(self, data: Optional[Table]) -> None:
         disparate_impact = dataset_metric.disparate_impact()
         statistical_parity_difference = dataset_metric.statistical_parity_difference()
         self.disparate_impact_label.setText(
-            f"Disparate Impact (ideal = 1): {round(disparate_impact, 3)}"
+            f"Disparate Impact (ideal = 1): {disparate_impact:.3f}"
         )
         self.disparate_impact_label.setToolTip(
-            "<p>Disparate Impact (DI): Measures the ratio of the ratios of favorable class "
-            "values for an unprivileged group to that of the privileged group. An ideal value "
-            "of 1.0 means the ratio of favorable class values is the same for both groups.</p>"
+            "<p>Disparate Impact (DI) is the ratio of favorable outcome "
+            "proportions between an unprivileged and privileged group. "
+            "A value of 1.0 means the proportion is the same for both groups.</p>"
             "<ul>"
-            "<li>DI &lt; 1.0: The privileged group has a higher percentage of favorable class values.</li>"
-            "<li>DI &gt; 1.0: The privileged group has a lower percentage of favorable class values.</li>"
+            "<li>DI &lt; 1.0: The privileged group has a higher rate of favorable outcomes.</li>"
+            "<li>DI &gt; 1.0: The privileged group has a lower rate of favorable outcomes.</li>"
             "</ul>"
         )
         self.statistical_parity_difference_label.setText(
-            f"Statistical Parity Difference (ideal = 0): {round(statistical_parity_difference, 3)}"
+            f"Statistical Parity Difference (ideal = 0): {statistical_parity_difference:.3f}"
         )
         self.statistical_parity_difference_label.setToolTip(
-            "<p>Statistical Parity Difference (SPD): Measures the difference in ratios of "
-            "favorable class values between the unprivileged and the privileged groups. An "
-            "ideal value for this metric is 0.</p>"
+            "<p>Statistical Parity Difference (SPD): The difference in favorable "
+            "outcome proportions between groups. An ideal value is 0.0.</p>"
             "<ul>"
-            "<li>SPD &lt; 0: The privileged group has a higher percentage of favorable class values.</li>"
-            "<li>SPD &gt; 0: The privileged group has a lower percentage of favorable class values.</li>"
+            "<li>SPD &lt; 0: The privileged group has a higher rate of favorable outcomes.</li>"
+            "<li>SPD &gt; 0: The privileged group has a lower rate of favorable outcomes.</li>"
             "</ul>"
         )
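One note on the setText changes: formatting with the :.3f spec alone is enough, since round() followed by :.3f re-rounds an already-rounded value, and round() by itself drops trailing zeros while :.3f keeps the label at a fixed three decimals. A quick check:

    x = 0.5
    print(round(x, 3))  # 0.5   (trailing zeros dropped)
    print(f"{x:.3f}")   # 0.500 (stable three-decimal label)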
orangecontrib/fairness/widgets/owreweighing.py: 0 additions & 1 deletion
@@ -41,7 +41,6 @@ def __call__(self, data):
         data = self.model.transform(data)
         return data.instance_weights

-    # TODO: Check if this is ok
     InheritEq = True


orangecontrib/fairness/widgets/utils.py: 1 addition & 1 deletion
@@ -309,7 +309,7 @@ def table_to_standard_dataset(data) -> None:
         data = Impute()(data)

     xdf, ydf, mdf = data.to_pandas_dfs()
-    # Merge xdf and ydf TODO: Check if I need to merge mdf
+    # Merge xdf and ydf (currently not merging mdf because it is not used)
     # This dataframe consists of all the data, the categorical variables values are
     # represented with the index of the value in domain[attribute].values
     df = ydf.merge(xdf, left_index=True, right_index=True)
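The merge the updated comment describes is an index-aligned pandas join; a toy illustration with stand-in frames (hypothetical columns, not the add-on's data):

    import pandas as pd

    xdf = pd.DataFrame({"age": [25, 40]})   # features
    ydf = pd.DataFrame({"income": [0, 1]})  # class column
    df = ydf.merge(xdf, left_index=True, right_index=True)
    print(df)  # class first, then features, row-aligned by index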
