Skip to content

Commit

Permalink
Create "draw strength by rank" metric annotator
Browse files Browse the repository at this point in the history
This commit adds a new team metric that takes the opponent teams' ranks
as a sum, for use in WSDC draw pull-up rules.

As this metric relies on the rankings, it must be computed after the
standings have been sorted by the precedence metrics, and it cannot
itself be used for ranking. To support this, metrics can now be
excluded from the ranking-metric choice lists in the options, and the
check preventing metrics from being added after sorting has been removed.
  • Loading branch information
tienne-B committed Dec 27, 2024
1 parent 2265ea3 commit 9d3e13b
Show file tree
Hide file tree
Showing 4 changed files with 50 additions and 14 deletions.
4 changes: 2 additions & 2 deletions tabbycat/options/preferences.py
Original file line number Diff line number Diff line change
Expand Up @@ -666,7 +666,7 @@ class TeamStandingsExtraMetrics(MultiValueChoicePreference):
verbose_name = _("Team standings extra metrics")
section = standings
name = 'team_standings_extra_metrics'
choices = TeamStandingsGenerator.get_metric_choices(ranked_only=False)
choices = TeamStandingsGenerator.get_metric_choices(ranked_only=False, for_extra=True)
nfields = 5
allow_empty = True
default = []
Expand Down Expand Up @@ -694,7 +694,7 @@ class SpeakerStandingsExtraMetrics(MultiValueChoicePreference):
verbose_name = _("Speaker standings extra metrics")
section = standings
name = 'speaker_standings_extra_metrics'
choices = SpeakerStandingsGenerator.get_metric_choices(ranked_only=False)
choices = SpeakerStandingsGenerator.get_metric_choices(ranked_only=False, for_extra=True)
nfields = 5
allow_empty = True
default = ['stdev', 'count']
Expand Down
26 changes: 14 additions & 12 deletions tabbycat/standings/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,6 @@ def record_added_ranking(self, key, name, abbr, icon):
self._ranking_specs.append((key, name, abbr, icon))

def add_metric(self, instance, key, value):
    """Record the metric *value* under *key* on the standing for *instance*.

    Note: this deliberately does not forbid adding metrics after the
    standings have been sorted — some metrics (e.g. draw strength by
    rank) depend on the rankings and are annotated post-sort.
    """
    self.get_standing(instance).add_metric(key, value)

def add_ranking(self, instance, key, value):
Expand Down Expand Up @@ -347,7 +346,8 @@ def generate(self, queryset, round=None):
return self.generate_from_queryset(queryset_for_metrics, standings, round)

# Otherwise (not all precedence metrics are SQL-based), need to sort Standings
self._annotate_metrics(queryset_for_metrics, self.non_queryset_annotators, standings, round)
non_qs_ranked_annotators = [annotator for annotator in self.non_queryset_annotators if annotator.key in self.precedence]
self._annotate_metrics(queryset_for_metrics, non_qs_ranked_annotators, standings, round)

standings.sort(self.precedence, self._tiebreak_func)

Expand All @@ -356,6 +356,10 @@ def generate(self, queryset, round=None):
annotator.run(standings)
logger.debug("Ranking annotators done.")

# Do Draw Strength by Rank annotator after ranking standings
non_qs_extra_annotators = [annotator for annotator in self.non_queryset_annotators if annotator.key not in self.precedence]
self._annotate_metrics(queryset_for_metrics, non_qs_extra_annotators, standings, round)

return standings

def generate_from_queryset(self, queryset, standings, round):
Expand All @@ -365,8 +369,6 @@ def generate_from_queryset(self, queryset, standings, round):
for annotator in self.ranking_annotators:
queryset = annotator.get_annotated_queryset(queryset, self.queryset_metric_annotators, *self.options["rank_filter"])

self._annotate_metrics(queryset, self.non_queryset_annotators, standings, round)

# Can use window functions to rank standings if all are from queryset
for annotator in self.ranking_annotators:
logger.debug("Running ranking queryset annotator: %s", annotator.name)
Expand All @@ -384,6 +386,10 @@ def generate_from_queryset(self, queryset, standings, round):
queryset = queryset.order_by(*ordering_keys)

standings.sort_from_rankings(tiebreak_func)

# Add metrics that aren't used for ranking (done afterwards for "draw strength by rank")
self._annotate_metrics(queryset, self.non_queryset_annotators, standings, round)

return standings

@staticmethod
Expand Down Expand Up @@ -465,17 +471,13 @@ def _tiebreak_func(self):
return self.TIEBREAK_FUNCTIONS[self.options["tiebreak"]]

@classmethod
def get_metric_choices(cls, ranked_only=True, for_extra=False):
    """Return a list of ``(key, display_name)`` choices for metric
    annotators, sorted alphabetically by display name.

    Args:
        ranked_only: if False, annotators flagged ``ranked_only`` are
            excluded, since they only make sense where standings are ranked.
        for_extra: if True, also include annotators flagged ``extra_only``
            (metrics that cannot themselves be used for ranking, e.g.
            draw strength by rank).
    """
    choices = []
    for key, annotator in cls.metric_annotator_classes.items():
        if not ranked_only and annotator.ranked_only:
            continue  # annotator requires a ranked context
        if not annotator.listed:
            continue  # not exposed as a user-selectable option
        if not for_extra and annotator.extra_only:
            continue  # only available as a non-ranking "extra" metric
        # Prefer an explicit choice_name where the annotator defines one.
        choice_name = getattr(annotator, 'choice_name', annotator.name)
        choices.append((key, choice_name.capitalize()))
    choices.sort(key=lambda choice: choice[1])
    return choices
1 change: 1 addition & 0 deletions tabbycat/standings/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ class BaseMetricAnnotator:
abbr = None # must be set by subclasses
icon = None
ranked_only = False
extra_only = False
repeatable = False
listed = True
ascending = False # if True, this metric is sorted in ascending order, not descending
Expand Down
33 changes: 33 additions & 0 deletions tabbycat/standings/teams.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,38 @@ def annotate(self, queryset, standings, round=None):
standings.add_metric(team, self.key, draw_strength)


class DrawStrengthByRankMetricAnnotator(BaseMetricAnnotator):
    """Annotates each team with the sum of its opponents' ranks
    ("draw strength by rank"), for use in WSDC draw pull-up rules.

    Because this metric depends on the rankings, it must run after the
    standings have been sorted, and cannot itself be used as a ranking
    metric (hence ``extra_only``).
    """
    key = "draw_strength_rank"
    name = _("draw strength by rank")
    abbr = _("DSR")

    ascending = True  # a lower sum of opponent ranks indicates a harder draw
    extra_only = True  # Cannot rank based on ranking

    def annotate(self, queryset, standings, round=None):
        """Add the sum of opponents' ranks for every team in *queryset*.

        Reads the 'rank' ranking from *standings*, so standings must
        already have been ranked. Opponents are taken from preliminary
        rounds only, optionally capped at *round* (inclusive by seq).
        """
        if not queryset.exists():
            return

        logger.info("Running opponents query for rank draw strength:")

        # Opponents are the other teams in the same debates, restricted
        # to preliminary rounds (and to rounds up to `round`, if given).
        opponents_filter = ~Q(debateteam__debate__debateteam__team_id=F('id'))
        opponents_filter &= Q(debateteam__debate__round__stage=Round.Stage.PRELIMINARY)
        if round is not None:
            opponents_filter &= Q(debateteam__debate__round__seq__lte=round.seq)
        opponents_annotation = ArrayAgg('debateteam__debate__debateteam__team_id',
            filter=opponents_filter)
        logger.info("Opponents annotation: %s", str(opponents_annotation))

        # Build both lookup tables in a single pass so the annotated
        # queryset is only evaluated once.
        opponents_by_team = {}
        teams_by_id = {}
        for team in queryset.model.objects.annotate(opponent_ids=opponents_annotation):
            opponents_by_team[team.id] = team.opponent_ids or []  # ArrayAgg yields None when nothing matched
            teams_by_id[team.id] = team

        for team in queryset:
            ranks = []
            for opponent_id in opponents_by_team[team.id]:
                # Opponents absent from the standings (e.g. excluded by
                # rank filters) have no rank; skip them rather than crash.
                info = standings.infos.get(teams_by_id[opponent_id])
                if info is None:
                    continue
                rank = info.rankings['rank'][0]
                if rank is not None:
                    ranks.append(rank)
            standings.add_metric(team, self.key, sum(ranks))


class DrawStrengthByWinsMetricAnnotator(BaseDrawStrengthMetricAnnotator):
"""Metric annotator for draw strength."""
key = "draw_strength" # keep this key for backwards compatibility
Expand Down Expand Up @@ -407,6 +439,7 @@ class TeamStandingsGenerator(BaseStandingsGenerator):
"speaks_stddev" : SpeakerScoreStandardDeviationMetricAnnotator,
"draw_strength" : DrawStrengthByWinsMetricAnnotator,
"draw_strength_speaks": DrawStrengthBySpeakerScoreMetricAnnotator,
"draw_strength_rank" : DrawStrengthByRankMetricAnnotator,
"margin_sum" : SumMarginMetricAnnotator,
"margin_avg" : AverageMarginMetricAnnotator,
"npullups" : TeamPullupsMetricAnnotator,
Expand Down

0 comments on commit 9d3e13b

Please sign in to comment.