Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

remove references to AlertGroup.is_archived and AlertGroup.unarchived_objects #2524

Merged
merged 9 commits into from
Jul 18, 2023
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Changed

- Deprecate `AlertGroup.is_archived` column. Column will be removed in a subsequent release. By @joeyorlando ([#2524](https://github.com/grafana/oncall/pull/2524)).
- Update Slack "invite" feature to use direct paging by @vadimkerr ([#2562](https://github.com/grafana/oncall/pull/2562))

## v1.3.14 (2023-07-17)
Expand Down
2 changes: 1 addition & 1 deletion engine/apps/alerts/admin.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ class AlertGroupAdmin(CustomModelAdmin):
list_filter = ("started_at",)

def get_queryset(self, request):
return AlertGroup.all_objects
return AlertGroup.objects


@admin.register(AlertGroupLogRecord)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -256,7 +256,7 @@ def start_escalation_if_needed(self, countdown=START_ESCALATION_DELAY, eta=None)
)
task_id = celery_uuid()

AlertGroup.all_objects.filter(pk=self.pk,).update(
AlertGroup.objects.filter(pk=self.pk,).update(
active_escalation_id=task_id,
is_escalation_finished=False,
raw_escalation_snapshot=raw_escalation_snapshot,
Expand Down
31 changes: 31 additions & 0 deletions engine/apps/alerts/migrations/0023_auto_20230718_0952.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# Generated by Django 3.2.20 on 2023-07-18 09:52
#
# Step 1 of removing AlertGroup.is_archived: the column is deprecated here
# (made nullable, matching the deprecate_field() wrapper on the model) and
# dropped from the composite index. The column itself is dropped in a
# subsequent release -- see the PR discussion about briefly degraded reads
# between dropping the old index and creating the new one.

from django.db import migrations, models


class Migration(migrations.Migration):

dependencies = [
('alerts', '0022_alter_alertgroup_manual_severity'),
]

operations = [
# Records the model-state change: the custom unarchived_objects manager
# was removed from AlertGroup (all_objects/unarchived_objects replaced
# by a single `objects` manager in the model).
migrations.AlterModelManagers(
name='alertgroup',
managers=[
],
),
# Drop the old composite index that still referenced is_archived.
migrations.RemoveIndex(
model_name='alertgroup',
name='alerts_aler_channel_ee84a7_idx',
),
# deprecate_field() requires the column to be nullable so existing rows
# can stop being written to it before the column is removed.
migrations.AlterField(
model_name='alertgroup',
name='is_archived',
field=models.BooleanField(default=False, null=True),
),
# Re-create the composite index without the is_archived column.
migrations.AddIndex(
model_name='alertgroup',
index=models.Index(fields=['channel_id', 'resolved', 'acknowledged', 'silenced', 'root_alert_group_id'], name='alerts_aler_channel_81aeec_idx'),
),
]
4 changes: 2 additions & 2 deletions engine/apps/alerts/models/alert.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ def create(
if channel_filter is None:
channel_filter = ChannelFilter.select_filter(alert_receive_channel, raw_request_data, force_route_id)

group, group_created = AlertGroup.all_objects.get_or_create_grouping(
group, group_created = AlertGroup.objects.get_or_create_grouping(
channel=alert_receive_channel,
channel_filter=channel_filter,
group_data=group_data,
Expand Down Expand Up @@ -134,7 +134,7 @@ def create(

if maintenance_uuid is not None:
try:
maintenance_incident = AlertGroup.all_objects.get(maintenance_uuid=maintenance_uuid)
maintenance_incident = AlertGroup.objects.get(maintenance_uuid=maintenance_uuid)
group.root_alert_group = maintenance_incident
group.save(update_fields=["root_alert_group"])
log_record_for_root_incident = maintenance_incident.log_records.create(
Expand Down
40 changes: 11 additions & 29 deletions engine/apps/alerts/models/alert_group.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ def generate_public_primary_key_for_alert_group():
new_public_primary_key = generate_public_primary_key(prefix)

failure_counter = 0
while AlertGroup.all_objects.filter(public_primary_key=new_public_primary_key).exists():
while AlertGroup.objects.filter(public_primary_key=new_public_primary_key).exists():
new_public_primary_key = increase_public_primary_key_length(
failure_counter=failure_counter, prefix=prefix, model_name="AlertGroup"
)
Expand Down Expand Up @@ -111,11 +111,6 @@ def get_or_create_grouping(self, channel, channel_filter, group_data):
raise


# Removed in this PR: queryset behind the deprecated
# AlertGroup.unarchived_objects manager. It transparently forced
# is_archived=False into every filter() call so archived groups were
# excluded from all lookups through that manager.
class UnarchivedAlertGroupQuerySet(models.QuerySet):
def filter(self, *args, **kwargs):
return super().filter(*args, **kwargs, is_archived=False)


class AlertGroupSlackRenderingMixin:
"""
Ideally this mixin should not exist. Instead of this instance of AlertGroupSlackRenderer should be created and used
Expand All @@ -140,8 +135,7 @@ def slack_templated_first_alert(self):
class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.Model):
log_records: "RelatedManager['AlertGroupLogRecord']"

all_objects = AlertGroupQuerySet.as_manager()
unarchived_objects = UnarchivedAlertGroupQuerySet.as_manager()
objects = AlertGroupQuerySet.as_manager()

(
NEW,
Expand Down Expand Up @@ -330,7 +324,7 @@ def status(self):
# NOTE: we should probably migrate this field to models.UUIDField as it's ONLY ever being
# set to the result of uuid.uuid1
last_unique_unacknowledge_process_id: UUID | None = models.CharField(max_length=100, null=True, default=None)
is_archived = models.BooleanField(default=False)
is_archived = deprecate_field(models.BooleanField(default=False))

wiped_at = models.DateTimeField(null=True, default=None)
wiped_by = models.ForeignKey(
Expand Down Expand Up @@ -414,9 +408,7 @@ class Meta:
"is_open_for_grouping",
]
indexes = [
models.Index(
fields=["channel_id", "resolved", "acknowledged", "silenced", "root_alert_group_id", "is_archived"]
),
models.Index(fields=["channel_id", "resolved", "acknowledged", "silenced", "root_alert_group_id"]),
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Come to think of it, we should first remove all references to `unarchived_objects`, and then remove this in the same PR that drops the `is_archived` column entirely.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

okay so it seems like I need to remove this column from the index in this PR:

File "/Users/joeyorlando/coding/grafana/oncall/engine/apps/alerts/models/__init__.py", line 2, in <module>
    from .alert_group import AlertGroup  # noqa: F401
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/joeyorlando/coding/grafana/oncall/engine/apps/alerts/models/alert_group.py", line 135, in <module>
    class AlertGroup(AlertGroupSlackRenderingMixin, EscalationSnapshotMixin, models.Model):
  File "/Users/joeyorlando/.pyenv/versions/3.11.3/envs/3.11.3-oncall/lib/python3.11/site-packages/django/db/models/base.py", line 320, in __new__
    new_class._prepare()
  File "/Users/joeyorlando/.pyenv/versions/3.11.3/envs/3.11.3-oncall/lib/python3.11/site-packages/django/db/models/base.py", line 372, in _prepare
    index.set_name_with_model(cls)
  File "/Users/joeyorlando/.pyenv/versions/3.11.3/envs/3.11.3-oncall/lib/python3.11/site-packages/django/db/models/indexes.py", line 151, in set_name_with_model
    column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders]
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/joeyorlando/.pyenv/versions/3.11.3/envs/3.11.3-oncall/lib/python3.11/site-packages/django/db/models/indexes.py", line 151, in <listcomp>
    column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders]
                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/joeyorlando/.pyenv/versions/3.11.3/envs/3.11.3-oncall/lib/python3.11/site-packages/django/db/models/options.py", line 599, in get_field
    raise FieldDoesNotExist(
django.core.exceptions.FieldDoesNotExist: AlertGroup has no field named 'is_archived'. The app cache isn't ready yet, so if this is an auto-created related field, it won't be available yet.

Copy link
Contributor Author

@joeyorlando joeyorlando Jul 18, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

hmm maybe a better approach here would be to briefly have two indices. First we would add the following in a separate prerequisite PR:

indexes = [
++    models.Index(fields=["channel_id", "resolved", "acknowledged", "silenced", "root_alert_group_id"]),
      models.Index(fields=["channel_id", "resolved", "acknowledged", "silenced", "root_alert_group_id", "is_archived"]),      
]

then once that has been released, merge this PR which drops the old index referencing is_archived. I don't think there should be any noticeable performance impact by briefly having two similar indices? But I'm more concerned about the brief period between when we drop the old index and create the new one. This could result in degraded performance on reads to the alert groups table.

However, I tested the CREATE INDEX (without is_archived field) DDL statement on a prod clone and it only took 4 minutes.

@Konstantinov-Innokentii wdyt?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Given that it's a four minute window, I'm going to merge this as is.

]

def __str__(self):
Expand Down Expand Up @@ -1185,7 +1177,7 @@ def _bulk_acknowledge(user: User, alert_groups_to_acknowledge: "QuerySet[AlertGr
"is_escalation_finished",
"response_time",
]
AlertGroup.all_objects.bulk_update(alert_groups_to_acknowledge_list, fields=fields_to_update, batch_size=100)
AlertGroup.objects.bulk_update(alert_groups_to_acknowledge_list, fields=fields_to_update, batch_size=100)

for alert_group in alert_groups_to_unresolve_before_acknowledge_list:
alert_group.log_records.create(
Expand Down Expand Up @@ -1226,9 +1218,7 @@ def bulk_acknowledge(user: User, alert_groups: "QuerySet[AlertGroup]") -> None:
# Find all dependent alert_groups to update them in one query
# convert qs to list to prevent changes by update
root_alert_group_pks = list(root_alert_groups_to_acknowledge.values_list("pk", flat=True))
dependent_alert_groups_to_acknowledge = AlertGroup.unarchived_objects.filter(
root_alert_group__pk__in=root_alert_group_pks
)
dependent_alert_groups_to_acknowledge = AlertGroup.objects.filter(root_alert_group__pk__in=root_alert_group_pks)
with transaction.atomic():
AlertGroup._bulk_acknowledge(user, root_alert_groups_to_acknowledge)
AlertGroup._bulk_acknowledge(user, dependent_alert_groups_to_acknowledge)
Expand Down Expand Up @@ -1273,7 +1263,7 @@ def _bulk_resolve(user: User, alert_groups_to_resolve: "QuerySet[AlertGroup]") -
"is_escalation_finished",
"response_time",
]
AlertGroup.all_objects.bulk_update(alert_groups_to_resolve_list, fields=fields_to_update, batch_size=100)
AlertGroup.objects.bulk_update(alert_groups_to_resolve_list, fields=fields_to_update, batch_size=100)

for alert_group in alert_groups_to_unsilence_before_resolve_list:
alert_group.log_records.create(
Expand Down Expand Up @@ -1315,7 +1305,7 @@ def bulk_resolve(user: User, alert_groups: "QuerySet[AlertGroup]") -> None:
)
# convert qs to list to prevent changes by update
root_alert_group_pks = list(root_alert_groups_to_resolve.values_list("pk", flat=True))
dependent_alert_groups_to_resolve = AlertGroup.all_objects.filter(root_alert_group__pk__in=root_alert_group_pks)
dependent_alert_groups_to_resolve = AlertGroup.objects.filter(root_alert_group__pk__in=root_alert_group_pks)
with transaction.atomic():
AlertGroup._bulk_resolve(user, root_alert_groups_to_resolve)
AlertGroup._bulk_resolve(user, dependent_alert_groups_to_resolve)
Expand Down Expand Up @@ -1455,15 +1445,15 @@ def bulk_restart(user: User, alert_groups: "QuerySet[AlertGroup]") -> None:
)
# convert qs to list to prevent changes by update
root_alert_group_pks = list(root_alert_groups_unack.values_list("pk", flat=True))
dependent_alert_groups_unack = AlertGroup.all_objects.filter(root_alert_group__pk__in=root_alert_group_pks)
dependent_alert_groups_unack = AlertGroup.objects.filter(root_alert_group__pk__in=root_alert_group_pks)
with transaction.atomic():
AlertGroup._bulk_restart_unack(user, root_alert_groups_unack)
AlertGroup._bulk_restart_unack(user, dependent_alert_groups_unack)

root_alert_groups_unresolve = alert_groups.filter(resolved=True, root_alert_group__isnull=True)
# convert qs to list to prevent changes by update
root_alert_group_pks = list(root_alert_groups_unresolve.values_list("pk", flat=True))
dependent_alert_groups_unresolve = AlertGroup.all_objects.filter(root_alert_group__pk__in=root_alert_group_pks)
dependent_alert_groups_unresolve = AlertGroup.objects.filter(root_alert_group__pk__in=root_alert_group_pks)
with transaction.atomic():
AlertGroup._bulk_restart_unresolve(user, root_alert_groups_unresolve)
AlertGroup._bulk_restart_unresolve(user, dependent_alert_groups_unresolve)
Expand Down Expand Up @@ -1538,7 +1528,7 @@ def _bulk_silence(user: User, alert_groups_to_silence: "QuerySet[AlertGroup]", s
"is_escalation_finished",
"response_time",
]
AlertGroup.all_objects.bulk_update(alert_groups_to_silence_list, fields=fields_to_update, batch_size=100)
AlertGroup.objects.bulk_update(alert_groups_to_silence_list, fields=fields_to_update, batch_size=100)

# create log records
for alert_group in alert_groups_to_unresolve_before_silence_list:
Expand Down Expand Up @@ -1725,12 +1715,6 @@ def un_silence(self):
]
)

# Removed in this PR: marks the alert group archived and detaches it
# from its root group (dependent groups must not stay linked to a root
# once archived). Persists only the two touched fields.
def archive(self):
if self.root_alert_group:
self.root_alert_group = None
self.is_archived = True
self.save(update_fields=["is_archived", "root_alert_group"])

@property
def long_verbose_name(self):
title = str_or_backup(self.slack_templated_first_alert.title, DEFAULT_BACKUP_TITLE)
Expand All @@ -1747,8 +1731,6 @@ def long_verbose_name_without_formatting(self):
def get_resolve_text(self, mention_user=False):
if self.resolved_by == AlertGroup.SOURCE:
return "Resolved by alert source"
elif self.resolved_by == AlertGroup.ARCHIVED:
return "Resolved because alert has been archived"
elif self.resolved_by == AlertGroup.LAST_STEP:
return "Resolved automatically"
elif self.resolved_by == AlertGroup.WIPED:
Expand Down
2 changes: 1 addition & 1 deletion engine/apps/alerts/models/maintainable_object.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ def start_maintenance(self, mode, maintenance_duration, user):
self.maintenance_started_at = _self.maintenance_started_at
self.maintenance_author = _self.maintenance_author
if mode == AlertReceiveChannel.MAINTENANCE:
group = AlertGroup.all_objects.create(
group = AlertGroup.objects.create(
distinction=uuid4(),
web_title_cache=f"Maintenance of {verbal} for {maintenance_duration}",
maintenance_uuid=maintenance_uuid,
Expand Down
13 changes: 3 additions & 10 deletions engine/apps/alerts/tasks/acknowledge_reminder.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,7 @@ def acknowledge_reminder_task(alert_group_pk, unacknowledge_process_id):
task_logger.info(f"Starting a reminder task for acknowledgement timeout with process id {unacknowledge_process_id}")
with transaction.atomic():
try:
alert_group = AlertGroup.unarchived_objects.filter(pk=alert_group_pk).select_for_update()[
0
] # Lock alert_group:
alert_group = AlertGroup.objects.filter(pk=alert_group_pk).select_for_update()[0] # Lock alert_group:
except IndexError:
return f"acknowledge_reminder_task: Alert group with pk {alert_group_pk} doesn't exist"

Expand Down Expand Up @@ -89,17 +87,12 @@ def unacknowledge_timeout_task(alert_group_pk, unacknowledge_process_id):
)
with transaction.atomic():
try:
alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).select_for_update()[0] # Lock alert_group:
alert_group = AlertGroup.objects.filter(pk=alert_group_pk).select_for_update()[0] # Lock alert_group:
except IndexError:
return f"unacknowledge_timeout_task: Alert group with pk {alert_group_pk} doesn't exist"

if unacknowledge_process_id == alert_group.last_unique_unacknowledge_process_id:
if (
not alert_group.resolved
and not alert_group.is_archived
and alert_group.acknowledged
and alert_group.is_root_alert_group
):
if not alert_group.resolved and alert_group.acknowledged and alert_group.is_root_alert_group:
if not alert_group.acknowledged_by_confirmed:
log_record = AlertGroupLogRecord(
type=AlertGroupLogRecord.TYPE_AUTO_UN_ACK,
Expand Down
6 changes: 3 additions & 3 deletions engine/apps/alerts/tasks/alert_group_web_title_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ def update_web_title_cache_for_alert_receive_channel(alert_receive_channel_pk):

countdown = 0
cursor = 0
queryset = AlertGroup.all_objects.filter(channel_id=alert_receive_channel_pk)
queryset = AlertGroup.objects.filter(channel_id=alert_receive_channel_pk)
ids = batch_ids(queryset, cursor)

while ids:
Expand Down Expand Up @@ -57,7 +57,7 @@ def update_web_title_cache(alert_receive_channel_pk, alert_group_pks):
task_logger.warning(f"AlertReceiveChannel {alert_receive_channel_pk} doesn't exist")
return

alert_groups = AlertGroup.all_objects.filter(pk__in=alert_group_pks).only("pk")
alert_groups = AlertGroup.objects.filter(pk__in=alert_group_pks).only("pk")

# get first alerts in 2 SQL queries
alerts_info = (
Expand All @@ -84,4 +84,4 @@ def update_web_title_cache(alert_receive_channel_pk, alert_group_pks):

alert_group.web_title_cache = web_title_cache

AlertGroup.all_objects.bulk_update(alert_groups, ["web_title_cache"])
AlertGroup.objects.bulk_update(alert_groups, ["web_title_cache"])
2 changes: 1 addition & 1 deletion engine/apps/alerts/tasks/call_ack_url.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
def call_ack_url(ack_url, alert_group_pk, channel, http_method="GET"):
AlertGroup = apps.get_model("alerts", "AlertGroup")
SlackMessage = apps.get_model("slack", "SlackMessage")
alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk)[0]
alert_group = AlertGroup.objects.filter(pk=alert_group_pk)[0]
is_successful, result_message = request_outgoing_webhook(ack_url, http_method)

if is_successful:
Expand Down
2 changes: 1 addition & 1 deletion engine/apps/alerts/tasks/check_escalation_finished.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ def check_escalation_finished_task() -> None:
now = timezone.now()
two_days_ago = now - datetime.timedelta(days=2)

alert_groups = AlertGroup.all_objects.using(get_random_readonly_database_key_if_present_otherwise_default()).filter(
alert_groups = AlertGroup.objects.using(get_random_readonly_database_key_if_present_otherwise_default()).filter(
~Q(silenced=True, silenced_until__isnull=True), # filter silenced forever alert_groups
# here we should query maintenance_uuid rather than joining on channel__integration
# and checking for something like ~Q(channel__integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE)
Expand Down
2 changes: 1 addition & 1 deletion engine/apps/alerts/tasks/custom_button_result.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def custom_button_result(custom_button_pk, alert_group_pk, user_pk=None, escalat
task_logger.info(f"Custom_button {custom_button_pk} for alert_group {alert_group_pk} does not exist")
return

alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk)[0]
alert_group = AlertGroup.objects.filter(pk=alert_group_pk)[0]
escalation_policy = EscalationPolicy.objects.filter(pk=escalation_policy_pk).first()
task_logger.debug(
f"Start getting data for request in custom_button_result task for alert_group {alert_group_pk}, "
Expand Down
2 changes: 1 addition & 1 deletion engine/apps/alerts/tasks/delete_alert_group.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
def delete_alert_group(alert_group_pk, user_pk):
AlertGroup = apps.get_model("alerts", "AlertGroup")
User = apps.get_model("user_management", "User")
alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).first()
alert_group = AlertGroup.objects.filter(pk=alert_group_pk).first()
if not alert_group:
logger.debug("Alert group not found, skipping delete_alert_group")
return
Expand Down
2 changes: 1 addition & 1 deletion engine/apps/alerts/tasks/distribute_alert.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def distribute_alert(alert_id):
send_alert_create_signal.apply_async((alert_id,))
# If it's the first alert, let's launch the escalation!
if alert.is_the_first_alert_in_group:
alert_group = AlertGroup.all_objects.filter(pk=alert.group_id).get()
alert_group = AlertGroup.objects.filter(pk=alert.group_id).get()
alert_group.start_escalation_if_needed(countdown=TASK_DELAY_SECONDS)
alert_group_escalation_snapshot_built.send(sender=distribute_alert, alert_group=alert_group)

Expand Down
8 changes: 1 addition & 7 deletions engine/apps/alerts/tasks/escalate_alert_group.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def escalate_alert_group(alert_group_pk):

with transaction.atomic():
try:
alert_group = AlertGroup.all_objects.filter(pk=alert_group_pk).select_for_update()[0] # Lock alert_group:
alert_group = AlertGroup.objects.filter(pk=alert_group_pk).select_for_update()[0] # Lock alert_group:
except IndexError:
return f"Alert group with pk {alert_group_pk} doesn't exist"

Expand All @@ -49,12 +49,6 @@ def escalate_alert_group(alert_group_pk):
# TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
return "Alert is dependent on another. No need to activate escalation."

if alert_group.is_archived:
# TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
return "Escalation stopped. Reason: incident is archived. Escalation id: {}".format(
alert_group.active_escalation_id
)

if alert_group.wiped_at is not None:
# TODO: consistent_is_escalation_finished remove this check for is_escalation_finished
return "Alert is wiped. No need to activate escalation."
Expand Down
2 changes: 1 addition & 1 deletion engine/apps/alerts/tasks/invite_user_to_join_incident.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def invite_user_to_join_incident(invitation_pk):
except IndexError:
return f"invite_user_to_join_incident: Invitation with pk {invitation_pk} doesn't exist"

if not invitation.is_active or invitation.alert_group.is_archived:
if not invitation.is_active:
return None
if invitation.attempts_left <= 0 or invitation.alert_group.resolved:
invitation.is_active = False
Expand Down
2 changes: 1 addition & 1 deletion engine/apps/alerts/tasks/maintenance.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ def disable_maintenance(*args, **kwargs):
write_maintenance_insight_log(object_under_maintenance, user, MaintenanceEvent.FINISHED)
if object_under_maintenance.maintenance_mode == object_under_maintenance.MAINTENANCE:
mode_verbal = "Maintenance"
maintenance_incident = AlertGroup.all_objects.get(
maintenance_incident = AlertGroup.objects.get(
maintenance_uuid=object_under_maintenance.maintenance_uuid
)
transaction.on_commit(maintenance_incident.resolve_by_disable_maintenance)
Expand Down
2 changes: 1 addition & 1 deletion engine/apps/alerts/tasks/notify_all.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ def notify_all_task(alert_group_pk, escalation_policy_snapshot_order=None):
EscalationPolicy = apps.get_model("alerts", "EscalationPolicy")
AlertGroup = apps.get_model("alerts", "AlertGroup")

alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
alert_group = AlertGroup.objects.get(pk=alert_group_pk)

# check alert group state before notifying all users in the channel
if alert_group.resolved or alert_group.acknowledged or alert_group.silenced:
Expand Down
2 changes: 1 addition & 1 deletion engine/apps/alerts/tasks/notify_group.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ def notify_group_task(alert_group_pk, escalation_policy_snapshot_order=None):
AlertGroup = apps.get_model("alerts", "AlertGroup")
EscalationDeliveryStep = scenario_step.ScenarioStep.get_step("escalation_delivery", "EscalationDeliveryStep")

alert_group = AlertGroup.all_objects.get(pk=alert_group_pk)
alert_group = AlertGroup.objects.get(pk=alert_group_pk)
# check alert group state before notifying all users in the group
if alert_group.resolved or alert_group.acknowledged or alert_group.silenced:
task_logger.info(f"alert_group {alert_group.pk} was resolved, acked or silenced. No need to notify group")
Expand Down
Loading