Fix Event-deletion from the admin

Because of the introduction of on_delete=DO_NOTHING, Event deletion from the admin was broken.
This is the quick-and-dirty fix (no tests).
This commit is contained in:
Klaas van Schelven
2025-07-04 17:17:38 +02:00
parent 3e2bc290fd
commit cb3168133e
3 changed files with 98 additions and 1 deletions

View File

@@ -1,11 +1,17 @@
import json
from django.utils.html import escape, mark_safe
from django.contrib import admin
from django.views.decorators.csrf import csrf_protect
from django.utils.decorators import method_decorator
import json
from bugsink.transaction import immediate_atomic
from projects.admin import ProjectFilter
from .models import Event
csrf_protect_m = method_decorator(csrf_protect)
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
@@ -90,3 +96,28 @@ class EventAdmin(admin.ModelAdmin):
def on_site(self, obj):
    """Render a changelist link pointing at the event's public page."""
    url = escape(obj.get_absolute_url())
    return mark_safe('<a href="%s">View</a>' % url)
def get_deleted_objects(self, objs, request):
    """Feed the admin's delete-confirmation page without walking relations.

    Related objects are cleaned up asynchronously (via delete_deferred), so
    instead of collecting them here we show the events themselves plus a
    single placeholder entry. No extra permissions are required and nothing
    is reported as protected.
    """
    placeholder = "...all its related objects... (delayed)"
    to_delete = [*objs, placeholder]
    model_count = {Event: len(objs)}
    return to_delete, model_count, set(), []
def delete_queryset(self, request, queryset):
    """Bulk-delete admin action: schedule deferred deletion per event.

    NOTE: not the most efficient; it will do for a first version.
    """
    with immediate_atomic():
        for event in queryset:
            event.delete_deferred()
def delete_model(self, request, obj):
    """Single-object admin delete: schedule deferred deletion in our own transaction."""
    with immediate_atomic():
        obj.delete_deferred()
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
    """The superclass's delete_view, minus its transaction wrapper.

    We handle transactions ourselves (immediate_atomic in delete_model /
    delete_queryset), so the stock
    `with transaction.atomic(using=router.db_for_write(self.model)):`
    context manager is deliberately omitted here.
    """
    return self._delete_view(request, object_id, extra_context)

View File

@@ -8,12 +8,15 @@ from django.utils.functional import cached_property
from projects.models import Project
from compat.timestamp import parse_timestamp
from bugsink.transaction import delay_on_commit
from issues.utils import get_title_for_exception_type_and_value
from .retention import get_random_irrelevance
from .storage_registry import get_write_storage, get_storage
from .tasks import delete_event_deps
class Platform(models.TextChoices):
AS3 = "as3"
@@ -282,3 +285,13 @@ class Event(models.Model):
return list(
self.tags.all().select_related("value", "value__key").order_by("value__key__key")
)
def delete_deferred(self):
    """Schedule asynchronous deletion of this event and its related objects."""
    # NOTE: for such a small closure we don't bother with an .is_deleted field
    # (the idea being that the deletion will be relatively quick anyway). We
    # still need "something" though, since on_delete=DO_NOTHING is set
    # everywhere. The alternative would be the "full inline" approach, i.e.
    # deleting everything right in the request without any delay — but that
    # diverges even more from the Issue/Project approach, which would force a
    # "design decision needed" now. Once we have more `delete_deferred`
    # implementations we may have enough information to harmonize on (e.g.)
    # two approaches.
    delay_on_commit(delete_event_deps, str(self.project_id), str(self.id))

53
events/tasks.py Normal file
View File

@@ -0,0 +1,53 @@
from snappea.decorators import shared_task
from bugsink.utils import get_model_topography, delete_deps_with_budget
from bugsink.transaction import immediate_atomic, delay_on_commit
@shared_task
def delete_event_deps(project_id, event_id):
    """Delete everything that hangs off a single Event, then the Event itself.

    Works in budgeted chunks: when the per-task deletion budget is exhausted
    the task reschedules itself (via delay_on_commit), so no single task hogs
    the global write lock. Scheduled from Event.delete_deferred().

    Safe to run more than once: a rerun for an already-deleted event is a
    no-op (see the .first() guard below).
    """
    from .models import Event  # avoid circular import

    with immediate_atomic():
        # matches what we do in events/retention.py (and for which argumentation
        # exists); in practice I have seen _much_ faster deletion times (in the
        # order of .03s per task on my local laptop) when using a budget of 500,
        # _but_ it's not a given those were for "expensive objects" (e.g.
        # events); and I'd rather err on the side of caution (worst case we have
        # a bit of inefficiency; in any case this avoids hogging the global
        # write lock / timeouts).
        budget = 500
        num_deleted = 0

        # NOTE: for this delete_x_deps, we didn't bother optimizing the
        # topography graph (the dependency-graph of a single event is believed
        # to be small enough to not warrant further optimization).
        dep_graph = get_model_topography()

        for model_for_recursion, fk_name_for_recursion in dep_graph["events.Event"]:
            this_num_deleted = delete_deps_with_budget(
                project_id,
                model_for_recursion,
                fk_name_for_recursion,
                [event_id],
                budget - num_deleted,
                dep_graph,
                is_for_project=False,
            )
            num_deleted += this_num_deleted

            if num_deleted >= budget:
                # budget exhausted mid-graph: reschedule and pick up where we
                # left off (already-deleted deps simply won't match next time).
                delay_on_commit(delete_event_deps, project_id, event_id)
                return

        if budget - num_deleted <= 0:
            # no more budget for the self-delete. (defensive only: the in-loop
            # check above already returns whenever num_deleted >= budget, so
            # after the loop num_deleted < budget holds)
            delay_on_commit(delete_event_deps, project_id, event_id)
        else:
            # final step: delete the event itself. Using .filter(...).first()
            # rather than .get() keeps this task idempotent: if it is
            # scheduled or delivered twice for the same event, the second run
            # finds nothing left to do instead of raising DoesNotExist.
            event = Event.objects.filter(pk=event_id).first()
            if event is not None:
                issue = event.issue
                Event.objects.filter(pk=event_id).delete()

                # manual (outside of delete_deps_with_budget) b/c the
                # special-case in that function is (ATM) specific to project
                # (it was built around Issue-deletion initially, so Issue
                # outliving the event-deletion was not part of that
                # functionality). we might refactor this at some point.
                issue.stored_event_count -= 1
                issue.save(update_fields=["stored_event_count"])