Merge branch 'main' into feature/add_dark_theme

This commit is contained in:
Klaas van Schelven
2025-07-07 11:08:00 +02:00
committed by GitHub
59 changed files with 1727 additions and 137 deletions

View File

@@ -1,5 +1,14 @@
# Changes
## 1.6.3 (27 June 2025)
* fix `make_consistent` on MySQL (Fix #132)
* Tags in `event_data` can be lists; deal with that (Fix #130)
## 1.6.2 (19 June 2025)
* Too many quotes in local-vars display (Fix #119)
## 1.6.1 (11 June 2025)
* Remove hard-coded Slack `webhook_url` from the "test this connector" loop.

View File

@@ -1,16 +1,12 @@
# Bugsink: Self-hosted Error Tracking
[Bugsink](https://www.bugsink.com/) offers [Error Tracking](https://www.bugsink.com/error-tracking/) for your applications with full control
through self-hosting.
* [Error Tracking](https://www.bugsink.com/error-tracking/)
* [Built to self-host](https://www.bugsink.com/built-to-self-host/)
* [Sentry-SDK compatible](https://www.bugsink.com/connect-any-application/)
* [Scalable and reliable](https://www.bugsink.com/scalable-and-reliable/)
### Screenshot
This is what you'll get:
![Screenshot](https://www.bugsink.com/static/images/JsonSchemaDefinitionException.5e02c1544273.png)
@@ -22,7 +18,7 @@ The **quickest way to evaluate Bugsink** is to spin up a throw-away instance usi
docker pull bugsink/bugsink:latest
docker run \
-e SECRET_KEY={{ random_secret }} \
-e SECRET_KEY=PUT_AN_ACTUAL_RANDOM_SECRET_HERE_OF_AT_LEAST_50_CHARS \
-e CREATE_SUPERUSER=admin:admin \
-e PORT=8000 \
-p 8000:8000 \
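One way to generate a suitable value (a sketch, assuming Python is available; any random string of 50+ characters works):

python -c "import secrets; print(secrets.token_urlsafe(50))"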

View File

@@ -0,0 +1,23 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
# Django came up with 0014, whatever the reason, I'm sure that 0013 is at least required (as per comments there)
("projects", "0014_alter_projectmembership_project"),
("alerts", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="messagingserviceconfig",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="service_configs",
to="projects.project",
),
),
]

View File

@@ -5,7 +5,7 @@ from .service_backends.slack import SlackBackend
class MessagingServiceConfig(models.Model):
project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name="service_configs")
project = models.ForeignKey(Project, on_delete=models.DO_NOTHING, related_name="service_configs")
display_name = models.CharField(max_length=100, blank=False,
help_text='For display in the UI, e.g. "#general on company Slack"')

View File

@@ -38,7 +38,7 @@ def check_event_storage_properly_configured(app_configs, **kwargs):
@register("bsmain")
def check_base_url_is_url(app_configs, **kwargs):
try:
parts = urllib.parse.urlsplit(get_settings().BASE_URL)
parts = urllib.parse.urlsplit(str(get_settings().BASE_URL))
except ValueError as e:
return [Warning(
str(e),
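The added str() matters because urlsplit only handles str/bytes; a non-string BASE_URL (e.g. a typed or lazy settings value) raises an error that the except ValueError clause would not catch. A minimal sketch of the failure mode, using a hypothetical stand-in class:

import urllib.parse

class BaseUrl:  # hypothetical stand-in for a non-str settings value
    def __str__(self):
        return "https://bugsink.example"

# urllib.parse.urlsplit(BaseUrl())   # raises (AttributeError in current CPython), not ValueError
urllib.parse.urlsplit(str(BaseUrl()))  # fine: SplitResult(scheme='https', ...)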

View File

@@ -99,33 +99,39 @@ def get_snappea_warnings():
def useful_settings_processor(request):
# the name is a misnomer, but "who cares".
def get_system_warnings():
# implemented as an inner function to avoid calculating this when it's not actually needed. (i.e. anything
# except "the UI", e.g. ingest, API, admin, 404s). Actual 'cache' behavior is not needed, because this is called
# at most once per request (at the top of the template)
installation = Installation.objects.get()
system_warnings = []
# This list does not include e.g. the dummy.EmailBackend; intentional, because setting _that_ is always an
# indication of intentional "shut up I don't want to send emails" (and we don't want to warn about that). (as
# opposed to QuietConsoleEmailBackend, which is the default for the Docker "no EMAIL_HOST set" situation)
if settings.EMAIL_BACKEND in [
'bugsink.email_backends.QuietConsoleEmailBackend'] and not installation.silence_email_system_warning:
if getattr(request, "user", AnonymousUser()).is_superuser:
ignore_url = reverse("silence_email_system_warning")
else:
# not a superuser, so can't silence the warning. I'm applying some heuristics here;
# * superusers (and only those) will be able to deal with this (have access to EMAIL_BACKEND)
# * better to still show (though not silenceable) the message to non-superusers.
# this will not always be so, but it's a good start.
ignore_url = None
system_warnings.append(SystemWarning(EMAIL_BACKEND_WARNING, ignore_url))
return system_warnings + get_snappea_warnings()
return {
# Note: no way to actually set the license key yet, so nagging always happens for now.
'site_title': get_settings().SITE_TITLE,
'registration_enabled': get_settings().USER_REGISTRATION == CB_ANYBODY,
'app_settings': get_settings(),
'system_warnings': system_warnings + get_snappea_warnings(),
'system_warnings': get_system_warnings,
}
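The payoff of passing the function itself rather than its result: Django's template engine calls zero-argument callables during variable resolution, so the Installation query only runs for templates that actually render the variable. A standalone illustration with hypothetical names:

import django
from django.conf import settings

settings.configure(TEMPLATES=[{"BACKEND": "django.template.backends.django.DjangoTemplates"}])
django.setup()

from django.template import Context, Template

def expensive_warnings():
    print("computed")  # stand-in for the Installation query above
    return ["warning text"]

t = Template("{% if show %}{{ warnings }}{% endif %}")
t.render(Context({"show": False, "warnings": expensive_warnings}))  # never resolved, never called
t.render(Context({"show": True, "warnings": expensive_warnings}))   # prints "computed"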

View File

@@ -39,7 +39,7 @@ def issue_membership_required(function):
if "issue_pk" not in kwargs:
raise TypeError("issue_pk must be passed as a keyword argument")
issue_pk = kwargs.pop("issue_pk")
issue = get_object_or_404(Issue, pk=issue_pk)
issue = get_object_or_404(Issue, pk=issue_pk, is_deleted=False)
kwargs["issue"] = issue
if request.user.is_superuser:
return function(request, *args, **kwargs)

View File

@@ -91,15 +91,13 @@ SNAPPEA = {
"NUM_WORKERS": 1,
}
POSTMARK_API_KEY = os.getenv('POSTMARK_API_KEY')
EMAIL_HOST = 'smtp.postmarkapp.com'
EMAIL_HOST_USER = POSTMARK_API_KEY
EMAIL_HOST_PASSWORD = POSTMARK_API_KEY
EMAIL_HOST = os.getenv("EMAIL_HOST")
EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
SERVER_EMAIL = DEFAULT_FROM_EMAIL = 'Klaas van Schelven <klaas@vanschelven.com>'
SERVER_EMAIL = DEFAULT_FROM_EMAIL = 'Klaas van Schelven <klaas@bugsink.com>'
BUGSINK = {

View File

@@ -8,6 +8,8 @@ import threading
from django.db import transaction as django_db_transaction
from django.db import DEFAULT_DB_ALIAS
from snappea.settings import get_settings as get_snappea_settings
performance_logger = logging.getLogger("bugsink.performance.db")
local_storage = threading.local()
@@ -153,7 +155,7 @@ class ImmediateAtomic(SuperDurableAtomic):
connection = django_db_transaction.get_connection(self.using)
if hasattr(connection, "_start_transaction_under_autocommit"):
connection._start_transaction_under_autocommit_original = connection._start_transaction_under_autocommit
self._start_transaction_under_autocommit_original = connection._start_transaction_under_autocommit
connection._start_transaction_under_autocommit = types.MethodType(
_start_transaction_under_autocommit_patched, connection)
@@ -183,9 +185,9 @@ class ImmediateAtomic(SuperDurableAtomic):
performance_logger.info(f"{took * 1000:6.2f}ms IMMEDIATE transaction{using_clause}")
connection = django_db_transaction.get_connection(self.using)
if hasattr(connection, "_start_transaction_under_autocommit"):
connection._start_transaction_under_autocommit = connection._start_transaction_under_autocommit_original
del connection._start_transaction_under_autocommit_original
if hasattr(self, "_start_transaction_under_autocommit_original"):
connection._start_transaction_under_autocommit = self._start_transaction_under_autocommit_original
del self._start_transaction_under_autocommit_original
@contextmanager
@@ -206,10 +208,21 @@ def immediate_atomic(using=None, savepoint=True, durable=True):
else:
immediate_atomic = ImmediateAtomic(using, savepoint, durable)
# https://stackoverflow.com/a/45681273/339144 provides some context on nesting context managers; and how to proceed
# if you want to do this with an arbitrary number of context managers.
with SemaphoreContext(using), immediate_atomic:
yield
if get_snappea_settings().TASK_ALWAYS_EAGER:
# In ALWAYS_EAGER mode we cannot use SemaphoreContext as the outermost context, because any delay_on_commit
# tasks that are triggered on __exit__ of the (in that case, inner) immediate_atomic, when themselves initiating
# a new task-with-transaction, will not be able to acquire the semaphore (it's not been released yet).
# Fundamentally the solution would be to push the "on commit" logic onto the outermost context, but that seems
# fragile (monkeypatching/heavy overriding) and since the whole SemaphoreContext is only needed as an extra
# guard against WAL growth (not something we care about in the non-production setup), we just simplify for that
# case.
with immediate_atomic:
yield
else:
# https://stackoverflow.com/a/45681273/339144 provides some context on nesting context managers; and how to
# proceed if you want to do this with an arbitrary number of context managers.
with SemaphoreContext(using), immediate_atomic:
yield
def delay_on_commit(function, *args, **kwargs):

View File

@@ -1,7 +1,10 @@
from collections import defaultdict
from urllib.parse import urlparse
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.apps import apps
from django.db.models import ForeignKey, F
from .version import version
@@ -161,3 +164,162 @@ def eat_your_own_dogfood(sentry_dsn, **kwargs):
sentry_sdk.init(
**default_kwargs,
)
def get_model_topography():
"""
Returns a dependency graph mapping:
referenced_model_key -> [
(referrer_model_class, fk_name),
...
]
"""
dep_graph = defaultdict(list)
for model in apps.get_models():
for field in model._meta.get_fields(include_hidden=True):
if isinstance(field, ForeignKey):
referenced_model = field.related_model
referenced_key = f"{referenced_model._meta.app_label}.{referenced_model.__name__}"
dep_graph[referenced_key].append((model, field.name))
return dep_graph
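For example (a hypothetical probe against this codebase's models), listing everything that refers to Issue:

dep_graph = get_model_topography()
for model, fk_name in dep_graph["issues.Issue"]:
    print(model._meta.label, fk_name)
# expected to include e.g. ("issues.Grouping", "issue") and ("issues.TurningPoint", "issue")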
def fields_for_prune_orphans(model):
if model.__name__ == "IssueTag":
return ("value_id",)
return ()
def prune_orphans(model, d_ids_to_check):
"""For some model, does dangling-model-cleanup.
In a sense the opposite of delete_deps; delete_deps takes care of deleting the recursive closure of things that point
to some root. The present function cleans up things that are being pointed to (and, after some other thing is
deleted, potentially are no longer being pointed to, hence 'orphaned').
This is the hardcoded edition (IssueTag only); we _could_ try to think about doing this generically based on the
dependency graph, but it's quite questionable whether a combination of generic & performant is easy to arrive at and
worth it.
pruning of TagValue is done "inline" (as opposed to using a GC-like vacuum "later") because, whatever the exact
performance trade-offs may be, the following holds true:
1. the inline version is easier to reason about, it "just happens ASAP", and in the context of a given issue;
vacuum-based has to take into consideration the full DB including non-orphaned values.
2. repeated work is somewhat minimized b/c of the IssueTag/EventTag relationship as described below.
"""
from tags.models import TagValue, IssueTag # avoid circular import
if model.__name__ != "IssueTag":
return # we only prune IssueTag orphans
ids_to_check = [d["value_id"] for d in d_ids_to_check]
# used_in_event check is not needed, because non-existence of IssueTag always implies non-existence of EventTag,
# since [1] EventTag creation implies IssueTag creation and [2] in the cleanup code EventTag is deleted first.
used_in_issue = set(
IssueTag.objects.filter(value_id__in=ids_to_check).values_list('value_id', flat=True)
)
unused = [pk for pk in ids_to_check if pk not in used_in_issue]
if unused:
TagValue.objects.filter(id__in=unused).delete()
# The principled approach would be to clean up TagKeys as well at this point, but in practice there will be orders
# of magnitude fewer TagKey objects, and they are much less likely to become dangling, so the GC-like algo of "just
# vacuuming once in a while" is a much better fit for that.
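Such a GC-style pass for TagKey could look roughly like this (a hypothetical sketch; the model and FK names are assumptions, and the real codebase has a vacuum_tagvalues task, exercised in the tests further down):

from tags.models import TagKey, TagValue  # assumed names

used_key_ids = TagValue.objects.values_list("key_id", flat=True).distinct()
TagKey.objects.exclude(id__in=used_key_ids).delete()  # drop keys no value points to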
def do_pre_delete(project_id, model, pks_to_delete, is_for_project):
"More model-specific cleanup, if needed; only for Event model at the moment."
if model.__name__ != "Event":
return # we only do more cleanup for Event
from projects.models import Project
from events.models import Event
from events.retention import cleanup_events_on_storage
cleanup_events_on_storage(
Event.objects.filter(pk__in=pks_to_delete).exclude(storage_backend=None)
.values_list("id", "storage_backend")
)
if is_for_project:
# no need to update the stored_event_count for the project, because the project is being deleted
return
# Update project stored_event_count to reflect the deletion of the events. note: alternatively, we could do this
# on issue-delete (issue.stored_event_count is known too); potato, potato though.
# note: don't bother to do the same thing for Issue.stored_event_count, since we're in the process of deleting Issue
Project.objects.filter(id=project_id).update(stored_event_count=F('stored_event_count') - len(pks_to_delete))
def delete_deps_with_budget(project_id, referring_model, fk_name, referred_ids, budget, dep_graph, is_for_project):
r"""
Deletes all objects of type referring_model that refer to any of the referred_ids via fk_name, and does so
recursively (i.e. if there are further dependencies, it will delete those as well).
Returns the number of deleted objects.
       Caller                    This Func
         |                           |
         V                           V
   <unspecified>              referring_model
         ^                          /
          \--------fk_name---------
   referred_ids               relevant_ids    (deduced using a query)
"""
num_deleted = 0
# Fetch ids of referring objects and their referred ids. Note that an index of fk_name can be assumed to exist,
# because fk_name is a ForeignKey field, and Django automatically creates an index for ForeignKey fields unless
# instructed otherwise: https://github.com/django/django/blob/7feafd79a481/django/db/models/fields/related.py#L1025
relevant_ids = list(
referring_model.objects.filter(**{f"{fk_name}__in": referred_ids}).order_by(f"{fk_name}_id", 'pk').values(
*(('pk',) + fields_for_prune_orphans(referring_model))
)[:budget]
)
if not relevant_ids:
# we didn't find any referring objects. optimization: skip any recursion and referring_model.delete()
return 0
# The recursing bit:
for_recursion = dep_graph.get(f"{referring_model._meta.app_label}.{referring_model.__name__}", [])
for model_for_recursion, fk_name_for_recursion in for_recursion:
num_deleted += delete_deps_with_budget(
project_id,
model_for_recursion,
fk_name_for_recursion,
[d["pk"] for d in relevant_ids],
budget - num_deleted,
dep_graph,
is_for_project,
)
if num_deleted >= budget:
return num_deleted
# If this point is reached: we have deleted all referring objects that we could delete, and we still have budget
# left. We can now delete the referring objects themselves (limited by budget).
relevant_ids_after_rec = relevant_ids[:budget - num_deleted]
do_pre_delete(project_id, referring_model, [d['pk'] for d in relevant_ids_after_rec], is_for_project)
my_num_deleted, del_d = referring_model.objects.filter(pk__in=[d['pk'] for d in relevant_ids_after_rec]).delete()
num_deleted += my_num_deleted
assert set(del_d.keys()) == {referring_model._meta.label} # assert no-cascading (we do that ourselves)
if is_for_project:
# short-circuit: project-deletion implies "no orphans" because the project kills everything with it.
return num_deleted
# Note that prune_orphans doesn't respect the budget. Reason: it's not easy to do, b/c the order is reversed (we
# would need to predict somehow at the previous step how much budget to leave unused) and we don't care _that much_
# about a precise budget "at the edges of our algo", as long as we don't have a "single huge blocking thing".
prune_orphans(referring_model, relevant_ids_after_rec)
return num_deleted
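The expected calling pattern (which the new tasks below follow): given a project_id and issue_id, seed the recursion with the root's id and, when the budget runs out, stop and re-enqueue instead of continuing. A sketch:

budget, num_deleted = 500, 0
dep_graph = get_model_topography()
for model, fk_name in dep_graph["issues.Issue"]:
    num_deleted += delete_deps_with_budget(
        project_id, model, fk_name, [issue_id],
        budget - num_deleted, dep_graph, is_for_project=False)
    if num_deleted >= budget:
        break  # budget exhausted: schedule a follow-up task rather than keep going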

View File

@@ -1,11 +1,17 @@
import json
from django.utils.html import escape, mark_safe
from django.contrib import admin
from django.views.decorators.csrf import csrf_protect
from django.utils.decorators import method_decorator
from bugsink.transaction import immediate_atomic
from projects.admin import ProjectFilter
from .models import Event
csrf_protect_m = method_decorator(csrf_protect)
@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
@@ -90,3 +96,28 @@ class EventAdmin(admin.ModelAdmin):
def on_site(self, obj):
return mark_safe('<a href="' + escape(obj.get_absolute_url()) + '">View</a>')
def get_deleted_objects(self, objs, request):
to_delete = list(objs) + ["...all its related objects... (delayed)"]
model_count = {
Event: len(objs),
}
perms_needed = set()
protected = []
return to_delete, model_count, perms_needed, protected
def delete_queryset(self, request, queryset):
# NOTE: not the most efficient; it will do for a first version.
with immediate_atomic():
for obj in queryset:
obj.delete_deferred()
def delete_model(self, request, obj):
with immediate_atomic():
obj.delete_deferred()
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
# the superclass version, but with the transaction.atomic context manager commented out (we do this ourselves)
# with transaction.atomic(using=router.db_for_write(self.model)):
return self._delete_view(request, object_id, extra_context)

View File

@@ -43,11 +43,24 @@ def create_event(project=None, issue=None, timestamp=None, event_data=None):
)
def create_event_data():
def create_event_data(exception_type=None):
# create minimal event data that is valid as per from_json()
return {
result = {
"event_id": uuid.uuid4().hex,
"timestamp": timezone.now().isoformat(),
"platform": "python",
}
if exception_type is not None:
# allow for a specific exception type to get unique groupers/issues
result["exception"] = {
"values": [
{
"type": exception_type,
"value": "This is a test exception",
}
]
}
return result
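Passing a distinct exception_type is what lets a test mint distinct groupers (and hence distinct issues); e.g.:

data_a = create_event_data(exception_type="ValueError")
data_b = create_event_data(exception_type="KeyError")
# two different types group separately -> two Issues once digested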

View File

@@ -23,7 +23,7 @@ def _delete_for_missing_fk(clazz, field_name):
## Dangling FKs:
Non-existing objects may come into being when people muddle in the database directly with foreign key checks turned
off (note that fk checks are turned off by default in SQLite for backwards compatibility reasons).
off (note that fk checks are turned off by default in sqlite's CLI for backwards compatibility reasons).
In the future it's further possible that there will be pieces the actual Bugsink code where FK-checks are turned off
temporarily (e.g. when deleting a project with very many related objects). (In March 2025 there was no such code
@@ -76,6 +76,8 @@ def make_consistent():
_delete_for_missing_fk(Release, 'project')
_delete_for_missing_fk(EventTag, 'issue') # See #132 for the ordering of this statement
_delete_for_missing_fk(Event, 'project')
_delete_for_missing_fk(Event, 'issue')

View File

@@ -0,0 +1,34 @@
# Generated by Django 4.2.21 on 2025-07-03 08:30
from django.db import migrations
def remove_events_with_null_fks(apps, schema_editor):
# Up until now, we have various models w/ .issue=FK(null=True, on_delete=models.SET_NULL)
# Although it is "not expected" in the interface, issue-deletion would have led to those
# objects with a null issue. We're about to change that to .issue=FK(null=False, ...) which
# would crash if we don't remove those objects first. Object-removal is "fine" though, because
# as per the meaning of the SET_NULL, these objects were "dangling" anyway.
Event = apps.get_model("events", "Event")
Event.objects.filter(issue__isnull=True).delete()
# overcomplete b/c .issue would imply this, done anyway in the name of "defensive programming"
Event.objects.filter(grouping__isnull=True).delete()
class Migration(migrations.Migration):
dependencies = [
("events", "0019_event_storage_backend"),
# "in principle" order shouldn't matter, because the various objects that are being deleted here are all "fully
# contained" by the .issue; to be safe, however, we depend on the below, because of Grouping.objects.delete()
# (which would set Event.grouping=NULL, which the present migration takes into account).
("issues", "0020_remove_objects_with_null_issue"),
]
operations = [
migrations.RunPython(remove_events_with_null_fks, reverse_code=migrations.RunPython.noop),
]

View File

@@ -0,0 +1,27 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("issues", "0021_alter_do_nothing"),
("events", "0020_remove_events_with_null_issue_or_grouping"),
]
operations = [
migrations.AlterField(
model_name="event",
name="grouping",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="issues.grouping"
),
),
migrations.AlterField(
model_name="event",
name="issue",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="issues.issue"
),
),
]

View File

@@ -0,0 +1,21 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
# Django came up with 0014, whatever the reason, I'm sure that 0013 is at least required (as per comments there)
("projects", "0014_alter_projectmembership_project"),
("events", "0021_alter_do_nothing"),
]
operations = [
migrations.AlterField(
model_name="event",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="projects.project"
),
),
]

View File

@@ -8,12 +8,15 @@ from django.utils.functional import cached_property
from projects.models import Project
from compat.timestamp import parse_timestamp
from bugsink.transaction import delay_on_commit
from issues.utils import get_title_for_exception_type_and_value
from .retention import get_random_irrelevance
from .storage_registry import get_write_storage, get_storage
from .tasks import delete_event_deps
class Platform(models.TextChoices):
AS3 = "as3"
@@ -72,11 +75,8 @@ class Event(models.Model):
ingested_at = models.DateTimeField(blank=False, null=False)
digested_at = models.DateTimeField(db_index=True, blank=False, null=False)
# not actually expected to be null, but we want to be able to delete issues without deleting events (cleanup later)
issue = models.ForeignKey("issues.Issue", blank=False, null=True, on_delete=models.SET_NULL)
# not actually expected to be null
grouping = models.ForeignKey("issues.Grouping", blank=False, null=True, on_delete=models.SET_NULL)
issue = models.ForeignKey("issues.Issue", blank=False, null=False, on_delete=models.DO_NOTHING)
grouping = models.ForeignKey("issues.Grouping", blank=False, null=False, on_delete=models.DO_NOTHING)
# The docs say:
# > Required. Hexadecimal string representing a uuid4 value. The length is exactly 32 characters. Dashes are not
@@ -85,7 +85,7 @@ class Event(models.Model):
# uuid4 clientside". In any case, we just rely on the envelope's event_id (required per the envelope spec).
# Not a primary key: events may be duplicated across projects
event_id = models.UUIDField(primary_key=False, null=False, editable=False, help_text="As per the sent data")
project = models.ForeignKey(Project, blank=False, null=True, on_delete=models.SET_NULL) # SET_NULL: cleanup 'later'
project = models.ForeignKey(Project, blank=False, null=False, on_delete=models.DO_NOTHING)
data = models.TextField(blank=False, null=False)
@@ -285,3 +285,13 @@ class Event(models.Model):
return list(
self.tags.all().select_related("value", "value__key").order_by("value__key__key")
)
def delete_deferred(self):
"""Schedules deletion of all related objects"""
# NOTE: for such a small closure, I couldn't be bothered to have an .is_deleted field and deal with it. (the
# idea being that the deletion will be relatively quick anyway). We still need "something" though, since we've
# set DO_NOTHING everywhere. An alternative would be the "full inline", i.e. delete everything right in the
# request w/o any delay. That diverges even more from the approach for Issue/Project, making such things a
# "design decision needed". Maybe if we get more `delete_deferred` impls. we'll have a bit more info to figure
# out if we can harmonize on (e.g.) 2 approaches.
delay_on_commit(delete_event_deps, str(self.project_id), str(self.id))

View File

@@ -376,7 +376,7 @@ def evict_for_epoch_and_irrelevance(project, max_epoch, max_irrelevance, max_eve
Event.objects.filter(pk__in=pks_to_delete).exclude(storage_backend=None)
.values_list("id", "storage_backend")
)
issue_deletions = {
deletions_per_issue = {
d['issue_id']: d['count'] for d in
Event.objects.filter(pk__in=pks_to_delete).values("issue_id").annotate(count=Count("issue_id"))}
@@ -387,9 +387,9 @@ def evict_for_epoch_and_irrelevance(project, max_epoch, max_irrelevance, max_eve
nr_of_deletions = Event.objects.filter(pk__in=pks_to_delete).delete()[1].get("events.Event", 0)
else:
nr_of_deletions = 0
issue_deletions = {}
deletions_per_issue = {}
return EvictionCounts(nr_of_deletions, issue_deletions)
return EvictionCounts(nr_of_deletions, deletions_per_issue)
def cleanup_events_on_storage(todos):

events/tasks.py (new file, 53 lines)
View File

@@ -0,0 +1,53 @@
from snappea.decorators import shared_task
from bugsink.utils import get_model_topography, delete_deps_with_budget
from bugsink.transaction import immediate_atomic, delay_on_commit
@shared_task
def delete_event_deps(project_id, event_id):
from .models import Event # avoid circular import
with immediate_atomic():
# matches what we do in events/retention.py (and for which argumentation exists); in practice I have seen _much_
# faster deletion times (on the order of .03s per task on my local laptop) when using a budget of 500, _but_
# it's not a given those were for "expensive objects" (e.g. events); and I'd rather err on the side of caution
# (worst case we have a bit of inefficiency; in any case this avoids hogging the global write lock / timeouts).
budget = 500
num_deleted = 0
# NOTE: for this delete_x_deps, we didn't bother optimizing the topography graph (the dependency-graph of a
# single event is believed to be small enough to not warrant further optimization).
dep_graph = get_model_topography()
for model_for_recursion, fk_name_for_recursion in dep_graph["events.Event"]:
this_num_deleted = delete_deps_with_budget(
project_id,
model_for_recursion,
fk_name_for_recursion,
[event_id],
budget - num_deleted,
dep_graph,
is_for_project=False,
)
num_deleted += this_num_deleted
if num_deleted >= budget:
delay_on_commit(delete_event_deps, project_id, event_id)
return
if budget - num_deleted <= 0:
# no more budget for the self-delete.
delay_on_commit(delete_event_deps, project_id, event_id)
else:
# final step: delete the event itself
issue = Event.objects.get(pk=event_id).issue
Event.objects.filter(pk=event_id).delete()
# manual (outside of delete_deps_with_budget) b/c the special-case in that function is (ATM) specific to
# project (it was built around Issue-deletion initially, so Issue outliving the event-deletion was not
# part of that functionality). we might refactor this at some point.
issue.stored_event_count -= 1
issue.save(update_fields=["stored_event_count"])

View File

@@ -9,8 +9,7 @@ from django.utils import timezone
from bugsink.test_utils import TransactionTestCase25251 as TransactionTestCase
from projects.models import Project, ProjectMembership
from issues.models import Issue
from issues.factories import denormalized_issue_fields
from issues.factories import get_or_create_issue
from .factories import create_event
from .retention import (
@@ -28,7 +27,7 @@ class ViewTests(TransactionTestCase):
self.user = User.objects.create_user(username='test', password='test')
self.project = Project.objects.create()
ProjectMembership.objects.create(project=self.project, user=self.user)
self.issue = Issue.objects.create(project=self.project, **denormalized_issue_fields())
self.issue, _ = get_or_create_issue(project=self.project)
self.event = create_event(self.project, self.issue)
self.client.force_login(self.user)
@@ -154,7 +153,7 @@ class RetentionTestCase(DjangoTestCase):
digested_at = timezone.now()
self.project = Project.objects.create(retention_max_event_count=5)
self.issue = Issue.objects.create(project=self.project, **denormalized_issue_fields())
self.issue, _ = get_or_create_issue(project=self.project)
for digest_order in range(1, 7):
project_stored_event_count += 1 # +1 pre-create, as in the ingestion view
@@ -180,7 +179,7 @@ class RetentionTestCase(DjangoTestCase):
project_stored_event_count = 0
self.project = Project.objects.create(retention_max_event_count=999)
self.issue = Issue.objects.create(project=self.project, **denormalized_issue_fields())
self.issue, _ = get_or_create_issue(project=self.project)
current_timestamp = datetime.datetime(2022, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)

View File

@@ -131,7 +131,7 @@ class BaseIngestAPIView(View):
@classmethod
def get_project(cls, project_pk, sentry_key):
try:
return Project.objects.get(pk=project_pk, sentry_key=sentry_key)
return Project.objects.get(pk=project_pk, sentry_key=sentry_key, is_deleted=False)
except Project.DoesNotExist:
# We don't distinguish between "project not found" and "key incorrect"; there's no real value in that from
# the user perspective (they deal in dsns). Additional advantage: no need to do constant-time-comp on
@@ -251,7 +251,12 @@ class BaseIngestAPIView(View):
ingested_at = parse_timestamp(event_metadata["ingested_at"])
digested_at = datetime.now(timezone.utc) if digested_at is None else digested_at # explicit passing: test only
project = Project.objects.get(pk=event_metadata["project_id"])
try:
project = Project.objects.get(pk=event_metadata["project_id"], is_deleted=False)
except Project.DoesNotExist:
# we may get here if the project was deleted after the event was ingested, but before it was digested
# (covers both "deletion in progress (is_deleted=True)" and "fully deleted").
return
if not cls.count_project_periods_and_act_on_it(project, digested_at):
return # if over-quota: just return (any cleanup is done calling-side)
@@ -360,6 +365,7 @@ class BaseIngestAPIView(View):
if issue_created:
TurningPoint.objects.create(
project=project,
issue=issue, triggering_event=event, timestamp=ingested_at,
kind=TurningPointKind.FIRST_SEEN)
event.never_evict = True
@@ -371,6 +377,7 @@ class BaseIngestAPIView(View):
# new issues cannot be regressions by definition, hence this is in the 'else' branch
if issue_is_regression(issue, event.release):
TurningPoint.objects.create(
project=project,
issue=issue, triggering_event=event, timestamp=ingested_at,
kind=TurningPointKind.REGRESSED)
event.never_evict = True

View File

@@ -1,8 +1,14 @@
from django.contrib import admin
from bugsink.transaction import immediate_atomic
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from .models import Issue, Grouping, TurningPoint
from .forms import IssueAdminForm
csrf_protect_m = method_decorator(csrf_protect)
class GroupingInline(admin.TabularInline):
model = Grouping
@@ -79,3 +85,28 @@ class IssueAdmin(admin.ModelAdmin):
'digested_event_count',
'stored_event_count',
]
def get_deleted_objects(self, objs, request):
to_delete = list(objs) + ["...all its related objects... (delayed)"]
model_count = {
Issue: len(objs),
}
perms_needed = set()
protected = []
return to_delete, model_count, perms_needed, protected
def delete_queryset(self, request, queryset):
# NOTE: not the most efficient; it will do for a first version.
with immediate_atomic():
for obj in queryset:
obj.delete_deferred()
def delete_model(self, request, obj):
with immediate_atomic():
obj.delete_deferred()
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
# the superclass version, but with the transaction.atomic context manager commented out (we do this ourselves)
# with transaction.atomic(using=router.db_for_write(self.model)):
return self._delete_view(request, object_id, extra_context)

View File

@@ -12,6 +12,7 @@ def get_or_create_issue(project=None, event_data=None):
if event_data is None:
from events.factories import create_event_data
event_data = create_event_data()
if project is None:
project = Project.objects.create(name="Test project")

View File

@@ -0,0 +1,16 @@
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("issues", "0017_issue_list_indexes_must_start_with_project"),
]
operations = [
migrations.AddField(
model_name="issue",
name="is_deleted",
field=models.BooleanField(default=False),
),
]

View File

@@ -0,0 +1,16 @@
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("issues", "0018_issue_is_deleted"),
]
operations = [
migrations.AlterField(
model_name="grouping",
name="grouping_key_hash",
field=models.CharField(max_length=64, null=True),
),
]

View File

@@ -0,0 +1,26 @@
from django.db import migrations
def remove_objects_with_null_issue(apps, schema_editor):
# Up until now, we have various models w/ .issue=FK(null=True, on_delete=models.SET_NULL)
# Although it is "not expected" in the interface, issue-deletion would have led to those
# objects with a null issue. We're about to change that to .issue=FK(null=False, ...) which
# would crash if we don't remove those objects first. Object-removal is "fine" though, because
# as per the meaning of the SET_NULL, these objects were "dangling" anyway.
Grouping = apps.get_model("issues", "Grouping")
TurningPoint = apps.get_model("issues", "TurningPoint")
Grouping.objects.filter(issue__isnull=True).delete()
TurningPoint.objects.filter(issue__isnull=True).delete()
class Migration(migrations.Migration):
dependencies = [
("issues", "0019_alter_grouping_grouping_key_hash"),
]
operations = [
migrations.RunPython(remove_objects_with_null_issue, reverse_code=migrations.RunPython.noop),
]

View File

@@ -0,0 +1,26 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("issues", "0020_remove_objects_with_null_issue"),
]
operations = [
migrations.AlterField(
model_name="grouping",
name="issue",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="issues.issue"
),
),
migrations.AlterField(
model_name="turningpoint",
name="issue",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="issues.issue"
),
),
]

View File

@@ -0,0 +1,22 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("projects", "0012_project_is_deleted"),
("issues", "0021_alter_do_nothing"),
]
operations = [
migrations.AddField(
model_name="turningpoint",
name="project",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
to="projects.project",
),
),
]

View File

@@ -0,0 +1,36 @@
from django.db import migrations
def turningpoint_set_project(apps, schema_editor):
TurningPoint = apps.get_model("issues", "TurningPoint")
# TurningPoint.objects.update(project=F("issue__project"))
# fails with 'Joined field references are not permitted in this query"
# This one's elegant and works in SQLite but not in MySQL:
# TurningPoint.objects.update(
# project=Subquery(
# TurningPoint.objects
# .filter(pk=OuterRef('pk'))
# .values('issue__project')[:1]
# )
# )
# django.db.utils.OperationalError: (1093, "You can't specify target table 'issues_turningpoint' for update in FROM
# clause")
# so in the end we'll just loop:
for turningpoint in TurningPoint.objects.all():
turningpoint.project = turningpoint.issue.project
turningpoint.save(update_fields=["project"])
class Migration(migrations.Migration):
dependencies = [
("issues", "0022_turningpoint_project"),
]
operations = [
migrations.RunPython(turningpoint_set_project, migrations.RunPython.noop),
]
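If the per-row loop ever becomes a bottleneck on large installs, a possible refinement (an untested sketch, same semantics) is to join the issue up front and stream the rows:

for turningpoint in TurningPoint.objects.select_related("issue").iterator():
    turningpoint.project_id = turningpoint.issue.project_id  # no extra query per row
    turningpoint.save(update_fields=["project"])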

View File

@@ -0,0 +1,20 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("projects", "0012_project_is_deleted"),
("issues", "0023_turningpoint_set_project"),
]
operations = [
migrations.AlterField(
model_name="turningpoint",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="projects.project"
),
),
]

View File

@@ -0,0 +1,28 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
# Django came up with 0014, whatever the reason, I'm sure that 0013 is at least required (as per comments there)
("projects", "0014_alter_projectmembership_project"),
("issues", "0024_turningpoint_project_alter_not_null"),
]
operations = [
migrations.AlterField(
model_name="grouping",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="projects.project"
),
),
migrations.AlterField(
model_name="issue",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="projects.project"
),
),
]

View File

@@ -10,6 +10,7 @@ from django.conf import settings
from django.utils.functional import cached_property
from bugsink.volume_based_condition import VolumeBasedCondition
from bugsink.transaction import delay_on_commit
from alerts.tasks import send_unmute_alert
from compat.timestamp import parse_timestamp, format_timestamp
from tags.models import IssueTag, TagValue
@@ -18,6 +19,8 @@ from .utils import (
parse_lines, serialize_lines, filter_qs_for_fixed_at, exclude_qs_for_fixed_at,
get_title_for_exception_type_and_value)
from .tasks import delete_issue_deps
class IncongruentStateException(Exception):
pass
@@ -32,7 +35,9 @@ class Issue(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
project = models.ForeignKey(
"projects.Project", blank=False, null=True, on_delete=models.SET_NULL) # SET_NULL: cleanup 'later'
"projects.Project", blank=False, null=False, on_delete=models.DO_NOTHING)
is_deleted = models.BooleanField(default=False)
# 1-based for the same reasons as Event.digest_order
digest_order = models.PositiveIntegerField(blank=False, null=False)
@@ -72,6 +77,21 @@ class Issue(models.Model):
self.digest_order = max_current + 1 if max_current is not None else 1
super().save(*args, **kwargs)
def delete_deferred(self):
"""Marks the issue as deleted, and schedules deletion of all related objects"""
self.is_deleted = True
self.save(update_fields=["is_deleted"])
# we set grouping_key_hash to None to ensure that event digests that happen simultaneously with the delayed
# cleanup will get their own fresh Grouping and hence Issue. This matches with the behavior that would happen
# if Issue deletion would have been instantaneous (i.e. it's the least surprising behavior).
#
# `issue=None` is explicitly _not_ part of this update, such that the actual deletion of the Groupings will be
# picked up as part of the delete_issue_deps task.
self.grouping_set.all().update(grouping_key_hash=None)
delay_on_commit(delete_issue_deps, str(self.project_id), str(self.id))
def friendly_id(self):
return f"{ self.project.slug.upper() }-{ self.digest_order }"
@@ -193,14 +213,14 @@ class Grouping(models.Model):
into a single issue. (such manual merging is not yet implemented, but the data-model is already prepared for it)
"""
project = models.ForeignKey(
"projects.Project", blank=False, null=True, on_delete=models.SET_NULL) # SET_NULL: cleanup 'later'
"projects.Project", blank=False, null=False, on_delete=models.DO_NOTHING)
grouping_key = models.TextField(blank=False, null=False)
# we hash the key to make it indexable on MySQL, see https://code.djangoproject.com/ticket/2495
grouping_key_hash = models.CharField(max_length=64, blank=False, null=False)
grouping_key_hash = models.CharField(max_length=64, blank=False, null=True)
issue = models.ForeignKey("Issue", blank=False, null=True, on_delete=models.SET_NULL) # SET_NULL: cleanup 'later'
issue = models.ForeignKey("Issue", blank=False, null=False, on_delete=models.DO_NOTHING)
def __str__(self):
return self.grouping_key
@@ -328,10 +348,15 @@ class IssueStateManager(object):
# path is never reached via UI-based paths (because those are by definition not event-triggered); thus
# the 2 ways of creating TurningPoints do not collide.
TurningPoint.objects.create(
project_id=issue.project_id,
issue=issue, triggering_event=triggering_event, timestamp=triggering_event.ingested_at,
kind=TurningPointKind.UNMUTED, metadata=json.dumps(unmute_metadata))
triggering_event.never_evict = True # .save() will be called by the caller of this function
@staticmethod
def delete(issue):
issue.delete_deferred()
@staticmethod
def get_unmute_thresholds(issue):
unmute_vbcs = [
@@ -450,6 +475,11 @@ class IssueQuerysetStateManager(object):
for issue in issue_qs:
IssueStateManager.unmute(issue, triggering_event)
@staticmethod
def delete(issue_qs):
for issue in issue_qs:
issue.delete_deferred()
class TurningPointKind(models.IntegerChoices):
# The language of the kinds reflects a historic view of the system, e.g. "first seen" as opposed to "new issue"; an
@@ -471,7 +501,8 @@ class TurningPoint(models.Model):
# basically: an Event, but that name was already taken in our system :-) alternative names I considered:
# "milestone", "state_change", "transition", "annotation", "episode"
issue = models.ForeignKey("Issue", blank=False, null=True, on_delete=models.SET_NULL) # SET_NULL: cleanup 'later'
project = models.ForeignKey("projects.Project", blank=False, null=False, on_delete=models.DO_NOTHING)
issue = models.ForeignKey("Issue", blank=False, null=False, on_delete=models.DO_NOTHING)
triggering_event = models.ForeignKey("events.Event", blank=True, null=True, on_delete=models.DO_NOTHING)
# null: the system-user

issues/tasks.py (new file, 82 lines)
View File

@@ -0,0 +1,82 @@
from snappea.decorators import shared_task
from bugsink.utils import get_model_topography, delete_deps_with_budget
from bugsink.transaction import immediate_atomic, delay_on_commit
def get_model_topography_with_issue_override():
"""
Returns the model topography with ordering adjusted to prefer deletions via .issue, when available.
This assumes that Issue is not only the root of the dependency graph, but also that if a model has an .issue
ForeignKey, deleting it via that path is sufficient, meaning we can safely avoid visiting the same model again
through other ForeignKey routes (e.g. Event.grouping or TurningPoint.triggering_event).
The preference is encoded via an explicit list of models, which are visited early and only via their .issue path.
"""
from issues.models import TurningPoint, Grouping
from events.models import Event
from tags.models import IssueTag, EventTag
preferred = [
TurningPoint, # above Event, to avoid deletions via .triggering_event
EventTag, # above Event, to avoid deletions via .event
Event, # above Grouping, to avoid deletions via .grouping
Grouping,
IssueTag,
]
def as_preferred(lst):
"""
Sorts the list of (model, fk_name) tuples such that the models are in the preferred order as indicated above,
and models which occur with another fk_name are pruned
"""
return sorted(
[(model, fk_name) for model, fk_name in lst if fk_name == "issue" or model not in preferred],
key=lambda x: preferred.index(x[0]) if x[0] in preferred else len(preferred),
)
topo = get_model_topography()
for k, lst in topo.items():
topo[k] = as_preferred(lst)
return topo
@shared_task
def delete_issue_deps(project_id, issue_id):
from .models import Issue # avoid circular import
with immediate_atomic():
# matches what we do in events/retention.py (and for which argumentation exists); in practice I have seen _much_
# faster deletion times (on the order of .03s per task on my local laptop) when using a budget of 500, _but_
# it's not a given those were for "expensive objects" (e.g. events); and I'd rather err on the side of caution
# (worst case we have a bit of inefficiency; in any case this avoids hogging the global write lock / timeouts).
budget = 500
num_deleted = 0
dep_graph = get_model_topography_with_issue_override()
for model_for_recursion, fk_name_for_recursion in dep_graph["issues.Issue"]:
this_num_deleted = delete_deps_with_budget(
project_id,
model_for_recursion,
fk_name_for_recursion,
[issue_id],
budget - num_deleted,
dep_graph,
is_for_project=False,
)
num_deleted += this_num_deleted
if num_deleted >= budget:
delay_on_commit(delete_issue_deps, project_id, issue_id)
return
if budget - num_deleted <= 0:
# no more budget for the self-delete.
delay_on_commit(delete_issue_deps, project_id, issue_id)
else:
# final step: delete the issue itself
Issue.objects.filter(pk=issue_id).delete()

View File

@@ -7,6 +7,23 @@
{% block content %}
<!-- Delete Confirmation Modal -->
<div id="deleteModal" class="hidden fixed inset-0 bg-slate-600 bg-opacity-50 overflow-y-auto h-full w-full z-50 flex items-center justify-center">
<div class="relative p-6 border border-slate-300 w-96 shadow-lg rounded-md bg-white">
<div class="text-center m-4">
<h3 class="text-2xl font-semibold text-slate-800 mt-3 mb-4">Delete Issues</h3>
<div class="mt-4 mb-6">
<p class="text-slate-700">
Deleting an Issue is a permanent action and cannot be undone. It's typically better to resolve or mute an issue instead of deleting it, as this allows you to keep track of past issues and their resolutions.
</p>
</div>
<div class="flex items-center justify-center space-x-4 mb-4">
<button id="cancelDelete" class="text-cyan-500 font-bold">Cancel</button>
<button id="confirmDelete" type="submit" class="font-bold py-2 px-4 rounded bg-red-500 text-white border-2 border-red-600 hover:bg-red-600 active:ring">Delete</button>
</div>
</div>
</div>
</div>
<div class="m-4">
@@ -35,7 +52,7 @@
<div>
<form action="." method="post">
<form action="." method="post" id="issueForm">
{% csrf_token %}
<table class="w-full">
@@ -122,8 +139,18 @@
<button disabled class="font-bold text-slate-300 dark:text-slate-600 border-slate-300 dark:border-slate-600 pl-4 pr-4 pb-2 pt-2 border-r-2 border-b-2 border-t-2 rounded-e-md" name="action" value="unmute">Unmute</button>
{% endif %}
<div class="dropdown">
<button disabled {# disabled b/c clicking the dropdown does nothing - we have hover for that #} class="font-bold text-slate-500 fill-slate-500 border-slate-300 ml-2 pl-4 pr-4 pb-2 pt-2 border-2 hover:bg-slate-200 active:ring rounded-md">...</button>
<div class="dropdown-content-right flex-col">
<button type="button" onclick="showDeleteConfirmation()" class="block self-stretch font-bold text-red-500 border-slate-300 pl-4 pr-4 pb-2 pt-2 border-l-2 border-r-2 border-b-2 bg-white hover:bg-red-50 active:ring text-left whitespace-nowrap">Delete</button>
</div>
</div>
{% endspaceless %}
{# NOTE: "reopen" is not available in the UI as per the notes in issue_detail #}
{# only for resolved/muted items <button class="font-bold text-slate-500 dark:text-slate-300 border-slate-300 dark:border-slate-600 pl-4 pr-4 pb-2 pt-2 ml-1 border-2 hover:bg-slate-200 dark:hover:bg-slate-800 active:ring rounded-md">Reopen</button> #}
@@ -281,5 +308,36 @@
{% endblock %}
{% block extra_js %}
<script>
const confirmationBox = document.getElementById('deleteModal');
const confirmDelete = document.getElementById('confirmDelete');
const cancelDelete = document.getElementById('cancelDelete');
const form = document.getElementById('issueForm');
let actionInput = null;
function showDeleteConfirmation() {
confirmationBox.style.display = 'flex';
}
cancelDelete.addEventListener('click', () => {
confirmationBox.style.display = 'none';
});
confirmDelete.addEventListener('click', () => {
// Add hidden input only for this submission
if (!actionInput) {
actionInput = document.createElement('input');
actionInput.type = 'hidden';
actionInput.name = 'action';
actionInput.value = 'delete';
form.appendChild(actionInput);
}
form.submit();
});
</script>
<script src="{% static 'js/issue_list.js' %}"></script>
{% endblock %}

View File

@@ -13,8 +13,10 @@ from django.test import TestCase as DjangoTestCase
from django.contrib.auth import get_user_model
from django.test import tag
from django.conf import settings
from django.apps import apps
from bugsink.test_utils import TransactionTestCase25251 as TransactionTestCase
from bugsink.utils import get_model_topography
from projects.models import Project, ProjectMembership
from releases.models import create_release_if_needed
from events.factories import create_event
@@ -23,11 +25,14 @@ from compat.dsn import get_header_value
from events.models import Event
from ingest.views import BaseIngestAPIView
from issues.factories import get_or_create_issue
from tags.models import store_tags
from tags.tasks import vacuum_tagvalues
from .models import Issue, IssueStateManager
from .models import Issue, IssueStateManager, TurningPoint, TurningPointKind
from .regressions import is_regression, is_regression_2, issue_is_regression
from .factories import denormalized_issue_fields
from .utils import get_issue_grouper_for_data
from .tasks import get_model_topography_with_issue_override
User = get_user_model()
@@ -351,12 +356,11 @@ class MuteUnmuteTestCase(TransactionTestCase):
def test_unmute_simple_case(self, send_unmute_alert):
project = Project.objects.create()
issue = Issue.objects.create(
project=project,
unmute_on_volume_based_conditions='[{"period": "day", "nr_of_periods": 1, "volume": 1}]',
is_muted=True,
**denormalized_issue_fields(),
)
issue, _ = get_or_create_issue(project)
issue.unmute_on_volume_based_conditions = '[{"period": "day", "nr_of_periods": 1, "volume": 1}]'
issue.is_muted = True
issue.save()
event = create_event(project, issue)
BaseIngestAPIView.count_issue_periods_and_act_on_it(issue, event, datetime.now(timezone.utc))
@@ -371,15 +375,14 @@ class MuteUnmuteTestCase(TransactionTestCase):
def test_unmute_two_simultaneously_should_lead_to_one_alert(self, send_unmute_alert):
project = Project.objects.create()
issue = Issue.objects.create(
project=project,
unmute_on_volume_based_conditions='''[
issue, _ = get_or_create_issue(project)
issue.unmute_on_volume_based_conditions = '''[
{"period": "day", "nr_of_periods": 1, "volume": 1},
{"period": "month", "nr_of_periods": 1, "volume": 1}
]''',
is_muted=True,
**denormalized_issue_fields(),
)
]'''
issue.is_muted = True
issue.save()
event = create_event(project, issue)
BaseIngestAPIView.count_issue_periods_and_act_on_it(issue, event, datetime.now(timezone.utc))
@@ -665,3 +668,98 @@ class GroupingUtilsTestCase(DjangoTestCase):
def test_fingerprint_with_default(self):
self.assertEqual("Log Message: <no log message> ⋄ <no transaction> ⋄ fixed string",
get_issue_grouper_for_data({"fingerprint": ["{{ default }}", "fixed string"]}))
class IssueDeletionTestCase(TransactionTestCase):
def setUp(self):
super().setUp()
self.project = Project.objects.create(name="Test Project", stored_event_count=1) # 1, in prep. of the below
self.issue, _ = get_or_create_issue(self.project)
self.event = create_event(self.project, issue=self.issue)
TurningPoint.objects.create(
project=self.project,
issue=self.issue, triggering_event=self.event, timestamp=self.event.ingested_at,
kind=TurningPointKind.FIRST_SEEN)
self.event.never_evict = True
self.event.save()
store_tags(self.event, self.issue, {"foo": "bar"})
def test_delete_issue(self):
models = [apps.get_model(app_label=s.split('.')[0], model_name=s.split('.')[1].lower()) for s in [
'events.Event', 'issues.Grouping', 'issues.TurningPoint', 'tags.EventTag', 'issues.Issue', 'tags.IssueTag',
'tags.TagValue', # TagValue 'feels like' a vacuum_model (FKs reversed) but is cleaned up in `prune_orphans`
]]
# see the note in `prune_orphans` about TagKey to understand why it's special.
vacuum_models = [apps.get_model(app_label=s.split('.')[0], model_name=s.split('.')[1].lower())
for s in ['tags.TagKey',]]
for model in models + vacuum_models:
# test-the-test: make sure some instances of the models actually exist after setup
self.assertTrue(model.objects.exists(), f"Some {model.__name__} should exist")
# assertNumQueries() is brittle and opaque. But at least the brittle part is quick to fix (a single number) and
# provides a canary for performance regressions.
# correct for bugsink/transaction.py's select_for_update for non-sqlite databases
correct_for_select_for_update = 1 if 'sqlite' not in settings.DATABASES['default']['ENGINE'] else 0
with self.assertNumQueries(19 + correct_for_select_for_update):
self.issue.delete_deferred()
# tests run w/ TASK_ALWAYS_EAGER, so in the below we can just check the database directly
for model in models:
self.assertFalse(model.objects.exists(), f"No {model.__name__}s should exist after issue deletion")
for model in vacuum_models:
# 'should' in quotes because this isn't so because we believe it's better if they did, but because the
# code currently does not delete them.
self.assertTrue(model.objects.exists(), f"Some {model.__name__}s 'should' exist after issue deletion")
self.assertEqual(0, Project.objects.get().stored_event_count)
vacuum_tagvalues()
# tests run w/ TASK_ALWAYS_EAGER, so any "delayed" (recursive) calls can be expected to have run
for model in vacuum_models:
self.assertFalse(model.objects.exists(), f"No {model.__name__}s should exist after vacuuming")
def test_dependency_graphs(self):
# tests for an implementation detail of deferred deletion, namely one test that asserts what the actual
# model-topography is, and one test that shows how we manually override it; this is to trigger a failure when
# the topology changes (and forces us to double-check that the override is still correct).
orig = get_model_topography()
override = get_model_topography_with_issue_override()
def walk(topo, model_name):
results = []
for model, fk_name in topo[model_name]:
results.append((model, fk_name))
results.extend(walk(topo, model._meta.label))
return results
self.assertEqual(walk(orig, 'issues.Issue'), [
(apps.get_model('issues', 'Grouping'), 'issue'),
(apps.get_model('events', 'Event'), 'grouping'),
(apps.get_model('issues', 'TurningPoint'), 'triggering_event'),
(apps.get_model('tags', 'EventTag'), 'event'),
(apps.get_model('issues', 'TurningPoint'), 'issue'),
(apps.get_model('events', 'Event'), 'issue'),
(apps.get_model('issues', 'TurningPoint'), 'triggering_event'),
(apps.get_model('tags', 'EventTag'), 'event'),
(apps.get_model('tags', 'EventTag'), 'issue'),
(apps.get_model('tags', 'IssueTag'), 'issue'),
])
self.assertEqual(walk(override, 'issues.Issue'), [
(apps.get_model('issues', 'TurningPoint'), 'issue'),
(apps.get_model('tags', 'EventTag'), 'issue'),
(apps.get_model('events', 'Event'), 'issue'),
(apps.get_model('issues', 'Grouping'), 'issue'),
(apps.get_model('tags', 'IssueTag'), 'issue'),
])

View File

@@ -54,7 +54,8 @@ urlpatterns = [
path('issue/<uuid:issue_pk>/event/<first-last:nav>/', issue_event_stacktrace, name="event_stacktrace"),
path('issue/<uuid:issue_pk>/event/<first-last:nav>/details/', issue_event_details, name="event_details"),
path('issue/<uuid:issue_pk>/event/<first-last:nav>/breadcrumbs/', issue_event_details, name="event_breadcrumbs"),
path(
'issue/<uuid:issue_pk>/event/<first-last:nav>/breadcrumbs/', issue_event_breadcrumbs, name="event_breadcrumbs"),
path('issue/<uuid:issue_pk>/tags/', issue_tags),
path('issue/<uuid:issue_pk>/history/', issue_history),

View File

@@ -128,6 +128,10 @@ def _is_valid_action(action, issue):
"""We take the 'strict' approach of complaining even when the action is simply a no-op, because you're already in
the desired state."""
if action == "delete":
# any type of issue can be deleted
return True
if issue.is_resolved:
# any action is illegal on resolved issues (as per our current UI)
return False
@@ -153,6 +157,10 @@ def _is_valid_action(action, issue):
def _q_for_invalid_for_action(action):
"""returns a Q obj of issues for which the action is not valid."""
if action == "delete":
# delete is always valid, so we don't want any issues to be returned, https://stackoverflow.com/a/39001190
return Q(pk__in=[])
illegal_conditions = Q(is_resolved=True) # any action is illegal on resolved issues (as per our current UI)
if action.startswith("resolved_release:"):
@@ -169,7 +177,10 @@ def _q_for_invalid_for_action(action):
def _make_history(issue_or_qs, action, user):
if action == "resolve":
if action == "delete":
return # we're about to delete the issue, so no history is needed (nor possible)
elif action == "resolve":
kind = TurningPointKind.RESOLVED
elif action.startswith("resolved"):
kind = TurningPointKind.RESOLVED
@@ -210,10 +221,13 @@ def _make_history(issue_or_qs, action, user):
now = timezone.now()
if isinstance(issue_or_qs, Issue):
TurningPoint.objects.create(
project=issue_or_qs.project,
issue=issue_or_qs, kind=kind, user=user, metadata=json.dumps(metadata), timestamp=now)
else:
TurningPoint.objects.bulk_create([
TurningPoint(issue=issue, kind=kind, user=user, metadata=json.dumps(metadata), timestamp=now)
TurningPoint(
project_id=issue.project_id, issue=issue, kind=kind, user=user, metadata=json.dumps(metadata),
timestamp=now)
for issue in issue_or_qs
])
@@ -249,6 +263,8 @@ def _apply_action(manager, issue_or_qs, action, user):
}]))
elif action == "unmute":
manager.unmute(issue_or_qs)
elif action == "delete":
manager.delete(issue_or_qs)
def issue_list(request, project_pk, state_filter="open"):
@@ -292,7 +308,7 @@ def _issue_list_pt_2(request, project, state_filter, unapplied_issue_ids):
}
issue_list = d_state_filter[state_filter](
Issue.objects.filter(project=project)
Issue.objects.filter(project=project, is_deleted=False)
).order_by("-last_seen")
if request.GET.get("q"):
@@ -767,6 +783,7 @@ def history_comment_new(request, issue):
# think that's the right amount of magic to have: it still allows one to erase comments (possibly for non-manual
# kinds) but it saves you from what is obviously a mistake (without complaining with a red box or something)
TurningPoint.objects.create(
project=issue.project,
issue=issue, kind=TurningPointKind.MANUAL_ANNOTATION, user=request.user,
comment=form.cleaned_data["comment"],
timestamp=timezone.now())

View File

@@ -1,9 +1,17 @@
from django.contrib import admin
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
from admin_auto_filters.filters import AutocompleteFilter
from bugsink.transaction import immediate_atomic
from .models import Project, ProjectMembership
csrf_protect_m = method_decorator(csrf_protect)
class ProjectFilter(AutocompleteFilter):
title = 'Project'
field_name = 'project'
@@ -31,9 +39,8 @@ class ProjectAdmin(admin.ModelAdmin):
list_display = [
'name',
'dsn',
'alert_on_new_issue',
'alert_on_regression',
'alert_on_unmute',
'digested_event_count',
'stored_event_count',
]
readonly_fields = [
@@ -47,6 +54,31 @@ class ProjectAdmin(admin.ModelAdmin):
'slug': ['name'],
}
def get_deleted_objects(self, objs, request):
to_delete = list(objs) + ["...all its related objects... (delayed)"]
model_count = {
Project: len(objs),
}
perms_needed = set()
protected = []
return to_delete, model_count, perms_needed, protected
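Django's stock `get_deleted_objects` walks every related row just to render the delete-confirmation page; with deletion deferred to a background task that walk would be both slow and misleading, hence the fixed summary above.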
def delete_queryset(self, request, queryset):
# NOTE: not the most efficient; it will do for a first version.
with immediate_atomic():
for obj in queryset:
obj.delete_deferred()
def delete_model(self, request, obj):
with immediate_atomic():
obj.delete_deferred()
@csrf_protect_m
def delete_view(self, request, object_id, extra_context=None):
# the superclass version, but with the transaction.atomic context manager commented out (we do this ourselves)
# with transaction.atomic(using=router.db_for_write(self.model)):
return self._delete_view(request, object_id, extra_context)
# the preferred way to deal with ProjectMembership is actually through the inline above; however, because this may prove
# not to scale well with many memberships per project (more than 50?), we've left the separate admin interface here for

View File

@@ -0,0 +1,18 @@
# Generated by Django 4.2.21 on 2025-07-03 13:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("projects", "0011_fill_stored_event_count"),
]
operations = [
migrations.AddField(
model_name="project",
name="is_deleted",
field=models.BooleanField(default=False),
),
]

View File

@@ -0,0 +1,48 @@
from django.db import migrations
def delete_objects_pointing_to_null_project(apps, schema_editor):
# Up until now, we have various models w/ .project=FK(null=True, on_delete=models.SET_NULL)
# Although it is "not expected" in the interface, project-deletion would have led to those
# objects with a null project. We're about to change that to .project=FK(null=False, ...) which
# would crash if we don't remove those objects first. Object-removal is "fine" though, because
# as per the meaning of the SET_NULL, these objects were "dangling" anyway.
# We implement this as a _single_ cross-app migration so that reasoning about the order of deletions is easy (and
# we can just copy the correct order from the project/tasks.py `preferred` variable. This cross-appness does mean
# that we must specify all dependencies here, and all the set-null migrations (from various apps) must point at this
# migration as their dependency.
# from tasks.py, but in "strings" form
preferred = [
'tags.EventTag',
'tags.IssueTag',
'tags.TagValue',
'tags.TagKey',
# 'issues.TurningPoint', # not needed, .project is already not-null (we just added it)
'events.Event',
'issues.Grouping',
# 'alerts.MessagingServiceConfig', was CASCADE (not null), so no deletion needed
# 'projects.ProjectMembership', was CASCADE (not null), so no deletion needed
'releases.Release',
'issues.Issue',
]
for model_name in preferred:
model = apps.get_model(*model_name.split('.'))
model.objects.filter(project__isnull=True).delete()
class Migration(migrations.Migration):
dependencies = [
("projects", "0012_project_is_deleted"),
("issues", "0024_turningpoint_project_alter_not_null"),
("tags", "0004_alter_do_nothing"),
("releases", "0002_release_releases_re_sort_ep_5c07c8_idx"),
("events", "0021_alter_do_nothing"),
]
operations = [
migrations.RunPython(delete_objects_pointing_to_null_project, reverse_code=migrations.RunPython.noop),
]

View File

@@ -0,0 +1,19 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("projects", "0013_delete_objects_pointing_to_null_project"),
]
operations = [
migrations.AlterField(
model_name="projectmembership",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="projects.project"
),
),
]

View File

@@ -5,11 +5,14 @@ from django.conf import settings
from django.utils.text import slugify
from bugsink.app_settings import get_settings
from bugsink.transaction import delay_on_commit
from compat.dsn import build_dsn
from teams.models import TeamMembership
from .tasks import delete_project_deps
# ## Visibility/Access-design
#
@@ -74,6 +77,7 @@ class Project(models.Model):
name = models.CharField(max_length=255, blank=False, null=False, unique=True)
slug = models.SlugField(max_length=50, blank=False, null=False, unique=True)
is_deleted = models.BooleanField(default=False)
# sentry_key mirrors the "public" part of the sentry DSN. As of late 2023 Sentry's docs say this about DSNs:
#
@@ -143,6 +147,13 @@ class Project(models.Model):
super().save(*args, **kwargs)
def delete_deferred(self):
"""Marks the project as deleted, and schedules deletion of all related objects"""
self.is_deleted = True
self.save(update_fields=["is_deleted"])
delay_on_commit(delete_project_deps, str(self.id))
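Because the project row itself survives until the background task completes, every read path must now exclude soft-deleted projects (the `is_deleted=False` filters added to the views later in this commit); otherwise a half-deleted project would keep showing up in lists.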
def is_joinable(self, user=None):
if user is not None:
# take the user's team membership into account
@@ -164,7 +175,7 @@ class Project(models.Model):
class ProjectMembership(models.Model):
project = models.ForeignKey(Project, on_delete=models.CASCADE)
project = models.ForeignKey(Project, on_delete=models.DO_NOTHING)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
send_email_alerts = models.BooleanField(default=None, null=True)

View File

@@ -4,12 +4,13 @@ from snappea.decorators import shared_task
from bugsink.app_settings import get_settings
from bugsink.utils import send_rendered_email
from .models import Project
from bugsink.transaction import immediate_atomic, delay_on_commit
from bugsink.utils import get_model_topography, delete_deps_with_budget
@shared_task
def send_project_invite_email_new_user(email, project_pk, token):
from .models import Project # avoid circular import
project = Project.objects.get(pk=project_pk)
send_rendered_email(
@@ -30,6 +31,7 @@ def send_project_invite_email_new_user(email, project_pk, token):
@shared_task
def send_project_invite_email(email, project_pk):
from .models import Project # avoid circular import
project = Project.objects.get(pk=project_pk)
send_rendered_email(
@@ -45,3 +47,95 @@ def send_project_invite_email(email, project_pk):
}),
},
)
def get_model_topography_with_project_override():
"""
Returns the model topography with ordering adjusted to prefer deletions via .project, when available.
This assumes that Project is not only the root of the dependency graph, but also that if a model has a .project
ForeignKey, deleting it via that path is sufficient, meaning we can safely avoid visiting the same model again
through other ForeignKey routes (e.g. any of the .issue paths).
The preference is encoded via an explicit list of models, which are visited early and only via their .project path.
"""
from issues.models import Issue, TurningPoint, Grouping
from events.models import Event
from tags.models import IssueTag, EventTag, TagValue, TagKey
from alerts.models import MessagingServiceConfig
from releases.models import Release
from projects.models import ProjectMembership
preferred = [
# Tag-related: remove the "depending" models first and the most depended on last.
EventTag, # above Event, to avoid deletions via .event
IssueTag,
TagValue,
TagKey,
TurningPoint, # above Event, to avoid deletions via .triggering_event
Event, # above Grouping, to avoid deletions via .grouping
Grouping,
# these things "could be anywhere" in the ordering; they're not that connected; we put them at the end.
MessagingServiceConfig,
ProjectMembership,
Release,
Issue, # at the bottom; almost everything points to this, and we'd rather delete those things via .project
]
def as_preferred(lst):
"""
Sorts the list of (model, fk_name) tuples such that the models are in the preferred order as indicated above,
and models which occur with another fk_name are pruned
"""
return sorted(
[(model, fk_name) for model, fk_name in lst if fk_name == "project" or model not in preferred],
key=lambda x: preferred.index(x[0]) if x[0] in preferred else len(preferred),
)
topo = get_model_topography()
for k, lst in topo.items():
topo[k] = as_preferred(lst)
return topo
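To make the pruning-and-sorting rule concrete, here is a toy run of the same logic with plain strings standing in for the model classes (a sketch, not part of the commit):

    preferred = ["EventTag", "Event"]

    def as_preferred(lst):
        # keep entries whose fk is "project", or whose model has no stated
        # preference; sort survivors by position in `preferred` (unknowns last)
        return sorted(
            [(m, fk) for m, fk in lst if fk == "project" or m not in preferred],
            key=lambda x: preferred.index(x[0]) if x[0] in preferred else len(preferred),
        )

    lst = [("Event", "project"), ("EventTag", "event"), ("Release", "project"), ("EventTag", "project")]
    print(as_preferred(lst))
    # -> [('EventTag', 'project'), ('Event', 'project'), ('Release', 'project')]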
@shared_task
def delete_project_deps(project_id):
from .models import Project # avoid circular import
with immediate_atomic():
# matches what we do in events/retention.py (and for which argumentation exists); in practice I have seen _much_
# faster deletion times (in the order of .03s per task on my local laptop) when using a budget of 500, _but_
# it's not a given those were for "expensive objects" (e.g. events); and I'd rather err on the side of caution
# (worst case we have a bit of inefficiency; in any case this avoids hogging the global write lock / timeouts).
budget = 500
num_deleted = 0
dep_graph = get_model_topography_with_project_override()
for model_for_recursion, fk_name_for_recursion in dep_graph["projects.Project"]:
this_num_deleted = delete_deps_with_budget(
project_id,
model_for_recursion,
fk_name_for_recursion,
[project_id],
budget - num_deleted,
dep_graph,
is_for_project=True,
)
num_deleted += this_num_deleted
if num_deleted >= budget:
delay_on_commit(delete_project_deps, project_id)
return
if budget - num_deleted <= 0:
# no more budget for the self-delete.
delay_on_commit(delete_project_deps, project_id)
else:
# final step: delete the project itself
Project.objects.filter(pk=project_id).delete()
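The task above is an instance of a general budgeted-delete-and-requeue shape: do at most `budget` rows of work inside one short write transaction, and reschedule yourself if the budget ran out. A stripped-down standalone version (hypothetical task, for illustration only; the real one threads the dependency graph):

    from django.apps import apps
    from snappea.decorators import shared_task
    from bugsink.transaction import immediate_atomic

    BUDGET = 500

    @shared_task
    def delete_in_batches(model_label, project_id):
        # Delete up to BUDGET rows per run; short transactions avoid hogging
        # the global write lock.
        with immediate_atomic():
            model = apps.get_model(model_label)
            pks = list(
                model.objects.filter(project_id=project_id)
                .values_list("pk", flat=True)[:BUDGET]
            )
            model.objects.filter(pk__in=pks).delete()
        if len(pks) == BUDGET:  # there may be more rows left
            delete_in_batches.delay(model_label, project_id)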

View File

@@ -5,13 +5,32 @@
{% block title %}Edit {{ project.name }} · {{ site_title }}{% endblock %}
{% block content %}
{# div class="text-cyan-800" here in an attempt to trigger tailwind, which does not pick up Python code #}
<!-- Delete Confirmation Modal -->
<div id="deleteModal" class="hidden fixed inset-0 bg-slate-600 bg-opacity-50 overflow-y-auto h-full w-full z-50 flex items-center justify-center">
<div class="relative p-6 border border-slate-300 w-96 shadow-lg rounded-md bg-white">
<div class="text-center m-4">
<h3 class="text-2xl font-semibold text-slate-800 mt-3 mb-4">Delete Project</h3>
<div class="mt-4 mb-6">
<p class="text-slate-700">
Are you sure you want to delete this project? This action cannot be undone and will delete all associated data.
</p>
</div>
<div class="flex items-center justify-center space-x-4 mb-4">
<button id="cancelDelete" class="text-cyan-500 font-bold">Cancel</button>
<form method="post" action="." id="deleteForm">
{% csrf_token %}
<input type="hidden" name="action" value="delete">
<button type="submit" class="font-bold py-2 px-4 rounded bg-red-500 text-white border-2 border-red-600 hover:bg-red-600 active:ring">Confirm</button>
</form>
</div>
</div>
</div>
</div>
<div class="flex items-center justify-center">
<div class="m-4 max-w-4xl flex-auto">
<form action="." method="post">
<form action="." method="post" id="projectForm">
{% csrf_token %}
<div>
@@ -27,11 +46,20 @@
{% tailwind_formfield form.retention_max_event_count %}
{% tailwind_formfield form.dsn %}
<button name="action" value="invite" class="font-bold text-slate-800 dark:text-slate-100 border-slate-500 dark:border-slate-400 pl-4 pr-4 pb-2 pt-2 border-2 bg-cyan-200 dark:bg-cyan-700 hover:bg-cyan-400 dark:hover:bg-cyan-600 active:ring rounded-md">Save</button>
<a href="{% url "project_list" %}" class="text-cyan-500 dark:text-cyan-300 font-bold ml-2">Cancel</a>
<div class="flex items-center mt-4">
<button name="action" value="invite" class="font-bold text-slate-800 dark:text-slate-100 border-slate-500 dark:border-slate-400 pl-4 pr-4 pb-2 pt-2 border-2 bg-cyan-200 dark:bg-cyan-700 hover:bg-cyan-400 dark:hover:bg-cyan-600 active:ring rounded-md">Save</button>
<a href="{% url "project_list" %}" class="text-cyan-500 dark:text-cyan-300 font-bold ml-2">Cancel</a>
<button type="button" id="deleteButton" class="font-bold py-2 px-4 rounded bg-red-500 text-white border-2 border-red-600 hover:bg-red-600 active:ring ml-auto">Delete Project</button>
</div>
</form>
</div>
</div>
{% endblock %}
{% block extra_js %}
<script>
var csrftoken = '{{ csrf_token }}';
</script>
<script src="{% static 'js/entity_edit.js' %}"></script>
{% endblock %}

View File

@@ -1 +1,138 @@
# from django.test import TestCase as DjangoTestCase
from django.conf import settings
from django.apps import apps
from django.contrib.auth import get_user_model
from bugsink.test_utils import TransactionTestCase25251 as TransactionTestCase
from bugsink.utils import get_model_topography
from projects.models import Project, ProjectMembership
from events.factories import create_event
from issues.factories import get_or_create_issue
from tags.models import store_tags
from issues.models import TurningPoint, TurningPointKind
from alerts.models import MessagingServiceConfig
from releases.models import Release
from .tasks import get_model_topography_with_project_override
User = get_user_model()
class ProjectDeletionTestCase(TransactionTestCase):
def setUp(self):
super().setUp()
self.project = Project.objects.create(name="Test Project", stored_event_count=1) # 1, in prep. of the below
self.issue, _ = get_or_create_issue(self.project)
self.event = create_event(self.project, issue=self.issue)
self.user = User.objects.create_user(username='test', password='test')
TurningPoint.objects.create(
project=self.project,
issue=self.issue, triggering_event=self.event, timestamp=self.event.ingested_at,
kind=TurningPointKind.FIRST_SEEN)
MessagingServiceConfig.objects.create(project=self.project)
ProjectMembership.objects.create(project=self.project, user=self.user)
Release.objects.create(project=self.project, version="1.0.0")
self.event.never_evict = True
self.event.save()
store_tags(self.event, self.issue, {"foo": "bar"})
def test_delete_project(self):
models = [apps.get_model(app_label=s.split('.')[0], model_name=s.split('.')[1].lower()) for s in [
"tags.EventTag",
"tags.IssueTag",
"tags.TagValue",
"tags.TagKey",
"issues.TurningPoint",
"events.Event",
"issues.Grouping",
"alerts.MessagingServiceConfig",
"projects.ProjectMembership",
"releases.Release",
"issues.Issue",
"projects.Project",
]]
for model in models:
# test-the-test: make sure some instances of the models actually exist after setup
self.assertTrue(model.objects.exists(), f"Some {model.__name__} should exist")
# assertNumQueries() is brittle and opaque. But at least the brittle part is quick to fix (a single number) and
# provides a canary for performance regressions.
# correct for bugsink/transaction.py's select_for_update for non-sqlite databases
correct_for_select_for_update = 1 if 'sqlite' not in settings.DATABASES['default']['ENGINE'] else 0
with self.assertNumQueries(27 + correct_for_select_for_update):
self.project.delete_deferred()
# tests run w/ TASK_ALWAYS_EAGER, so in the below we can just check the database directly
for model in models:
self.assertFalse(model.objects.exists(), f"No {model.__name__}s should exist after issue deletion")
def test_dependency_graphs(self):
# tests for an implementation detail of deferred deletion, namely one test that asserts what the actual
# model-topography is, and one test that shows how we manually override it; this is to trigger a failure when
# the topology changes (and forces us to double-check that the override is still correct).
orig = get_model_topography()
override = get_model_topography_with_project_override()
def walk(topo, model_name):
results = []
for model, fk_name in topo[model_name]:
results.append((model, fk_name))
results.extend(walk(topo, model._meta.label))
return results
self.assertEqual(walk(orig, 'projects.Project'), [
(apps.get_model('projects', 'ProjectMembership'), 'project'),
(apps.get_model('releases', 'Release'), 'project'),
(apps.get_model('issues', 'Issue'), 'project'),
(apps.get_model('issues', 'Grouping'), 'issue'),
(apps.get_model('events', 'Event'), 'grouping'),
(apps.get_model('issues', 'TurningPoint'), 'triggering_event'),
(apps.get_model('tags', 'EventTag'), 'event'),
(apps.get_model('issues', 'TurningPoint'), 'issue'),
(apps.get_model('events', 'Event'), 'issue'),
(apps.get_model('issues', 'TurningPoint'), 'triggering_event'),
(apps.get_model('tags', 'EventTag'), 'event'),
(apps.get_model('tags', 'EventTag'), 'issue'),
(apps.get_model('tags', 'IssueTag'), 'issue'),
(apps.get_model('issues', 'Grouping'), 'project'),
(apps.get_model('events', 'Event'), 'grouping'),
(apps.get_model('issues', 'TurningPoint'), 'triggering_event'),
(apps.get_model('tags', 'EventTag'), 'event'),
(apps.get_model('issues', 'TurningPoint'), 'project'),
(apps.get_model('events', 'Event'), 'project'),
(apps.get_model('issues', 'TurningPoint'), 'triggering_event'),
(apps.get_model('tags', 'EventTag'), 'event'),
(apps.get_model('tags', 'TagKey'), 'project'),
(apps.get_model('tags', 'TagValue'), 'key'),
(apps.get_model('tags', 'EventTag'), 'value'),
(apps.get_model('tags', 'IssueTag'), 'value'),
(apps.get_model('tags', 'IssueTag'), 'key'),
(apps.get_model('tags', 'TagValue'), 'project'),
(apps.get_model('tags', 'EventTag'), 'value'),
(apps.get_model('tags', 'IssueTag'), 'value'),
(apps.get_model('tags', 'EventTag'), 'project'),
(apps.get_model('tags', 'IssueTag'), 'project'),
(apps.get_model('alerts', 'MessagingServiceConfig'), 'project'),
])
self.assertEqual(walk(override, 'projects.Project'), [
(apps.get_model('tags', 'EventTag'), 'project'),
(apps.get_model('tags', 'IssueTag'), 'project'),
(apps.get_model('tags', 'TagValue'), 'project'),
(apps.get_model('tags', 'TagKey'), 'project'),
(apps.get_model('issues', 'TurningPoint'), 'project'),
(apps.get_model('events', 'Event'), 'project'),
(apps.get_model('issues', 'Grouping'), 'project'),
(apps.get_model('alerts', 'MessagingServiceConfig'), 'project'),
(apps.get_model('projects', 'ProjectMembership'), 'project'),
(apps.get_model('releases', 'Release'), 'project'),
(apps.get_model('issues', 'Issue'), 'project')
])

View File

@@ -35,21 +35,24 @@ def project_list(request, ownership_filter=None):
my_memberships = ProjectMembership.objects.filter(user=request.user)
my_team_memberships = TeamMembership.objects.filter(user=request.user)
my_projects = Project.objects.filter(projectmembership__in=my_memberships).order_by('name').distinct()
my_projects = Project.objects.filter(
projectmembership__in=my_memberships, is_deleted=False).order_by('name').distinct()
my_teams_projects = \
Project.objects \
.filter(team__teammembership__in=my_team_memberships) \
.filter(team__teammembership__in=my_team_memberships, is_deleted=False) \
.exclude(projectmembership__in=my_memberships) \
.order_by('name').distinct()
if request.user.is_superuser:
# superusers can see all projects, even hidden ones
other_projects = Project.objects \
.filter(is_deleted=False) \
.exclude(projectmembership__in=my_memberships) \
.exclude(team__teammembership__in=my_team_memberships) \
.order_by('name').distinct()
else:
other_projects = Project.objects \
.filter(is_deleted=False) \
.exclude(projectmembership__in=my_memberships) \
.exclude(team__teammembership__in=my_team_memberships) \
.exclude(visibility=ProjectVisibility.TEAM_MEMBERS) \
@@ -158,13 +161,28 @@ def _check_project_admin(project, user):
@atomic_for_request_method
def project_edit(request, project_pk):
project = Project.objects.get(id=project_pk)
project = Project.objects.get(id=project_pk, is_deleted=False)
_check_project_admin(project, request.user)
if request.method == 'POST':
form = ProjectForm(request.POST, instance=project)
action = request.POST.get('action')
if action == 'delete':
# Double-check that the user is an admin or superuser
if (not request.user.is_superuser
and not ProjectMembership.objects.filter(
project=project, user=request.user, role=ProjectRole.ADMIN, accepted=True).exists()
and not TeamMembership.objects.filter(
team=project.team, user=request.user, role=TeamRole.ADMIN, accepted=True).exists()):
raise PermissionDenied("Only project or team admins can delete projects")
# Delete the project
project.delete_deferred()
messages.success(request, f'Project "{project.name}" has been deleted successfully.')
return redirect('project_list')
form = ProjectForm(request.POST, instance=project)
if form.is_valid():
form.save()
return redirect('project_members', project_pk=project.id)
@@ -180,7 +198,7 @@ def project_edit(request, project_pk):
@atomic_for_request_method
def project_members(request, project_pk):
project = Project.objects.get(id=project_pk)
project = Project.objects.get(id=project_pk, is_deleted=False)
_check_project_admin(project, request.user)
if request.method == 'POST':
@@ -215,7 +233,7 @@ def project_members_invite(request, project_pk):
# NOTE: project-member invite is just that: a direct invite to a project. If you want to also/instead invite someone
# to a team, you need to just do that instead.
project = Project.objects.get(id=project_pk)
project = Project.objects.get(id=project_pk, is_deleted=False)
_check_project_admin(project, request.user)
@@ -277,7 +295,7 @@ def project_member_settings(request, project_pk, user_pk):
this_is_you = str(user_pk) == str(request.user.id)
if not this_is_you:
_check_project_admin(Project.objects.get(id=project_pk), request.user)
_check_project_admin(Project.objects.get(id=project_pk, is_deleted=False), request.user)
membership = ProjectMembership.objects.get(project=project_pk, user=user_pk)
create_form = lambda data: ProjectMembershipForm(data, instance=membership) # noqa
@@ -302,7 +320,7 @@ def project_member_settings(request, project_pk, user_pk):
return render(request, 'projects/project_member_settings.html', {
'this_is_you': this_is_you,
'user': User.objects.get(id=user_pk),
'project': Project.objects.get(id=project_pk),
'project': Project.objects.get(id=project_pk, is_deleted=False),
'form': form,
})
@@ -362,7 +380,7 @@ def project_members_accept(request, project_pk):
# invited as user B. Security-wise this is fine, but UX-wise it could be confusing. However, I'm working on the assumption
# here that normal people (i.e. not me) don't have multiple accounts, so I'm not going to bother with this.
project = Project.objects.get(id=project_pk)
project = Project.objects.get(id=project_pk, is_deleted=False)
membership = ProjectMembership.objects.get(project=project, user=request.user)
if membership.accepted:
@@ -387,7 +405,7 @@ def project_members_accept(request, project_pk):
@atomic_for_request_method
def project_sdk_setup(request, project_pk, platform=""):
project = Project.objects.get(id=project_pk)
project = Project.objects.get(id=project_pk, is_deleted=False)
if not request.user.is_superuser and not ProjectMembership.objects.filter(project=project, user=request.user,
accepted=True).exists():
@@ -408,7 +426,7 @@ def project_sdk_setup(request, project_pk, platform=""):
@atomic_for_request_method
def project_alerts_setup(request, project_pk):
project = Project.objects.get(id=project_pk)
project = Project.objects.get(id=project_pk, is_deleted=False)
_check_project_admin(project, request.user)
if request.method == 'POST':
@@ -431,7 +449,7 @@ def project_alerts_setup(request, project_pk):
@atomic_for_request_method
def project_messaging_service_add(request, project_pk):
project = Project.objects.get(id=project_pk)
project = Project.objects.get(id=project_pk, is_deleted=False)
_check_project_admin(project, request.user)
if request.method == 'POST':
@@ -459,7 +477,7 @@ def project_messaging_service_add(request, project_pk):
@atomic_for_request_method
def project_messaging_service_edit(request, project_pk, service_pk):
project = Project.objects.get(id=project_pk)
project = Project.objects.get(id=project_pk, is_deleted=False)
_check_project_admin(project, request.user)
instance = project.service_configs.get(id=service_pk)

View File

@@ -0,0 +1,21 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
# Django came up with 0014, whatever the reason, I'm sure that 0013 is at least required (as per comments there)
("projects", "0014_alter_projectmembership_project"),
("releases", "0002_release_releases_re_sort_ep_5c07c8_idx"),
]
operations = [
migrations.AlterField(
model_name="release",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="projects.project"
),
),
]

View File

@@ -44,8 +44,7 @@ class Release(models.Model):
# sentry does releases per-org; we don't follow that example. our belief is basically: [1] in reality releases are
# per software package and a software package is basically a bugsink project and [2] any cross-project-per-org
# analysis you might do is more likely to be in the realm of "transactions", something we don't want to support.
project = models.ForeignKey(
"projects.Project", blank=False, null=True, on_delete=models.SET_NULL) # SET_NULL: cleanup 'later'
project = models.ForeignKey("projects.Project", blank=False, null=False, on_delete=models.DO_NOTHING)
# full version as provided by either implicit (per-event) or explicit (some API) means, including package name
# max_length matches Event.release (which is deduced from Sentry)
@@ -124,6 +123,7 @@ def create_release_if_needed(project, version, event, issue=None):
# triggering event anymore for our timestamp.
TurningPoint.objects.bulk_create([TurningPoint(
project=project,
issue=issue, kind=TurningPointKind.NEXT_MATERIALIZED, triggering_event=event,
metadata=json.dumps({"actual_release": release.version}), timestamp=event.ingested_at)
for issue in resolved_by_next_qs

View File

@@ -1,13 +1,16 @@
from django.test import TestCase as DjangoTestCase
from datetime import timedelta
from projects.models import Project
from .models import Release, ordered_releases, RE_PACKAGE_VERSION
class ReleaseTestCase(DjangoTestCase):
def test_create_and_order(self):
r0 = Release.objects.create(version="e80f98923f7426a8087009f4c629d25a35565a6a")
project = Project.objects.create(name="Test Project")
r0 = Release.objects.create(project=project, version="e80f98923f7426a8087009f4c629d25a35565a6a")
self.assertFalse(r0.is_semver)
self.assertEqual(0, r0.sort_epoch)
@@ -17,6 +20,7 @@ class ReleaseTestCase(DjangoTestCase):
# real usage too)
# * it ensures that dates are ignored when comparing r1 and r2 (r2 has a smaller date than r1, but comes later)
r1 = Release.objects.create(
project=project,
version="2a678dbbbecd2978ccaa76c326a0fb2e70073582",
date_released=r0.date_released + timedelta(seconds=10),
)
@@ -24,17 +28,17 @@ class ReleaseTestCase(DjangoTestCase):
self.assertEqual(0, r1.sort_epoch)
# switch to semver, epoch 1
r2 = Release.objects.create(version="1.0.0")
r2 = Release.objects.create(project=project, version="1.0.0")
self.assertTrue(r2.is_semver)
self.assertEqual(1, r2.sort_epoch)
# stick with semver, but use a lower version
r3 = Release.objects.create(version="0.1.0")
r3 = Release.objects.create(project=project, version="0.1.0")
self.assertTrue(r3.is_semver)
self.assertEqual(1, r3.sort_epoch)
# put in package name; this is basically ignored for ordering purposes
r4 = Release.objects.create(version="package@2.0.0")
r4 = Release.objects.create(project=project, version="package@2.0.0")
self.assertTrue(r4.is_semver)
self.assertEqual(ordered_releases(), [r0, r1, r3, r2, r4])

View File

@@ -0,0 +1,10 @@
from django.core.management.base import BaseCommand
from tags.tasks import vacuum_tagvalues
class Command(BaseCommand):
help = "Kick off tag cleanup by vacuuming orphaned TagValue and TagKey entries."
def handle(self, *args, **options):
vacuum_tagvalues.delay()
self.stdout.write("Started tag vacuum via task queue.")
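Django derives the command name from the file name, which is not visible in this diff; assuming the file is saved as e.g. `tags/management/commands/vacuum_tags.py` (name assumed), the cleanup would be kicked off with `python manage.py vacuum_tags`.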

View File

@@ -0,0 +1,40 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("tags", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="eventtag",
name="value",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="tags.tagvalue"
),
),
migrations.AlterField(
model_name="issuetag",
name="key",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="tags.tagkey"
),
),
migrations.AlterField(
model_name="issuetag",
name="value",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="tags.tagvalue"
),
),
migrations.AlterField(
model_name="tagvalue",
name="key",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="tags.tagkey"
),
),
]

View File

@@ -0,0 +1,26 @@
from django.db import migrations
def remove_objects_with_null_issue(apps, schema_editor):
# Up until now, we have various models w/ .issue=FK(null=True, on_delete=models.SET_NULL)
# Although it is "not expected" in the interface, issue-deletion would have led to those
# objects with a null issue. We're about to change that to .issue=FK(null=False, ...) which
# would crash if we don't remove those objects first. Object-removal is "fine" though, because
# as per the meaning of the SET_NULL, these objects were "dangling" anyway.
EventTag = apps.get_model("tags", "EventTag")
IssueTag = apps.get_model("tags", "IssueTag")
EventTag.objects.filter(issue__isnull=True).delete()
IssueTag.objects.filter(issue__isnull=True).delete()
class Migration(migrations.Migration):
dependencies = [
("tags", "0002_no_cascade"),
]
operations = [
migrations.RunPython(remove_objects_with_null_issue, reverse_code=migrations.RunPython.noop),
]

View File

@@ -0,0 +1,31 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("issues", "0021_alter_do_nothing"),
("tags", "0003_remove_objects_with_null_issue"),
]
operations = [
migrations.AlterField(
model_name="eventtag",
name="issue",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="event_tags",
to="issues.issue",
),
),
migrations.AlterField(
model_name="issuetag",
name="issue",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="tags",
to="issues.issue",
),
),
]

View File

@@ -0,0 +1,42 @@
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
# Django came up with 0014, whatever the reason, I'm sure that 0013 is at least required (as per comments there)
("projects", "0014_alter_projectmembership_project"),
("tags", "0004_alter_do_nothing"),
]
operations = [
migrations.AlterField(
model_name="eventtag",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="projects.project"
),
),
migrations.AlterField(
model_name="issuetag",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="projects.project"
),
),
migrations.AlterField(
model_name="tagkey",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="projects.project"
),
),
migrations.AlterField(
model_name="tagvalue",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="projects.project"
),
),
]

View File

@@ -36,7 +36,7 @@ from tags.utils import deduce_tags, is_mostly_unique
class TagKey(models.Model):
project = models.ForeignKey(Project, blank=False, null=True, on_delete=models.SET_NULL) # SET_NULL: cleanup 'later'
project = models.ForeignKey(Project, blank=False, null=False, on_delete=models.DO_NOTHING)
key = models.CharField(max_length=32, blank=False, null=False)
# Tags that are "mostly unique" are not displayed in the issue tag counts, because the distribution of values is
@@ -54,8 +54,8 @@ class TagKey(models.Model):
class TagValue(models.Model):
project = models.ForeignKey(Project, blank=False, null=True, on_delete=models.SET_NULL) # SET_NULL: cleanup 'later'
key = models.ForeignKey(TagKey, blank=False, null=False, on_delete=models.CASCADE)
project = models.ForeignKey(Project, blank=False, null=False, on_delete=models.DO_NOTHING)
key = models.ForeignKey(TagKey, blank=False, null=False, on_delete=models.DO_NOTHING)
value = models.CharField(max_length=200, blank=False, null=False, db_index=True)
class Meta:
@@ -69,22 +69,21 @@ class TagValue(models.Model):
class EventTag(models.Model):
project = models.ForeignKey(Project, blank=False, null=True, on_delete=models.SET_NULL) # SET_NULL: cleanup 'later'
project = models.ForeignKey(Project, blank=False, null=False, on_delete=models.DO_NOTHING)
# value already implies key in our current setup.
value = models.ForeignKey(TagValue, blank=False, null=False, on_delete=models.CASCADE)
value = models.ForeignKey(TagValue, blank=False, null=False, on_delete=models.DO_NOTHING)
# issue is a denormalization that allows for a single-table-index for efficient search.
# SET_NULL: Issue deletion is not actually possible yet, so this is moot (for now).
issue = models.ForeignKey(
'issues.Issue', blank=False, null=True, on_delete=models.SET_NULL, related_name="event_tags")
'issues.Issue', blank=False, null=False, on_delete=models.DO_NOTHING, related_name="event_tags")
# digest_order is a denormalization that allows for a single-table-index for efficient search.
digest_order = models.PositiveIntegerField(blank=False, null=False)
# DO_NOTHING: we manually implement CASCADE (i.e. when an event is cleaned up, clean up associated tags) in the
# eviction process. Why CASCADE? [1] you'll have to do it "at some point", so you might as well do it right when
# evicting (async in the 'most resilient setup' anyway, b/c that happens when ingesting) [2] the order of magnitude
# evicting (async in the 'most resilient setup' anyway, b/c that happens when digesting) [2] the order of magnitude
# is "tens of deletions per event", so that's no reason to postpone. "Why manually" is explained in events/retention
event = models.ForeignKey('events.Event', blank=False, null=False, on_delete=models.DO_NOTHING, related_name='tags')
@@ -107,16 +106,15 @@ class EventTag(models.Model):
class IssueTag(models.Model):
project = models.ForeignKey(Project, blank=False, null=True, on_delete=models.SET_NULL) # SET_NULL: cleanup 'later'
project = models.ForeignKey(Project, blank=False, null=False, on_delete=models.DO_NOTHING)
# denormalization that allows for a single-table-index for efficient search.
key = models.ForeignKey(TagKey, blank=False, null=False, on_delete=models.CASCADE)
key = models.ForeignKey(TagKey, blank=False, null=False, on_delete=models.DO_NOTHING)
# value already implies key in our current setup.
value = models.ForeignKey(TagValue, blank=False, null=False, on_delete=models.CASCADE)
value = models.ForeignKey(TagValue, blank=False, null=False, on_delete=models.DO_NOTHING)
# SET_NULL: Issue deletion is not actually possible yet, so this is moot (for now).
issue = models.ForeignKey('issues.Issue', blank=False, null=True, on_delete=models.SET_NULL, related_name='tags')
issue = models.ForeignKey('issues.Issue', blank=False, null=False, on_delete=models.DO_NOTHING, related_name='tags')
# 1. As it stands, there is only a single counter per issue-tagvalue combination. In principle/theory this type of
# denormalization will break down when you want to show this kind of information filtered by some other dimension,

tags/tasks.py
View File

@@ -0,0 +1,84 @@
from snappea.decorators import shared_task
from bugsink.transaction import immediate_atomic, delay_on_commit
from tags.models import TagValue, TagKey, EventTag, IssueTag
BATCH_SIZE = 10_000
@shared_task
def vacuum_tagvalues(min_id=0):
# This task cleans up unused TagValues in batches. A TagValue becomes unused when no IssueTag or EventTag references
# it any more, which can happen when IssueTag or EventTag entries are deleted. Cleanup is skipped at deletion time to
# avoid repeated checks, but it still needs to happen eventually to avoid bloating the database, which is what this task does.
# Impl. notes:
#
# * select ids_to_check first, and then check which of those are used in EventTag or IssueTag. This avoids doing
# TagValue.exclude(some_usage_pattern) which may be slow / for which reasoning about performance is hard.
# * batched to allow for incremental cleanup, using a defer-with-min-id pattern to implement the batching.
#
# Known limitation:
# with _many_ TagValues (whether used or not) and when running in EAGER mode, this thing overflows the stack.
# Basically: because then the "delayed recursion" is not actually delayed, it just runs immediately. Answer: for
# "big things" (basically: serious setups) set up snappea.
with immediate_atomic():
# Select candidate TagValue IDs above min_id
ids_to_check = list(
TagValue.objects
.filter(id__gt=min_id)
.order_by('id')
.values_list('id', flat=True)[:BATCH_SIZE]
)
if not ids_to_check:
# Done with TagValues → start TagKey cleanup
delay_on_commit(vacuum_tagkeys, 0)
return
# Determine which ids_to_check are referenced
used_in_event = set(
EventTag.objects.filter(value_id__in=ids_to_check).values_list('value_id', flat=True)
)
used_in_issue = set(
IssueTag.objects.filter(value_id__in=ids_to_check).values_list('value_id', flat=True)
)
unused = [pk for pk in ids_to_check if pk not in used_in_event and pk not in used_in_issue]
# Actual deletion
if unused:
TagValue.objects.filter(id__in=unused).delete()
# Defer next batch
vacuum_tagvalues.delay(ids_to_check[-1])
@shared_task
def vacuum_tagkeys(min_id=0):
with immediate_atomic():
# Select candidate TagKey IDs above min_id
ids_to_check = list(
TagKey.objects
.filter(id__gt=min_id)
.order_by('id')
.values_list('id', flat=True)[:BATCH_SIZE]
)
if not ids_to_check:
return # done
# Determine which ids_to_check are referenced
used = set(
TagValue.objects.filter(key_id__in=ids_to_check).values_list('key_id', flat=True)
)
unused = [pk for pk in ids_to_check if pk not in used]
# Actual deletion
if unused:
TagKey.objects.filter(id__in=unused).delete()
# Defer next batch
vacuum_tagkeys.delay(ids_to_check[-1])

View File

@@ -3,7 +3,7 @@ from django.test import TestCase as DjangoTestCase
from projects.models import Project
from issues.factories import get_or_create_issue, denormalized_issue_fields
from events.factories import create_event
from events.factories import create_event, create_event_data
from issues.models import Issue
from .models import store_tags, EventTag
@@ -17,7 +17,6 @@ class DeduceTagsTestCase(RegularTestCase):
self.assertEqual(deduce_tags({}), {})
self.assertEqual(deduce_tags({"tags": {"foo": "bar"}}), {"foo": "bar"})
# finally, a more complex example (more or less real-world)
event_data = {
"server_name": "server",
"release": "1.0",
@@ -179,12 +178,12 @@ class SearchTestCase(DjangoTestCase):
# scenario (in which there would be some relation between the tags of issues and events), but it allows us to
# test event_search more easily (if each event is tied to a different issue, searching for tags is meaningless,
# since you always search within the context of an issue).
self.global_issue = Issue.objects.create(project=self.project, **denormalized_issue_fields())
self.global_issue, _ = get_or_create_issue(project=self.project, event_data=create_event_data("global"))
issue_with_tags_and_text = Issue.objects.create(project=self.project, **denormalized_issue_fields())
issue_with_tags_and_text, _ = get_or_create_issue(project=self.project, event_data=create_event_data("tag_txt"))
event_with_tags_and_text = create_event(self.project, issue=self.global_issue)
issue_with_tags_no_text = Issue.objects.create(project=self.project, **denormalized_issue_fields())
issue_with_tags_no_text, _ = get_or_create_issue(project=self.project, event_data=create_event_data("no_text"))
event_with_tags_no_text = create_event(self.project, issue=self.global_issue)
store_tags(event_with_tags_and_text, issue_with_tags_and_text, {f"k-{i}": f"v-{i}" for i in range(5)})
@@ -192,7 +191,7 @@ class SearchTestCase(DjangoTestCase):
# fix the EventTag objects' issue, which is broken per the non-real-world setup (see above)
EventTag.objects.all().update(issue=self.global_issue)
issue_without_tags = Issue.objects.create(project=self.project, **denormalized_issue_fields())
issue_without_tags, _ = get_or_create_issue(project=self.project, event_data=create_event_data("no_tags"))
event_without_tags = create_event(self.project, issue=self.global_issue)
for obj in [issue_with_tags_and_text, event_with_tags_and_text, issue_without_tags, event_without_tags]:
@@ -200,7 +199,7 @@ class SearchTestCase(DjangoTestCase):
obj.calculated_value = "findable value"
obj.save()
Issue.objects.create(project=self.project, **denormalized_issue_fields())
get_or_create_issue(project=self.project, event_data=create_event_data("no_text"))
create_event(self.project, issue=self.global_issue)
def _test_search(self, search_x):

View File

@@ -87,6 +87,9 @@ def deduce_tags(event_data):
# we start with the explicitly provided tags
tags = event_data.get('tags', {})
if isinstance(tags, list):
tags = {k: v for k, v in tags}
for tag_key, lookup_path in EVENT_DATA_CONVERSION_TABLE.items():
value = get_path(event_data, *lookup_path)
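SDKs may send `tags` either as a mapping or as a list of key/value pairs; the `isinstance` branch above normalizes the list form. A quick sketch of the conversion (illustrative values):

    tags = [["foo", "bar"], ["baz", "qux"]]  # list-of-pairs form
    if isinstance(tags, list):
        tags = {k: v for k, v in tags}
    assert tags == {"foo": "bar", "baz": "qux"}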

View File

@@ -122,8 +122,22 @@ def team_edit(request, team_pk):
raise PermissionDenied("You are not an admin of this team")
if request.method == 'POST':
form = TeamForm(request.POST, instance=team)
action = request.POST.get('action')
if action == 'delete':
# Double-check that the user is an admin or superuser
if not (TeamMembership.objects.filter(team=team, user=request.user, role=TeamRole.ADMIN, accepted=True).exists() or
request.user.is_superuser):
raise PermissionDenied("Only team admins can delete teams")
# Delete all associated projects first
team.project_set.all().delete()
# Delete the team itself
team.delete()
messages.success(request, f'Team "{team.name}" has been deleted successfully.')
return redirect('team_list')
form = TeamForm(request.POST, instance=team)
if form.is_valid():
form.save()
return redirect('team_members', team_pk=team.id)