Use 'monofy' package now that it is extracted

This commit is contained in:
Klaas van Schelven
2024-09-04 22:54:27 +02:00
parent ce6fe06315
commit 67cfbb58d7
6 changed files with 3 additions and 193 deletions

View File

@@ -32,4 +32,4 @@ RUN ["bugsink-manage", "migrate", "snappea", "--database=snappea"]
EXPOSE 8000
CMD [ "bugsink-server-unified", "bugsink-manage", "check", "--deploy", "--fail-level", "WARNING", "&&", "gunicorn", "--bind=0.0.0.0:8000", "--workers=10", "--access-logfile", "-", "bugsink.wsgi", "|||", "bugsink-runsnappea"]
CMD [ "monofy", "bugsink-manage", "check", "--deploy", "--fail-level", "WARNING", "&&", "gunicorn", "--bind=0.0.0.0:8000", "--workers=10", "--access-logfile", "-", "bugsink.wsgi", "|||", "bugsink-runsnappea"]

View File

@@ -1,134 +0,0 @@
#!/usr/bin/env python
import subprocess
import signal
import sys
import os
from time import sleep
class ParentProcess:
    """Supervise the server and snappea as children of a single process.

    Intended to run the two parts of Bugsink in a single Docker container; also
    useful for [developer] ergonomics when running in a terminal.
    """

    def __init__(self):
        """
        This script starts both the server and snappea as children of a single process.

        * Output of the children is passed as our own.
        * Any (relevant) signals we receive are passed to all the children.
        * When either of the children exits, a signal is sent to the other child to terminate it.
        * The script waits for both children to exit before exiting itself.
        """
        print("Server-unified starting with pid", os.getpid())
        self.pre_start()

        self.children = []

        # Docker sends a SIGTERM to the main process when it wants to stop the container; SIGINT is for
        # interactive use and is also supported. SIGKILL cannot be handled, so we can't do anything about that.
        # NOTE(review): handlers are installed before the children exist; a signal arriving in that brief window
        # is silently dropped (self.children is still empty) — confirm this is acceptable.
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)

        try:
            self.start_children()
            self.connect_childrens_fates()
        finally:
            # Always reap every started child (even when start_children raised) to avoid zombies.
            for child in self.children:
                child.wait()

    def pre_start(self):
        """Run each '&&'-separated pre-start command sequentially; exit on the first failure.

        I'd rather this was not needed, but I don't know how to do that in a way that works with Docker: the
        recommended way of running CMD in a Dockerfile is the exec form, which doesn't allow for running a script
        that does some setup before starting the main process, i.e. doesn't allow for '&&'. "Recommended" here
        means: you get a warning about signal-handling if you choose the other form.
        """
        for args in self.get_pre_start_command_args(sys.argv):
            print("Server-unified 'pre-start' process:", " ".join(args))
            proc = subprocess.run(args)
            if proc.returncode != 0:
                # Propagate the failing command's exit code as our own.
                sys.exit(proc.returncode)

    def start_children(self):
        """Spawn each '|||'-separated command as a child process.

        Leaving stdout and stderr as None makes the children's output pass through as our own.
        """
        for args in self.get_parallel_command_args(sys.argv):
            try:
                child = subprocess.Popen(args)
            except Exception:
                # A print here would be superfluous: the exception is printed anyway by the raise.
                # If one child fails to start, tear down the ones already running.
                self.terminate_children()
                raise
            print("Server-unified started process %s:" % child.pid, " ".join(args))
            self.children.append(child)

    def terminate_children(self, except_child=None):
        """Send SIGTERM to all children, optionally sparing `except_child` (e.g. one that already exited)."""
        for child in self.children:
            if child != except_child:
                child.send_signal(signal.SIGTERM)

    def connect_childrens_fates(self):
        """Poll until any child exits, then terminate the others ("if one dies, all die")."""
        children_are_alive = True
        while children_are_alive:
            sleep(.05)  # Sleep in the busy loop to avoid 100% CPU usage
            for child in self.children:
                if child.poll() is not None:
                    # One of the children has exited; stop looping and take the rest down with it.
                    children_are_alive = False
                    self.terminate_children(except_child=child)

    @classmethod
    def get_pre_start_command_args(cls, argv):
        """Split our own arguments into a list of args for each pre-start command; we split on "&&".

        "&&" serves as a terminator: we only add-to-result when we encounter it, so the final segment is never
        added here (it is dealt with as the set of parallel commands).
        """
        # Skip the first argument: it is the script name.
        args = argv[1:]

        result = []
        this = []
        for arg in args:
            if arg == "&&":
                result.append(this)
                this = []
            else:
                this.append(arg)
        return result

    @classmethod
    def get_parallel_command_args(cls, argv):
        """Split our own arguments into a list of args for each of the children; we split on "|||".

        Everything up to and including the last "&&" belongs to the pre-start commands and is discarded first.
        """
        # Skip the first argument: it is the script name.
        args = argv[1:]

        # Drop all pre-start segments (everything through the last "&&").
        while "&&" in args:
            args = args[args.index("&&") + 1:]

        result = [[]]
        for arg in args:
            if arg == "|||":
                result.append([])
            else:
                result[-1].append(arg)
        return result

    def signal_handler(self, signum, frame):
        """Forward the received signal to every child; we resist the urge to print here, as that is discouraged
        in signal handlers."""
        for child in self.children:
            child.send_signal(signum)
def main():
    """Console-script entry point: construct ParentProcess, which supervises the children until they exit."""
    ParentProcess()


if __name__ == "__main__":
    main()

View File

@@ -7,7 +7,6 @@ from .volume_based_condition import VolumeBasedCondition
from .streams import (
compress_with_zlib, GeneratorReader, WBITS_PARAM_FOR_GZIP, WBITS_PARAM_FOR_DEFLATE, MaxDataReader,
MaxDataWriter, zlib_generator, brotli_generator)
from .scripts.server_unified import ParentProcess
def apply_n(f, n, v):
@@ -130,58 +129,3 @@ class StreamsTestCase(RegularTestCase):
with self.assertRaises(ValueError):
writer.write(b"hellohello")
class ServerUnifiedTestCase(RegularTestCase):
    """Tests for ParentProcess's splitting of argv into pre-start ("&&") and parallel ("|||") command lists."""

    def test_arg_parsing(self):
        # Each case is (argv, expected pre-start commands, expected parallel commands).
        cases = [
            # A bare script name means: a single empty command (which would lead to a failure). It's the
            # meaningless case anyway, so there is no special-case handling for it. In other words: there must be
            # at least one command (and even that is quite meaningless, since you could just run the command
            # directly).
            (
                ["script.py"],
                [],
                [[]],
            ),
            (
                ["script.py", "a", "b"],
                [],
                [["a", "b"]],
            ),
            (
                ["script.py", "a", "b", "|||", "c", "d", "|||", "e", "f"],
                [],
                [["a", "b"], ["c", "d"], ["e", "f"]],
            ),
            (
                ["script.py", "a", "b", "&&", "c", "d", "|||", "e", "f"],
                [["a", "b"]],
                [["c", "d"], ["e", "f"]],
            ),
            # NOTE(review): this case duplicates the all-parallel one above; kept as-is to preserve behavior.
            (
                ["script.py", "a", "b", "|||", "c", "d", "|||", "e", "f"],
                [],
                [["a", "b"], ["c", "d"], ["e", "f"]],
            ),
            (
                ["script.py", "a", "b", "&&", "c", "d", "&&", "e", "f"],
                [["a", "b"], ["c", "d"]],
                [["e", "f"]],
            ),
            (
                ["script.py", "a", "b", "&&", "c", "d", "&&", "e", "f", "|||", "g", "h", "|||", "i", "j"],
                [["a", "b"], ["c", "d"]],
                [["e", "f"], ["g", "h"], ["i", "j"]],
            ),
        ]

        for argv, expected_pre_start, expected_parallel in cases:
            self.assertEqual(expected_pre_start, ParentProcess.get_pre_start_command_args(argv))
            self.assertEqual(expected_parallel, ParentProcess.get_parallel_command_args(argv))

View File

@@ -24,7 +24,6 @@ bugsink-show-version = "bugsink.scripts.show_version:main"
bugsink-manage = "bugsink.scripts.manage:main"
bugsink-create-conf = "bugsink.scripts.create_conf:main"
bugsink-runsnappea = "bugsink.scripts.runsnappea:main"
bugsink-server-unified = "bugsink.scripts.server_unified:main"
[tool.setuptools]
include-package-data = true # this is the default, but explicit is better than implicit

View File

@@ -11,3 +11,4 @@ brotli
python-dateutil
whitenoise
requests # for sentry-sdk-extensions, which is loaded in non-dev setup too
monofy

View File

@@ -16,7 +16,7 @@ DEFAULTS = {
# * containers are supposed to be disposable; the message queue will be disposed of when the container is; the
# ingested (but not yet digested) events in the /tmp/ dir will be too, by the way.
# * snappea may get a TERM signal because either the container is being stopped, or when the server exits (via
# bugsink-server-unified).
# monofy).
#
# Given the above, it's better for snappea to do all the work it can before it gets killed the drastic way when
# Docker gets impatient, than to quickly shut down and leave the server with a bunch of unprocessed events. This is