From c336452f6c4f16bf4097a60e4f7a7fdc96ad8489 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gina=20H=C3=A4u=C3=9Fge?= Date: Thu, 27 Jul 2017 09:58:20 +0200 Subject: [PATCH] Do not run subprocesses while intermediary server is active Any processes inheriting the open port descriptor of that server will cause the actual server startup to fail due to the port still being claimed. We can't fully prevent this under Windows thanks to fcntl not being available and win32api being a PITA, and also close_fds on Popen not being allowed if we also need to redirect stdout/stderr/stdin for a process. So let's hope that hardening against this problem when running under *nix, adding a big fat warning to never start a subprocess during the intermediary's runtime and also moving the only actual process we so far DID start (analysis backlog processing) to after Tornado is running will suffice. Fixes #2035 --- .versioneer-lookup | 2 ++ src/octoprint/filemanager/__init__.py | 5 ++- src/octoprint/server/__init__.py | 45 ++++++++++++++++++++++++--- 3 files changed, 47 insertions(+), 5 deletions(-) diff --git a/.versioneer-lookup b/.versioneer-lookup index d08eaf68..604c50fc 100644 --- a/.versioneer-lookup +++ b/.versioneer-lookup @@ -23,7 +23,9 @@ fix/.* 1.3.6 1a6dbb3f4a5bef857cdeb13c031b9deca2cf30a2 pep440-dev improve/.* 1.3.6 1a6dbb3f4a5bef857cdeb13c031b9deca2cf30a2 pep440-dev # staging/maintenance is currently the branch for preparation of 1.3.5rc2 +# so is regressionfix/... 
staging/maintenance 1.3.5rc2 1a6dbb3f4a5bef857cdeb13c031b9deca2cf30a2 pep440-dev +regressionfix/.* 1.3.5rc2 1a6dbb3f4a5bef857cdeb13c031b9deca2cf30a2 pep440-dev # every other branch is a development branch and thus gets resolved to 1.4.0-dev for now .* 1.4.0 7f5d03d0549bcbd26f40e7e4a3297ea5204fb1cc pep440-dev diff --git a/src/octoprint/filemanager/__init__.py b/src/octoprint/filemanager/__init__.py index a5b1bf6a..8c59e8aa 100644 --- a/src/octoprint/filemanager/__init__.py +++ b/src/octoprint/filemanager/__init__.py @@ -180,9 +180,12 @@ class FileManager(object): import octoprint.settings self._recovery_file = os.path.join(octoprint.settings.settings().getBaseFolder("data"), "print_recovery_data.yaml") - def initialize(self): + def initialize(self, process_backlog=False): self.reload_plugins() + if process_backlog: + self.process_backlog() + def process_backlog(self): def worker(): self._logger.info("Adding backlog items from all storage types to analysis queue...".format(**locals())) for storage_type, storage_manager in self._storage_managers.items(): diff --git a/src/octoprint/server/__init__.py b/src/octoprint/server/__init__.py index bc11a1d1..61c3bc93 100644 --- a/src/octoprint/server/__init__.py +++ b/src/octoprint/server/__init__.py @@ -27,6 +27,11 @@ import atexit import signal import base64 +try: + import fcntl +except ImportError: + fcntl = None + SUCCESS = {} NO_CONTENT = ("", 204) NOT_MODIFIED = ("Not Modified", 304) @@ -214,6 +219,17 @@ class Server(object): # start the intermediary server self._start_intermediary_server() + ### IMPORTANT! + ### + ### Do not start any subprocesses until the intermediary server shuts down again or they WILL inherit the + ### open port and prevent us from firing up Tornado later. 
Thanks to close_fds not being available on Popen + ### on Windows if you also want to be able to redirect stdout/stderr/stdin and fcntl also not being available + ### we don't have a good way around this issue besides being careful not to spawn processes here. + ### + ### Which kinda sucks tbh. + ### + ### See also issue #2035 + # then initialize the plugin manager pluginManager.reload_plugins(startup=True, initialize_implementations=False) @@ -569,8 +585,13 @@ class Server(object): self._server = util.tornado.CustomHTTPServer(self._tornado_app, max_body_sizes=max_body_sizes, default_max_body_size=self._settings.getInt(["server", "maxSize"])) self._server.listen(self._port, address=self._host) + ### From now on it's ok to launch subprocesses again + eventManager.fire(events.Events.STARTUP) + # analysis backlog + fileManager.process_backlog() + # auto connect if self._settings.getBoolean(["serial", "autoconnect"]): try: @@ -1356,21 +1377,37 @@ class Server(object): rules = map(process, filter(lambda rule: len(rule) == 2 or len(rule) == 3, rules)) - self._intermediary_server = BaseHTTPServer.HTTPServer((host, port), lambda *args, **kwargs: IntermediaryServerHandler(rules, *args, **kwargs)) + self._intermediary_server = BaseHTTPServer.HTTPServer((host, port), + lambda *args, **kwargs: IntermediaryServerHandler(rules, *args, **kwargs), + bind_and_activate=False) + + # if possible, make sure our socket's port descriptor isn't handed over to subprocesses + if fcntl is not None and hasattr(fcntl, "FD_CLOEXEC"): + flags = fcntl.fcntl(self._intermediary_server.socket, fcntl.F_GETFD) + flags |= fcntl.FD_CLOEXEC + fcntl.fcntl(self._intermediary_server.socket, fcntl.F_SETFD, flags) + + # then bind the server and have it serve our handler until stopped + try: + self._intermediary_server.server_bind() + self._intermediary_server.server_activate() + except: + self._intermediary_server.server_close() + raise thread = threading.Thread(target=self._intermediary_server.serve_forever) 
thread.daemon = True thread.start() - self._logger.debug("Intermediary server started") + self._logger.info("Intermediary server started") def _stop_intermediary_server(self): if self._intermediary_server is None: return - self._logger.debug("Shutting down intermediary server...") + self._logger.info("Shutting down intermediary server...") self._intermediary_server.shutdown() self._intermediary_server.server_close() - self._logger.debug("Intermediary server shut down") + self._logger.info("Intermediary server shut down") class LifecycleManager(object): def __init__(self, plugin_manager):