Do not run subprocesses while intermediary server is active
Any process inheriting the open port descriptor of that server will cause the actual server startup to fail because the port is still claimed. We can't fully prevent this under Windows, since fcntl is not available there and win32api is a PITA, and Popen doesn't allow close_fds if we also need to redirect stdout/stderr/stdin of a process. So let's hope that hardening against this problem when running under *nix, adding a big fat warning to never start a subprocess during the intermediary's runtime, and also moving the only process we so far DID start at that point (analysis backlog processing) to after Tornado is running will suffice. Fixes #2035
This commit is contained in:
parent
a9b6edde2b
commit
c336452f6c
3 changed files with 47 additions and 5 deletions
|
|
@ -23,7 +23,9 @@ fix/.* 1.3.6 1a6dbb3f4a5bef857cdeb13c031b9deca2cf30a2 pep440-dev
|
|||
improve/.* 1.3.6 1a6dbb3f4a5bef857cdeb13c031b9deca2cf30a2 pep440-dev
|
||||
|
||||
# staging/maintenance is currently the branch for preparation of 1.3.5rc2
|
||||
# so is regressionfix/...
|
||||
staging/maintenance 1.3.5rc2 1a6dbb3f4a5bef857cdeb13c031b9deca2cf30a2 pep440-dev
|
||||
regressionfix/.* 1.3.5rc2 1a6dbb3f4a5bef857cdeb13c031b9deca2cf30a2 pep440-dev
|
||||
|
||||
# every other branch is a development branch and thus gets resolved to 1.4.0-dev for now
|
||||
.* 1.4.0 7f5d03d0549bcbd26f40e7e4a3297ea5204fb1cc pep440-dev
|
||||
|
|
|
|||
|
|
@ -180,9 +180,12 @@ class FileManager(object):
|
|||
import octoprint.settings
|
||||
self._recovery_file = os.path.join(octoprint.settings.settings().getBaseFolder("data"), "print_recovery_data.yaml")
|
||||
|
||||
def initialize(self):
|
||||
def initialize(self, process_backlog=False):
|
||||
self.reload_plugins()
|
||||
if process_backlog:
|
||||
self.process_backlog()
|
||||
|
||||
def process_backlog(self):
|
||||
def worker():
|
||||
self._logger.info("Adding backlog items from all storage types to analysis queue...".format(**locals()))
|
||||
for storage_type, storage_manager in self._storage_managers.items():
|
||||
|
|
|
|||
|
|
@ -27,6 +27,11 @@ import atexit
|
|||
import signal
|
||||
import base64
|
||||
|
||||
try:
|
||||
import fcntl
|
||||
except ImportError:
|
||||
fcntl = None
|
||||
|
||||
SUCCESS = {}
|
||||
NO_CONTENT = ("", 204)
|
||||
NOT_MODIFIED = ("Not Modified", 304)
|
||||
|
|
@ -214,6 +219,17 @@ class Server(object):
|
|||
# start the intermediary server
|
||||
self._start_intermediary_server()
|
||||
|
||||
### IMPORTANT!
|
||||
###
|
||||
### Do not start any subprocesses until the intermediary server shuts down again or they WILL inherit the
|
||||
### open port and prevent us from firing up Tornado later. Thanks to close_fds not being available on Popen
|
||||
### on Windows if you also want to be able to redirect stdout/stderr/stdin and fcntl also not being available
|
||||
### we don't have a good way around this issue besides being careful not to spawn processes here.
|
||||
###
|
||||
### Which kinda sucks tbh.
|
||||
###
|
||||
### See also issue #2035
|
||||
|
||||
# then initialize the plugin manager
|
||||
pluginManager.reload_plugins(startup=True, initialize_implementations=False)
|
||||
|
||||
|
|
@ -569,8 +585,13 @@ class Server(object):
|
|||
self._server = util.tornado.CustomHTTPServer(self._tornado_app, max_body_sizes=max_body_sizes, default_max_body_size=self._settings.getInt(["server", "maxSize"]))
|
||||
self._server.listen(self._port, address=self._host)
|
||||
|
||||
### From now on it's ok to launch subprocesses again
|
||||
|
||||
eventManager.fire(events.Events.STARTUP)
|
||||
|
||||
# analysis backlog
|
||||
fileManager.process_backlog()
|
||||
|
||||
# auto connect
|
||||
if self._settings.getBoolean(["serial", "autoconnect"]):
|
||||
try:
|
||||
|
|
@ -1356,21 +1377,37 @@ class Server(object):
|
|||
|
||||
rules = map(process, filter(lambda rule: len(rule) == 2 or len(rule) == 3, rules))
|
||||
|
||||
self._intermediary_server = BaseHTTPServer.HTTPServer((host, port), lambda *args, **kwargs: IntermediaryServerHandler(rules, *args, **kwargs))
|
||||
self._intermediary_server = BaseHTTPServer.HTTPServer((host, port),
|
||||
lambda *args, **kwargs: IntermediaryServerHandler(rules, *args, **kwargs),
|
||||
bind_and_activate=False)
|
||||
|
||||
# if possible, make sure our socket's port descriptor isn't handed over to subprocesses
|
||||
if fcntl is not None and hasattr(fcntl, "FD_CLOEXEC"):
|
||||
flags = fcntl.fcntl(self._intermediary_server.socket, fcntl.F_GETFD)
|
||||
flags |= fcntl.FD_CLOEXEC
|
||||
fcntl.fcntl(self._intermediary_server.socket, fcntl.F_SETFD, flags)
|
||||
|
||||
# then bind the server and have it serve our handler until stopped
|
||||
try:
|
||||
self._intermediary_server.server_bind()
|
||||
self._intermediary_server.server_activate()
|
||||
except:
|
||||
self._intermediary_server.server_close()
|
||||
raise
|
||||
|
||||
thread = threading.Thread(target=self._intermediary_server.serve_forever)
|
||||
thread.daemon = True
|
||||
thread.start()
|
||||
|
||||
self._logger.debug("Intermediary server started")
|
||||
self._logger.info("Intermediary server started")
|
||||
|
||||
def _stop_intermediary_server(self):
|
||||
if self._intermediary_server is None:
|
||||
return
|
||||
self._logger.debug("Shutting down intermediary server...")
|
||||
self._logger.info("Shutting down intermediary server...")
|
||||
self._intermediary_server.shutdown()
|
||||
self._intermediary_server.server_close()
|
||||
self._logger.debug("Intermediary server shut down")
|
||||
self._logger.info("Intermediary server shut down")
|
||||
|
||||
class LifecycleManager(object):
|
||||
def __init__(self, plugin_manager):
|
||||
|
|
|
|||
Loading…
Reference in a new issue