Automatically cancel running slicing jobs targeting the same output file if a new slicing request is received
This way there won't be two concurrent jobs trying to slice to the same file, which would effectively waste the CPU cycles of every slicing job other than the final one. Also adds a new event "SlicingCancelled" that will be sent in these cases.
This commit is contained in:
parent
606efd3da1
commit
44109f9d8f
7 changed files with 83 additions and 7 deletions
|
|
@ -316,6 +316,9 @@ Issue a file command
|
|||
be overridden with the supplied value. Use this if you want to specify things that change often like a different
|
||||
temperature, filament diameter or infill percentage. Profile keys are slicer specific.
|
||||
|
||||
If consecutive slicing calls are made targeting the same GCODE filename (that also holds true if the default is used),
|
||||
the slicing job already running in the background will be cancelled before the new one is started.
|
||||
|
||||
Upon success, a status code of :http:statuscode:`204` and an empty body is returned, unless specified otherwise.
|
||||
|
||||
**Example Select Request**
|
||||
|
|
|
|||
|
|
@ -338,6 +338,15 @@ SlicingDone
|
|||
* ``gcode``: the sliced GCODE's filename
|
||||
* ``time``: the time needed for slicing, in seconds (float)
|
||||
|
||||
SlicingCancelled
|
||||
The slicing of a file has been cancelled. This will happen if a second slicing job
|
||||
targeting the same GCODE file has been started by the user.
|
||||
|
||||
Payload:
|
||||
|
||||
* ``stl``: the STL's filename
|
||||
* ``gcode``: the sliced GCODE's filename
|
||||
|
||||
SlicingFailed
|
||||
The slicing of a file has failed.
|
||||
|
||||
|
|
|
|||
|
|
@ -79,6 +79,7 @@ class Events(object):
|
|||
# Slicing lifecycle events fired by the slicing subsystem.
SLICING_STARTED = "SlicingStarted"
SLICING_DONE = "SlicingDone"
SLICING_FAILED = "SlicingFailed"
# Fired when a running slicing job is cancelled, e.g. because a second job
# targeting the same GCODE output file was started.
SLICING_CANCELLED = "SlicingCancelled"
|
||||
|
||||
|
||||
def eventManager():
|
||||
|
|
|
|||
|
|
@ -101,6 +101,10 @@ class FileManager(object):
|
|||
|
||||
self._slicing_manager = slicing_manager
|
||||
|
||||
import threading
|
||||
self._slicing_jobs = dict()
|
||||
self._slicing_jobs_mutex = threading.Lock()
|
||||
|
||||
for storage_type, storage_manager in self._storage_managers.items():
|
||||
self._determine_analysis_backlog(storage_type, storage_manager)
|
||||
|
||||
|
|
@ -136,10 +140,12 @@ class FileManager(object):
|
|||
def slice(self, slicer_name, source_location, source_path, dest_location, dest_path, profile=None, overrides=None, callback=None, callback_args=None):
|
||||
absolute_source_path = self.get_absolute_path(source_location, source_path)
|
||||
|
||||
def stlProcessed(source_location, source_path, tmp_path, dest_location, dest_path, start_time, callback, callback_args, _error=None):
|
||||
def stlProcessed(source_location, source_path, tmp_path, dest_location, dest_path, start_time, callback, callback_args, _error=None, _cancelled=False):
|
||||
try:
|
||||
if _error:
|
||||
eventManager().fire(Events.SLICING_FAILED, {"stl": source_path, "gcode": dest_path, "reason": _error})
|
||||
elif _cancelled:
|
||||
eventManager().fire(Events.SLICING_CANCELLED, {"stl": source_path, "gcode": dest_path})
|
||||
else:
|
||||
source_meta = self.get_metadata(source_location, source_path)
|
||||
hash = source_meta["hash"]
|
||||
|
|
@ -181,6 +187,15 @@ class FileManager(object):
|
|||
temp_path = f.name
|
||||
f.close()
|
||||
|
||||
with self._slicing_jobs_mutex:
|
||||
if dest_location in self._slicing_jobs:
|
||||
job_slicer_name, job_absolute_source_path, job_temp_path = self._slicing_jobs[dest_location]
|
||||
|
||||
self._slicing_manager.cancel_slicing(job_slicer_name, job_absolute_source_path, job_temp_path)
|
||||
del self._slicing_jobs[dest_location]
|
||||
|
||||
self._slicing_jobs[dest_location] = (slicer_name, absolute_source_path, temp_path)
|
||||
|
||||
args = (source_location, source_path, temp_path, dest_location, dest_path, start_time, callback, callback_args)
|
||||
return self._slicing_manager.slice(slicer_name, absolute_source_path, temp_path, profile, stlProcessed, callback_args=args, overrides=overrides)
|
||||
|
||||
|
|
|
|||
|
|
@ -215,4 +215,7 @@ class SlicerPlugin(Plugin):
|
|||
def do_slice(self, model_path, machinecode_path=None, profile_path=None):
    """Slice the model at ``model_path`` into machine code.

    No-op default implementation; concrete slicer plugins override this.
    Returns ``None``.
    """
|
||||
|
||||
def cancel_slicing(self, machinecode_path):
    """Cancel a running slicing job producing ``machinecode_path``.

    No-op default implementation; concrete slicer plugins override this.
    Returns ``None``.
    """
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -106,6 +106,12 @@ class CuraPlugin(octoprint.plugin.SlicerPlugin,
|
|||
def __init__(self):
|
||||
self._logger = logging.getLogger("octoprint.plugins." + __name__)
|
||||
|
||||
import threading
|
||||
self._slicing_commands = dict()
|
||||
self._slicing_commands_mutex = threading.Lock()
|
||||
self._cancelled_jobs = []
|
||||
self._cancelled_jobs_mutex = threading.Lock()
|
||||
|
||||
##~~ BlueprintPlugin API
|
||||
|
||||
def get_blueprint(self):
|
||||
|
|
@ -215,16 +221,42 @@ class CuraPlugin(octoprint.plugin.SlicerPlugin,
|
|||
command = " ".join(args)
|
||||
self._logger.info("Running %r in %s" % (command, working_dir))
|
||||
try:
|
||||
p = sarge.run(command, cwd=working_dir)
|
||||
p = sarge.run(command, cwd=working_dir, async=True)
|
||||
with self._slicing_commands_mutex:
|
||||
self._slicing_commands[machinecode_path] = p.commands[0]
|
||||
p.wait()
|
||||
|
||||
with self._cancelled_jobs_mutex:
|
||||
if machinecode_path in self._cancelled_jobs:
|
||||
raise octoprint.slicing.SlicingCancelled()
|
||||
|
||||
if p.returncode == 0:
|
||||
return True, None
|
||||
else:
|
||||
self._logger.warn("Could not slice via Cura, got return code %r" % p.returncode)
|
||||
return False, "Got returncode %r" % p.returncode
|
||||
|
||||
except octoprint.slicing.SlicingCancelled as e:
|
||||
raise e
|
||||
except:
|
||||
self._logger.exception("Could not slice via Cura, got an unknown error")
|
||||
return False, "Unknown error, please consult the log file"
|
||||
|
||||
finally:
|
||||
with self._cancelled_jobs_mutex:
|
||||
if machinecode_path in self._cancelled_jobs:
|
||||
self._cancelled_jobs.remove(machinecode_path)
|
||||
with self._slicing_commands_mutex:
|
||||
if machinecode_path in self._slicing_commands:
|
||||
del self._slicing_commands[machinecode_path]
|
||||
|
||||
def cancel_slicing(self, machinecode_path):
    """Abort the slicing job that is producing ``machinecode_path``.

    Marks the job as cancelled (so the worker can distinguish cancellation
    from failure) and terminates the underlying slicer process. Does nothing
    if no job is currently registered for that path.
    """
    with self._slicing_commands_mutex:
        # Guard clause: nothing to do when no job targets this output file.
        if machinecode_path not in self._slicing_commands:
            return
        # Record the cancellation first so the slicing worker, once the
        # process dies, reports "cancelled" rather than "failed".
        with self._cancelled_jobs_mutex:
            self._cancelled_jobs.append(machinecode_path)
        self._slicing_commands[machinecode_path].terminate()
        self._logger.info("Cancelled slicing of %s" % machinecode_path)
|
||||
|
||||
def _load_profile(self, path):
|
||||
import yaml
|
||||
|
|
|
|||
|
|
@ -44,6 +44,10 @@ class TemporaryProfile(object):
|
|||
pass
|
||||
|
||||
|
||||
class SlicingCancelled(BaseException):
    """Signals that a running slicing job was cancelled.

    NOTE(review): derives from ``BaseException`` rather than ``Exception``,
    so generic ``except Exception`` handlers will not swallow it — presumably
    so cancellation reliably propagates out of slicer plugins; confirm
    against the worker's exception handling.
    """
|
||||
|
||||
|
||||
class SlicingManager(object):
|
||||
def __init__(self, profile_path):
|
||||
self._profile_path = profile_path
|
||||
|
|
@ -92,12 +96,16 @@ class SlicingManager(object):
|
|||
slicer = self.get_slicer(slicer_name)
|
||||
|
||||
def slicer_worker(slicer, model_path, machinecode_path, profile_name, overrides, callback, callback_args, callback_kwargs):
|
||||
with self.temporary_profile(slicer.get_slicer_type(), name=profile_name, overrides=overrides) as profile_path:
|
||||
ok, result = slicer.do_slice(model_path, machinecode_path=machinecode_path, profile_path=profile_path)
|
||||
try:
|
||||
with self.temporary_profile(slicer.get_slicer_type(), name=profile_name, overrides=overrides) as profile_path:
|
||||
ok, result = slicer.do_slice(model_path, machinecode_path=machinecode_path, profile_path=profile_path)
|
||||
|
||||
if not ok:
|
||||
callback_kwargs.update(dict(_error=result))
|
||||
callback(*callback_args, **callback_kwargs)
|
||||
if not ok:
|
||||
callback_kwargs.update(dict(_error=result))
|
||||
callback(*callback_args, **callback_kwargs)
|
||||
except SlicingCancelled:
|
||||
callback_kwargs.update(dict(_cancelled=True))
|
||||
callback(*callback_args, **callback_kwargs)
|
||||
|
||||
import threading
|
||||
slicer_worker_thread = threading.Thread(target=slicer_worker,
|
||||
|
|
@ -106,6 +114,11 @@ class SlicingManager(object):
|
|||
slicer_worker_thread.start()
|
||||
return True, None
|
||||
|
||||
def cancel_slicing(self, slicer_name, source_path, dest_path):
    """Cancel a running slicing job on the given slicer.

    :param slicer_name: name of the registered slicer running the job;
        silently ignored if no slicer of that name is registered
    :param source_path: path of the source (e.g. STL) file being sliced;
        currently unused but kept for interface symmetry with ``slice``
    :param dest_path: path of the machine code file being produced, used to
        identify the job to cancel
    """
    # Idiomatic membership test (was: ``not slicer_name in ...``).
    if slicer_name not in self.registered_slicers:
        return
    slicer = self.get_slicer(slicer_name)
    slicer.cancel_slicing(dest_path)
|
||||
|
||||
def load_profile(self, slicer, name):
|
||||
if not slicer in self.registered_slicers:
|
||||
|
|
|
|||
Loading…
Reference in a new issue