Allow multi line commands in sending phase

(cherry picked from commit 1018946)
This commit is contained in:
Gina Häußge 2017-01-23 13:36:31 +01:00
parent a50e8bbbb5
commit 35c45df86c
3 changed files with 94 additions and 39 deletions

View file

@@ -451,18 +451,21 @@ This describes actually four hooks:
should use this option.
* A 2-tuple consisting of a rewritten version of the ``cmd`` and the ``cmd_type``, e.g. ``return "M105", "temperature_poll"``.
Handlers which wish to rewrite both the command and the command type should use this option.
* **``queuing`` phase only**: A list of any of the above to allow for expanding one command into
* A list of any of the above to allow for expanding one command into
many. The following example shows how any queued command could be turned into a sequence of a temperature query,
line number reset, display of the ``gcode`` on the printer's display and finally the actual command (this example
does not make a lot of sense to be quiet honest):
does not make a lot of sense to be quite honest):
.. code-block:: python
def multi_expansion(*args, **kwargs):
return [("M105", "temperature_poll"),
("M110",),
"M117 GCODE: {}".format(gcode),
(command, command_type)]
def rewrite_foo(self, comm_instance, phase, cmd, cmd_type, gcode, *args, **kwargs):
if gcode or not cmd.startswith("@foo"):
return
return [("M105", "temperature_poll"),
("M110",),
"M117 echo foo: {}".format(cmd)]
Note: Only one command of a given ``cmd_type`` (other than None) may be queued at a time. Trying to rewrite the ``cmd_type``
to one already in the queue will give an error.

View file

@@ -1046,19 +1046,58 @@ class InvariantContainer(object):
return self._data.__iter__()
class TypedQueue(queue.Queue):
class PrependQueue(queue.Queue):
    """
    A ``queue.Queue`` subclass that additionally supports inserting items at
    the *front* of the queue while keeping the regular FIFO ``put``/``get``
    semantics for everything else.
    """

    def __init__(self, maxsize=0):
        queue.Queue.__init__(self, maxsize=maxsize)

    def prepend(self, item, block=True, timeout=None):
        """
        Insert ``item`` at the front of the queue.

        Mirrors the blocking/timeout contract of ``queue.Queue.put``.

        Arguments:
            item: the item to prepend
            block (bool): whether to block if the queue is currently full
            timeout (float): optional timeout in seconds to wait for a free
                slot while blocking; ``None`` means "wait forever"

        Raises:
            queue.Full: if the queue is full and we are not blocking or the
                timeout elapsed while waiting for a free slot
            ValueError: if ``timeout`` is negative
        """
        # Bug fix: default used to be ``timeout=True`` which is neither None
        # nor rejected, so a default blocking prepend on a full queue computed
        # ``endtime = time() + True`` and raised queue.Full after ~1s instead
        # of blocking indefinitely like queue.Queue.put does.
        from time import time as _time

        self.not_full.acquire()
        try:
            if self.maxsize > 0:
                if not block:
                    # non-blocking: bail immediately if there's no room
                    if self._qsize() >= self.maxsize:
                        raise queue.Full
                elif timeout is None:
                    # block without timeout until a slot frees up
                    while self._qsize() >= self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    # block with timeout, raise Full once it elapses
                    endtime = _time() + timeout
                    while self._qsize() >= self.maxsize:
                        remaining = endtime - _time()
                        if remaining <= 0.0:
                            raise queue.Full
                        self.not_full.wait(remaining)
            self._prepend(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
        finally:
            self.not_full.release()

    def _prepend(self, item):
        # internal storage is a deque, so front insertion is O(1)
        self.queue.appendleft(item)
class TypedQueue(PrependQueue):
def __init__(self, maxsize=0):
PrependQueue.__init__(self, maxsize=maxsize)
self._lookup = set()
def put(self, item, item_type=None, *args, **kwargs):
queue.Queue.put(self, (item, item_type), *args, **kwargs)
PrependQueue.put(self, (item, item_type), *args, **kwargs)
def get(self, *args, **kwargs):
item, _ = queue.Queue.get(self, *args, **kwargs)
item, _ = PrependQueue.get(self, *args, **kwargs)
return item
def prepend(self, item, item_type=None, *args, **kwargs):
    # Prepend ``item`` to the front of the queue, tagging it with its
    # optional ``item_type`` so the type-uniqueness bookkeeping applies.
    # Extra positional/keyword args (block, timeout) are forwarded to
    # PrependQueue.prepend unchanged.
    PrependQueue.prepend(self, (item, item_type), *args, **kwargs)
def _put(self, item):
_, item_type = item
if item_type is not None:
@@ -1070,7 +1109,7 @@ class TypedQueue(queue.Queue):
queue.Queue._put(self, item)
def _get(self):
item = queue.Queue._get(self)
item = PrependQueue._get(self)
_, item_type = item
if item_type is not None:
@@ -1078,6 +1117,15 @@ class TypedQueue(queue.Queue):
return item
def _prepend(self, item):
    # Items are (payload, item_type) tuples; enforce that at most one entry
    # of any non-None type is in the queue at a time before delegating the
    # actual front-insertion to the parent class.
    entry_type = item[1]
    if entry_type is not None:
        if entry_type in self._lookup:
            raise TypeAlreadyInQueue(entry_type, "Type {} is already in queue".format(entry_type))
        self._lookup.add(entry_type)
    PrependQueue._prepend(self, item)
class TypeAlreadyInQueue(Exception):
def __init__(self, t, *args, **kwargs):

View file

@@ -2167,16 +2167,19 @@ class MachineCom(object):
def _enqueue_for_sending(self, command, linenumber=None, command_type=None, on_sent=None):
"""
Enqueues a command an optional linenumber to use for it in the send queue.
Enqueues a command and optional linenumber to use for it in the send queue.
Arguments:
command (str): The command to send.
linenumber (int): The line number with which to send the command. May be ``None`` in which case the command
will be sent without a line number and checksum.
command_type (str): Optional command type, if set and command type is already in the queue the
command won't be enqueued
on_sent (callable): Optional callable to call after command has been sent to printer.
"""
try:
self._send_queue.put((command, linenumber, command_type, on_sent), item_type=command_type)
self._send_queue.put((command, linenumber, command_type, on_sent, False), item_type=command_type)
return True
except TypeAlreadyInQueue as e:
self._logger.debug("Type already in send queue: " + e.type)
@@ -2207,7 +2210,7 @@ class MachineCom(object):
self._dwelling_until = False
# fetch command, command type and optional linenumber and sent callback from queue
command, linenumber, command_type, on_sent = entry
command, linenumber, command_type, on_sent, processed = entry
# some firmwares (e.g. Smoothie) might support additional in-band communication that will not
# stick to the acknowledgement behaviour of GCODE, so we check here if we have a GCODE command
@@ -2220,28 +2223,35 @@ class MachineCom(object):
self._do_send_with_checksum(command, linenumber)
else:
# trigger "sending" phase
results = self._process_command_phase("sending", command, command_type, gcode=gcode)
if not processed:
# trigger "sending" phase if we didn't so far
results = self._process_command_phase("sending", command, command_type, gcode=gcode)
if not results:
# No, we are not going to send this, that was a last-minute bail.
# However, since we already are in the send queue, our _monitor
# loop won't be triggered with the reply from this unsent command
# now, so we try to tickle the processing of any active
# command queues manually
self._continue_sending()
if not results:
# No, we are not going to send this, that was a last-minute bail.
# However, since we already are in the send queue, our _monitor
# loop won't be triggered with the reply from this unsent command
# now, so we try to tickle the processing of any active
# command queues manually
self._continue_sending()
# and now let's fetch the next item from the queue
continue
# and now let's fetch the next item from the queue
continue
# we explicitly throw away plugin hook results that try
# to perform command expansion in the sending/sent phase,
# so "results" really should only have more than one entry
# at this point if our core code contains a bug
assert len(results) == 1
if len(results) > 1:
# last command gets on_sent attached
last = results[-1]
self._send_queue.prepend((last[0], None, None, on_sent, True))
on_sent = None
# we only use the first (and only!) entry here
command, _, gcode = results[0]
# middle gets prepended reversed (so order gets restored)
if len(results) > 2:
to_prepend = reversed(results[1:-1])
for m in to_prepend:
self._send_queue.prepend((m[0], None, None, None, True))
# we only actually send the first entry here
command, _, gcode = results[0]
if command.strip() == "":
self._logger.info("Refusing to send an empty line to the printer")
@@ -2315,13 +2325,7 @@ class MachineCom(object):
self._logger.exception("Error while processing hook {name} for phase {phase} and command {command}:".format(**locals()))
else:
normalized = _normalize_command_handler_result(command, command_type, gcode, hook_results)
# make sure we don't allow multi entry results in anything but the queuing phase
if not phase in ("queuing",) and len(normalized) > 1:
self._logger.error("Error while processing hook {name} for phase {phase} and command {command}: Hook returned multi-entry result for phase {phase} and command {command}. That's not supported, if you need to do multi expansion of commands you need to do this in the queuing phase. Ignoring hook result and sending command as-is.".format(**locals()))
new_results.append((command, command_type, gcode))
else:
new_results += normalized
new_results += normalized
if not new_results:
# hook handler returned None or empty list for all commands, so we'll stop here and return a full out empty result
return []