aboutsummaryrefslogtreecommitdiffstats
path: root/scripts
diff options
context:
space:
mode:
authorTomasz Kramkowski <tomasz@kramkow.ski>2025-08-06 17:50:53 +0100
committerTomasz Kramkowski <tomasz@kramkow.ski>2025-08-06 17:50:53 +0100
commit581208b2ffeeb2a2128aee0741fa3fd9e46358e2 (patch)
treef0d7e0e5cb574b235a5495b0f40f79e3ded7aa4d /scripts
parente1176e4dfb9018e712d4fa86daf41e9e762a1698 (diff)
downloadkutter-581208b2ffeeb2a2128aee0741fa3fd9e46358e2.tar.gz
kutter-581208b2ffeeb2a2128aee0741fa3fd9e46358e2.tar.xz
kutter-581208b2ffeeb2a2128aee0741fa3fd9e46358e2.zip
Run black on all first party python code
Diffstat (limited to 'scripts')
-rwxr-xr-xscripts/avrsim.py133
-rw-r--r--scripts/buildcommands.py340
-rwxr-xr-xscripts/calibrate_shaper.py281
-rw-r--r--scripts/canbus_query.py52
-rwxr-xr-xscripts/check_whitespace.py31
-rwxr-xr-xscripts/checkstack.py106
-rwxr-xr-xscripts/dump_mcu.py89
-rwxr-xr-xscripts/flash-ar100.py27
-rwxr-xr-xscripts/flash_usb.py143
-rwxr-xr-xscripts/graph_accelerometer.py224
-rwxr-xr-xscripts/graph_extruder.py131
-rwxr-xr-xscripts/graph_mesh.py133
-rwxr-xr-xscripts/graph_motion.py363
-rwxr-xr-xscripts/graph_shaper.py203
-rwxr-xr-xscripts/graph_temp_sensor.py111
-rwxr-xr-xscripts/graphstats.py299
-rwxr-xr-xscripts/logextract.py373
-rw-r--r--scripts/make_version.py16
-rw-r--r--scripts/motan/analyzers.py225
-rwxr-xr-xscripts/motan/data_logger.py88
-rwxr-xr-xscripts/motan/motan_graph.py73
-rw-r--r--scripts/motan/readlog.py413
-rwxr-xr-xscripts/parsecandump.py67
-rw-r--r--scripts/spi_flash/board_defs.py304
-rw-r--r--scripts/spi_flash/fatfs_lib.py17
-rw-r--r--scripts/spi_flash/spi_flash.py917
-rw-r--r--scripts/test_klippy.py115
-rwxr-xr-xscripts/update_chitu.py34
-rwxr-xr-xscripts/update_mks_robin.py40
-rwxr-xr-xscripts/whconsole.py30
30 files changed, 3274 insertions, 2104 deletions
diff --git a/scripts/avrsim.py b/scripts/avrsim.py
index e7f191e8..0dd86200 100755
--- a/scripts/avrsim.py
+++ b/scripts/avrsim.py
@@ -7,9 +7,10 @@
import sys, optparse, time, os, pty, fcntl, termios, errno
import pysimulavr
-SERIALBITS = 10 # 8N1 = 1 start, 8 data, 1 stop
+SERIALBITS = 10 # 8N1 = 1 start, 8 data, 1 stop
SIMULAVR_FREQ = 10**9
+
# Class to read serial data from AVR serial transmit pin.
class SerialRxPin(pysimulavr.PySimulationMember, pysimulavr.Pin):
def __init__(self, baud, terminal):
@@ -20,12 +21,14 @@ class SerialRxPin(pysimulavr.PySimulationMember, pysimulavr.Pin):
self.delay = SIMULAVR_FREQ // baud
self.current = 0
self.pos = -1
+
def SetInState(self, pin):
pysimulavr.Pin.SetInState(self, pin)
self.state = pin.outState
if self.pos < 0 and pin.outState == pin.LOW:
self.pos = 0
self.sc.Add(self)
+
def DoStep(self, trueHwStep):
ishigh = self.state == self.HIGH
self.current |= ishigh << self.pos
@@ -33,26 +36,28 @@ class SerialRxPin(pysimulavr.PySimulationMember, pysimulavr.Pin):
if self.pos == 1:
return int(self.delay * 1.5)
if self.pos >= SERIALBITS:
- data = bytearray([(self.current >> 1) & 0xff])
+ data = bytearray([(self.current >> 1) & 0xFF])
self.terminal.write(data)
self.pos = -1
self.current = 0
return -1
return self.delay
+
# Class to send serial data to AVR serial receive pin.
class SerialTxPin(pysimulavr.PySimulationMember, pysimulavr.Pin):
def __init__(self, baud, terminal):
pysimulavr.Pin.__init__(self)
pysimulavr.PySimulationMember.__init__(self)
self.terminal = terminal
- self.SetPin('H')
+ self.SetPin("H")
self.sc = pysimulavr.SystemClock.Instance()
self.delay = SIMULAVR_FREQ // baud
self.current = 0
self.pos = 0
self.queue = bytearray()
self.sc.Add(self)
+
def DoStep(self, trueHwStep):
if not self.pos:
if not self.queue:
@@ -61,15 +66,16 @@ class SerialTxPin(pysimulavr.PySimulationMember, pysimulavr.Pin):
return self.delay * 100
self.queue.extend(data)
self.current = (self.queue.pop(0) << 1) | 0x200
- newstate = 'L'
+ newstate = "L"
if self.current & (1 << self.pos):
- newstate = 'H'
+ newstate = "H"
self.SetPin(newstate)
self.pos += 1
if self.pos >= SERIALBITS:
self.pos = 0
return self.delay
+
# Support for creating VCD trace files
class Tracing:
def __init__(self, filename, signals):
@@ -80,36 +86,42 @@ class Tracing:
return
self.dman = pysimulavr.DumpManager.Instance()
self.dman.SetSingleDeviceApp()
+
def show_help(self):
ostr = pysimulavr.ostringstream()
self.dman.save(ostr)
sys.stdout.write(ostr.str())
sys.exit(1)
+
def load_options(self):
if self.dman is None:
return
- if self.signals.strip() == '?':
+ if self.signals.strip() == "?":
self.show_help()
- sigs = "\n".join(["+ " + s for s in self.signals.split(',')])
+ sigs = "\n".join(["+ " + s for s in self.signals.split(",")])
self.dman.addDumpVCD(self.filename, sigs, "ns", False, False)
+
def start(self):
if self.dman is not None:
self.dman.start()
+
def finish(self):
if self.dman is not None:
self.dman.stopApplication()
+
# Pace the simulation scaled to real time
class Pacing(pysimulavr.PySimulationMember):
def __init__(self, rate):
pysimulavr.PySimulationMember.__init__(self)
self.sc = pysimulavr.SystemClock.Instance()
- self.pacing_rate = 1. / (rate * SIMULAVR_FREQ)
+ self.pacing_rate = 1.0 / (rate * SIMULAVR_FREQ)
self.next_check_clock = 0
self.rel_time = time.time()
- self.best_offset = 0.
+ self.best_offset = 0.0
self.delay = SIMULAVR_FREQ // 10000
self.sc.Add(self)
+
def DoStep(self, trueHwStep):
curtime = time.time()
clock = self.sc.GetCurrentTime()
@@ -118,19 +130,23 @@ class Pacing(pysimulavr.PySimulationMember):
if offset > 0.000050:
time.sleep(offset - 0.000040)
if clock >= self.next_check_clock:
- self.rel_time -= min(self.best_offset, 0.)
+ self.rel_time -= min(self.best_offset, 0.0)
self.next_check_clock = clock + self.delay * 500
- self.best_offset = -999999999.
+ self.best_offset = -999999999.0
return self.delay
+
# Forward data from a terminal device to the serial port pins
class TerminalIO:
def __init__(self):
self.fd = -1
+
def run(self, fd):
self.fd = fd
+
def write(self, data):
os.write(self.fd, data)
+
def read(self):
try:
return os.read(self.fd, 64)
@@ -139,6 +155,7 @@ class TerminalIO:
pysimulavr.SystemClock.Instance().stop()
return ""
+
# Support for creating a pseudo-tty for emulating a serial port
def create_pty(ptyname):
mfd, sfd = pty.openpty()
@@ -147,16 +164,22 @@ def create_pty(ptyname):
except os.error:
pass
os.symlink(os.ttyname(sfd), ptyname)
- fcntl.fcntl(mfd, fcntl.F_SETFL
- , fcntl.fcntl(mfd, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(mfd, fcntl.F_SETFL, fcntl.fcntl(mfd, fcntl.F_GETFL) | os.O_NONBLOCK)
tcattr = termios.tcgetattr(mfd)
tcattr[0] &= ~(
- termios.IGNBRK | termios.BRKINT | termios.PARMRK | termios.ISTRIP |
- termios.INLCR | termios.IGNCR | termios.ICRNL | termios.IXON)
+ termios.IGNBRK
+ | termios.BRKINT
+ | termios.PARMRK
+ | termios.ISTRIP
+ | termios.INLCR
+ | termios.IGNCR
+ | termios.ICRNL
+ | termios.IXON
+ )
tcattr[1] &= ~termios.OPOST
tcattr[3] &= ~(
- termios.ECHO | termios.ECHONL | termios.ICANON | termios.ISIG |
- termios.IEXTEN)
+ termios.ECHO | termios.ECHONL | termios.ICANON | termios.ISIG | termios.IEXTEN
+ )
tcattr[2] &= ~(termios.CSIZE | termios.PARENB)
tcattr[2] |= termios.CS8
tcattr[6][termios.VMIN] = 0
@@ -164,25 +187,66 @@ def create_pty(ptyname):
termios.tcsetattr(mfd, termios.TCSAFLUSH, tcattr)
return mfd
+
def main():
usage = "%prog [options] <program.elf>"
opts = optparse.OptionParser(usage)
- opts.add_option("-m", "--machine", type="string", dest="machine",
- default="atmega644", help="type of AVR machine to simulate")
- opts.add_option("-s", "--speed", type="int", dest="speed", default=16000000,
- help="machine speed")
- opts.add_option("-r", "--rate", type="float", dest="pacing_rate",
- default=0., help="real-time pacing rate")
- opts.add_option("-b", "--baud", type="int", dest="baud", default=250000,
- help="baud rate of the emulated serial port")
- opts.add_option("-t", "--trace", type="string", dest="trace",
- help="signals to trace (? for help)")
- opts.add_option("-p", "--port", type="string", dest="port",
- default="/tmp/pseudoserial",
- help="pseudo-tty device to create for serial port")
+ opts.add_option(
+ "-m",
+ "--machine",
+ type="string",
+ dest="machine",
+ default="atmega644",
+ help="type of AVR machine to simulate",
+ )
+ opts.add_option(
+ "-s",
+ "--speed",
+ type="int",
+ dest="speed",
+ default=16000000,
+ help="machine speed",
+ )
+ opts.add_option(
+ "-r",
+ "--rate",
+ type="float",
+ dest="pacing_rate",
+ default=0.0,
+ help="real-time pacing rate",
+ )
+ opts.add_option(
+ "-b",
+ "--baud",
+ type="int",
+ dest="baud",
+ default=250000,
+ help="baud rate of the emulated serial port",
+ )
+ opts.add_option(
+ "-t",
+ "--trace",
+ type="string",
+ dest="trace",
+ help="signals to trace (? for help)",
+ )
+ opts.add_option(
+ "-p",
+ "--port",
+ type="string",
+ dest="port",
+ default="/tmp/pseudoserial",
+ help="pseudo-tty device to create for serial port",
+ )
deffile = os.path.splitext(os.path.basename(sys.argv[0]))[0] + ".vcd"
- opts.add_option("-f", "--tracefile", type="string", dest="tracefile",
- default=deffile, help="filename to write signal trace to")
+ opts.add_option(
+ "-f",
+ "--tracefile",
+ type="string",
+ dest="tracefile",
+ default=deffile,
+ help="filename to write signal trace to",
+ )
options, args = opts.parse_args()
if len(args) != 1:
opts.error("Incorrect number of arguments")
@@ -236,10 +300,11 @@ def main():
try:
io.run(fd)
trace.start()
- sc.RunTimeRange(0x7fff0000ffff0000)
+ sc.RunTimeRange(0x7FFF0000FFFF0000)
trace.finish()
finally:
os.unlink(ptyname)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/buildcommands.py b/scripts/buildcommands.py
index b3587384..fa4303ed 100644
--- a/scripts/buildcommands.py
+++ b/scripts/buildcommands.py
@@ -6,7 +6,8 @@
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os, subprocess, optparse, logging, shlex, socket, time, traceback
import json, zlib
-sys.path.append('./klippy')
+
+sys.path.append("./klippy")
import msgproto
FILEHEADER = """
@@ -19,10 +20,12 @@ FILEHEADER = """
#include "initial_pins.h"
"""
+
def error(msg):
sys.stderr.write(msg + "\n")
sys.exit(-1)
+
Handlers = []
@@ -30,23 +33,26 @@ Handlers = []
# C call list generation
######################################################################
+
# Create dynamic C functions that call a list of other C functions
class HandleCallList:
def __init__(self):
- self.call_lists = {'ctr_run_initfuncs': []}
- self.ctr_dispatch = { '_DECL_CALLLIST': self.decl_calllist }
+ self.call_lists = {"ctr_run_initfuncs": []}
+ self.ctr_dispatch = {"_DECL_CALLLIST": self.decl_calllist}
+
def decl_calllist(self, req):
funcname, callname = req.split()[1:]
self.call_lists.setdefault(funcname, []).append(callname)
+
def update_data_dictionary(self, data):
pass
+
def generate_code(self, options):
code = []
for funcname, funcs in self.call_lists.items():
- func_code = [' extern void %s(void);\n %s();' % (f, f)
- for f in funcs]
- if funcname == 'ctr_run_taskfuncs':
- add_poll = ' irq_poll();\n'
+ func_code = [" extern void %s(void);\n %s();" % (f, f) for f in funcs]
+ if funcname == "ctr_run_taskfuncs":
+ add_poll = " irq_poll();\n"
func_code = [add_poll + fc for fc in func_code]
func_code.append(add_poll)
fmt = """
@@ -59,6 +65,7 @@ void
code.append(fmt % (funcname, "\n".join(func_code).strip()))
return "".join(code)
+
Handlers.append(HandleCallList())
@@ -68,41 +75,49 @@ Handlers.append(HandleCallList())
STATIC_STRING_MIN = 2
+
# Generate a dynamic string to integer mapping
class HandleEnumerations:
def __init__(self):
self.static_strings = []
self.enumerations = {}
self.ctr_dispatch = {
- '_DECL_STATIC_STR': self.decl_static_str,
- 'DECL_ENUMERATION': self.decl_enumeration,
- 'DECL_ENUMERATION_RANGE': self.decl_enumeration_range
+ "_DECL_STATIC_STR": self.decl_static_str,
+ "DECL_ENUMERATION": self.decl_enumeration,
+ "DECL_ENUMERATION_RANGE": self.decl_enumeration_range,
}
+
def add_enumeration(self, enum, name, value):
enums = self.enumerations.setdefault(enum, {})
if name in enums and enums[name] != value:
- error("Conflicting definition for enumeration '%s %s'" % (
- enum, name))
+ error("Conflicting definition for enumeration '%s %s'" % (enum, name))
enums[name] = value
+
def decl_enumeration(self, req):
enum, name, value = req.split()[1:]
self.add_enumeration(enum, name, int(value, 0))
+
def decl_enumeration_range(self, req):
enum, name, value, count = req.split()[1:]
self.add_enumeration(enum, name, (int(value, 0), int(count, 0)))
+
def decl_static_str(self, req):
msg = req.split(None, 1)[1]
if msg not in self.static_strings:
self.static_strings.append(msg)
+
def update_data_dictionary(self, data):
for i, s in enumerate(self.static_strings):
self.add_enumeration("static_string_id", s, i + STATIC_STRING_MIN)
- data['enumerations'] = self.enumerations
+ data["enumerations"] = self.enumerations
+
def generate_code(self, options):
code = []
for i, s in enumerate(self.static_strings):
- code.append(' if (__builtin_strcmp(str, "%s") == 0)\n'
- ' return %d;\n' % (s, i + STATIC_STRING_MIN))
+ code.append(
+ ' if (__builtin_strcmp(str, "%s") == 0)\n'
+ " return %d;\n" % (s, i + STATIC_STRING_MIN)
+ )
fmt = """
uint8_t __always_inline
ctr_lookup_static_string(const char *str)
@@ -113,6 +128,7 @@ ctr_lookup_static_string(const char *str)
"""
return fmt % ("".join(code).strip(),)
+
HandlerEnumerations = HandleEnumerations()
Handlers.append(HandlerEnumerations)
@@ -121,32 +137,39 @@ Handlers.append(HandlerEnumerations)
# Constants
######################################################################
+
# Allow adding build time constants to the data dictionary
class HandleConstants:
def __init__(self):
self.constants = {}
self.ctr_dispatch = {
- 'DECL_CONSTANT': self.decl_constant,
- 'DECL_CONSTANT_STR': self.decl_constant_str,
+ "DECL_CONSTANT": self.decl_constant,
+ "DECL_CONSTANT_STR": self.decl_constant_str,
}
+
def set_value(self, name, value):
if name in self.constants and self.constants[name] != value:
error("Conflicting definition for constant '%s'" % name)
self.constants[name] = value
+
def decl_constant(self, req):
name, value = req.split()[1:]
self.set_value(name, int(value, 0))
+
def decl_constant_str(self, req):
name, value = req.split(None, 2)[1:]
value = value.strip()
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
self.set_value(name, value)
+
def update_data_dictionary(self, data):
- data['config'] = self.constants
+ data["config"] = self.constants
+
def generate_code(self, options):
return ""
+
HandlerConstants = HandleConstants()
Handlers.append(HandlerConstants)
@@ -155,37 +178,42 @@ Handlers.append(HandlerConstants)
# Initial pins
######################################################################
+
class HandleInitialPins:
def __init__(self):
self.initial_pins = []
- self.ctr_dispatch = { 'DECL_INITIAL_PINS': self.decl_initial_pins }
+ self.ctr_dispatch = {"DECL_INITIAL_PINS": self.decl_initial_pins}
+
def decl_initial_pins(self, req):
pins = req.split(None, 1)[1].strip()
if pins.startswith('"') and pins.endswith('"'):
pins = pins[1:-1]
if pins:
- self.initial_pins = [p.strip() for p in pins.split(',')]
+ self.initial_pins = [p.strip() for p in pins.split(",")]
HandlerConstants.decl_constant_str(
- "_DECL_CONSTANT_STR INITIAL_PINS "
- + ','.join(self.initial_pins))
+ "_DECL_CONSTANT_STR INITIAL_PINS " + ",".join(self.initial_pins)
+ )
+
def update_data_dictionary(self, data):
pass
+
def map_pins(self):
if not self.initial_pins:
return []
mp = msgproto.MessageParser()
mp.fill_enumerations(HandlerEnumerations.enumerations)
- pinmap = mp.get_enumerations().get('pin', {})
+ pinmap = mp.get_enumerations().get("pin", {})
out = []
for p in self.initial_pins:
flag = "IP_OUT_HIGH"
- if p.startswith('!'):
+ if p.startswith("!"):
flag = "0"
p = p[1:].strip()
if p not in pinmap:
error("Unknown initial pin '%s'" % (p,))
out.append("\n {%d, %s}, // %s" % (pinmap[p], flag, p))
return out
+
def generate_code(self, options):
out = self.map_pins()
fmt = """
@@ -193,7 +221,8 @@ const struct initial_pin_s initial_pins[] PROGMEM = {%s
};
const int initial_pins_size PROGMEM = ARRAY_SIZE(initial_pins);
"""
- return fmt % (''.join(out),)
+ return fmt % ("".join(out),)
+
Handlers.append(HandleInitialPins())
@@ -202,20 +231,26 @@ Handlers.append(HandleInitialPins())
# ARM IRQ vector table generation
######################################################################
+
# Create ARM IRQ vector table from interrupt handler declarations
class Handle_arm_irq:
def __init__(self):
self.irqs = {}
- self.ctr_dispatch = { 'DECL_ARMCM_IRQ': self.decl_armcm_irq }
+ self.ctr_dispatch = {"DECL_ARMCM_IRQ": self.decl_armcm_irq}
+
def decl_armcm_irq(self, req):
func, num = req.split()[1:]
num = int(num, 0)
if num in self.irqs and self.irqs[num] != func:
- error("Conflicting IRQ definition %d (old %s new %s)"
- % (num, self.irqs[num], func))
+ error(
+ "Conflicting IRQ definition %d (old %s new %s)"
+ % (num, self.irqs[num], func)
+ )
self.irqs[num] = func
+
def update_data_dictionary(self, data):
pass
+
def generate_code(self, options):
armcm_offset = 16
if 1 - armcm_offset not in self.irqs:
@@ -237,7 +272,8 @@ extern uint32_t _stack_end;
const void *VectorTable[] __visible __section(".vector_table") = {
%s};
"""
- return fmt % (''.join(defs), ''.join(table))
+ return fmt % ("".join(defs), "".join(table))
+
Handlers.append(Handle_arm_irq())
@@ -246,6 +282,7 @@ Handlers.append(Handle_arm_irq())
# Wire protocol commands and responses
######################################################################
+
# Dynamic command and response registration
class HandleCommandGeneration:
def __init__(self):
@@ -253,13 +290,14 @@ class HandleCommandGeneration:
self.encoders = []
self.msg_to_encid = dict(msgproto.DefaultMessages)
self.encid_to_msgid = {}
- self.messages_by_name = { m.split()[0]: m for m in self.msg_to_encid }
+ self.messages_by_name = {m.split()[0]: m for m in self.msg_to_encid}
self.all_param_types = {}
self.ctr_dispatch = {
- 'DECL_COMMAND_FLAGS': self.decl_command,
- '_DECL_ENCODER': self.decl_encoder,
- '_DECL_OUTPUT': self.decl_output
+ "DECL_COMMAND_FLAGS": self.decl_command,
+ "_DECL_ENCODER": self.decl_encoder,
+ "_DECL_OUTPUT": self.decl_output,
}
+
def decl_command(self, req):
funcname, flags, msgname = req.split()[1:4]
if msgname in self.commands:
@@ -270,6 +308,7 @@ class HandleCommandGeneration:
if m is not None and m != msg:
error("Conflicting definition for command '%s'" % msgname)
self.messages_by_name[msgname] = msg
+
def decl_encoder(self, req):
msg = req.split(None, 1)[1]
msgname = msg.split()[0]
@@ -278,15 +317,18 @@ class HandleCommandGeneration:
error("Conflicting definition for message '%s'" % msgname)
self.messages_by_name[msgname] = msg
self.encoders.append((msgname, msg))
+
def decl_output(self, req):
msg = req.split(None, 1)[1]
self.encoders.append((None, msg))
+
def convert_encoded_msgid(self, encoded_msgid):
if encoded_msgid >= 0x80:
- data = [(encoded_msgid >> 7) | 0x80, encoded_msgid & 0x7f]
+ data = [(encoded_msgid >> 7) | 0x80, encoded_msgid & 0x7F]
else:
data = [encoded_msgid]
return msgproto.PT_int32().parse(data, 0)[0]
+
def create_message_ids(self):
# Create unique ids for each message type
encoded_msgid = max(self.msg_to_encid.values())
@@ -296,31 +338,44 @@ class HandleCommandGeneration:
if msg not in self.msg_to_encid:
encoded_msgid += 1
self.msg_to_encid[msg] = encoded_msgid
- if encoded_msgid >= 1<<14:
+ if encoded_msgid >= 1 << 14:
# The mcu currently assumes all message ids encode to 1 or 2 bytes
error("Too many message ids")
self.encid_to_msgid = {
encoded_msgid: self.convert_encoded_msgid(encoded_msgid)
for encoded_msgid in self.msg_to_encid.values()
}
+
def update_data_dictionary(self, data):
# Convert ids to standard form (use both positive and negative numbers)
- msg_to_msgid = {msg: self.encid_to_msgid[encoded_msgid]
- for msg, encoded_msgid in self.msg_to_encid.items()}
- command_ids = [msg_to_msgid[msg]
- for msgname, msg in self.messages_by_name.items()
- if msgname in self.commands]
- response_ids = [msg_to_msgid[msg]
- for msgname, msg in self.messages_by_name.items()
- if msgname not in self.commands]
- data['commands'] = { msg: msgid for msg, msgid in msg_to_msgid.items()
- if msgid in command_ids }
- data['responses'] = { msg: msgid for msg, msgid in msg_to_msgid.items()
- if msgid in response_ids }
- output = {msg: msgid for msg, msgid in msg_to_msgid.items()
- if msgid not in command_ids and msgid not in response_ids}
+ msg_to_msgid = {
+ msg: self.encid_to_msgid[encoded_msgid]
+ for msg, encoded_msgid in self.msg_to_encid.items()
+ }
+ command_ids = [
+ msg_to_msgid[msg]
+ for msgname, msg in self.messages_by_name.items()
+ if msgname in self.commands
+ ]
+ response_ids = [
+ msg_to_msgid[msg]
+ for msgname, msg in self.messages_by_name.items()
+ if msgname not in self.commands
+ ]
+ data["commands"] = {
+ msg: msgid for msg, msgid in msg_to_msgid.items() if msgid in command_ids
+ }
+ data["responses"] = {
+ msg: msgid for msg, msgid in msg_to_msgid.items() if msgid in response_ids
+ }
+ output = {
+ msg: msgid
+ for msg, msgid in msg_to_msgid.items()
+ if msgid not in command_ids and msgid not in response_ids
+ }
if output:
- data['output'] = output
+ data["output"] = output
+
def build_parser(self, encoded_msgid, msgformat, msgtype):
if msgtype == "output":
param_types = msgproto.lookup_output_params(msgformat)
@@ -328,34 +383,46 @@ class HandleCommandGeneration:
else:
param_types = [t for name, t in msgproto.lookup_params(msgformat)]
comment = msgformat
- params = '0'
+ params = "0"
types = tuple([t.__class__.__name__ for t in param_types])
if types:
paramid = self.all_param_types.get(types)
if paramid is None:
paramid = len(self.all_param_types)
self.all_param_types[types] = paramid
- params = 'command_parameters%d' % (paramid,)
+ params = "command_parameters%d" % (paramid,)
out = """
// %s
.encoded_msgid=%d, // msgid=%d
.num_params=%d,
.param_types = %s,
-""" % (comment, encoded_msgid, self.encid_to_msgid[encoded_msgid],
- len(types), params)
- if msgtype == 'response':
- num_args = (len(types) + types.count('PT_progmem_buffer')
- + types.count('PT_buffer'))
+""" % (
+ comment,
+ encoded_msgid,
+ self.encid_to_msgid[encoded_msgid],
+ len(types),
+ params,
+ )
+ if msgtype == "response":
+ num_args = (
+ len(types) + types.count("PT_progmem_buffer") + types.count("PT_buffer")
+ )
out += " .num_args=%d," % (num_args,)
else:
msgid_size = 1
if encoded_msgid >= 0x80:
msgid_size = 2
- max_size = min(msgproto.MESSAGE_MAX,
- (msgproto.MESSAGE_MIN + msgid_size
- + sum([t.max_length for t in param_types])))
+ max_size = min(
+ msgproto.MESSAGE_MAX,
+ (
+ msgproto.MESSAGE_MIN
+ + msgid_size
+ + sum([t.max_length for t in param_types])
+ ),
+ )
out += " .max_size=%d," % (max_size,)
return out
+
def generate_responses_code(self):
encoder_defs = []
output_code = []
@@ -366,19 +433,20 @@ class HandleCommandGeneration:
if encoded_msgid in did_output:
continue
did_output[encoded_msgid] = True
- code = (' if (__builtin_strcmp(str, "%s") == 0)\n'
- ' return &command_encoder_%s;\n'
- % (msg, encoded_msgid))
+ code = (
+ ' if (__builtin_strcmp(str, "%s") == 0)\n'
+ " return &command_encoder_%s;\n" % (msg, encoded_msgid)
+ )
if msgname is None:
- parsercode = self.build_parser(encoded_msgid, msg, 'output')
+ parsercode = self.build_parser(encoded_msgid, msg, "output")
output_code.append(code)
else:
- parsercode = self.build_parser(encoded_msgid, msg, 'command')
+ parsercode = self.build_parser(encoded_msgid, msg, "command")
encoder_code.append(code)
encoder_defs.append(
"const struct command_encoder command_encoder_%s PROGMEM = {"
- " %s\n};\n" % (
- encoded_msgid, parsercode))
+ " %s\n};\n" % (encoded_msgid, parsercode)
+ )
fmt = """
%s
@@ -396,9 +464,12 @@ ctr_lookup_output(const char *str)
return NULL;
}
"""
- return fmt % ("".join(encoder_defs).strip(),
- "".join(encoder_code).strip(),
- "".join(output_code).strip())
+ return fmt % (
+ "".join(encoder_defs).strip(),
+ "".join(encoder_code).strip(),
+ "".join(output_code).strip(),
+ )
+
def generate_commands_code(self):
cmd_by_encid = {
self.msg_to_encid[self.messages_by_name.get(msgname, msgname)]: cmd
@@ -407,19 +478,21 @@ ctr_lookup_output(const char *str)
max_cmd_encid = max(cmd_by_encid.keys())
index = []
externs = {}
- for encoded_msgid in range(max_cmd_encid+1):
+ for encoded_msgid in range(max_cmd_encid + 1):
if encoded_msgid not in cmd_by_encid:
index.append(" {\n},")
continue
funcname, flags, msgname = cmd_by_encid[encoded_msgid]
msg = self.messages_by_name[msgname]
externs[funcname] = 1
- parsercode = self.build_parser(encoded_msgid, msg, 'response')
- index.append(" {%s\n .flags=%s,\n .func=%s\n}," % (
- parsercode, flags, funcname))
+ parsercode = self.build_parser(encoded_msgid, msg, "response")
+ index.append(
+ " {%s\n .flags=%s,\n .func=%s\n}," % (parsercode, flags, funcname)
+ )
index = "".join(index).strip()
- externs = "\n".join(["extern void "+funcname+"(uint32_t*);"
- for funcname in sorted(externs)])
+ externs = "\n".join(
+ ["extern void " + funcname + "(uint32_t*);" for funcname in sorted(externs)]
+ )
fmt = """
%s
@@ -430,17 +503,22 @@ const struct command_parser command_index[] PROGMEM = {
const uint16_t command_index_size PROGMEM = ARRAY_SIZE(command_index);
"""
return fmt % (externs, index)
+
def generate_param_code(self):
- sorted_param_types = sorted(
- [(i, a) for a, i in self.all_param_types.items()])
- params = ['']
+ sorted_param_types = sorted([(i, a) for a, i in self.all_param_types.items()])
+ params = [""]
for paramid, argtypes in sorted_param_types:
params.append(
- 'static const uint8_t command_parameters%d[] PROGMEM = {\n'
- ' %s };' % (
- paramid, ', '.join(argtypes),))
- params.append('')
+ "static const uint8_t command_parameters%d[] PROGMEM = {\n"
+ " %s };"
+ % (
+ paramid,
+ ", ".join(argtypes),
+ )
+ )
+ params.append("")
return "\n".join(params)
+
def generate_code(self, options):
self.create_message_ids()
parsercode = self.generate_responses_code()
@@ -448,6 +526,7 @@ const uint16_t command_index_size PROGMEM = ARRAY_SIZE(command_index);
paramcode = self.generate_param_code()
return paramcode + parsercode + cmdcode
+
Handlers.append(HandleCommandGeneration())
@@ -455,6 +534,7 @@ Handlers.append(HandleCommandGeneration())
# Version generation
######################################################################
+
# Run program and return the specified output
def check_output(prog):
logging.debug("Running %s" % (repr(prog),))
@@ -469,26 +549,28 @@ def check_output(prog):
if retcode:
return ""
try:
- return str(output.decode('utf8'))
+ return str(output.decode("utf8"))
except UnicodeError:
logging.debug("Exception on decode: %s" % (traceback.format_exc(),))
return ""
+
# Obtain version info from "git" program
def git_version():
- if not os.path.exists('.git'):
+ if not os.path.exists(".git"):
logging.debug("No '.git' file/directory found")
return ""
ver = check_output("git describe --always --tags --long --dirty").strip()
logging.debug("Got git version: %s" % (repr(ver),))
return ver
+
def build_version(extra, cleanbuild):
version = git_version()
if not version:
cleanbuild = False
version = "?"
- elif 'dirty' in version:
+ elif "dirty" in version:
cleanbuild = False
if not cleanbuild:
btime = time.strftime("%Y%m%d_%H%M%S")
@@ -496,29 +578,31 @@ def build_version(extra, cleanbuild):
version = "%s-%s-%s" % (version, btime, hostname)
return version + extra
+
# Run "tool --version" for each specified tool and extract versions
def tool_versions(tools):
- tools = [t.strip() for t in tools.split(';')]
- versions = ['', '']
+ tools = [t.strip() for t in tools.split(";")]
+ versions = ["", ""]
success = 0
for tool in tools:
# Extract first line from "tool --version" output
- verstr = check_output("%s --version" % (tool,)).split('\n')[0]
+ verstr = check_output("%s --version" % (tool,)).split("\n")[0]
# Check if this tool looks like a binutils program
isbinutils = 0
- if verstr.startswith('GNU '):
+ if verstr.startswith("GNU "):
isbinutils = 1
verstr = verstr[4:]
# Extract version information and exclude program name
- if ' ' not in verstr:
+ if " " not in verstr:
continue
- prog, ver = verstr.split(' ', 1)
+ prog, ver = verstr.split(" ", 1)
if not prog or not ver:
continue
# Check for any version conflicts
if versions[isbinutils] and versions[isbinutils] != ver:
- logging.debug("Mixed version %s vs %s" % (
- repr(versions[isbinutils]), repr(ver)))
+ logging.debug(
+ "Mixed version %s vs %s" % (repr(versions[isbinutils]), repr(ver))
+ )
versions[isbinutils] = "mixed"
continue
versions[isbinutils] = ver
@@ -526,22 +610,28 @@ def tool_versions(tools):
cleanbuild = versions[0] and versions[1] and success == len(tools)
return cleanbuild, "gcc: %s binutils: %s" % (versions[0], versions[1])
+
# Add version information to the data dictionary
class HandleVersions:
def __init__(self):
self.ctr_dispatch = {}
self.toolstr = self.version = ""
+
def update_data_dictionary(self, data):
- data['version'] = self.version
- data['build_versions'] = self.toolstr
- data['app'] = 'Klipper'
- data['license'] = 'GNU GPLv3'
+ data["version"] = self.version
+ data["build_versions"] = self.toolstr
+ data["app"] = "Klipper"
+ data["license"] = "GNU GPLv3"
+
def generate_code(self, options):
cleanbuild, self.toolstr = tool_versions(options.tools)
self.version = build_version(options.extra, cleanbuild)
sys.stdout.write("Version: %s\n" % (self.version,))
return "\n// version: %s\n// build_versions: %s\n" % (
- self.version, self.toolstr)
+ self.version,
+ self.toolstr,
+ )
+
Handlers.append(HandleVersions())
@@ -550,22 +640,25 @@ Handlers.append(HandleVersions())
# Identify data dictionary generation
######################################################################
+
# Automatically generate the wire protocol data dictionary
class HandleIdentify:
def __init__(self):
self.ctr_dispatch = {}
+
def update_data_dictionary(self, data):
pass
+
def generate_code(self, options):
# Generate data dictionary
data = {}
for h in Handlers:
h.update_data_dictionary(data)
- datadict = json.dumps(data, separators=(',', ':'), sort_keys=True)
+ datadict = json.dumps(data, separators=(",", ":"), sort_keys=True)
# Write data dictionary
if options.write_dictionary:
- f = open(options.write_dictionary, 'w')
+ f = open(options.write_dictionary, "w")
f.write(datadict)
f.close()
@@ -574,7 +667,7 @@ class HandleIdentify:
out = []
for i in range(len(zdatadict)):
if i % 8 == 0:
- out.append('\n ')
+ out.append("\n ")
out.append(" 0x%02x," % (zdatadict[i],))
fmt = """
const uint8_t command_identify_data[] PROGMEM = {%s
@@ -584,7 +677,8 @@ const uint8_t command_identify_data[] PROGMEM = {%s
const uint32_t command_identify_size PROGMEM
= ARRAY_SIZE(command_identify_data);
"""
- return fmt % (''.join(out), len(zdatadict), len(datadict))
+ return fmt % ("".join(out), len(zdatadict), len(datadict))
+
Handlers.append(HandleIdentify())
@@ -593,17 +687,30 @@ Handlers.append(HandleIdentify())
# Main code
######################################################################
+
def main():
usage = "%prog [options] <cmd section file> <output.c>"
opts = optparse.OptionParser(usage)
- opts.add_option("-e", "--extra", dest="extra", default="",
- help="extra version string to append to version")
- opts.add_option("-d", dest="write_dictionary",
- help="file to write mcu protocol dictionary")
- opts.add_option("-t", "--tools", dest="tools", default="",
- help="list of build programs to extract version from")
- opts.add_option("-v", action="store_true", dest="verbose",
- help="enable debug messages")
+ opts.add_option(
+ "-e",
+ "--extra",
+ dest="extra",
+ default="",
+ help="extra version string to append to version",
+ )
+ opts.add_option(
+ "-d", dest="write_dictionary", help="file to write mcu protocol dictionary"
+ )
+ opts.add_option(
+ "-t",
+ "--tools",
+ dest="tools",
+ default="",
+ help="list of build programs to extract version from",
+ )
+ opts.add_option(
+ "-v", action="store_true", dest="verbose", help="enable debug messages"
+ )
options, args = opts.parse_args()
if len(args) != 2:
@@ -613,11 +720,11 @@ def main():
logging.basicConfig(level=logging.DEBUG)
# Parse request file
- ctr_dispatch = { k: v for h in Handlers for k, v in h.ctr_dispatch.items() }
- f = open(incmdfile, 'r')
+ ctr_dispatch = {k: v for h in Handlers for k, v in h.ctr_dispatch.items()}
+ f = open(incmdfile, "r")
data = f.read()
f.close()
- for req in data.split('\n'):
+ for req in data.split("\n"):
req = req.lstrip()
if not req:
continue
@@ -628,9 +735,10 @@ def main():
# Write output
code = "".join([FILEHEADER] + [h.generate_code(options) for h in Handlers])
- f = open(outcfile, 'w')
+ f = open(outcfile, "w")
f.write(code)
f.close()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/calibrate_shaper.py b/scripts/calibrate_shaper.py
index b56ce5da..6109d843 100755
--- a/scripts/calibrate_shaper.py
+++ b/scripts/calibrate_shaper.py
@@ -9,40 +9,58 @@ from __future__ import print_function
import importlib, optparse, os, sys
from textwrap import wrap
import numpy as np, matplotlib
-sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
- '..', 'klippy'))
-shaper_calibrate = importlib.import_module('.shaper_calibrate', 'extras')
-MAX_TITLE_LENGTH=65
+sys.path.append(
+ os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "klippy")
+)
+shaper_calibrate = importlib.import_module(".shaper_calibrate", "extras")
+
+MAX_TITLE_LENGTH = 65
+
def parse_log(logname):
with open(logname) as f:
for header in f:
- if not header.startswith('#'):
+ if not header.startswith("#"):
break
- if not header.startswith('freq,psd_x,psd_y,psd_z,psd_xyz'):
+ if not header.startswith("freq,psd_x,psd_y,psd_z,psd_xyz"):
# Raw accelerometer data
- return np.loadtxt(logname, comments='#', delimiter=',')
+ return np.loadtxt(logname, comments="#", delimiter=",")
# Parse power spectral density data
- data = np.loadtxt(logname, skiprows=1, comments='#', delimiter=',')
+ data = np.loadtxt(logname, skiprows=1, comments="#", delimiter=",")
calibration_data = shaper_calibrate.CalibrationData(
- freq_bins=data[:,0], psd_sum=data[:,4],
- psd_x=data[:,1], psd_y=data[:,2], psd_z=data[:,3])
+ freq_bins=data[:, 0],
+ psd_sum=data[:, 4],
+ psd_x=data[:, 1],
+ psd_y=data[:, 2],
+ psd_z=data[:, 3],
+ )
calibration_data.set_numpy(np)
# If input shapers are present in the CSV file, the frequency
# response is already normalized to input frequencies
- if 'mzv' not in header:
+ if "mzv" not in header:
calibration_data.normalize_to_frequencies()
return calibration_data
+
######################################################################
# Shaper calibration
######################################################################
+
# Find the best shaper parameters
-def calibrate_shaper(datas, csv_output, *, shapers, damping_ratio, scv,
- shaper_freqs, max_smoothing, test_damping_ratios,
- max_freq):
+def calibrate_shaper(
+ datas,
+ csv_output,
+ *,
+ shapers,
+ damping_ratio,
+ scv,
+ shaper_freqs,
+ max_smoothing,
+ test_damping_ratios,
+ max_freq
+):
helper = shaper_calibrate.ShaperCalibrate(printer=None)
if isinstance(datas[0], shaper_calibrate.CalibrationData):
calibration_data = datas[0]
@@ -55,28 +73,35 @@ def calibrate_shaper(datas, csv_output, *, shapers, damping_ratio, scv,
calibration_data.add_data(helper.process_accelerometer_data(data))
calibration_data.normalize_to_frequencies()
-
shaper, all_shapers = helper.find_best_shaper(
- calibration_data, shapers=shapers, damping_ratio=damping_ratio,
- scv=scv, shaper_freqs=shaper_freqs, max_smoothing=max_smoothing,
- test_damping_ratios=test_damping_ratios, max_freq=max_freq,
- logger=print)
+ calibration_data,
+ shapers=shapers,
+ damping_ratio=damping_ratio,
+ scv=scv,
+ shaper_freqs=shaper_freqs,
+ max_smoothing=max_smoothing,
+ test_damping_ratios=test_damping_ratios,
+ max_freq=max_freq,
+ logger=print,
+ )
if not shaper:
- print("No recommended shaper, possibly invalid value for --shapers=%s" %
- (','.join(shapers)))
+ print(
+ "No recommended shaper, possibly invalid value for --shapers=%s"
+ % (",".join(shapers))
+ )
return None, None, None
print("Recommended shaper is %s @ %.1f Hz" % (shaper.name, shaper.freq))
if csv_output is not None:
- helper.save_calibration_data(
- csv_output, calibration_data, all_shapers)
+ helper.save_calibration_data(csv_output, calibration_data, all_shapers)
return shaper.name, all_shapers, calibration_data
+
######################################################################
# Plot frequency response and suggested input shapers
######################################################################
-def plot_freq_response(lognames, calibration_data, shapers,
- selected_shaper, max_freq):
+
+def plot_freq_response(lognames, calibration_data, shapers, selected_shaper, max_freq):
freqs = calibration_data.freq_bins
psd = calibration_data.psd_sum[freqs <= max_freq]
px = calibration_data.psd_x[freqs <= max_freq]
@@ -85,89 +110,140 @@ def plot_freq_response(lognames, calibration_data, shapers,
freqs = freqs[freqs <= max_freq]
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
+ fontP.set_size("x-small")
fig, ax = matplotlib.pyplot.subplots()
- ax.set_xlabel('Frequency, Hz')
+ ax.set_xlabel("Frequency, Hz")
ax.set_xlim([0, max_freq])
- ax.set_ylabel('Power spectral density')
+ ax.set_ylabel("Power spectral density")
- ax.plot(freqs, psd, label='X+Y+Z', color='purple')
- ax.plot(freqs, px, label='X', color='red')
- ax.plot(freqs, py, label='Y', color='green')
- ax.plot(freqs, pz, label='Z', color='blue')
+ ax.plot(freqs, psd, label="X+Y+Z", color="purple")
+ ax.plot(freqs, px, label="X", color="red")
+ ax.plot(freqs, py, label="Y", color="green")
+ ax.plot(freqs, pz, label="Z", color="blue")
- title = "Frequency response and shapers (%s)" % (', '.join(lognames))
+ title = "Frequency response and shapers (%s)" % (", ".join(lognames))
ax.set_title("\n".join(wrap(title, MAX_TITLE_LENGTH)))
ax.xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(5))
ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
- ax.ticklabel_format(axis='y', style='scientific', scilimits=(0,0))
- ax.grid(which='major', color='grey')
- ax.grid(which='minor', color='lightgrey')
+ ax.ticklabel_format(axis="y", style="scientific", scilimits=(0, 0))
+ ax.grid(which="major", color="grey")
+ ax.grid(which="minor", color="lightgrey")
ax2 = ax.twinx()
- ax2.set_ylabel('Shaper vibration reduction (ratio)')
+ ax2.set_ylabel("Shaper vibration reduction (ratio)")
best_shaper_vals = None
for shaper in shapers:
label = "%s (%.1f Hz, vibr=%.1f%%, sm~=%.2f, accel<=%.f)" % (
- shaper.name.upper(), shaper.freq,
- shaper.vibrs * 100., shaper.smoothing,
- round(shaper.max_accel / 100.) * 100.)
- linestyle = 'dotted'
+ shaper.name.upper(),
+ shaper.freq,
+ shaper.vibrs * 100.0,
+ shaper.smoothing,
+ round(shaper.max_accel / 100.0) * 100.0,
+ )
+ linestyle = "dotted"
if shaper.name == selected_shaper:
- linestyle = 'dashdot'
+ linestyle = "dashdot"
best_shaper_vals = shaper.vals
ax2.plot(freqs, shaper.vals, label=label, linestyle=linestyle)
- ax.plot(freqs, psd * best_shaper_vals,
- label='After\nshaper', color='cyan')
+ ax.plot(freqs, psd * best_shaper_vals, label="After\nshaper", color="cyan")
# A hack to add a human-readable shaper recommendation to legend
- ax2.plot([], [], ' ',
- label="Recommended shaper: %s" % (selected_shaper.upper()))
+ ax2.plot([], [], " ", label="Recommended shaper: %s" % (selected_shaper.upper()))
- ax.legend(loc='upper left', prop=fontP)
- ax2.legend(loc='upper right', prop=fontP)
+ ax.legend(loc="upper left", prop=fontP)
+ ax2.legend(loc="upper right", prop=fontP)
fig.tight_layout()
return fig
+
######################################################################
# Startup
######################################################################
+
def setup_matplotlib(output_to_file):
global matplotlib
if output_to_file:
- matplotlib.rcParams.update({'figure.autolayout': True})
- matplotlib.use('Agg')
+ matplotlib.rcParams.update({"figure.autolayout": True})
+ matplotlib.use("Agg")
import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
import matplotlib.ticker
+
def main():
# Parse command-line arguments
usage = "%prog [options] <logs>"
opts = optparse.OptionParser(usage)
- opts.add_option("-o", "--output", type="string", dest="output",
- default=None, help="filename of output graph")
- opts.add_option("-c", "--csv", type="string", dest="csv",
- default=None, help="filename of output csv file")
- opts.add_option("-f", "--max_freq", type="float", default=200.,
- help="maximum frequency to plot")
- opts.add_option("-s", "--max_smoothing", type="float", dest="max_smoothing",
- default=None, help="maximum shaper smoothing to allow")
- opts.add_option("--scv", "--square_corner_velocity", type="float",
- dest="scv", default=5., help="square corner velocity")
- opts.add_option("--shaper_freq", type="string", dest="shaper_freq",
- default=None, help="shaper frequency(-ies) to test, " +
- "either a comma-separated list of floats, or a range in " +
- "the format [start]:end[:step]")
- opts.add_option("--shapers", type="string", dest="shapers", default=None,
- help="a comma-separated list of shapers to test")
- opts.add_option("--damping_ratio", type="float", dest="damping_ratio",
- default=None, help="shaper damping_ratio parameter")
- opts.add_option("--test_damping_ratios", type="string",
- dest="test_damping_ratios", default=None,
- help="a comma-separated liat of damping ratios to test " +
- "input shaper for")
+ opts.add_option(
+ "-o",
+ "--output",
+ type="string",
+ dest="output",
+ default=None,
+ help="filename of output graph",
+ )
+ opts.add_option(
+ "-c",
+ "--csv",
+ type="string",
+ dest="csv",
+ default=None,
+ help="filename of output csv file",
+ )
+ opts.add_option(
+ "-f",
+ "--max_freq",
+ type="float",
+ default=200.0,
+ help="maximum frequency to plot",
+ )
+ opts.add_option(
+ "-s",
+ "--max_smoothing",
+ type="float",
+ dest="max_smoothing",
+ default=None,
+ help="maximum shaper smoothing to allow",
+ )
+ opts.add_option(
+ "--scv",
+ "--square_corner_velocity",
+ type="float",
+ dest="scv",
+ default=5.0,
+ help="square corner velocity",
+ )
+ opts.add_option(
+ "--shaper_freq",
+ type="string",
+ dest="shaper_freq",
+ default=None,
+ help="shaper frequency(-ies) to test, "
+ + "either a comma-separated list of floats, or a range in "
+ + "the format [start]:end[:step]",
+ )
+ opts.add_option(
+ "--shapers",
+ type="string",
+ dest="shapers",
+ default=None,
+ help="a comma-separated list of shapers to test",
+ )
+ opts.add_option(
+ "--damping_ratio",
+ type="float",
+ dest="damping_ratio",
+ default=None,
+ help="shaper damping_ratio parameter",
+ )
+ opts.add_option(
+ "--test_damping_ratios",
+ type="string",
+ dest="test_damping_ratios",
+ default=None,
+        help="a comma-separated list of damping ratios to test " + "input shaper for",
+ )
options, args = opts.parse_args()
if len(args) < 1:
opts.error("Incorrect number of arguments")
@@ -177,59 +253,68 @@ def main():
max_freq = options.max_freq
if options.shaper_freq is None:
shaper_freqs = []
- elif options.shaper_freq.find(':') >= 0:
+ elif options.shaper_freq.find(":") >= 0:
freq_start = None
freq_end = None
freq_step = None
try:
- freqs_parsed = options.shaper_freq.partition(':')
+ freqs_parsed = options.shaper_freq.partition(":")
if freqs_parsed[0]:
freq_start = float(freqs_parsed[0])
- freqs_parsed = freqs_parsed[-1].partition(':')
+ freqs_parsed = freqs_parsed[-1].partition(":")
freq_end = float(freqs_parsed[0])
if freq_start and freq_start > freq_end:
- opts.error("Invalid --shaper_freq param: start range larger " +
- "than its end")
- if freqs_parsed[-1].find(':') >= 0:
+ opts.error(
+ "Invalid --shaper_freq param: start range larger " + "than its end"
+ )
+ if freqs_parsed[-1].find(":") >= 0:
opts.error("Invalid --shaper_freq param format")
if freqs_parsed[-1]:
freq_step = float(freqs_parsed[-1])
except ValueError:
- opts.error("--shaper_freq param does not specify correct range " +
- "in the format [start]:end[:step]")
+ opts.error(
+ "--shaper_freq param does not specify correct range "
+ + "in the format [start]:end[:step]"
+ )
shaper_freqs = (freq_start, freq_end, freq_step)
- max_freq = max(max_freq, freq_end * 4./3.)
+ max_freq = max(max_freq, freq_end * 4.0 / 3.0)
else:
try:
- shaper_freqs = [float(s) for s in options.shaper_freq.split(',')]
+ shaper_freqs = [float(s) for s in options.shaper_freq.split(",")]
except ValueError:
opts.error("invalid floating point value in --shaper_freq param")
- max_freq = max(max_freq, max(shaper_freqs) * 4./3.)
+ max_freq = max(max_freq, max(shaper_freqs) * 4.0 / 3.0)
if options.test_damping_ratios:
try:
- test_damping_ratios = [float(s) for s in
- options.test_damping_ratios.split(',')]
+ test_damping_ratios = [
+ float(s) for s in options.test_damping_ratios.split(",")
+ ]
except ValueError:
- opts.error("invalid floating point value in " +
- "--test_damping_ratios param")
+ opts.error(
+ "invalid floating point value in " + "--test_damping_ratios param"
+ )
else:
test_damping_ratios = None
if options.shapers is None:
shapers = None
else:
- shapers = options.shapers.lower().split(',')
+ shapers = options.shapers.lower().split(",")
# Parse data
datas = [parse_log(fn) for fn in args]
# Calibrate shaper and generate outputs
selected_shaper, shapers, calibration_data = calibrate_shaper(
- datas, options.csv, shapers=shapers,
- damping_ratio=options.damping_ratio,
- scv=options.scv, shaper_freqs=shaper_freqs,
- max_smoothing=options.max_smoothing,
- test_damping_ratios=test_damping_ratios,
- max_freq=max_freq)
+ datas,
+ options.csv,
+ shapers=shapers,
+ damping_ratio=options.damping_ratio,
+ scv=options.scv,
+ shaper_freqs=shaper_freqs,
+ max_smoothing=options.max_smoothing,
+ test_damping_ratios=test_damping_ratios,
+ max_freq=max_freq,
+ )
if selected_shaper is None:
return
@@ -237,8 +322,9 @@ def main():
# Draw graph
setup_matplotlib(options.output is not None)
- fig = plot_freq_response(args, calibration_data, shapers,
- selected_shaper, max_freq)
+ fig = plot_freq_response(
+ args, calibration_data, shapers, selected_shaper, max_freq
+ )
# Show graph
if options.output is None:
@@ -247,5 +333,6 @@ def main():
fig.set_size_inches(8, 6)
fig.savefig(options.output)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/canbus_query.py b/scripts/canbus_query.py
index 52dd4706..12032ee7 100644
--- a/scripts/canbus_query.py
+++ b/scripts/canbus_query.py
@@ -7,49 +7,66 @@
import sys, os, optparse, time
import can
-CANBUS_ID_ADMIN = 0x3f0
+CANBUS_ID_ADMIN = 0x3F0
CMD_QUERY_UNASSIGNED = 0x00
RESP_NEED_NODEID = 0x20
CMD_SET_KLIPPER_NODEID = 0x01
CMD_SET_CANBOOT_NODEID = 0x11
+
def query_unassigned(canbus_iface):
# Open CAN socket
- filters = [{"can_id": CANBUS_ID_ADMIN + 1, "can_mask": 0x7ff,
- "extended": False}]
- bus = can.interface.Bus(channel=canbus_iface, can_filters=filters,
- bustype='socketcan')
+ filters = [{"can_id": CANBUS_ID_ADMIN + 1, "can_mask": 0x7FF, "extended": False}]
+ bus = can.interface.Bus(
+ channel=canbus_iface, can_filters=filters, bustype="socketcan"
+ )
# Send query
- msg = can.Message(arbitration_id=CANBUS_ID_ADMIN,
- data=[CMD_QUERY_UNASSIGNED], is_extended_id=False)
+ msg = can.Message(
+ arbitration_id=CANBUS_ID_ADMIN,
+ data=[CMD_QUERY_UNASSIGNED],
+ is_extended_id=False,
+ )
bus.send(msg)
# Read responses
found_ids = {}
start_time = curtime = time.time()
while 1:
- tdiff = start_time + 2. - curtime
- if tdiff <= 0.:
+ tdiff = start_time + 2.0 - curtime
+ if tdiff <= 0.0:
break
msg = bus.recv(tdiff)
curtime = time.time()
- if (msg is None or msg.arbitration_id != CANBUS_ID_ADMIN + 1
- or msg.dlc < 7 or msg.data[0] != RESP_NEED_NODEID):
+ if (
+ msg is None
+ or msg.arbitration_id != CANBUS_ID_ADMIN + 1
+ or msg.dlc < 7
+ or msg.data[0] != RESP_NEED_NODEID
+ ):
continue
- uuid = sum([v << ((5-i)*8) for i, v in enumerate(msg.data[1:7])])
+ uuid = sum([v << ((5 - i) * 8) for i, v in enumerate(msg.data[1:7])])
if uuid in found_ids:
continue
found_ids[uuid] = 1
AppNames = {
CMD_SET_KLIPPER_NODEID: "Klipper",
- CMD_SET_CANBOOT_NODEID: "CanBoot"
+ CMD_SET_CANBOOT_NODEID: "CanBoot",
}
app_id = CMD_SET_KLIPPER_NODEID
if msg.dlc > 7:
app_id = msg.data[7]
app_name = AppNames.get(app_id, "Unknown")
- sys.stdout.write("Found canbus_uuid=%012x, Application: %s\n"
- % (uuid, app_name))
- sys.stdout.write("Total %d uuids found\n" % (len(found_ids,)))
+ sys.stdout.write(
+ "Found canbus_uuid=%012x, Application: %s\n" % (uuid, app_name)
+ )
+ sys.stdout.write(
+ "Total %d uuids found\n"
+ % (
+ len(
+ found_ids,
+ )
+ )
+ )
+
def main():
usage = "%prog [options] <can interface>"
@@ -60,5 +77,6 @@ def main():
canbus_iface = args[0]
query_unassigned(canbus_iface)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/check_whitespace.py b/scripts/check_whitespace.py
index fe8c7ae8..9223213a 100755
--- a/scripts/check_whitespace.py
+++ b/scripts/check_whitespace.py
@@ -8,6 +8,7 @@ import sys, os.path, unicodedata
HaveError = False
+
def report_error(filename, lineno, msg):
global HaveError
if not HaveError:
@@ -15,10 +16,11 @@ def report_error(filename, lineno, msg):
HaveError = True
sys.stderr.write("%s:%d: %s\n" % (filename, lineno + 1, msg))
+
def check_file(filename):
# Open and read file
try:
- f = open(filename, 'rb')
+ f = open(filename, "rb")
data = f.read()
f.close()
except IOError:
@@ -27,37 +29,37 @@ def check_file(filename):
# Empty files are okay
return
# Do checks
- is_source_code = any([filename.endswith(s) for s in ['.c', '.h', '.py']])
+ is_source_code = any([filename.endswith(s) for s in [".c", ".h", ".py"]])
lineno = 0
- for lineno, line in enumerate(data.split(b'\n')):
+ for lineno, line in enumerate(data.split(b"\n")):
# Verify line is valid utf-8
try:
- line = line.decode('utf-8')
+ line = line.decode("utf-8")
except UnicodeDecodeError:
report_error(filename, lineno, "Found non utf-8 character")
continue
# Check for control characters
for c in line:
- if unicodedata.category(c).startswith('C'):
+ if unicodedata.category(c).startswith("C"):
char_name = repr(c)
- if c == '\t':
- if os.path.basename(filename).lower() == 'makefile':
+ if c == "\t":
+ if os.path.basename(filename).lower() == "makefile":
continue
- char_name = 'tab'
- report_error(filename, lineno, "Invalid %s character" % (
- char_name,))
+ char_name = "tab"
+ report_error(filename, lineno, "Invalid %s character" % (char_name,))
break
# Check for trailing space
- if line.endswith(' ') or line.endswith('\t'):
+ if line.endswith(" ") or line.endswith("\t"):
report_error(filename, lineno, "Line has trailing spaces")
# Check for more than 80 characters
if is_source_code and len(line) > 80:
report_error(filename, lineno, "Line longer than 80 characters")
- if not data.endswith(b'\n'):
+ if not data.endswith(b"\n"):
report_error(filename, lineno, "No newline at end of file")
- if data.endswith(b'\n\n'):
+ if data.endswith(b"\n\n"):
report_error(filename, lineno, "Extra newlines at end of file")
+
def main():
files = sys.argv[1:]
for filename in files:
@@ -66,5 +68,6 @@ def main():
sys.stderr.write("\n\n")
sys.exit(-1)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/checkstack.py b/scripts/checkstack.py
index 1a6605fe..6ae9c473 100755
--- a/scripts/checkstack.py
+++ b/scripts/checkstack.py
@@ -25,6 +25,7 @@ OUTPUTDESC = """
# insn_addr:called_function [u+c,t,usage_to_yield_point]
"""
+
class function:
def __init__(self, funcaddr, funcname):
self.funcaddr = funcaddr
@@ -37,10 +38,12 @@ class function:
# called_funcs = [(insnaddr, calladdr, stackusage), ...]
self.called_funcs = []
self.subfuncs = {}
+
# Update function info with a found "yield" point.
def noteYield(self, stackusage):
if self.yield_usage < stackusage:
self.yield_usage = stackusage
+
# Update function info with a found "call" point.
def noteCall(self, insnaddr, calladdr, stackusage):
if (calladdr, stackusage) in self.subfuncs:
@@ -49,6 +52,7 @@ class function:
self.called_funcs.append((insnaddr, calladdr, stackusage))
self.subfuncs[(calladdr, stackusage)] = 1
+
# Find out maximum stack usage for a function
def calcmaxstack(info, funcs):
if info.max_stack_usage is not None:
@@ -66,7 +70,7 @@ def calcmaxstack(info, funcs):
if callinfo.funcname not in seenbefore:
seenbefore[callinfo.funcname] = 1
total_calls += callinfo.total_calls + 1
- funcnameroot = callinfo.funcname.split('.')[0]
+ funcnameroot = callinfo.funcname.split(".")[0]
if funcnameroot in IGNORE:
# This called function is ignored - don't contribute it to
# the max stack.
@@ -84,12 +88,15 @@ def calcmaxstack(info, funcs):
info.max_yield_usage = max_yield_usage
info.total_calls = total_calls
+
# Try to arrange output so that functions that call each other are
# near each other.
def orderfuncs(funcaddrs, availfuncs):
- l = [(availfuncs[funcaddr].total_calls
- , availfuncs[funcaddr].funcname, funcaddr)
- for funcaddr in funcaddrs if funcaddr in availfuncs]
+ l = [
+ (availfuncs[funcaddr].total_calls, availfuncs[funcaddr].funcname, funcaddr)
+ for funcaddr in funcaddrs
+ if funcaddr in availfuncs
+ ]
l.sort()
l.reverse()
out = []
@@ -103,17 +110,22 @@ def orderfuncs(funcaddrs, availfuncs):
out = out + orderfuncs(calladdrs, availfuncs) + [info]
return out
-hex_s = r'[0-9a-f]+'
-re_func = re.compile(r'^(?P<funcaddr>' + hex_s + r') <(?P<func>.*)>:$')
+
+hex_s = r"[0-9a-f]+"
+re_func = re.compile(r"^(?P<funcaddr>" + hex_s + r") <(?P<func>.*)>:$")
re_asm = re.compile(
- r'^[ ]*(?P<insnaddr>' + hex_s
- + r'):\t[^\t]*\t(?P<insn>[^\t]+?)(?P<params>\t[^;]*)?'
- + r'[ ]*(; (?P<calladdr>0x' + hex_s
- + r') <(?P<ref>.*)>)?$')
+ r"^[ ]*(?P<insnaddr>"
+ + hex_s
+ + r"):\t[^\t]*\t(?P<insn>[^\t]+?)(?P<params>\t[^;]*)?"
+ + r"[ ]*(; (?P<calladdr>0x"
+ + hex_s
+ + r") <(?P<ref>.*)>)?$"
+)
+
def main():
unknownfunc = function(None, "<unknown>")
- indirectfunc = function(-1, '<indirect>')
+ indirectfunc = function(-1, "<indirect>")
unknownfunc.max_stack_usage = indirectfunc.max_stack_usage = 0
unknownfunc.max_yield_usage = indirectfunc.max_yield_usage = -1
funcs = {-1: indirectfunc}
@@ -128,38 +140,38 @@ def main():
m = re_func.match(line)
if m is not None:
# Found function
- funcaddr = int(m.group('funcaddr'), 16)
- funcs[funcaddr] = cur = function(funcaddr, m.group('func'))
+ funcaddr = int(m.group("funcaddr"), 16)
+ funcs[funcaddr] = cur = function(funcaddr, m.group("func"))
stackusage = 0
atstart = 1
continue
m = re_asm.match(line)
if m is None:
datalines.setdefault(funcaddr, []).append(line)
- #print("other", repr(line))
+ # print("other", repr(line))
continue
- insn = m.group('insn')
+ insn = m.group("insn")
- if insn == 'push':
+ if insn == "push":
stackusage += 1
continue
- if insn == 'rcall' and m.group('params').strip() == '.+0':
+ if insn == "rcall" and m.group("params").strip() == ".+0":
stackusage += 2
continue
if atstart:
- if insn in ['in', 'eor']:
+ if insn in ["in", "eor"]:
continue
cur.basic_stack_usage = stackusage
atstart = 0
- insnaddr = m.group('insnaddr')
- calladdr = m.group('calladdr')
+ insnaddr = m.group("insnaddr")
+ calladdr = m.group("calladdr")
if calladdr is None:
- if insn == 'ijmp':
+ if insn == "ijmp":
# Indirect tail call
cur.noteCall(insnaddr, -1, 0)
- elif insn == 'icall':
+ elif insn == "icall":
cur.noteCall(insnaddr, -1, stackusage + 2)
else:
# misc instruction
@@ -167,17 +179,17 @@ def main():
else:
# Jump or call insn
calladdr = int(calladdr, 16)
- ref = m.group('ref')
- if '+' in ref:
+ ref = m.group("ref")
+ if "+" in ref:
# Inter-function jump.
continue
- elif insn.startswith('ld') or insn.startswith('st'):
+ elif insn.startswith("ld") or insn.startswith("st"):
# memory access
continue
- elif insn in ('rjmp', 'jmp', 'brne', 'brcs'):
+ elif insn in ("rjmp", "jmp", "brne", "brcs"):
# Tail call
cur.noteCall(insnaddr, calladdr, 0)
- elif insn in ('rcall', 'call'):
+ elif insn in ("rcall", "call"):
cur.noteCall(insnaddr, calladdr, stackusage + 2)
else:
print("unknown call", ref)
@@ -188,29 +200,29 @@ def main():
# Update for known indirect functions
funcsbyname = {}
for info in funcs.values():
- funcnameroot = info.funcname.split('.')[0]
+ funcnameroot = info.funcname.split(".")[0]
funcsbyname[funcnameroot] = info
- cmdfunc = funcsbyname.get('sched_main')
- command_index = funcsbyname.get('command_index')
+ cmdfunc = funcsbyname.get("sched_main")
+ command_index = funcsbyname.get("command_index")
if command_index is not None and cmdfunc is not None:
for line in datalines[command_index.funcaddr]:
parts = line.split()
if len(parts) < 9:
continue
- calladdr = int(parts[8]+parts[7], 16) * 2
+ calladdr = int(parts[8] + parts[7], 16) * 2
numparams = int(parts[2], 16)
stackusage = cmdfunc.basic_stack_usage + 2 + numparams * 4
cmdfunc.noteCall(0, calladdr, stackusage)
if len(parts) < 17:
continue
- calladdr = int(parts[16]+parts[15], 16) * 2
+ calladdr = int(parts[16] + parts[15], 16) * 2
numparams = int(parts[10], 16)
stackusage = cmdfunc.basic_stack_usage + 2 + numparams * 4
cmdfunc.noteCall(0, calladdr, stackusage)
- eventfunc = funcsbyname.get('__vector_13', funcsbyname.get('__vector_17'))
+ eventfunc = funcsbyname.get("__vector_13", funcsbyname.get("__vector_17"))
for funcnameroot, info in funcsbyname.items():
- if funcnameroot.endswith('_event') and eventfunc is not None:
- eventfunc.noteCall(0, info.funcaddr, eventfunc.basic_stack_usage+2)
+ if funcnameroot.endswith("_event") and eventfunc is not None:
+ eventfunc.noteCall(0, info.funcaddr, eventfunc.basic_stack_usage + 2)
# Calculate maxstackusage
for info in funcs.values():
@@ -227,17 +239,27 @@ def main():
yieldstr = ""
if info.max_yield_usage >= 0:
yieldstr = ",%d" % info.max_yield_usage
- print("\n%s[%d,%d%s]:" % (info.funcname, info.basic_stack_usage
- , info.max_stack_usage, yieldstr))
+ print(
+ "\n%s[%d,%d%s]:"
+ % (info.funcname, info.basic_stack_usage, info.max_stack_usage, yieldstr)
+ )
for insnaddr, calladdr, stackusage in info.called_funcs:
callinfo = funcs.get(calladdr, unknownfunc)
yieldstr = ""
if callinfo.max_yield_usage >= 0:
yieldstr = ",%d" % (stackusage + callinfo.max_yield_usage)
- print(" %04s:%-40s [%d+%d,%d%s]" % (
- insnaddr, callinfo.funcname, stackusage
- , callinfo.basic_stack_usage
- , stackusage+callinfo.max_stack_usage, yieldstr))
+ print(
+ " %04s:%-40s [%d+%d,%d%s]"
+ % (
+ insnaddr,
+ callinfo.funcname,
+ stackusage,
+ callinfo.basic_stack_usage,
+ stackusage + callinfo.max_stack_usage,
+ yieldstr,
+ )
+ )
+
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/scripts/dump_mcu.py b/scripts/dump_mcu.py
index d9345a96..153962e5 100755
--- a/scripts/dump_mcu.py
+++ b/scripts/dump_mcu.py
@@ -10,8 +10,8 @@ import argparse
import os
import traceback
import logging
-KLIPPER_DIR = os.path.abspath(os.path.join(
- os.path.dirname(__file__), "../"))
+
+KLIPPER_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
sys.path.append(os.path.join(KLIPPER_DIR, "klippy"))
import reactor
import serialhdl
@@ -23,20 +23,25 @@ import clocksync
#
###########################################################
+
def output_line(msg):
sys.stdout.write("%s\n" % (msg,))
sys.stdout.flush()
+
def output(msg):
sys.stdout.write("%s" % (msg,))
sys.stdout.flush()
-DUMP_CMD="debug_read order=%d addr=%d"
-DUMP_RESP="debug_result"
+
+DUMP_CMD = "debug_read order=%d addr=%d"
+DUMP_RESP = "debug_result"
+
class MCUDumpError(Exception):
pass
+
class MCUDump:
def __init__(self, args):
self.reactor = reactor.Reactor()
@@ -49,9 +54,7 @@ class MCUDump:
self.read_start = int(args.read_start, 0)
self.read_length = int(args.read_length, 0)
except ValueError as e:
- raise MCUDumpError(
- "Error converting flash address: %s " % (str(e),)
- )
+ raise MCUDumpError("Error converting flash address: %s " % (str(e),))
if self.read_length <= 0:
raise MCUDumpError("Read count must be greater than 0")
self._serial = serialhdl.SerialReader(self.reactor)
@@ -66,7 +69,7 @@ class MCUDump:
self.reactor.register_callback(self._do_serial_connect)
curtime = self.reactor.monotonic()
while True:
- curtime = self.reactor.pause(curtime + 1.)
+ curtime = self.reactor.pause(curtime + 1.0)
output(".")
if self.connect_completion.test():
self.connected = self.connect_completion.wait()
@@ -83,16 +86,15 @@ class MCUDump:
output_line("Frequency: %s\n" % (freq,))
def _do_serial_connect(self, eventtime):
- endtime = eventtime + 60.
+ endtime = eventtime + 60.0
while True:
try:
if self.canbus_iface is not None:
self._serial.connect_canbus(
self.device, self.canbus_nodeid, self.canbus_iface
)
- elif (
- self.device.startswith("/dev/rpmsg_") or
- self.device.startswith("/tmp/")
+ elif self.device.startswith("/dev/rpmsg_") or self.device.startswith(
+ "/tmp/"
):
self._serial.connect_pipe(self.device)
else:
@@ -105,7 +107,7 @@ class MCUDump:
return
output("Connection Error, retrying..")
self._serial.disconnect()
- self.reactor.pause(curtime + 2.)
+ self.reactor.pause(curtime + 2.0)
else:
break
self.connect_completion.complete(True)
@@ -122,10 +124,7 @@ class MCUDump:
order = [2, 0, 1, 0][(addr | count) & 3]
bsize = 1 << order
# Query data from mcu
- output_line(
- "Reading %d bytes from flash, start address 0x%x\n"
- % (count, addr)
- )
+ output_line("Reading %d bytes from flash, start address 0x%x\n" % (count, addr))
output("[")
bytes_read = last_reported_pct = 0
vals = []
@@ -133,9 +132,9 @@ class MCUDump:
caddr = addr + (i << order)
cmd = DUMP_CMD % (order, caddr)
params = self._serial.send_with_response(cmd, DUMP_RESP)
- vals.append(params['val'])
+ vals.append(params["val"])
bytes_read += bsize
- pct = int(bytes_read / float(count) * 100 + .5)
+ pct = int(bytes_read / float(count) * 100 + 0.5)
diff = (pct - last_reported_pct) // 2
if diff:
last_reported_pct = pct
@@ -145,7 +144,7 @@ class MCUDump:
data = bytearray()
for val in vals:
for b in range(bsize):
- data.append((val >> (8 * b)) & 0xff)
+ data.append((val >> (8 * b)) & 0xFF)
data = data[:count]
with open(self.output_file, "wb") as f:
f.write(data)
@@ -164,29 +163,49 @@ class MCUDump:
self.disconnect()
self.reactor.finalize()
+
def main():
parser = argparse.ArgumentParser(description="MCU Flash Dump Utility")
parser.add_argument(
- "-b", "--baud", metavar="<baud rate>", type=int,
- default=250000, help="Baud Rate")
- parser.add_argument(
- "-c", "--canbus_iface", metavar="<canbus iface>", default=None,
- help="Use CAN bus interface; <device> is the chip UUID")
- parser.add_argument(
- "-i", "--canbus_nodeid", metavar="<canbus nodeid>", type=int,
- default=64, help="The CAN nodeid to use (default 64)")
+ "-b",
+ "--baud",
+ metavar="<baud rate>",
+ type=int,
+ default=250000,
+ help="Baud Rate",
+ )
parser.add_argument(
- "-s", "--read_start", metavar="<read start>", default="0x0",
- help="Flash address to start reading")
+ "-c",
+ "--canbus_iface",
+ metavar="<canbus iface>",
+ default=None,
+ help="Use CAN bus interface; <device> is the chip UUID",
+ )
parser.add_argument(
- "-l", "--read_length", metavar="<read length>", default="0x400",
- help="Number of bytes to read")
+ "-i",
+ "--canbus_nodeid",
+ metavar="<canbus nodeid>",
+ type=int,
+ default=64,
+ help="The CAN nodeid to use (default 64)",
+ )
parser.add_argument(
- "device", metavar="<device>", help="Device Serial Port")
+ "-s",
+ "--read_start",
+ metavar="<read start>",
+ default="0x0",
+ help="Flash address to start reading",
+ )
parser.add_argument(
- "outfile", metavar="<outfile>",
- help="Path to output file")
+ "-l",
+ "--read_length",
+ metavar="<read length>",
+ default="0x400",
+ help="Number of bytes to read",
+ )
+ parser.add_argument("device", metavar="<device>", help="Device Serial Port")
+ parser.add_argument("outfile", metavar="<outfile>", help="Path to output file")
args = parser.parse_args()
logging.basicConfig(level=logging.CRITICAL)
try:
diff --git a/scripts/flash-ar100.py b/scripts/flash-ar100.py
index 33198e23..0896ae07 100755
--- a/scripts/flash-ar100.py
+++ b/scripts/flash-ar100.py
@@ -21,11 +21,11 @@ R_CPU_CFG_SIZE = R_CPU_CFG_PAGE_LIMIT - R_CPU_CFG_PAGE_BASE
R_CPU_CFG_OFFSET = 0xC00
R_CPU_CLK_OFFSET = 0x400
-parser = argparse.ArgumentParser(description='Flash and reset SRAM A2 of A64')
-parser.add_argument('filename', nargs='?', help='binary file to write')
-parser.add_argument('--reset', action='store_true', help='reset the AR100')
-parser.add_argument('--halt', action='store_true', help='Halt the AR100')
-parser.add_argument('--bl31', action='store_true', help='write bl31')
+parser = argparse.ArgumentParser(description="Flash and reset SRAM A2 of A64")
+parser.add_argument("filename", nargs="?", help="binary file to write")
+parser.add_argument("--reset", action="store_true", help="reset the AR100")
+parser.add_argument("--halt", action="store_true", help="Halt the AR100")
+parser.add_argument("--bl31", action="store_true", help="write bl31")
args = parser.parse_args()
@@ -33,21 +33,20 @@ args = parser.parse_args()
def write_exception_vectors():
print("Writing exception vectors")
with open("/dev/mem", "w+b") as f:
- exc = mmap.mmap(f.fileno(),
- length=EXCEPTIONS_SIZE,
- offset=EXCEPTIONS_BASE)
+ exc = mmap.mmap(f.fileno(), length=EXCEPTIONS_SIZE, offset=EXCEPTIONS_BASE)
for i in range(NR_OF_EXCEPTIONS):
add = i * 0x100
- exc[add:add + 4] = ((EXCEPTIONS_JUMP - add) >> 2).to_bytes(
- 4, byteorder='little')
+ exc[add : add + 4] = ((EXCEPTIONS_JUMP - add) >> 2).to_bytes(
+ 4, byteorder="little"
+ )
exc.close()
def assert_deassert_reset(ass):
with open("/dev/mem", "w+b") as f:
- r_cpucfg = mmap.mmap(f.fileno(),
- length=R_CPU_CFG_SIZE,
- offset=R_CPU_CFG_PAGE_BASE)
+ r_cpucfg = mmap.mmap(
+ f.fileno(), length=R_CPU_CFG_SIZE, offset=R_CPU_CFG_PAGE_BASE
+ )
if ass:
r_cpucfg[R_CPU_CFG_OFFSET] &= ~0x01
if r_cpucfg[R_CPU_CFG_OFFSET] & 0x01:
@@ -68,7 +67,7 @@ def write_file(filename):
print("Writing file to SRAM A2")
with open("/dev/mem", "w+b") as f:
sram_a2 = mmap.mmap(f.fileno(), length=FW_SIZE, offset=FW_BASE)
- sram_a2[0:len(data)] = data
+ sram_a2[0 : len(data)] = data
sram_a2.close()
diff --git a/scripts/flash_usb.py b/scripts/flash_usb.py
index e290f7f3..99e291fc 100755
--- a/scripts/flash_usb.py
+++ b/scripts/flash_usb.py
@@ -6,35 +6,39 @@
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os, re, subprocess, optparse, time, fcntl, termios, struct
+
class error(Exception):
pass
+
# Attempt to enter bootloader via 1200 baud request
def enter_bootloader(device):
try:
- f = open(device, 'rb')
+ f = open(device, "rb")
fd = f.fileno()
- fcntl.ioctl(fd, termios.TIOCMBIS, struct.pack('I', termios.TIOCM_DTR))
+ fcntl.ioctl(fd, termios.TIOCMBIS, struct.pack("I", termios.TIOCM_DTR))
t = termios.tcgetattr(fd)
t[4] = t[5] = termios.B1200
sys.stderr.write("Entering bootloader on %s\n" % (device,))
termios.tcsetattr(fd, termios.TCSANOW, t)
- fcntl.ioctl(fd, termios.TIOCMBIC, struct.pack('I', termios.TIOCM_DTR))
+ fcntl.ioctl(fd, termios.TIOCMBIC, struct.pack("I", termios.TIOCM_DTR))
f.close()
except (IOError, OSError) as e:
pass
+
# Translate a serial device name to a stable serial name in /dev/serial/by-path/
def translate_serial_to_tty(device):
ttyname = os.path.realpath(device)
- if not os.path.exists('/dev/serial/by-path/'):
+ if not os.path.exists("/dev/serial/by-path/"):
raise error("Unable to find serial 'by-path' folder")
- for fname in os.listdir('/dev/serial/by-path/'):
- fname = '/dev/serial/by-path/' + fname
+ for fname in os.listdir("/dev/serial/by-path/"):
+ fname = "/dev/serial/by-path/" + fname
if os.path.realpath(fname) == ttyname:
return ttyname, fname
return ttyname, ttyname
+
# Translate a serial device name to a usb path (suitable for dfu-util)
def translate_serial_to_usb_path(device):
realdev = os.path.realpath(device)
@@ -50,9 +54,10 @@ def translate_serial_to_usb_path(device):
devpath = os.path.realpath("/sys/class/tty/%s/device" % (fname,))
return m.group("path"), devpath
+
# Wait for a given path to appear
def wait_path(path, alt_path=None):
- time.sleep(.100)
+ time.sleep(0.100)
start_alt_path = None
end_time = time.time() + 4.0
while 1:
@@ -67,13 +72,14 @@ def wait_path(path, alt_path=None):
start_alt_path = cur_time
continue
if cur_time >= start_alt_path + 0.300:
- sys.stderr.write("Device reconnect on alt path %s\n" % (
- alt_path,))
+ sys.stderr.write("Device reconnect on alt path %s\n" % (alt_path,))
return alt_path
if cur_time > end_time:
return path
-CANBOOT_ID ="1d50:6177"
+
+CANBOOT_ID = "1d50:6177"
+
def detect_canboot(devpath):
usbdir = os.path.dirname(devpath)
@@ -87,6 +93,7 @@ def detect_canboot(devpath):
usbid = "%s:%s" % (vid, pid)
return usbid == CANBOOT_ID
+
def call_flashcan(device, binfile):
try:
import serial
@@ -97,18 +104,19 @@ def call_flashcan(device, binfile):
" %s -m pip install pyserial\n\n" % (sys.executable,)
)
sys.exit(-1)
- args = [sys.executable, "lib/canboot/flash_can.py", "-d",
- device, "-f", binfile]
- sys.stderr.write(" ".join(args) + '\n\n')
+ args = [sys.executable, "lib/canboot/flash_can.py", "-d", device, "-f", binfile]
+ sys.stderr.write(" ".join(args) + "\n\n")
res = subprocess.call(args)
if res != 0:
sys.stderr.write("Error running flash_can.py\n")
sys.exit(-1)
+
def flash_canboot(options, binfile):
ttyname, pathname = translate_serial_to_tty(options.device)
call_flashcan(pathname, binfile)
+
# Flash via a call to bossac
def flash_bossac(device, binfile, extra_flags=[]):
ttyname, pathname = translate_serial_to_tty(device)
@@ -116,7 +124,7 @@ def flash_bossac(device, binfile, extra_flags=[]):
pathname = wait_path(pathname, ttyname)
baseargs = ["lib/bossac/bin/bossac", "-U", "-p", pathname]
args = baseargs + extra_flags + ["-w", binfile, "-v"]
- sys.stderr.write(" ".join(args) + '\n\n')
+ sys.stderr.write(" ".join(args) + "\n\n")
res = subprocess.call(args)
if res != 0:
raise error("Error running bossac")
@@ -130,21 +138,23 @@ def flash_bossac(device, binfile, extra_flags=[]):
except subprocess.CalledProcessError as e:
pass
+
# Invoke the dfu-util program
def call_dfuutil(flags, binfile, sudo):
args = ["dfu-util"] + flags + ["-D", binfile]
if sudo:
args.insert(0, "sudo")
- sys.stderr.write(" ".join(args) + '\n\n')
+ sys.stderr.write(" ".join(args) + "\n\n")
res = subprocess.call(args)
if res != 0:
raise error("Error running dfu-util")
+
# Flash via a call to dfu-util
def flash_dfuutil(device, binfile, extra_flags=[], sudo=True):
hexfmt_r = re.compile(r"^[a-fA-F0-9]{4}:[a-fA-F0-9]{4}$")
if hexfmt_r.match(device.strip()):
- call_dfuutil(["-d", ","+device.strip()] + extra_flags, binfile, sudo)
+ call_dfuutil(["-d", "," + device.strip()] + extra_flags, binfile, sudo)
return
ttyname, serbypath = translate_serial_to_tty(device)
buspath, devpath = translate_serial_to_usb_path(device)
@@ -155,15 +165,17 @@ def flash_dfuutil(device, binfile, extra_flags=[], sudo=True):
else:
call_dfuutil(["-p", buspath] + extra_flags, binfile, sudo)
+
def call_hidflash(binfile, sudo):
args = ["lib/hidflash/hid-flash", binfile]
if sudo:
args.insert(0, "sudo")
- sys.stderr.write(" ".join(args) + '\n\n')
+ sys.stderr.write(" ".join(args) + "\n\n")
res = subprocess.call(args)
if res != 0:
raise error("Error running hid-flash")
+
# Flash via call to hid-flash
def flash_hidflash(device, binfile, sudo=True):
hexfmt_r = re.compile(r"^[a-fA-F0-9]{4}:[a-fA-F0-9]{4}$")
@@ -179,6 +191,7 @@ def flash_hidflash(device, binfile, sudo=True):
else:
call_hidflash(binfile, sudo)
+
# Call Klipper modified "picoboot"
def call_picoboot(bus, addr, binfile, sudo):
args = ["lib/rp2040_flash/rp2040_flash", binfile]
@@ -186,11 +199,12 @@ def call_picoboot(bus, addr, binfile, sudo):
args.extend([bus, addr])
if sudo:
args.insert(0, "sudo")
- sys.stderr.write(" ".join(args) + '\n\n')
+ sys.stderr.write(" ".join(args) + "\n\n")
res = subprocess.call(args)
if res != 0:
raise error("Error running rp2040_flash")
+
# Flash via Klipper modified "picoboot"
def flash_picoboot(device, binfile, sudo):
ttyname, serbypath = translate_serial_to_tty(device)
@@ -213,31 +227,32 @@ def flash_picoboot(device, binfile, sudo):
# Device specific helpers
######################################################################
+
def flash_atsam3(options, binfile):
try:
flash_bossac(options.device, binfile, ["-e", "-b"])
except error as e:
- sys.stderr.write("Failed to flash to %s: %s\n" % (
- options.device, str(e)))
+ sys.stderr.write("Failed to flash to %s: %s\n" % (options.device, str(e)))
sys.exit(-1)
+
def flash_atsam4(options, binfile):
try:
flash_bossac(options.device, binfile, ["-e"])
except error as e:
- sys.stderr.write("Failed to flash to %s: %s\n" % (
- options.device, str(e)))
+ sys.stderr.write("Failed to flash to %s: %s\n" % (options.device, str(e)))
sys.exit(-1)
+
def flash_atsamd(options, binfile):
extra_flags = ["--offset=0x%x" % (options.start,), "-b", "-R"]
try:
flash_bossac(options.device, binfile, extra_flags)
except error as e:
- sys.stderr.write("Failed to flash to %s: %s\n" % (
- options.device, str(e)))
+ sys.stderr.write("Failed to flash to %s: %s\n" % (options.device, str(e)))
sys.exit(-1)
+
SMOOTHIE_HELP = """
Failed to flash to %s: %s
@@ -256,6 +271,7 @@ and then restart the Smoothieboard with that SD card.
"""
+
def flash_lpc176x(options, binfile):
try:
flash_dfuutil(options.device, binfile, [], options.sudo)
@@ -263,6 +279,7 @@ def flash_lpc176x(options, binfile):
sys.stderr.write(SMOOTHIE_HELP % (options.device, str(e)))
sys.exit(-1)
+
STM32F1_HELP = """
Failed to flash to %s: %s
@@ -277,18 +294,18 @@ If attempting to flash via 3.3V serial, then use:
"""
+
def flash_stm32f1(options, binfile):
try:
if options.start == 0x8000800:
flash_hidflash(options.device, binfile, options.sudo)
else:
- flash_dfuutil(options.device, binfile, ["-R", "-a", "2"],
- options.sudo)
+ flash_dfuutil(options.device, binfile, ["-R", "-a", "2"], options.sudo)
except error as e:
- sys.stderr.write(STM32F1_HELP % (
- options.device, str(e), options.device))
+ sys.stderr.write(STM32F1_HELP % (options.device, str(e), options.device))
sys.exit(-1)
+
STM32F4_HELP = """
Failed to flash to %s: %s
@@ -303,19 +320,21 @@ If attempting to flash via 3.3V serial, then use:
"""
+
def flash_stm32f4(options, binfile):
start = "0x%x:leave" % (options.start,)
try:
if options.start == 0x8004000:
flash_hidflash(options.device, binfile, options.sudo)
else:
- flash_dfuutil(options.device, binfile,
- ["-R", "-a", "0", "-s", start], options.sudo)
+ flash_dfuutil(
+ options.device, binfile, ["-R", "-a", "0", "-s", start], options.sudo
+ )
except error as e:
- sys.stderr.write(STM32F4_HELP % (
- options.device, str(e), options.device))
+ sys.stderr.write(STM32F4_HELP % (options.device, str(e), options.device))
sys.exit(-1)
+
RP2040_HELP = """
Failed to flash to %s: %s
@@ -329,9 +348,10 @@ device as a usb drive, and copy klipper.uf2 to the device.
"""
+
def flash_rp2040(options, binfile):
rawdev = "2e8a:0003"
- if options.mcutype == 'rp2350':
+ if options.mcutype == "rp2350":
rawdev = "2e8a:000f"
try:
if options.device.lower() == rawdev:
@@ -342,15 +362,25 @@ def flash_rp2040(options, binfile):
sys.stderr.write(RP2040_HELP % (options.device, str(e), rawdev))
sys.exit(-1)
+
MCUTYPES = {
- 'sam3': flash_atsam3, 'sam4': flash_atsam4, 'same70': flash_atsam4,
- 'samd': flash_atsamd, 'same5': flash_atsamd,
- 'lpc176': flash_lpc176x, 'stm32f103': flash_stm32f1,
- 'stm32f4': flash_stm32f4, 'stm32f042': flash_stm32f4,
- 'stm32f070': flash_stm32f4, 'stm32f072': flash_stm32f4,
- 'stm32g0b1': flash_stm32f4, 'stm32f7': flash_stm32f4,
- 'stm32h7': flash_stm32f4, 'stm32l4': flash_stm32f4,
- 'stm32g4': flash_stm32f4, 'rp2': flash_rp2040,
+ "sam3": flash_atsam3,
+ "sam4": flash_atsam4,
+ "same70": flash_atsam4,
+ "samd": flash_atsamd,
+ "same5": flash_atsamd,
+ "lpc176": flash_lpc176x,
+ "stm32f103": flash_stm32f1,
+ "stm32f4": flash_stm32f4,
+ "stm32f042": flash_stm32f4,
+ "stm32f070": flash_stm32f4,
+ "stm32f072": flash_stm32f4,
+ "stm32g0b1": flash_stm32f4,
+ "stm32f7": flash_stm32f4,
+ "stm32h7": flash_stm32f4,
+ "stm32l4": flash_stm32f4,
+ "stm32g4": flash_stm32f4,
+ "rp2": flash_rp2040,
}
@@ -358,17 +388,26 @@ MCUTYPES = {
# Startup
######################################################################
+
def main():
usage = "%prog [options] -t <type> -d <device> <klipper.bin>"
opts = optparse.OptionParser(usage)
- opts.add_option("-t", "--type", type="string", dest="mcutype",
- help="micro-controller type")
- opts.add_option("-d", "--device", type="string", dest="device",
- help="serial port device")
- opts.add_option("-s", "--start", type="int", dest="start",
- help="start address in flash")
- opts.add_option("--no-sudo", action="store_false", dest="sudo",
- default=True, help="do not run sudo")
+ opts.add_option(
+ "-t", "--type", type="string", dest="mcutype", help="micro-controller type"
+ )
+ opts.add_option(
+ "-d", "--device", type="string", dest="device", help="serial port device"
+ )
+ opts.add_option(
+ "-s", "--start", type="int", dest="start", help="start address in flash"
+ )
+ opts.add_option(
+ "--no-sudo",
+ action="store_false",
+ dest="sudo",
+ default=True,
+ help="do not run sudo",
+ )
options, args = opts.parse_args()
if len(args) != 1:
opts.error("Incorrect number of arguments")
@@ -379,12 +418,12 @@ def main():
flash_func = func
break
if flash_func is None:
- opts.error("USB flashing is not supported for MCU '%s'"
- % (options.mcutype,))
+ opts.error("USB flashing is not supported for MCU '%s'" % (options.mcutype,))
if not options.device:
sys.stderr.write("\nPlease specify FLASH_DEVICE\n\n")
sys.exit(-1)
flash_func(options, args[0])
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/graph_accelerometer.py b/scripts/graph_accelerometer.py
index 84b31311..7286c041 100755
--- a/scripts/graph_accelerometer.py
+++ b/scripts/graph_accelerometer.py
@@ -8,59 +8,73 @@
import importlib, optparse, os, sys
from textwrap import wrap
import numpy as np, matplotlib
-sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
- '..', 'klippy'))
-shaper_calibrate = importlib.import_module('.shaper_calibrate', 'extras')
-MAX_TITLE_LENGTH=65
+sys.path.append(
+ os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "klippy")
+)
+shaper_calibrate = importlib.import_module(".shaper_calibrate", "extras")
+
+MAX_TITLE_LENGTH = 65
+
def parse_log(logname, opts):
with open(logname) as f:
for header in f:
- if header.startswith('#'):
+ if header.startswith("#"):
continue
- if header.startswith('freq,psd_x,psd_y,psd_z,psd_xyz'):
+ if header.startswith("freq,psd_x,psd_y,psd_z,psd_xyz"):
# Processed power spectral density file
break
# Raw accelerometer data
- return np.loadtxt(logname, comments='#', delimiter=',')
+ return np.loadtxt(logname, comments="#", delimiter=",")
# Parse power spectral density data
- data = np.loadtxt(logname, skiprows=1, comments='#', delimiter=',')
+ data = np.loadtxt(logname, skiprows=1, comments="#", delimiter=",")
calibration_data = shaper_calibrate.CalibrationData(
- freq_bins=data[:,0], psd_sum=data[:,4],
- psd_x=data[:,1], psd_y=data[:,2], psd_z=data[:,3])
+ freq_bins=data[:, 0],
+ psd_sum=data[:, 4],
+ psd_x=data[:, 1],
+ psd_y=data[:, 2],
+ psd_z=data[:, 3],
+ )
calibration_data.set_numpy(np)
return calibration_data
+
######################################################################
# Raw accelerometer graphing
######################################################################
+
def plot_accel(datas, lognames):
fig, axes = matplotlib.pyplot.subplots(nrows=3, sharex=True)
- axes[0].set_title("\n".join(wrap(
- "Accelerometer data (%s)" % (', '.join(lognames)), MAX_TITLE_LENGTH)))
- axis_names = ['x', 'y', 'z']
+ axes[0].set_title(
+ "\n".join(
+ wrap("Accelerometer data (%s)" % (", ".join(lognames)), MAX_TITLE_LENGTH)
+ )
+ )
+ axis_names = ["x", "y", "z"]
for data, logname in zip(datas, lognames):
if isinstance(data, shaper_calibrate.CalibrationData):
- raise error("Cannot plot raw accelerometer data using the processed"
- " resonances, raw_data input is required")
+ raise error(
+ "Cannot plot raw accelerometer data using the processed"
+ " resonances, raw_data input is required"
+ )
first_time = data[0, 0]
- times = data[:,0] - first_time
+ times = data[:, 0] - first_time
for i in range(len(axis_names)):
- avg = data[:,i+1].mean()
- adata = data[:,i+1] - data[:,i+1].mean()
+ avg = data[:, i + 1].mean()
+ adata = data[:, i + 1] - data[:, i + 1].mean()
ax = axes[i]
- label = '\n'.join(wrap(logname, 60)) + ' (%+.3f mm/s^2)' % (-avg,)
+ label = "\n".join(wrap(logname, 60)) + " (%+.3f mm/s^2)" % (-avg,)
ax.plot(times, adata, alpha=0.8, label=label)
- axes[-1].set_xlabel('Time (s)')
+ axes[-1].set_xlabel("Time (s)")
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
+ fontP.set_size("x-small")
for i in range(len(axis_names)):
ax = axes[i]
ax.grid(True)
- ax.legend(loc='best', prop=fontP)
- ax.set_ylabel('%s accel' % (axis_names[i],))
+ ax.legend(loc="best", prop=fontP)
+ ax.set_ylabel("%s accel" % (axis_names[i],))
fig.tight_layout()
return fig
@@ -69,6 +83,7 @@ def plot_accel(datas, lognames):
# Frequency graphing
######################################################################
+
# Calculate estimated "power spectral density"
def calc_freq_response(data, max_freq):
if isinstance(data, shaper_calibrate.CalibrationData):
@@ -76,29 +91,41 @@ def calc_freq_response(data, max_freq):
helper = shaper_calibrate.ShaperCalibrate(printer=None)
return helper.process_accelerometer_data(data)
+
def calc_specgram(data, axis):
if isinstance(data, shaper_calibrate.CalibrationData):
- raise error("Cannot calculate the spectrogram using the processed"
- " resonances, raw_data input is required")
+ raise error(
+ "Cannot calculate the spectrogram using the processed"
+ " resonances, raw_data input is required"
+ )
N = data.shape[0]
- Fs = N / (data[-1,0] - data[0,0])
+ Fs = N / (data[-1, 0] - data[0, 0])
# Round up to a power of 2 for faster FFT
- M = 1 << int(.5 * Fs - 1).bit_length()
- window = np.kaiser(M, 6.)
+ M = 1 << int(0.5 * Fs - 1).bit_length()
+ window = np.kaiser(M, 6.0)
+
def _specgram(x):
return matplotlib.mlab.specgram(
- x, Fs=Fs, NFFT=M, noverlap=M//2, window=window,
- mode='psd', detrend='mean', scale_by_freq=False)
+ x,
+ Fs=Fs,
+ NFFT=M,
+ noverlap=M // 2,
+ window=window,
+ mode="psd",
+ detrend="mean",
+ scale_by_freq=False,
+ )
- d = {'x': data[:,1], 'y': data[:,2], 'z': data[:,3]}
- if axis != 'all':
+ d = {"x": data[:, 1], "y": data[:, 2], "z": data[:, 3]}
+ if axis != "all":
pdata, bins, t = _specgram(d[axis])
else:
- pdata, bins, t = _specgram(d['x'])
- for ax in 'yz':
+ pdata, bins, t = _specgram(d["x"])
+ for ax in "yz":
pdata += _specgram(d[ax])[0]
return pdata, bins, t
+
def plot_frequency(datas, lognames, max_freq):
calibration_data = calc_freq_response(datas[0], max_freq)
for data in datas[1:]:
@@ -111,33 +138,37 @@ def plot_frequency(datas, lognames, max_freq):
freqs = freqs[freqs <= max_freq]
fig, ax = matplotlib.pyplot.subplots()
- ax.set_title("\n".join(wrap(
- "Frequency response (%s)" % (', '.join(lognames)), MAX_TITLE_LENGTH)))
- ax.set_xlabel('Frequency (Hz)')
- ax.set_ylabel('Power spectral density')
+ ax.set_title(
+ "\n".join(
+ wrap("Frequency response (%s)" % (", ".join(lognames)), MAX_TITLE_LENGTH)
+ )
+ )
+ ax.set_xlabel("Frequency (Hz)")
+ ax.set_ylabel("Power spectral density")
- ax.plot(freqs, psd, label='X+Y+Z', alpha=0.6)
- ax.plot(freqs, px, label='X', alpha=0.6)
- ax.plot(freqs, py, label='Y', alpha=0.6)
- ax.plot(freqs, pz, label='Z', alpha=0.6)
+ ax.plot(freqs, psd, label="X+Y+Z", alpha=0.6)
+ ax.plot(freqs, px, label="X", alpha=0.6)
+ ax.plot(freqs, py, label="Y", alpha=0.6)
+ ax.plot(freqs, pz, label="Z", alpha=0.6)
ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
- ax.grid(which='major', color='grey')
- ax.grid(which='minor', color='lightgrey')
- ax.ticklabel_format(axis='y', style='scientific', scilimits=(0,0))
+ ax.grid(which="major", color="grey")
+ ax.grid(which="minor", color="lightgrey")
+ ax.ticklabel_format(axis="y", style="scientific", scilimits=(0, 0))
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
- ax.legend(loc='best', prop=fontP)
+ fontP.set_size("x-small")
+ ax.legend(loc="best", prop=fontP)
fig.tight_layout()
return fig
+
def plot_compare_frequency(datas, lognames, max_freq, axis):
fig, ax = matplotlib.pyplot.subplots()
- ax.set_title('Frequency responses comparison')
- ax.set_xlabel('Frequency (Hz)')
- ax.set_ylabel('Power spectral density')
+ ax.set_title("Frequency responses comparison")
+ ax.set_xlabel("Frequency (Hz)")
+ ax.set_ylabel("Power spectral density")
for data, logname in zip(datas, lognames):
calibration_data = calc_freq_response(data, max_freq)
@@ -148,32 +179,36 @@ def plot_compare_frequency(datas, lognames, max_freq, axis):
ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
- ax.grid(which='major', color='grey')
- ax.grid(which='minor', color='lightgrey')
+ ax.grid(which="major", color="grey")
+ ax.grid(which="minor", color="lightgrey")
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
- ax.legend(loc='best', prop=fontP)
+ fontP.set_size("x-small")
+ ax.legend(loc="best", prop=fontP)
fig.tight_layout()
return fig
+
# Plot data in a "spectrogram colormap"
def plot_specgram(data, logname, max_freq, axis):
pdata, bins, t = calc_specgram(data, axis)
fig, ax = matplotlib.pyplot.subplots()
- ax.set_title("\n".join(wrap("Spectrogram %s (%s)" % (axis, logname),
- MAX_TITLE_LENGTH)))
+ ax.set_title(
+ "\n".join(wrap("Spectrogram %s (%s)" % (axis, logname), MAX_TITLE_LENGTH))
+ )
ax.pcolormesh(t, bins, pdata, norm=matplotlib.colors.LogNorm())
- ax.set_ylim([0., max_freq])
- ax.set_ylabel('frequency (hz)')
- ax.set_xlabel('Time (s)')
+ ax.set_ylim([0.0, max_freq])
+ ax.set_ylabel("frequency (hz)")
+ ax.set_xlabel("Time (s)")
fig.tight_layout()
return fig
+
######################################################################
# CSV output
######################################################################
+
def write_frequency_response(datas, output):
helper = shaper_calibrate.ShaperCalibrate(printer=None)
calibration_data = helper.process_accelerometer_data(datas[0])
@@ -181,6 +216,7 @@ def write_frequency_response(datas, output):
calibration_data.add_data(helper.process_accelerometer_data(data))
helper.save_calibration_data(output, calibration_data)
+
def write_specgram(psd, freq_bins, time, output):
M = freq_bins.shape[0]
with open(output, "w") as csvfile:
@@ -190,46 +226,76 @@ def write_specgram(psd, freq_bins, time, output):
csvfile.write("\n")
for i in range(M):
csvfile.write("%.1f" % (freq_bins[i],))
- for value in psd[i,:]:
+ for value in psd[i, :]:
csvfile.write(",%.6e" % (value,))
csvfile.write("\n")
+
######################################################################
# Startup
######################################################################
+
def is_csv_output(output):
- return output and os.path.splitext(output)[1].lower() == '.csv'
+ return output and os.path.splitext(output)[1].lower() == ".csv"
+
def setup_matplotlib(output):
global matplotlib
if is_csv_output(output):
# Only mlab may be necessary with CSV output
import matplotlib.mlab
+
return
if output:
- matplotlib.rcParams.update({'figure.autolayout': True})
- matplotlib.use('Agg')
+ matplotlib.rcParams.update({"figure.autolayout": True})
+ matplotlib.use("Agg")
import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
import matplotlib.ticker
+
def main():
# Parse command-line arguments
usage = "%prog [options] <raw logs>"
opts = optparse.OptionParser(usage)
- opts.add_option("-o", "--output", type="string", dest="output",
- default=None, help="filename of output graph")
- opts.add_option("-f", "--max_freq", type="float", default=200.,
- help="maximum frequency to graph")
- opts.add_option("-r", "--raw", action="store_true",
- help="graph raw accelerometer data")
- opts.add_option("-c", "--compare", action="store_true",
- help="graph comparison of power spectral density "
- "between different accelerometer data files")
- opts.add_option("-s", "--specgram", action="store_true",
- help="graph spectrogram of accelerometer data")
- opts.add_option("-a", type="string", dest="axis", default="all",
- help="axis to graph (one of 'all', 'x', 'y', or 'z')")
+ opts.add_option(
+ "-o",
+ "--output",
+ type="string",
+ dest="output",
+ default=None,
+ help="filename of output graph",
+ )
+ opts.add_option(
+ "-f",
+ "--max_freq",
+ type="float",
+ default=200.0,
+ help="maximum frequency to graph",
+ )
+ opts.add_option(
+ "-r", "--raw", action="store_true", help="graph raw accelerometer data"
+ )
+ opts.add_option(
+ "-c",
+ "--compare",
+ action="store_true",
+ help="graph comparison of power spectral density "
+ "between different accelerometer data files",
+ )
+ opts.add_option(
+ "-s",
+ "--specgram",
+ action="store_true",
+ help="graph spectrogram of accelerometer data",
+ )
+ opts.add_option(
+ "-a",
+ type="string",
+ dest="axis",
+ default="all",
+ help="axis to graph (one of 'all', 'x', 'y', or 'z')",
+ )
options, args = opts.parse_args()
if len(args) < 1:
opts.error("Incorrect number of arguments")
@@ -261,8 +327,7 @@ def main():
opts.error("Only 1 input is supported in specgram mode")
fig = plot_specgram(datas[0], args[0], options.max_freq, options.axis)
elif options.compare:
- fig = plot_compare_frequency(datas, args, options.max_freq,
- options.axis)
+ fig = plot_compare_frequency(datas, args, options.max_freq, options.axis)
else:
fig = plot_frequency(datas, args, options.max_freq)
@@ -273,5 +338,6 @@ def main():
fig.set_size_inches(8, 6)
fig.savefig(options.output)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/graph_extruder.py b/scripts/graph_extruder.py
index 6f31ed3c..7475c6aa 100755
--- a/scripts/graph_extruder.py
+++ b/scripts/graph_extruder.py
@@ -7,8 +7,8 @@
import math, optparse, datetime
import matplotlib
-SEG_TIME = .000100
-INV_SEG_TIME = 1. / SEG_TIME
+SEG_TIME = 0.000100
+INV_SEG_TIME = 1.0 / SEG_TIME
######################################################################
@@ -17,27 +17,32 @@ INV_SEG_TIME = 1. / SEG_TIME
# List of moves: [(start_v, end_v, move_t), ...]
Moves = [
- (0., 0., .100),
- (0., 100., None), (100., 100., .200), (100., 60., None),
- (60., 100., None), (100., 100., .200), (100., 0., None),
- (0., 0., .300)
+ (0.0, 0.0, 0.100),
+ (0.0, 100.0, None),
+ (100.0, 100.0, 0.200),
+ (100.0, 60.0, None),
+ (60.0, 100.0, None),
+ (100.0, 100.0, 0.200),
+ (100.0, 0.0, None),
+ (0.0, 0.0, 0.300),
]
-EXTRUDE_R = (.4 * .4 * .75) / (math.pi * (1.75 / 2.)**2)
-ACCEL = 3000. * EXTRUDE_R
+EXTRUDE_R = (0.4 * 0.4 * 0.75) / (math.pi * (1.75 / 2.0) ** 2)
+ACCEL = 3000.0 * EXTRUDE_R
+
def gen_positions():
out = []
- start_d = start_t = t = 0.
+ start_d = start_t = t = 0.0
for start_v, end_v, move_t in Moves:
start_v *= EXTRUDE_R
end_v *= EXTRUDE_R
if move_t is None:
move_t = abs(end_v - start_v) / ACCEL
- half_accel = 0.
+ half_accel = 0.0
if end_v > start_v:
- half_accel = .5 * ACCEL
+ half_accel = 0.5 * ACCEL
elif start_v > end_v:
- half_accel = -.5 * ACCEL
+ half_accel = -0.5 * ACCEL
end_t = start_t + move_t
while t <= end_t:
rel_t = t - start_t
@@ -54,15 +59,18 @@ def gen_positions():
MARGIN_TIME = 0.050
+
def time_to_index(t):
- return int(t * INV_SEG_TIME + .5)
+ return int(t * INV_SEG_TIME + 0.5)
+
def indexes(positions):
drop = time_to_index(MARGIN_TIME)
- return range(drop, len(positions)-drop)
+ return range(drop, len(positions) - drop)
+
def trim_lists(*lists):
- keep = len(lists[0]) - time_to_index(2. * MARGIN_TIME)
+ keep = len(lists[0]) - time_to_index(2.0 * MARGIN_TIME)
for l in lists:
del l[keep:]
@@ -71,36 +79,42 @@ def trim_lists(*lists):
# Common data filters
######################################################################
+
# Generate estimated first order derivative
def gen_deriv(data):
- return [0.] + [(data[i+1] - data[i]) * INV_SEG_TIME
- for i in range(len(data)-1)]
+ return [0.0] + [
+ (data[i + 1] - data[i]) * INV_SEG_TIME for i in range(len(data) - 1)
+ ]
+
# Simple average between two points smooth_time away
def calc_average(positions, smooth_time):
- offset = time_to_index(smooth_time * .5)
- out = [0.] * len(positions)
+ offset = time_to_index(smooth_time * 0.5)
+ out = [0.0] * len(positions)
for i in indexes(positions):
- out[i] = .5 * (positions[i-offset] + positions[i+offset])
+ out[i] = 0.5 * (positions[i - offset] + positions[i + offset])
return out
+
# Average (via integration) of smooth_time range
def calc_smooth(positions, smooth_time):
- offset = time_to_index(smooth_time * .5)
- weight = 1. / (2*offset - 1)
- out = [0.] * len(positions)
+ offset = time_to_index(smooth_time * 0.5)
+ weight = 1.0 / (2 * offset - 1)
+ out = [0.0] * len(positions)
for i in indexes(positions):
- out[i] = sum(positions[i-offset+1:i+offset]) * weight
+ out[i] = sum(positions[i - offset + 1 : i + offset]) * weight
return out
+
# Time weighted average (via integration) of smooth_time range
def calc_weighted(positions, smooth_time):
- offset = time_to_index(smooth_time * .5)
- weight = 1. / offset**2
- out = [0.] * len(positions)
+ offset = time_to_index(smooth_time * 0.5)
+ weight = 1.0 / offset**2
+ out = [0.0] * len(positions)
for i in indexes(positions):
- weighted_data = [positions[j] * (offset - abs(j-i))
- for j in range(i-offset, i+offset)]
+ weighted_data = [
+ positions[j] * (offset - abs(j - i)) for j in range(i - offset, i + offset)
+ ]
out[i] = sum(weighted_data) * weight
return out
@@ -109,17 +123,19 @@ def calc_weighted(positions, smooth_time):
# Pressure advance
######################################################################
-SMOOTH_TIME = .040
-PRESSURE_ADVANCE = .045
+SMOOTH_TIME = 0.040
+PRESSURE_ADVANCE = 0.045
+
# Calculate raw pressure advance positions
def calc_pa_raw(positions):
pa = PRESSURE_ADVANCE * INV_SEG_TIME
- out = [0.] * len(positions)
+ out = [0.0] * len(positions)
for i in indexes(positions):
- out[i] = positions[i] + pa * (positions[i+1] - positions[i])
+ out[i] = positions[i] + pa * (positions[i + 1] - positions[i])
return out
+
# Pressure advance after smoothing
def calc_pa(positions):
return calc_weighted(calc_pa_raw(positions), SMOOTH_TIME)
@@ -129,6 +145,7 @@ def calc_pa(positions):
# Plotting and startup
######################################################################
+
def plot_motion():
# Nominal motion
positions = gen_positions()
@@ -142,38 +159,53 @@ def plot_motion():
sm_velocities = gen_deriv(sm_positions)
# Build plot
times = [SEG_TIME * i for i in range(len(positions))]
- trim_lists(times, velocities, accels,
- pa_positions, pa_velocities,
- sm_positions, sm_velocities)
+ trim_lists(
+ times,
+ velocities,
+ accels,
+ pa_positions,
+ pa_velocities,
+ sm_positions,
+ sm_velocities,
+ )
fig, ax1 = matplotlib.pyplot.subplots(nrows=1, sharex=True)
ax1.set_title("Extruder Velocity")
- ax1.set_ylabel('Velocity (mm/s)')
- pa_plot, = ax1.plot(times, pa_velocities, 'r',
- label='Pressure Advance', alpha=0.3)
- nom_plot, = ax1.plot(times, velocities, 'black', label='Nominal')
- sm_plot, = ax1.plot(times, sm_velocities, 'g', label='Smooth PA', alpha=0.9)
+ ax1.set_ylabel("Velocity (mm/s)")
+ (pa_plot,) = ax1.plot(
+ times, pa_velocities, "r", label="Pressure Advance", alpha=0.3
+ )
+ (nom_plot,) = ax1.plot(times, velocities, "black", label="Nominal")
+ (sm_plot,) = ax1.plot(times, sm_velocities, "g", label="Smooth PA", alpha=0.9)
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
- ax1.legend(handles=[nom_plot, pa_plot, sm_plot], loc='best', prop=fontP)
- ax1.set_xlabel('Time (s)')
+ fontP.set_size("x-small")
+ ax1.legend(handles=[nom_plot, pa_plot, sm_plot], loc="best", prop=fontP)
+ ax1.set_xlabel("Time (s)")
ax1.grid(True)
fig.tight_layout()
return fig
+
def setup_matplotlib(output_to_file):
global matplotlib
if output_to_file:
- matplotlib.rcParams.update({'figure.autolayout': True})
- matplotlib.use('Agg')
+ matplotlib.rcParams.update({"figure.autolayout": True})
+ matplotlib.use("Agg")
import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
import matplotlib.ticker
+
def main():
# Parse command-line arguments
usage = "%prog [options]"
opts = optparse.OptionParser(usage)
- opts.add_option("-o", "--output", type="string", dest="output",
- default=None, help="filename of output graph")
+ opts.add_option(
+ "-o",
+ "--output",
+ type="string",
+ dest="output",
+ default=None,
+ help="filename of output graph",
+ )
options, args = opts.parse_args()
if len(args) != 0:
opts.error("Incorrect number of arguments")
@@ -189,5 +221,6 @@ def main():
fig.set_size_inches(6, 2.5)
fig.savefig(options.output)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/graph_mesh.py b/scripts/graph_mesh.py
index 3a331e5d..a62f9df2 100755
--- a/scripts/graph_mesh.py
+++ b/scripts/graph_mesh.py
@@ -20,14 +20,14 @@ import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.animation as ani
-MESH_DUMP_REQUEST = json.dumps(
- {"id": 1, "method": "bed_mesh/dump_mesh"}
-)
+MESH_DUMP_REQUEST = json.dumps({"id": 1, "method": "bed_mesh/dump_mesh"})
+
def sock_error_exit(msg):
sys.stderr.write(msg + "\n")
sys.exit(-1)
+
def webhook_socket_create(uds_filename):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
while 1:
@@ -45,6 +45,7 @@ def webhook_socket_create(uds_filename):
print("Connected")
return sock
+
def process_message(msg):
try:
resp = json.loads(msg)
@@ -54,16 +55,14 @@ def process_message(msg):
return None
if "error" in resp:
err = resp["error"].get("message", "Unknown")
- sock_error_exit(
- "Error requesting mesh dump: %s" % (err,)
- )
+ sock_error_exit("Error requesting mesh dump: %s" % (err,))
return resp["result"]
def request_from_unixsocket(unix_sock_name):
print("Connecting to Unix Socket File '%s'" % (unix_sock_name,))
whsock = webhook_socket_create(unix_sock_name)
- whsock.settimeout(1.)
+ whsock.settimeout(1.0)
# send mesh query
whsock.send(MESH_DUMP_REQUEST.encode() + b"\x03")
sock_data = b""
@@ -84,11 +83,12 @@ def request_from_unixsocket(unix_sock_name):
result = process_message(msg)
if result is not None:
return result
- time.sleep(.1)
+ time.sleep(0.1)
finally:
whsock.close()
sock_error_exit("Mesh dump request timed out")
+
def request_from_websocket(url):
print("Connecting to websocket url '%s'" % (url,))
try:
@@ -101,15 +101,16 @@ def request_from_websocket(url):
end_time = time.monotonic() + 20.0
while time.monotonic() < end_time:
try:
- msg = websocket.recv(10.)
+ msg = websocket.recv(10.0)
except TimeoutError:
continue
result = process_message(msg)
if result is not None:
return result
- time.sleep(.1)
+ time.sleep(0.1)
sock_error_exit("Mesh dump request timed out")
+
def request_mesh_data(input_name):
url_match = re.match(r"((?:https?)|(?:wss?))://(.+)", input_name.lower())
if url_match is None:
@@ -129,16 +130,21 @@ def request_mesh_data(input_name):
url = "%s://%s/klippysocket" % (scheme, host)
return request_from_websocket(url)
+
class PathAnimation:
instance = None
+
def __init__(self, artist, x_travel, y_travel):
self.travel_artist = artist
self.x_travel = x_travel
self.y_travel = y_travel
fig = plt.gcf()
self.animation = ani.FuncAnimation(
- fig=fig, func=self.update, frames=self.gen_path_position(),
- cache_frame_data=False, interval=60
+ fig=fig,
+ func=self.update,
+ frames=self.gen_path_position(),
+ cache_frame_data=False,
+ interval=60,
)
PathAnimation.instance = self
@@ -164,6 +170,7 @@ def _gen_mesh_coords(min_c, max_c, count):
dist = (max_c - min_c) / (count - 1)
return [min_c + i * dist for i in range(count)]
+
def _plot_path(travel_path, probed, diff, cmd_args):
x_travel, y_travel = np.array(travel_path).transpose()
x_probed, y_probed = np.array(probed).transpose()
@@ -183,6 +190,7 @@ def _plot_path(travel_path, probed, diff, cmd_args):
if cmd_args.animate and cmd_args.output is None:
PathAnimation(travel_line, x_travel, y_travel)
+
def _format_mesh_data(matrix, params):
min_pt = (params["min_x"], params["min_y"])
max_pt = (params["max_x"], params["max_y"])
@@ -192,6 +200,7 @@ def _format_mesh_data(matrix, params):
z = np.array(matrix)
return x, y, z
+
def _set_xy_limits(mesh_data, cmd_args):
if not cmd_args.scale_plot:
return
@@ -201,12 +210,14 @@ def _set_xy_limits(mesh_data, cmd_args):
ax.set_xlim((axis_min[0], axis_max[0]))
ax.set_ylim((axis_min[1], axis_max[1]))
+
def _plot_mesh(ax, matrix, params, cmap=cm.viridis, label=None):
x, y, z = _format_mesh_data(matrix, params)
surface = ax.plot_surface(x, y, z, cmap=cmap, label=label)
scale = max(abs(z.min()), abs(z.max())) * 3
return surface, scale
+
def plot_probe_points(mesh_data, cmd_args):
"""Plot original generated points"""
calibration = mesh_data["calibration"]
@@ -217,6 +228,7 @@ def plot_probe_points(mesh_data, cmd_args):
plt.plot(x, y, "b.")
_set_xy_limits(mesh_data, cmd_args)
+
def plot_probe_path(mesh_data, cmd_args):
"""Plot probe travel path"""
calibration = mesh_data["calibration"]
@@ -227,6 +239,7 @@ def plot_probe_path(mesh_data, cmd_args):
_plot_path(path_pts, path_pts[1:-1], diff, cmd_args)
_set_xy_limits(mesh_data, cmd_args)
+
def plot_rapid_path(mesh_data, cmd_args):
"""Plot rapid scan travel path"""
calibration = mesh_data["calibration"]
@@ -239,6 +252,7 @@ def plot_rapid_path(mesh_data, cmd_args):
_plot_path(rapid_path, probed, diff, cmd_args)
_set_xy_limits(mesh_data, cmd_args)
+
def plot_probed_matrix(mesh_data, cmd_args):
"""Plot probed Z values"""
ax = plt.subplot(projection="3d")
@@ -259,9 +273,10 @@ def plot_probed_matrix(mesh_data, cmd_args):
surface, scale = _plot_mesh(ax, matrix, params)
ax.set_title("Probed Mesh (%s)" % (name,))
ax.set(zlim=(-scale, scale))
- plt.gcf().colorbar(surface, shrink=.75)
+ plt.gcf().colorbar(surface, shrink=0.75)
_set_xy_limits(mesh_data, cmd_args)
+
def plot_mesh_matrix(mesh_data, cmd_args):
"""Plot mesh Z values"""
ax = plt.subplot(projection="3d")
@@ -274,9 +289,10 @@ def plot_mesh_matrix(mesh_data, cmd_args):
name = req_mesh["name"]
ax.set_title("Interpolated Mesh (%s)" % (name,))
ax.set(zlim=(-scale, scale))
- plt.gcf().colorbar(surface, shrink=.75)
+ plt.gcf().colorbar(surface, shrink=0.75)
_set_xy_limits(mesh_data, cmd_args)
+
def plot_overlay(mesh_data, cmd_args):
"""Plots the current probed mesh overlaid with a profile"""
ax = plt.subplot(projection="3d")
@@ -301,10 +317,11 @@ def plot_overlay(mesh_data, cmd_args):
ax.set_title("Probed Mesh Overlay")
scale = max(cur_scale, prof_scale)
ax.set(zlim=(-scale, scale))
- ax.legend(loc='best')
- plt.gcf().colorbar(prof_surf, shrink=.75)
+ ax.legend(loc="best")
+ plt.gcf().colorbar(prof_surf, shrink=0.75)
_set_xy_limits(mesh_data, cmd_args)
+
def plot_delta(mesh_data, cmd_args):
"""Plots the delta between current probed mesh and a profile"""
ax = plt.subplot(projection="3d")
@@ -327,9 +344,7 @@ def plot_delta(mesh_data, cmd_args):
pfields = ("x_count", "y_count", "min_x", "max_x", "min_y", "max_y")
for field in pfields:
if abs(prof_params[field] - cur_params[field]) >= 1e-6:
- raise Exception(
- "Values for field %s do not match, cant plot deviation"
- )
+ raise Exception("Values for field %s do not match, cant plot deviation")
delta = np.array(cur_matrix) - np.array(prof_matix)
surface, scale = _plot_mesh(ax, delta, cur_params)
ax.set(zlim=(-scale, scale))
@@ -347,12 +362,12 @@ PLOT_TYPES = {
"delta": plot_delta,
}
+
def print_types(cmd_args):
- typelist = [
- "%-10s%s" % (name, func.__doc__) for name, func in PLOT_TYPES.items()
- ]
+ typelist = ["%-10s%s" % (name, func.__doc__) for name, func in PLOT_TYPES.items()]
print("\n".join(typelist))
+
def plot_mesh_data(cmd_args):
mesh_data = request_mesh_data(cmd_args.input)
if cmd_args.output is not None:
@@ -368,6 +383,7 @@ def plot_mesh_data(cmd_args):
else:
fig.savefig(cmd_args.output)
+
def _check_path_unique(name, path):
path = np.array(path)
unique_pts, counts = np.unique(path, return_counts=True, axis=0)
@@ -380,6 +396,7 @@ def _check_path_unique(name, path):
% (name, coord)
)
+
def _analyze_mesh(name, mesh_axes):
print("\nAnalyzing Probed Mesh %s..." % (name,))
x, y, z = mesh_axes
@@ -389,8 +406,8 @@ def _analyze_mesh(name, mesh_axes):
print(
" Min Coord (%.2f, %.2f), Max Coord (%.2f, %.2f), "
- "Probe Count: (%d, %d)" %
- (x.min(), y.min(), x.max(), y.max(), len(z), len(z[0]))
+ "Probe Count: (%d, %d)"
+ % (x.min(), y.min(), x.max(), y.max(), len(z), len(z[0]))
)
print(
" Mesh range: min %.4f (%.2f, %.2f), max %.4f (%.2f, %.2f)"
@@ -398,6 +415,7 @@ def _analyze_mesh(name, mesh_axes):
)
print(" Mean: %.4f, Standard Deviation: %.4f" % (z.mean(), z.std()))
+
def _compare_mesh(name_a, name_b, mesh_a, mesh_b):
ax, ay, az = mesh_a
bx, by, bz = mesh_b
@@ -414,10 +432,21 @@ def _compare_mesh(name_a, name_b, mesh_a, mesh_b):
" Range: min %.4f (%.2f, %.2f), max %.4f (%.2f, %.2f)\n"
" Mean: %.6f, Standard Deviation: %.6f\n"
" Absolute Max: %.6f, Absolute Mean: %.6f"
- % (delta.min(), min_x, min_y, delta.max(), max_x, max_y,
- delta.mean(), delta.std(), abs_max, abs_mean)
+ % (
+ delta.min(),
+ min_x,
+ min_y,
+ delta.max(),
+ max_x,
+ max_y,
+ delta.mean(),
+ delta.std(),
+ abs_max,
+ abs_mean,
+ )
)
+
def analyze(cmd_args):
mesh_data = request_mesh_data(cmd_args.input)
print("Analyzing Travel Path...")
@@ -461,6 +490,7 @@ def analyze(cmd_args):
for prof_name, prof_axes in formatted_data.items():
_compare_mesh(name, prof_name, current_axes, prof_axes)
+
def dump_request(cmd_args):
mesh_data = request_mesh_data(cmd_args.input)
outfile = cmd_args.output
@@ -472,57 +502,60 @@ def dump_request(cmd_args):
with open(outfile, "w") as f:
f.write(json.dumps(mesh_data))
+
def main():
parser = argparse.ArgumentParser(description="Graph Bed Mesh Data")
sub_parsers = parser.add_subparsers()
- list_parser = sub_parsers.add_parser(
- "list", help="List available plot types"
- )
+ list_parser = sub_parsers.add_parser("list", help="List available plot types")
list_parser.set_defaults(func=print_types)
plot_parser = sub_parsers.add_parser("plot", help="Plot a specified type")
analyze_parser = sub_parsers.add_parser(
"analyze", help="Perform analysis on mesh data"
)
- dump_parser = sub_parsers.add_parser(
- "dump", help="Dump API response to json file"
- )
+ dump_parser = sub_parsers.add_parser("dump", help="Dump API response to json file")
plot_parser.add_argument(
- "-a", "--animate", action="store_true",
- help="Animate paths in live preview"
+ "-a", "--animate", action="store_true", help="Animate paths in live preview"
)
plot_parser.add_argument(
- "-s", "--scale-plot", action="store_true",
- help="Use axis limits reported by Klipper to scale plot X/Y"
+ "-s",
+ "--scale-plot",
+ action="store_true",
+ help="Use axis limits reported by Klipper to scale plot X/Y",
)
plot_parser.add_argument(
- "-p", "--profile-name", type=str, default=None,
- help="Optional name of a profile to plot for 'probedz'"
+ "-p",
+ "--profile-name",
+ type=str,
+ default=None,
+ help="Optional name of a profile to plot for 'probedz'",
)
plot_parser.add_argument(
- "-o", "--output", type=str, default=None,
- help="Output file path"
+ "-o", "--output", type=str, default=None, help="Output file path"
)
plot_parser.add_argument(
- "type", metavar="<plot type>", type=str, choices=PLOT_TYPES.keys(),
- help="Type of data to graph"
+ "type",
+ metavar="<plot type>",
+ type=str,
+ choices=PLOT_TYPES.keys(),
+ help="Type of data to graph",
)
plot_parser.add_argument(
- "input", metavar="<input>",
- help="Path/url to Klipper Socket or path to json file"
+ "input",
+ metavar="<input>",
+ help="Path/url to Klipper Socket or path to json file",
)
plot_parser.set_defaults(func=plot_mesh_data)
analyze_parser.add_argument(
- "input", metavar="<input>",
- help="Path/url to Klipper Socket or path to json file"
+ "input",
+ metavar="<input>",
+ help="Path/url to Klipper Socket or path to json file",
)
analyze_parser.set_defaults(func=analyze)
dump_parser.add_argument(
- "-o", "--output", type=str, default=None,
- help="Json output file path"
+ "-o", "--output", type=str, default=None, help="Json output file path"
)
dump_parser.add_argument(
- "input", metavar="<input>",
- help="Path or url to Klipper Socket"
+ "input", metavar="<input>", help="Path or url to Klipper Socket"
)
dump_parser.set_defaults(func=dump_request)
cmd_args = parser.parse_args()
diff --git a/scripts/graph_motion.py b/scripts/graph_motion.py
index 0520343f..83072680 100755
--- a/scripts/graph_motion.py
+++ b/scripts/graph_motion.py
@@ -8,14 +8,14 @@
import optparse, datetime, math
import matplotlib
-SEG_TIME = .000100
-INV_SEG_TIME = 1. / SEG_TIME
+SEG_TIME = 0.000100
+INV_SEG_TIME = 1.0 / SEG_TIME
-SPRING_FREQ=35.0
-DAMPING_RATIO=0.05
+SPRING_FREQ = 35.0
+DAMPING_RATIO = 0.05
-CONFIG_FREQ=40.0
-CONFIG_DAMPING_RATIO=0.1
+CONFIG_FREQ = 40.0
+CONFIG_DAMPING_RATIO = 0.1
######################################################################
# Basic trapezoid motion
@@ -23,60 +23,72 @@ CONFIG_DAMPING_RATIO=0.1
# List of moves: [(start_v, end_v, move_t), ...]
Moves = [
- (0., 0., .100),
- (6.869, 89.443, None), (89.443, 89.443, .120), (89.443, 17.361, None),
- (19.410, 120., None), (120., 120., .130), (120., 5., None),
- (0., 0., 0.01),
- (-5., -100., None), (-100., -100., .100), (-100., -.5, None),
- (0., 0., .200)
+ (0.0, 0.0, 0.100),
+ (6.869, 89.443, None),
+ (89.443, 89.443, 0.120),
+ (89.443, 17.361, None),
+ (19.410, 120.0, None),
+ (120.0, 120.0, 0.130),
+ (120.0, 5.0, None),
+ (0.0, 0.0, 0.01),
+ (-5.0, -100.0, None),
+ (-100.0, -100.0, 0.100),
+ (-100.0, -0.5, None),
+ (0.0, 0.0, 0.200),
]
-ACCEL = 3000.
+ACCEL = 3000.0
MAX_JERK = ACCEL * 0.6 * SPRING_FREQ
+
def get_accel(start_v, end_v):
return ACCEL
+
def get_accel_jerk_limit(start_v, end_v):
- effective_accel = math.sqrt(MAX_JERK * abs(end_v - start_v) / 6.)
+ effective_accel = math.sqrt(MAX_JERK * abs(end_v - start_v) / 6.0)
return min(effective_accel, ACCEL)
+
# Standard constant acceleration generator
def get_acc_pos_ao2(rel_t, start_v, accel, move_t):
return (start_v + 0.5 * accel * rel_t) * rel_t
+
# Bezier curve "accel_order=4" generator
def get_acc_pos_ao4(rel_t, start_v, accel, move_t):
- inv_accel_t = 1. / move_t
+ inv_accel_t = 1.0 / move_t
accel_div_accel_t = accel * inv_accel_t
accel_div_accel_t2 = accel_div_accel_t * inv_accel_t
- c4 = -.5 * accel_div_accel_t2;
- c3 = accel_div_accel_t;
+ c4 = -0.5 * accel_div_accel_t2
+ c3 = accel_div_accel_t
c1 = start_v
return ((c4 * rel_t + c3) * rel_t * rel_t + c1) * rel_t
+
# Bezier curve "accel_order=6" generator
def get_acc_pos_ao6(rel_t, start_v, accel, move_t):
- inv_accel_t = 1. / move_t
+ inv_accel_t = 1.0 / move_t
accel_div_accel_t = accel * inv_accel_t
accel_div_accel_t2 = accel_div_accel_t * inv_accel_t
accel_div_accel_t3 = accel_div_accel_t2 * inv_accel_t
accel_div_accel_t4 = accel_div_accel_t3 * inv_accel_t
- c6 = accel_div_accel_t4;
- c5 = -3. * accel_div_accel_t3;
- c4 = 2.5 * accel_div_accel_t2;
- c1 = start_v;
- return (((c6 * rel_t + c5) * rel_t + c4)
- * rel_t * rel_t * rel_t + c1) * rel_t
+ c6 = accel_div_accel_t4
+ c5 = -3.0 * accel_div_accel_t3
+ c4 = 2.5 * accel_div_accel_t2
+ c1 = start_v
+ return (((c6 * rel_t + c5) * rel_t + c4) * rel_t * rel_t * rel_t + c1) * rel_t
+
get_acc_pos = get_acc_pos_ao2
get_acc = get_accel
+
# Calculate positions based on 'Moves' list
def gen_positions():
out = []
- start_d = start_t = t = 0.
+ start_d = start_t = t = 0.0
for start_v, end_v, move_t in Moves:
if move_t is None:
move_t = abs(end_v - start_v) / get_acc(start_v, end_v)
@@ -95,10 +107,11 @@ def gen_positions():
# Estimated motion with belt as spring
######################################################################
+
def estimate_spring(positions):
- ang_freq2 = (SPRING_FREQ * 2. * math.pi)**2
- damping_factor = 4. * math.pi * DAMPING_RATIO * SPRING_FREQ
- head_pos = head_v = 0.
+ ang_freq2 = (SPRING_FREQ * 2.0 * math.pi) ** 2
+ damping_factor = 4.0 * math.pi * DAMPING_RATIO * SPRING_FREQ
+ head_pos = head_v = 0.0
out = []
for stepper_pos in positions:
head_pos += head_v * SEG_TIME
@@ -115,15 +128,18 @@ def estimate_spring(positions):
MARGIN_TIME = 0.050
+
def time_to_index(t):
- return int(t * INV_SEG_TIME + .5)
+ return int(t * INV_SEG_TIME + 0.5)
+
def indexes(positions):
drop = time_to_index(MARGIN_TIME)
- return range(drop, len(positions)-drop)
+ return range(drop, len(positions) - drop)
+
def trim_lists(*lists):
- keep = len(lists[0]) - time_to_index(2. * MARGIN_TIME)
+ keep = len(lists[0]) - time_to_index(2.0 * MARGIN_TIME)
for l in lists:
del l[keep:]
@@ -132,70 +148,84 @@ def trim_lists(*lists):
# Common data filters
######################################################################
+
# Generate estimated first order derivative
def gen_deriv(data):
- return [0.] + [(data[i+1] - data[i]) * INV_SEG_TIME
- for i in range(len(data)-1)]
+ return [0.0] + [
+ (data[i + 1] - data[i]) * INV_SEG_TIME for i in range(len(data) - 1)
+ ]
+
# Simple average between two points smooth_time away
def calc_average(positions, smooth_time):
- offset = time_to_index(smooth_time * .5)
- out = [0.] * len(positions)
+ offset = time_to_index(smooth_time * 0.5)
+ out = [0.0] * len(positions)
for i in indexes(positions):
- out[i] = .5 * (positions[i-offset] + positions[i+offset])
+ out[i] = 0.5 * (positions[i - offset] + positions[i + offset])
return out
+
# Average (via integration) of smooth_time range
def calc_smooth(positions, smooth_time):
- offset = time_to_index(smooth_time * .5)
- weight = 1. / (2*offset - 1)
- out = [0.] * len(positions)
+ offset = time_to_index(smooth_time * 0.5)
+ weight = 1.0 / (2 * offset - 1)
+ out = [0.0] * len(positions)
for i in indexes(positions):
- out[i] = sum(positions[i-offset+1:i+offset]) * weight
+ out[i] = sum(positions[i - offset + 1 : i + offset]) * weight
return out
+
# Time weighted average (via integration) of smooth_time range
def calc_weighted(positions, smooth_time):
- offset = time_to_index(smooth_time * .5)
- weight = 1. / offset**2
- out = [0.] * len(positions)
+ offset = time_to_index(smooth_time * 0.5)
+ weight = 1.0 / offset**2
+ out = [0.0] * len(positions)
for i in indexes(positions):
- weighted_data = [positions[j] * (offset - abs(j-i))
- for j in range(i-offset, i+offset)]
+ weighted_data = [
+ positions[j] * (offset - abs(j - i)) for j in range(i - offset, i + offset)
+ ]
out[i] = sum(weighted_data) * weight
return out
+
# Weighted average (`h**2 - (t-T)**2`) of smooth_time range
def calc_weighted2(positions, smooth_time):
- offset = time_to_index(smooth_time * .5)
- weight = .75 / offset**3
- out = [0.] * len(positions)
+ offset = time_to_index(smooth_time * 0.5)
+ weight = 0.75 / offset**3
+ out = [0.0] * len(positions)
for i in indexes(positions):
- weighted_data = [positions[j] * (offset**2 - (j-i)**2)
- for j in range(i-offset, i+offset)]
+ weighted_data = [
+ positions[j] * (offset**2 - (j - i) ** 2)
+ for j in range(i - offset, i + offset)
+ ]
out[i] = sum(weighted_data) * weight
return out
+
# Weighted average (`(h**2 - (t-T)**2)**2`) of smooth_time range
def calc_weighted4(positions, smooth_time):
- offset = time_to_index(smooth_time * .5)
- weight = 15 / (16. * offset**5)
- out = [0.] * len(positions)
+ offset = time_to_index(smooth_time * 0.5)
+ weight = 15 / (16.0 * offset**5)
+ out = [0.0] * len(positions)
for i in indexes(positions):
- weighted_data = [positions[j] * ((offset**2 - (j-i)**2))**2
- for j in range(i-offset, i+offset)]
+ weighted_data = [
+ positions[j] * ((offset**2 - (j - i) ** 2)) ** 2
+ for j in range(i - offset, i + offset)
+ ]
out[i] = sum(weighted_data) * weight
return out
+
# Weighted average (`(h - abs(t-T))**2 * (2 * abs(t-T) + h)`) of range
def calc_weighted3(positions, smooth_time):
- offset = time_to_index(smooth_time * .5)
- weight = 1. / offset**4
- out = [0.] * len(positions)
+ offset = time_to_index(smooth_time * 0.5)
+ weight = 1.0 / offset**4
+ out = [0.0] * len(positions)
for i in indexes(positions):
- weighted_data = [positions[j] * (offset - abs(j-i))**2
- * (2. * abs(j-i) + offset)
- for j in range(i-offset, i+offset)]
+ weighted_data = [
+ positions[j] * (offset - abs(j - i)) ** 2 * (2.0 * abs(j - i) + offset)
+ for j in range(i - offset, i + offset)
+ ]
out[i] = sum(weighted_data) * weight
return out
@@ -204,107 +234,119 @@ def calc_weighted3(positions, smooth_time):
# Spring motion estimation
######################################################################
+
def calc_spring_raw(positions):
- sa = (INV_SEG_TIME / (CONFIG_FREQ * 2. * math.pi))**2
- ra = 2. * CONFIG_DAMPING_RATIO * math.sqrt(sa)
- out = [0.] * len(positions)
+ sa = (INV_SEG_TIME / (CONFIG_FREQ * 2.0 * math.pi)) ** 2
+ ra = 2.0 * CONFIG_DAMPING_RATIO * math.sqrt(sa)
+ out = [0.0] * len(positions)
for i in indexes(positions):
- out[i] = (positions[i]
- + sa * (positions[i-1] - 2.*positions[i] + positions[i+1])
- + ra * (positions[i+1] - positions[i]))
+ out[i] = (
+ positions[i]
+ + sa * (positions[i - 1] - 2.0 * positions[i] + positions[i + 1])
+ + ra * (positions[i + 1] - positions[i])
+ )
return out
+
def calc_spring_double_weighted(positions, smooth_time):
- offset = time_to_index(smooth_time * .25)
- sa = (INV_SEG_TIME / (offset * CONFIG_FREQ * 2. * math.pi))**2
- ra = 2. * CONFIG_DAMPING_RATIO * math.sqrt(sa)
- out = [0.] * len(positions)
+ offset = time_to_index(smooth_time * 0.25)
+ sa = (INV_SEG_TIME / (offset * CONFIG_FREQ * 2.0 * math.pi)) ** 2
+ ra = 2.0 * CONFIG_DAMPING_RATIO * math.sqrt(sa)
+ out = [0.0] * len(positions)
for i in indexes(positions):
- out[i] = (positions[i]
- + sa * (positions[i-offset] - 2.*positions[i]
- + positions[i+offset])
- + ra * (positions[i+1] - positions[i]))
- return calc_weighted(out, smooth_time=.5 * smooth_time)
+ out[i] = (
+ positions[i]
+ + sa * (positions[i - offset] - 2.0 * positions[i] + positions[i + offset])
+ + ra * (positions[i + 1] - positions[i])
+ )
+ return calc_weighted(out, smooth_time=0.5 * smooth_time)
+
######################################################################
# Input shapers
######################################################################
+
def get_zv_shaper():
- df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
+ df = math.sqrt(1.0 - CONFIG_DAMPING_RATIO**2)
K = math.exp(-CONFIG_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (CONFIG_FREQ * df)
- A = [1., K]
- T = [0., .5*t_d]
+ t_d = 1.0 / (CONFIG_FREQ * df)
+ A = [1.0, K]
+ T = [0.0, 0.5 * t_d]
return (A, T, "ZV")
+
def get_zvd_shaper():
- df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
+ df = math.sqrt(1.0 - CONFIG_DAMPING_RATIO**2)
K = math.exp(-CONFIG_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (CONFIG_FREQ * df)
- A = [1., 2.*K, K**2]
- T = [0., .5*t_d, t_d]
+ t_d = 1.0 / (CONFIG_FREQ * df)
+ A = [1.0, 2.0 * K, K**2]
+ T = [0.0, 0.5 * t_d, t_d]
return (A, T, "ZVD")
+
def get_mzv_shaper():
- df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
- K = math.exp(-.75 * CONFIG_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (CONFIG_FREQ * df)
+ df = math.sqrt(1.0 - CONFIG_DAMPING_RATIO**2)
+ K = math.exp(-0.75 * CONFIG_DAMPING_RATIO * math.pi / df)
+ t_d = 1.0 / (CONFIG_FREQ * df)
- a1 = 1. - 1. / math.sqrt(2.)
- a2 = (math.sqrt(2.) - 1.) * K
+ a1 = 1.0 - 1.0 / math.sqrt(2.0)
+ a2 = (math.sqrt(2.0) - 1.0) * K
a3 = a1 * K * K
A = [a1, a2, a3]
- T = [0., .375*t_d, .75*t_d]
+ T = [0.0, 0.375 * t_d, 0.75 * t_d]
return (A, T, "MZV")
+
def get_ei_shaper():
- v_tol = 0.05 # vibration tolerance
- df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
+ v_tol = 0.05 # vibration tolerance
+ df = math.sqrt(1.0 - CONFIG_DAMPING_RATIO**2)
K = math.exp(-CONFIG_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (CONFIG_FREQ * df)
+ t_d = 1.0 / (CONFIG_FREQ * df)
- a1 = .25 * (1. + v_tol)
- a2 = .5 * (1. - v_tol) * K
+ a1 = 0.25 * (1.0 + v_tol)
+ a2 = 0.5 * (1.0 - v_tol) * K
a3 = a1 * K * K
A = [a1, a2, a3]
- T = [0., .5*t_d, t_d]
+ T = [0.0, 0.5 * t_d, t_d]
return (A, T, "EI")
+
def get_2hump_ei_shaper():
- v_tol = 0.05 # vibration tolerance
- df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
+ v_tol = 0.05 # vibration tolerance
+ df = math.sqrt(1.0 - CONFIG_DAMPING_RATIO**2)
K = math.exp(-CONFIG_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (CONFIG_FREQ * df)
+ t_d = 1.0 / (CONFIG_FREQ * df)
V2 = v_tol**2
- X = pow(V2 * (math.sqrt(1. - V2) + 1.), 1./3.)
- a1 = (3.*X*X + 2.*X + 3.*V2) / (16.*X)
- a2 = (.5 - a1) * K
+ X = pow(V2 * (math.sqrt(1.0 - V2) + 1.0), 1.0 / 3.0)
+ a1 = (3.0 * X * X + 2.0 * X + 3.0 * V2) / (16.0 * X)
+ a2 = (0.5 - a1) * K
a3 = a2 * K
a4 = a1 * K * K * K
A = [a1, a2, a3, a4]
- T = [0., .5*t_d, t_d, 1.5*t_d]
+ T = [0.0, 0.5 * t_d, t_d, 1.5 * t_d]
return (A, T, "2-hump EI")
+
def get_3hump_ei_shaper():
- v_tol = 0.05 # vibration tolerance
- df = math.sqrt(1. - CONFIG_DAMPING_RATIO**2)
+ v_tol = 0.05 # vibration tolerance
+ df = math.sqrt(1.0 - CONFIG_DAMPING_RATIO**2)
K = math.exp(-CONFIG_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (CONFIG_FREQ * df)
+ t_d = 1.0 / (CONFIG_FREQ * df)
- K2 = K*K
- a1 = 0.0625 * (1. + 3. * v_tol + 2. * math.sqrt(2. * (v_tol + 1.) * v_tol))
- a2 = 0.25 * (1. - v_tol) * K
- a3 = (0.5 * (1. + v_tol) - 2. * a1) * K2
+ K2 = K * K
+ a1 = 0.0625 * (1.0 + 3.0 * v_tol + 2.0 * math.sqrt(2.0 * (v_tol + 1.0) * v_tol))
+ a2 = 0.25 * (1.0 - v_tol) * K
+ a3 = (0.5 * (1.0 + v_tol) - 2.0 * a1) * K2
a4 = a2 * K2
a5 = a1 * K2 * K2
A = [a1, a2, a3, a4, a5]
- T = [0., .5*t_d, t_d, 1.5*t_d, 2.*t_d]
+ T = [0.0, 0.5 * t_d, t_d, 1.5 * t_d, 2.0 * t_d]
return (A, T, "3-hump EI")
@@ -315,24 +357,27 @@ def shift_pulses(shaper):
for i in range(n):
T[i] -= ts
+
def calc_shaper(shaper, positions):
shift_pulses(shaper)
A = shaper[0]
- inv_D = 1. / sum(A)
+ inv_D = 1.0 / sum(A)
n = len(A)
T = [time_to_index(-shaper[1][j]) for j in range(n)]
- out = [0.] * len(positions)
+ out = [0.0] * len(positions)
for i in indexes(positions):
out[i] = sum([positions[i + T[j]] * A[j] for j in range(n)]) * inv_D
return out
+
# Ideal values
-SMOOTH_TIME = (2./3.) / CONFIG_FREQ
+SMOOTH_TIME = (2.0 / 3.0) / CONFIG_FREQ
+
def gen_updated_position(positions):
- #return calc_weighted(positions, 0.040)
- #return calc_spring_double_weighted(positions, SMOOTH_TIME)
- #return calc_weighted4(calc_spring_raw(positions), SMOOTH_TIME)
+ # return calc_weighted(positions, 0.040)
+ # return calc_spring_double_weighted(positions, SMOOTH_TIME)
+ # return calc_weighted4(calc_spring_raw(positions), SMOOTH_TIME)
return calc_shaper(get_ei_shaper(), positions)
@@ -340,6 +385,7 @@ def gen_updated_position(positions):
# Plotting and startup
######################################################################
+
def plot_motion():
# Nominal motion
positions = gen_positions()
@@ -352,62 +398,80 @@ def plot_motion():
# Estimated position with model of belt as spring
spring_orig = estimate_spring(positions)
spring_upd = estimate_spring(upd_positions)
- spring_diff_orig = [n-o for n, o in zip(spring_orig, positions)]
- spring_diff_upd = [n-o for n, o in zip(spring_upd, positions)]
+ spring_diff_orig = [n - o for n, o in zip(spring_orig, positions)]
+ spring_diff_upd = [n - o for n, o in zip(spring_upd, positions)]
head_velocities = gen_deriv(spring_orig)
head_accels = gen_deriv(head_velocities)
head_upd_velocities = gen_deriv(spring_upd)
head_upd_accels = gen_deriv(head_upd_velocities)
# Build plot
times = [SEG_TIME * i for i in range(len(positions))]
- trim_lists(times, velocities, accels,
- upd_velocities, upd_velocities, upd_accels,
- spring_diff_orig, spring_diff_upd,
- head_velocities, head_upd_velocities,
- head_accels, head_upd_accels)
+ trim_lists(
+ times,
+ velocities,
+ accels,
+ upd_velocities,
+ upd_velocities,
+ upd_accels,
+ spring_diff_orig,
+ spring_diff_upd,
+ head_velocities,
+ head_upd_velocities,
+ head_accels,
+ head_upd_accels,
+ )
fig, (ax1, ax2, ax3) = matplotlib.pyplot.subplots(nrows=3, sharex=True)
- ax1.set_title("Simulation: resonance freq=%.1f Hz, damping_ratio=%.3f,\n"
- "configured freq=%.1f Hz, damping_ratio = %.3f"
- % (SPRING_FREQ, DAMPING_RATIO, CONFIG_FREQ
- , CONFIG_DAMPING_RATIO))
- ax1.set_ylabel('Velocity (mm/s)')
- ax1.plot(times, upd_velocities, 'r', label='New Velocity', alpha=0.8)
- ax1.plot(times, velocities, 'g', label='Nominal Velocity', alpha=0.8)
- ax1.plot(times, head_velocities, label='Head Velocity', alpha=0.4)
- ax1.plot(times, head_upd_velocities, label='New Head Velocity', alpha=0.4)
+ ax1.set_title(
+ "Simulation: resonance freq=%.1f Hz, damping_ratio=%.3f,\n"
+ "configured freq=%.1f Hz, damping_ratio = %.3f"
+ % (SPRING_FREQ, DAMPING_RATIO, CONFIG_FREQ, CONFIG_DAMPING_RATIO)
+ )
+ ax1.set_ylabel("Velocity (mm/s)")
+ ax1.plot(times, upd_velocities, "r", label="New Velocity", alpha=0.8)
+ ax1.plot(times, velocities, "g", label="Nominal Velocity", alpha=0.8)
+ ax1.plot(times, head_velocities, label="Head Velocity", alpha=0.4)
+ ax1.plot(times, head_upd_velocities, label="New Head Velocity", alpha=0.4)
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
- ax1.legend(loc='best', prop=fontP)
+ fontP.set_size("x-small")
+ ax1.legend(loc="best", prop=fontP)
ax1.grid(True)
- ax2.set_ylabel('Acceleration (mm/s^2)')
- ax2.plot(times, upd_accels, 'r', label='New Accel', alpha=0.8)
- ax2.plot(times, accels, 'g', label='Nominal Accel', alpha=0.8)
+ ax2.set_ylabel("Acceleration (mm/s^2)")
+ ax2.plot(times, upd_accels, "r", label="New Accel", alpha=0.8)
+ ax2.plot(times, accels, "g", label="Nominal Accel", alpha=0.8)
ax2.plot(times, head_accels, alpha=0.4)
ax2.plot(times, head_upd_accels, alpha=0.4)
- ax2.set_ylim([-5. * ACCEL, 5. * ACCEL])
- ax2.legend(loc='best', prop=fontP)
+ ax2.set_ylim([-5.0 * ACCEL, 5.0 * ACCEL])
+ ax2.legend(loc="best", prop=fontP)
ax2.grid(True)
- ax3.set_ylabel('Deviation (mm)')
- ax3.plot(times, spring_diff_upd, 'r', label='New', alpha=0.8)
- ax3.plot(times, spring_diff_orig, 'g', label='Nominal', alpha=0.8)
+ ax3.set_ylabel("Deviation (mm)")
+ ax3.plot(times, spring_diff_upd, "r", label="New", alpha=0.8)
+ ax3.plot(times, spring_diff_orig, "g", label="Nominal", alpha=0.8)
ax3.grid(True)
- ax3.legend(loc='best', prop=fontP)
- ax3.set_xlabel('Time (s)')
+ ax3.legend(loc="best", prop=fontP)
+ ax3.set_xlabel("Time (s)")
return fig
+
def setup_matplotlib(output_to_file):
global matplotlib
if output_to_file:
- matplotlib.use('Agg')
+ matplotlib.use("Agg")
import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
import matplotlib.ticker
+
def main():
# Parse command-line arguments
usage = "%prog [options]"
opts = optparse.OptionParser(usage)
- opts.add_option("-o", "--output", type="string", dest="output",
- default=None, help="filename of output graph")
+ opts.add_option(
+ "-o",
+ "--output",
+ type="string",
+ dest="output",
+ default=None,
+ help="filename of output graph",
+ )
options, args = opts.parse_args()
if len(args) != 0:
opts.error("Incorrect number of arguments")
@@ -423,5 +487,6 @@ def main():
fig.set_size_inches(8, 6)
fig.savefig(options.output)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/graph_shaper.py b/scripts/graph_shaper.py
index b9a6627c..b7886e28 100755
--- a/scripts/graph_shaper.py
+++ b/scripts/graph_shaper.py
@@ -9,118 +9,125 @@ import optparse, math
import matplotlib
# A set of damping ratios to calculate shaper response for
-DAMPING_RATIOS=[0.05, 0.1, 0.2]
+DAMPING_RATIOS = [0.05, 0.1, 0.2]
# Parameters of the input shaper
-SHAPER_FREQ=50.0
-SHAPER_DAMPING_RATIO=0.1
+SHAPER_FREQ = 50.0
+SHAPER_DAMPING_RATIO = 0.1
# Simulate input shaping of step function for these true resonance frequency
# and damping ratio
-STEP_SIMULATION_RESONANCE_FREQ=60.
-STEP_SIMULATION_DAMPING_RATIO=0.15
+STEP_SIMULATION_RESONANCE_FREQ = 60.0
+STEP_SIMULATION_DAMPING_RATIO = 0.15
# If set, defines which range of frequencies to plot shaper frequency response
PLOT_FREQ_RANGE = [] # If empty, will be automatically determined
-#PLOT_FREQ_RANGE = [10., 100.]
+# PLOT_FREQ_RANGE = [10., 100.]
-PLOT_FREQ_STEP = .01
+PLOT_FREQ_STEP = 0.01
######################################################################
# Input shapers
######################################################################
+
def get_zv_shaper():
- df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
+ df = math.sqrt(1.0 - SHAPER_DAMPING_RATIO**2)
K = math.exp(-SHAPER_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (SHAPER_FREQ * df)
- A = [1., K]
- T = [0., .5*t_d]
+ t_d = 1.0 / (SHAPER_FREQ * df)
+ A = [1.0, K]
+ T = [0.0, 0.5 * t_d]
return (A, T, "ZV")
+
def get_zvd_shaper():
- df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
+ df = math.sqrt(1.0 - SHAPER_DAMPING_RATIO**2)
K = math.exp(-SHAPER_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (SHAPER_FREQ * df)
- A = [1., 2.*K, K**2]
- T = [0., .5*t_d, t_d]
+ t_d = 1.0 / (SHAPER_FREQ * df)
+ A = [1.0, 2.0 * K, K**2]
+ T = [0.0, 0.5 * t_d, t_d]
return (A, T, "ZVD")
+
def get_mzv_shaper():
- df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
- K = math.exp(-.75 * SHAPER_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (SHAPER_FREQ * df)
+ df = math.sqrt(1.0 - SHAPER_DAMPING_RATIO**2)
+ K = math.exp(-0.75 * SHAPER_DAMPING_RATIO * math.pi / df)
+ t_d = 1.0 / (SHAPER_FREQ * df)
- a1 = 1. - 1. / math.sqrt(2.)
- a2 = (math.sqrt(2.) - 1.) * K
+ a1 = 1.0 - 1.0 / math.sqrt(2.0)
+ a2 = (math.sqrt(2.0) - 1.0) * K
a3 = a1 * K * K
A = [a1, a2, a3]
- T = [0., .375*t_d, .75*t_d]
+ T = [0.0, 0.375 * t_d, 0.75 * t_d]
return (A, T, "MZV")
+
def get_ei_shaper():
- v_tol = 0.05 # vibration tolerance
- df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
+ v_tol = 0.05 # vibration tolerance
+ df = math.sqrt(1.0 - SHAPER_DAMPING_RATIO**2)
K = math.exp(-SHAPER_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (SHAPER_FREQ * df)
+ t_d = 1.0 / (SHAPER_FREQ * df)
- a1 = .25 * (1. + v_tol)
- a2 = .5 * (1. - v_tol) * K
+ a1 = 0.25 * (1.0 + v_tol)
+ a2 = 0.5 * (1.0 - v_tol) * K
a3 = a1 * K * K
A = [a1, a2, a3]
- T = [0., .5*t_d, t_d]
+ T = [0.0, 0.5 * t_d, t_d]
return (A, T, "EI")
+
def get_2hump_ei_shaper():
- v_tol = 0.05 # vibration tolerance
- df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
+ v_tol = 0.05 # vibration tolerance
+ df = math.sqrt(1.0 - SHAPER_DAMPING_RATIO**2)
K = math.exp(-SHAPER_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (SHAPER_FREQ * df)
+ t_d = 1.0 / (SHAPER_FREQ * df)
V2 = v_tol**2
- X = pow(V2 * (math.sqrt(1. - V2) + 1.), 1./3.)
- a1 = (3.*X*X + 2.*X + 3.*V2) / (16.*X)
- a2 = (.5 - a1) * K
+ X = pow(V2 * (math.sqrt(1.0 - V2) + 1.0), 1.0 / 3.0)
+ a1 = (3.0 * X * X + 2.0 * X + 3.0 * V2) / (16.0 * X)
+ a2 = (0.5 - a1) * K
a3 = a2 * K
a4 = a1 * K * K * K
A = [a1, a2, a3, a4]
- T = [0., .5*t_d, t_d, 1.5*t_d]
+ T = [0.0, 0.5 * t_d, t_d, 1.5 * t_d]
return (A, T, "2-hump EI")
+
def get_3hump_ei_shaper():
- v_tol = 0.05 # vibration tolerance
- df = math.sqrt(1. - SHAPER_DAMPING_RATIO**2)
+ v_tol = 0.05 # vibration tolerance
+ df = math.sqrt(1.0 - SHAPER_DAMPING_RATIO**2)
K = math.exp(-SHAPER_DAMPING_RATIO * math.pi / df)
- t_d = 1. / (SHAPER_FREQ * df)
+ t_d = 1.0 / (SHAPER_FREQ * df)
- K2 = K*K
- a1 = 0.0625 * (1. + 3. * v_tol + 2. * math.sqrt(2. * (v_tol + 1.) * v_tol))
- a2 = 0.25 * (1. - v_tol) * K
- a3 = (0.5 * (1. + v_tol) - 2. * a1) * K2
+ K2 = K * K
+ a1 = 0.0625 * (1.0 + 3.0 * v_tol + 2.0 * math.sqrt(2.0 * (v_tol + 1.0) * v_tol))
+ a2 = 0.25 * (1.0 - v_tol) * K
+ a3 = (0.5 * (1.0 + v_tol) - 2.0 * a1) * K2
a4 = a2 * K2
a5 = a1 * K2 * K2
A = [a1, a2, a3, a4, a5]
- T = [0., .5*t_d, t_d, 1.5*t_d, 2.*t_d]
+ T = [0.0, 0.5 * t_d, t_d, 1.5 * t_d, 2.0 * t_d]
return (A, T, "3-hump EI")
def estimate_shaper(shaper, freq, damping_ratio):
A, T, _ = shaper
n = len(T)
- inv_D = 1. / sum(A)
- omega = 2. * math.pi * freq
+ inv_D = 1.0 / sum(A)
+ omega = 2.0 * math.pi * freq
damping = damping_ratio * omega
- omega_d = omega * math.sqrt(1. - damping_ratio**2)
+ omega_d = omega * math.sqrt(1.0 - damping_ratio**2)
S = C = 0
for i in range(n):
W = A[i] * math.exp(-damping * (T[-1] - T[i]))
S += W * math.sin(omega_d * T[i])
C += W * math.cos(omega_d * T[i])
- return math.sqrt(S*S + C*C) * inv_D
+ return math.sqrt(S * S + C * C) * inv_D
+
def shift_pulses(shaper):
A, T, name = shaper
@@ -129,6 +136,7 @@ def shift_pulses(shaper):
for i in range(n):
T[i] -= ts
+
# Shaper selection
get_shaper = get_ei_shaper
@@ -137,27 +145,31 @@ get_shaper = get_ei_shaper
# Plotting and startup
######################################################################
+
def bisect(func, left, right):
- lhs_sign = math.copysign(1., func(left))
- while right-left > 1e-8:
- mid = .5 * (left + right)
+ lhs_sign = math.copysign(1.0, func(left))
+ while right - left > 1e-8:
+ mid = 0.5 * (left + right)
val = func(mid)
- if math.copysign(1., val) == lhs_sign:
+ if math.copysign(1.0, val) == lhs_sign:
left = mid
else:
right = mid
- return .5 * (left + right)
+ return 0.5 * (left + right)
+
def find_shaper_plot_range(shaper, vib_tol):
def eval_shaper(freq):
return estimate_shaper(shaper, freq, DAMPING_RATIOS[0]) - vib_tol
+
if not PLOT_FREQ_RANGE:
- left = bisect(eval_shaper, 0., SHAPER_FREQ)
+ left = bisect(eval_shaper, 0.0, SHAPER_FREQ)
right = bisect(eval_shaper, SHAPER_FREQ, 2.4 * SHAPER_FREQ)
else:
left, right = PLOT_FREQ_RANGE
return (left, right)
+
def gen_shaper_response(shaper):
# Calculate shaper vibration response on a range of frequencies
response = []
@@ -170,39 +182,41 @@ def gen_shaper_response(shaper):
response.append(vals)
freqs.append(freq)
freq += PLOT_FREQ_STEP
- legend = ['damping ratio = %.3f' % d_r for d_r in DAMPING_RATIOS]
+ legend = ["damping ratio = %.3f" % d_r for d_r in DAMPING_RATIOS]
return freqs, response, legend
+
def gen_shaped_step_function(shaper):
# Calculate shaping of a step function
A, T, _ = shaper
- inv_D = 1. / sum(A)
+ inv_D = 1.0 / sum(A)
n = len(T)
- omega = 2. * math.pi * STEP_SIMULATION_RESONANCE_FREQ
+ omega = 2.0 * math.pi * STEP_SIMULATION_RESONANCE_FREQ
damping = STEP_SIMULATION_DAMPING_RATIO * omega
- omega_d = omega * math.sqrt(1. - STEP_SIMULATION_DAMPING_RATIO**2)
+ omega_d = omega * math.sqrt(1.0 - STEP_SIMULATION_DAMPING_RATIO**2)
phase = math.acos(STEP_SIMULATION_DAMPING_RATIO)
- t_start = T[0] - .5 / SHAPER_FREQ
+ t_start = T[0] - 0.5 / SHAPER_FREQ
t_end = T[-1] + 1.5 / STEP_SIMULATION_RESONANCE_FREQ
result = []
time = []
t = t_start
def step_response(t):
- if t < 0.:
- return 0.
- return 1. - math.exp(-damping * t) * math.sin(omega_d * t
- + phase) / math.sin(phase)
+ if t < 0.0:
+ return 0.0
+ return 1.0 - math.exp(-damping * t) * math.sin(omega_d * t + phase) / math.sin(
+ phase
+ )
while t <= t_end:
val = []
- val.append(1. if t >= 0. else 0.)
- #val.append(step_response(t))
+ val.append(1.0 if t >= 0.0 else 0.0)
+ # val.append(step_response(t))
- commanded = 0.
- response = 0.
+ commanded = 0.0
+ response = 0.0
S = C = 0
for i in range(n):
if t < T[i]:
@@ -214,8 +228,8 @@ def gen_shaped_step_function(shaper):
result.append(val)
time.append(t)
- t += .01 / SHAPER_FREQ
- legend = ['step', 'shaper commanded', 'system response']
+ t += 0.01 / SHAPER_FREQ
+ legend = ["step", "shaper commanded", "system response"]
return time, result, legend
@@ -224,46 +238,58 @@ def plot_shaper(shaper):
freqs, response, response_legend = gen_shaper_response(shaper)
time, step_vals, step_legend = gen_shaped_step_function(shaper)
- fig, (ax1, ax2) = matplotlib.pyplot.subplots(nrows=2, figsize=(10,9))
- ax1.set_title("Vibration response simulation for shaper '%s',\n"
- "shaper_freq=%.1f Hz, damping_ratio=%.3f"
- % (shaper[-1], SHAPER_FREQ, SHAPER_DAMPING_RATIO))
+ fig, (ax1, ax2) = matplotlib.pyplot.subplots(nrows=2, figsize=(10, 9))
+ ax1.set_title(
+ "Vibration response simulation for shaper '%s',\n"
+ "shaper_freq=%.1f Hz, damping_ratio=%.3f"
+ % (shaper[-1], SHAPER_FREQ, SHAPER_DAMPING_RATIO)
+ )
ax1.plot(freqs, response)
- ax1.set_ylim(bottom=0.)
+ ax1.set_ylim(bottom=0.0)
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
- ax1.legend(response_legend, loc='best', prop=fontP)
- ax1.set_xlabel('Resonance frequency, Hz')
- ax1.set_ylabel('Remaining vibrations, ratio')
+ fontP.set_size("x-small")
+ ax1.legend(response_legend, loc="best", prop=fontP)
+ ax1.set_xlabel("Resonance frequency, Hz")
+ ax1.set_ylabel("Remaining vibrations, ratio")
ax1.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
ax1.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
- ax1.grid(which='major', color='grey')
- ax1.grid(which='minor', color='lightgrey')
+ ax1.grid(which="major", color="grey")
+ ax1.grid(which="minor", color="lightgrey")
- ax2.set_title("Unit step input, resonance frequency=%.1f Hz, "
- "damping ratio=%.3f" % (STEP_SIMULATION_RESONANCE_FREQ,
- STEP_SIMULATION_DAMPING_RATIO))
+ ax2.set_title(
+ "Unit step input, resonance frequency=%.1f Hz, "
+ "damping ratio=%.3f"
+ % (STEP_SIMULATION_RESONANCE_FREQ, STEP_SIMULATION_DAMPING_RATIO)
+ )
ax2.plot(time, step_vals)
- ax2.legend(step_legend, loc='best', prop=fontP)
- ax2.set_xlabel('Time, sec')
- ax2.set_ylabel('Amplitude')
+ ax2.legend(step_legend, loc="best", prop=fontP)
+ ax2.set_xlabel("Time, sec")
+ ax2.set_ylabel("Amplitude")
ax2.grid()
fig.tight_layout()
return fig
+
def setup_matplotlib(output_to_file):
global matplotlib
if output_to_file:
- matplotlib.use('Agg')
+ matplotlib.use("Agg")
import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
import matplotlib.ticker
+
def main():
# Parse command-line arguments
usage = "%prog [options]"
opts = optparse.OptionParser(usage)
- opts.add_option("-o", "--output", type="string", dest="output",
- default=None, help="filename of output graph")
+ opts.add_option(
+ "-o",
+ "--output",
+ type="string",
+ dest="output",
+ default=None,
+ help="filename of output graph",
+ )
options, args = opts.parse_args()
if len(args) != 0:
opts.error("Incorrect number of arguments")
@@ -279,5 +305,6 @@ def main():
fig.set_size_inches(8, 6)
fig.savefig(options.output)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/graph_temp_sensor.py b/scripts/graph_temp_sensor.py
index c1d18526..fa6a680c 100755
--- a/scripts/graph_temp_sensor.py
+++ b/scripts/graph_temp_sensor.py
@@ -12,35 +12,47 @@ import matplotlib
# Dummy config / printer / etc. class emulation
######################################################################
+
class DummyConfig:
def __init__(self, config_settings):
self.config_settings = config_settings
self.sensor_factories = {}
+
# Emulate config class
def getfloat(self, option, default, **kw):
return self.config_settings.get(option, default)
+
def get(self, option, default=None):
return default
+
def get_printer(self):
return self
+
def get_name(self):
return "dummy"
+
# Emulate printer class
def load_object(self, config, name):
return self
+
def lookup_object(self, name):
return self
+
# Emulate heaters class
def add_sensor_factory(self, name, factory):
self.sensor_factories[name] = factory
+
def do_create_sensor(self, sensor_type):
return self.sensor_factories[sensor_type](self).adc_convert
+
# Emulate query_adc class
def register_adc(self, name, klass):
pass
+
# Emulate pins class
def setup_pin(self, pin_type, pin_name):
return self
+
# Emulate mcu_adc class
def setup_adc_callback(self, time, callback):
pass
@@ -50,50 +62,53 @@ class DummyConfig:
# Plotting
######################################################################
+
def plot_adc_resolution(config, sensors):
# Temperature list
all_temps = [float(i) for i in range(1, 351)]
temps = all_temps[:-1]
# Build plot
fig, (ax1, ax2) = matplotlib.pyplot.subplots(nrows=2, sharex=True)
- pullup = config.getfloat('pullup_resistor', 0.)
- adc_voltage = config.getfloat('adc_voltage', 0.)
- ax1.set_title("Temperature Sensor (pullup=%.0f, adc_voltage=%.3f)"
- % (pullup, adc_voltage))
- ax1.set_ylabel('ADC')
- ax2.set_ylabel('ADC change per 1C')
+ pullup = config.getfloat("pullup_resistor", 0.0)
+ adc_voltage = config.getfloat("adc_voltage", 0.0)
+ ax1.set_title(
+ "Temperature Sensor (pullup=%.0f, adc_voltage=%.3f)" % (pullup, adc_voltage)
+ )
+ ax1.set_ylabel("ADC")
+ ax2.set_ylabel("ADC change per 1C")
for sensor in sensors:
sc = config.do_create_sensor(sensor)
adcs = [sc.calc_adc(t) for t in all_temps]
ax1.plot(temps, adcs[:-1], label=sensor, alpha=0.6)
- adc_deltas = [abs(adcs[i+1] - adcs[i]) for i in range(len(temps))]
+ adc_deltas = [abs(adcs[i + 1] - adcs[i]) for i in range(len(temps))]
ax2.plot(temps, adc_deltas, alpha=0.6)
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
- ax1.legend(loc='best', prop=fontP)
- ax2.set_xlabel('Temperature (C)')
+ fontP.set_size("x-small")
+ ax1.legend(loc="best", prop=fontP)
+ ax2.set_xlabel("Temperature (C)")
ax1.grid(True)
ax2.grid(True)
fig.tight_layout()
return fig
+
def plot_resistance(config, sensors):
# Temperature list
all_temps = [float(i) for i in range(1, 351)]
# Build plot
fig, ax = matplotlib.pyplot.subplots()
- pullup = config.getfloat('pullup_resistor', 0.)
+ pullup = config.getfloat("pullup_resistor", 0.0)
ax.set_title("Temperature Sensor (pullup=%.0f)" % (pullup,))
- ax.set_ylabel('Resistance (Ohms)')
+ ax.set_ylabel("Resistance (Ohms)")
for sensor in sensors:
sc = config.do_create_sensor(sensor)
adcs = [sc.calc_adc(t) for t in all_temps]
rs = [pullup * adc / (1.0 - adc) for adc in adcs]
ax.plot(all_temps, rs, label=sensor, alpha=0.6)
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
- ax.legend(loc='best', prop=fontP)
- ax.set_xlabel('Temperature (C)')
+ fontP.set_size("x-small")
+ ax.legend(loc="best", prop=fontP)
+ ax.set_xlabel("Temperature (C)")
ax.grid(True)
fig.tight_layout()
return fig
@@ -103,50 +118,81 @@ def plot_resistance(config, sensors):
# Startup
######################################################################
+
def setup_matplotlib(output_to_file):
global matplotlib
if output_to_file:
- matplotlib.rcParams.update({'figure.autolayout': True})
- matplotlib.use('Agg')
+ matplotlib.rcParams.update({"figure.autolayout": True})
+ matplotlib.use("Agg")
import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
import matplotlib.ticker
+
def import_sensors(config):
global extras
# Load adc_temperature.py and thermistor.py modules
- kdir = os.path.join(os.path.dirname(__file__), '..', 'klippy')
+ kdir = os.path.join(os.path.dirname(__file__), "..", "klippy")
sys.path.append(kdir)
import extras.adc_temperature, extras.thermistor
+
extras.thermistor.load_config(config)
extras.adc_temperature.load_config(config)
+
def main():
# Parse command-line arguments
usage = "%prog [options]"
opts = optparse.OptionParser(usage)
- opts.add_option("-o", "--output", type="string", dest="output",
- default=None, help="filename of output graph")
- opts.add_option("-p", "--pullup", type="float", dest="pullup",
- default=4700., help="pullup resistor")
- opts.add_option("-v", "--voltage", type="float", dest="voltage",
- default=5., help="pullup resistor")
- opts.add_option("-s", "--sensors", type="string", dest="sensors",
- default="", help="list of sensors (comma separated)")
- opts.add_option("-r", "--resistance", action="store_true",
- help="graph sensor resistance")
+ opts.add_option(
+ "-o",
+ "--output",
+ type="string",
+ dest="output",
+ default=None,
+ help="filename of output graph",
+ )
+ opts.add_option(
+ "-p",
+ "--pullup",
+ type="float",
+ dest="pullup",
+ default=4700.0,
+ help="pullup resistor",
+ )
+ opts.add_option(
+ "-v",
+ "--voltage",
+ type="float",
+ dest="voltage",
+ default=5.0,
+ help="pullup resistor",
+ )
+ opts.add_option(
+ "-s",
+ "--sensors",
+ type="string",
+ dest="sensors",
+ default="",
+ help="list of sensors (comma separated)",
+ )
+ opts.add_option(
+ "-r", "--resistance", action="store_true", help="graph sensor resistance"
+ )
options, args = opts.parse_args()
if len(args) != 0:
opts.error("Incorrect number of arguments")
# Import sensors
- config_settings = {'pullup_resistor': options.pullup,
- 'adc_voltage': options.voltage}
+ config_settings = {
+ "pullup_resistor": options.pullup,
+ "adc_voltage": options.voltage,
+ }
config = DummyConfig(config_settings)
import_sensors(config)
# Determine sensors to graph
if options.sensors:
- sensors = [s.strip() for s in options.sensors.split(',')]
+ sensors = [s.strip() for s in options.sensors.split(",")]
else:
sensors = sorted(config.sensor_factories.keys())
@@ -164,5 +210,6 @@ def main():
fig.set_size_inches(8, 6)
fig.savefig(options.output)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/graphstats.py b/scripts/graphstats.py
index 5cd2ad34..4a8a7164 100755
--- a/scripts/graphstats.py
+++ b/scripts/graphstats.py
@@ -7,88 +7,107 @@
import optparse, datetime
import matplotlib
-MAXBANDWIDTH=25000.
-MAXBUFFER=2.
-STATS_INTERVAL=5.
-TASK_MAX=0.0025
+MAXBANDWIDTH = 25000.0
+MAXBUFFER = 2.0
+STATS_INTERVAL = 5.0
+TASK_MAX = 0.0025
APPLY_PREFIX = [
- 'mcu_awake', 'mcu_task_avg', 'mcu_task_stddev', 'bytes_write',
- 'bytes_read', 'bytes_retransmit', 'freq', 'adj',
- 'target', 'temp', 'pwm'
+ "mcu_awake",
+ "mcu_task_avg",
+ "mcu_task_stddev",
+ "bytes_write",
+ "bytes_read",
+ "bytes_retransmit",
+ "freq",
+ "adj",
+ "target",
+ "temp",
+ "pwm",
]
+
def parse_log(logname, mcu):
if mcu is None:
mcu = "mcu"
mcu_prefix = mcu + ":"
- apply_prefix = { p: 1 for p in APPLY_PREFIX }
- f = open(logname, 'r')
+ apply_prefix = {p: 1 for p in APPLY_PREFIX}
+ f = open(logname, "r")
out = []
for line in f:
parts = line.split()
- if not parts or parts[0] not in ('Stats', 'INFO:root:Stats'):
- #if parts and parts[0] == 'INFO:root:shutdown:':
+ if not parts or parts[0] not in ("Stats", "INFO:root:Stats"):
+ # if parts and parts[0] == 'INFO:root:shutdown:':
# break
continue
prefix = ""
keyparts = {}
for p in parts[2:]:
- if '=' not in p:
+ if "=" not in p:
prefix = p
if prefix == mcu_prefix:
- prefix = ''
+ prefix = ""
continue
- name, val = p.split('=', 1)
+ name, val = p.split("=", 1)
if name in apply_prefix:
name = prefix + name
keyparts[name] = val
- if 'print_time' not in keyparts:
+ if "print_time" not in keyparts:
continue
- keyparts['#sampletime'] = float(parts[1][:-1])
+ keyparts["#sampletime"] = float(parts[1][:-1])
out.append(keyparts)
f.close()
return out
+
def setup_matplotlib(output_to_file):
global matplotlib
if output_to_file:
- matplotlib.use('Agg')
+ matplotlib.use("Agg")
import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
import matplotlib.ticker
+
def find_print_restarts(data):
runoff_samples = {}
- last_runoff_start = last_buffer_time = last_sampletime = 0.
+ last_runoff_start = last_buffer_time = last_sampletime = 0.0
last_print_stall = 0
for d in reversed(data):
# Check for buffer runoff
- sampletime = d['#sampletime']
- buffer_time = float(d.get('buffer_time', 0.))
- if (last_runoff_start and last_sampletime - sampletime < 5
- and buffer_time > last_buffer_time):
+ sampletime = d["#sampletime"]
+ buffer_time = float(d.get("buffer_time", 0.0))
+ if (
+ last_runoff_start
+ and last_sampletime - sampletime < 5
+ and buffer_time > last_buffer_time
+ ):
runoff_samples[last_runoff_start][1].append(sampletime)
- elif buffer_time < 1.:
+ elif buffer_time < 1.0:
last_runoff_start = sampletime
runoff_samples[last_runoff_start] = [False, [sampletime]]
else:
- last_runoff_start = 0.
+ last_runoff_start = 0.0
last_buffer_time = buffer_time
last_sampletime = sampletime
# Check for print stall
- print_stall = int(d['print_stall'])
+ print_stall = int(d["print_stall"])
if print_stall < last_print_stall:
if last_runoff_start:
runoff_samples[last_runoff_start][0] = True
last_print_stall = print_stall
- sample_resets = {sampletime: 1 for stall, samples in runoff_samples.values()
- for sampletime in samples if not stall}
+ sample_resets = {
+ sampletime: 1
+ for stall, samples in runoff_samples.values()
+ for sampletime in samples
+ if not stall
+ }
return sample_resets
+
def plot_mcu(data, maxbw):
# Generate data for plot
- basetime = lasttime = data[0]['#sampletime']
- lastbw = float(data[0]['bytes_write']) + float(data[0]['bytes_retransmit'])
+ basetime = lasttime = data[0]["#sampletime"]
+ lastbw = float(data[0]["bytes_write"]) + float(data[0]["bytes_retransmit"])
sample_resets = find_print_restarts(data)
times = []
bwdeltas = []
@@ -96,165 +115,171 @@ def plot_mcu(data, maxbw):
awake = []
hostbuffers = []
for d in data:
- st = d['#sampletime']
+ st = d["#sampletime"]
timedelta = st - lasttime
- if timedelta <= 0.:
+ if timedelta <= 0.0:
continue
- bw = float(d['bytes_write']) + float(d['bytes_retransmit'])
+ bw = float(d["bytes_write"]) + float(d["bytes_retransmit"])
if bw < lastbw:
lastbw = bw
continue
- load = float(d['mcu_task_avg']) + 3*float(d['mcu_task_stddev'])
- if st - basetime < 15.:
- load = 0.
- pt = float(d['print_time'])
- hb = float(d['buffer_time'])
+ load = float(d["mcu_task_avg"]) + 3 * float(d["mcu_task_stddev"])
+ if st - basetime < 15.0:
+ load = 0.0
+ pt = float(d["print_time"])
+ hb = float(d["buffer_time"])
if hb >= MAXBUFFER or st in sample_resets:
- hb = 0.
+ hb = 0.0
else:
- hb = 100. * (MAXBUFFER - hb) / MAXBUFFER
+ hb = 100.0 * (MAXBUFFER - hb) / MAXBUFFER
hostbuffers.append(hb)
times.append(datetime.datetime.utcfromtimestamp(st))
- bwdeltas.append(100. * (bw - lastbw) / (maxbw * timedelta))
- loads.append(100. * load / TASK_MAX)
- awake.append(100. * float(d.get('mcu_awake', 0.)) / STATS_INTERVAL)
+ bwdeltas.append(100.0 * (bw - lastbw) / (maxbw * timedelta))
+ loads.append(100.0 * load / TASK_MAX)
+ awake.append(100.0 * float(d.get("mcu_awake", 0.0)) / STATS_INTERVAL)
lasttime = st
lastbw = bw
# Build plot
fig, ax1 = matplotlib.pyplot.subplots()
ax1.set_title("MCU bandwidth and load utilization")
- ax1.set_xlabel('Time')
- ax1.set_ylabel('Usage (%)')
- ax1.plot_date(times, bwdeltas, 'g', label='Bandwidth', alpha=0.8)
- ax1.plot_date(times, loads, 'r', label='MCU load', alpha=0.8)
- ax1.plot_date(times, hostbuffers, 'c', label='Host buffer', alpha=0.8)
- ax1.plot_date(times, awake, 'y', label='Awake time', alpha=0.6)
+ ax1.set_xlabel("Time")
+ ax1.set_ylabel("Usage (%)")
+ ax1.plot_date(times, bwdeltas, "g", label="Bandwidth", alpha=0.8)
+ ax1.plot_date(times, loads, "r", label="MCU load", alpha=0.8)
+ ax1.plot_date(times, hostbuffers, "c", label="Host buffer", alpha=0.8)
+ ax1.plot_date(times, awake, "y", label="Awake time", alpha=0.6)
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
- ax1.legend(loc='best', prop=fontP)
- ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
+ fontP.set_size("x-small")
+ ax1.legend(loc="best", prop=fontP)
+ ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M"))
ax1.grid(True)
return fig
+
def plot_system(data):
# Generate data for plot
- lasttime = data[0]['#sampletime']
- lastcputime = float(data[0]['cputime'])
+ lasttime = data[0]["#sampletime"]
+ lastcputime = float(data[0]["cputime"])
times = []
sysloads = []
cputimes = []
memavails = []
for d in data:
- st = d['#sampletime']
+ st = d["#sampletime"]
timedelta = st - lasttime
- if timedelta <= 0.:
+ if timedelta <= 0.0:
continue
lasttime = st
times.append(datetime.datetime.utcfromtimestamp(st))
- cputime = float(d['cputime'])
- cpudelta = max(0., min(1.5, (cputime - lastcputime) / timedelta))
+ cputime = float(d["cputime"])
+ cpudelta = max(0.0, min(1.5, (cputime - lastcputime) / timedelta))
lastcputime = cputime
- cputimes.append(cpudelta * 100.)
- sysloads.append(float(d['sysload']) * 100.)
- memavails.append(float(d['memavail']))
+ cputimes.append(cpudelta * 100.0)
+ sysloads.append(float(d["sysload"]) * 100.0)
+ memavails.append(float(d["memavail"]))
# Build plot
fig, ax1 = matplotlib.pyplot.subplots()
ax1.set_title("System load utilization")
- ax1.set_xlabel('Time')
- ax1.set_ylabel('Load (% of a core)')
- ax1.plot_date(times, sysloads, '-', label='system load',
- color='cyan', alpha=0.8)
- ax1.plot_date(times, cputimes, '-', label='process time',
- color='red', alpha=0.8)
+ ax1.set_xlabel("Time")
+ ax1.set_ylabel("Load (% of a core)")
+ ax1.plot_date(times, sysloads, "-", label="system load", color="cyan", alpha=0.8)
+ ax1.plot_date(times, cputimes, "-", label="process time", color="red", alpha=0.8)
ax2 = ax1.twinx()
- ax2.set_ylabel('Available memory (KB)')
- ax2.plot_date(times, memavails, '-', label='system memory',
- color='yellow', alpha=0.3)
+ ax2.set_ylabel("Available memory (KB)")
+ ax2.plot_date(
+ times, memavails, "-", label="system memory", color="yellow", alpha=0.3
+ )
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
+ fontP.set_size("x-small")
ax1li, ax1la = ax1.get_legend_handles_labels()
ax2li, ax2la = ax2.get_legend_handles_labels()
- ax1.legend(ax1li + ax2li, ax1la + ax2la, loc='best', prop=fontP)
- ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
+ ax1.legend(ax1li + ax2li, ax1la + ax2la, loc="best", prop=fontP)
+ ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M"))
ax1.grid(True)
return fig
+
def plot_mcu_frequencies(data):
all_keys = {}
for d in data:
all_keys.update(d)
- graph_keys = { key: ([], []) for key in all_keys
- if (key in ("freq", "adj")
- or (key.endswith(":freq") or key.endswith(":adj"))) }
+ graph_keys = {
+ key: ([], [])
+ for key in all_keys
+ if (key in ("freq", "adj") or (key.endswith(":freq") or key.endswith(":adj")))
+ }
for d in data:
- st = datetime.datetime.utcfromtimestamp(d['#sampletime'])
+ st = datetime.datetime.utcfromtimestamp(d["#sampletime"])
for key, (times, values) in graph_keys.items():
val = d.get(key)
- if val not in (None, '0', '1'):
+ if val not in (None, "0", "1"):
times.append(st)
values.append(float(val))
- est_mhz = { key: round((sum(values)/len(values)) / 1000000.)
- for key, (times, values) in graph_keys.items() }
+ est_mhz = {
+ key: round((sum(values) / len(values)) / 1000000.0)
+ for key, (times, values) in graph_keys.items()
+ }
# Build plot
fig, ax1 = matplotlib.pyplot.subplots()
ax1.set_title("MCU frequencies")
- ax1.set_xlabel('Time')
- ax1.set_ylabel('Microsecond deviation')
+ ax1.set_xlabel("Time")
+ ax1.set_ylabel("Microsecond deviation")
for key in sorted(graph_keys):
times, values = graph_keys[key]
mhz = est_mhz[key]
label = "%s(%dMhz)" % (key, mhz)
- hz = mhz * 1000000.
- ax1.plot_date(times, [(v - hz)/mhz for v in values], '.', label=label)
+ hz = mhz * 1000000.0
+ ax1.plot_date(times, [(v - hz) / mhz for v in values], ".", label=label)
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
- ax1.legend(loc='best', prop=fontP)
- ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
- ax1.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%d'))
+ fontP.set_size("x-small")
+ ax1.legend(loc="best", prop=fontP)
+ ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M"))
+ ax1.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%d"))
ax1.grid(True)
return fig
+
def plot_mcu_frequency(data, mcu):
all_keys = {}
for d in data:
all_keys.update(d)
- graph_keys = { key: ([], []) for key in all_keys
- if key in ("freq", "adj") }
+ graph_keys = {key: ([], []) for key in all_keys if key in ("freq", "adj")}
for d in data:
- st = datetime.datetime.utcfromtimestamp(d['#sampletime'])
+ st = datetime.datetime.utcfromtimestamp(d["#sampletime"])
for key, (times, values) in graph_keys.items():
val = d.get(key)
- if val not in (None, '0', '1'):
+ if val not in (None, "0", "1"):
times.append(st)
values.append(float(val))
# Build plot
fig, ax1 = matplotlib.pyplot.subplots()
ax1.set_title("MCU '%s' frequency" % (mcu,))
- ax1.set_xlabel('Time')
- ax1.set_ylabel('Frequency')
+ ax1.set_xlabel("Time")
+ ax1.set_ylabel("Frequency")
for key in sorted(graph_keys):
times, values = graph_keys[key]
- ax1.plot_date(times, values, '.', label=key)
+ ax1.plot_date(times, values, ".", label=key)
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
- ax1.legend(loc='best', prop=fontP)
- ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
- ax1.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter('%d'))
+ fontP.set_size("x-small")
+ ax1.legend(loc="best", prop=fontP)
+ ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M"))
+ ax1.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%d"))
ax1.grid(True)
return fig
+
def plot_temperature(data, heaters):
fig, ax1 = matplotlib.pyplot.subplots()
ax2 = ax1.twinx()
- for heater in heaters.split(','):
+ for heater in heaters.split(","):
heater = heater.strip()
- temp_key = heater + ':' + 'temp'
- target_key = heater + ':' + 'target'
- pwm_key = heater + ':' + 'pwm'
+ temp_key = heater + ":" + "temp"
+ target_key = heater + ":" + "target"
+ pwm_key = heater + ":" + "pwm"
times = []
temps = []
targets = []
@@ -263,45 +288,64 @@ def plot_temperature(data, heaters):
temp = d.get(temp_key)
if temp is None:
continue
- times.append(datetime.datetime.utcfromtimestamp(d['#sampletime']))
+ times.append(datetime.datetime.utcfromtimestamp(d["#sampletime"]))
temps.append(float(temp))
- pwm.append(float(d.get(pwm_key, 0.)))
- targets.append(float(d.get(target_key, 0.)))
- ax1.plot_date(times, temps, '-', label='%s temp' % (heater,), alpha=0.8)
+ pwm.append(float(d.get(pwm_key, 0.0)))
+ targets.append(float(d.get(target_key, 0.0)))
+ ax1.plot_date(times, temps, "-", label="%s temp" % (heater,), alpha=0.8)
if any(targets):
- label = '%s target' % (heater,)
- ax1.plot_date(times, targets, '-', label=label, alpha=0.3)
+ label = "%s target" % (heater,)
+ ax1.plot_date(times, targets, "-", label=label, alpha=0.3)
if any(pwm):
- label = '%s pwm' % (heater,)
- ax2.plot_date(times, pwm, '-', label=label, alpha=0.2)
+ label = "%s pwm" % (heater,)
+ ax2.plot_date(times, pwm, "-", label=label, alpha=0.2)
# Build plot
ax1.set_title("Temperature of %s" % (heaters,))
- ax1.set_xlabel('Time')
- ax1.set_ylabel('Temperature')
- ax2.set_ylabel('pwm')
+ ax1.set_xlabel("Time")
+ ax1.set_ylabel("Temperature")
+ ax2.set_ylabel("pwm")
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
+ fontP.set_size("x-small")
ax1li, ax1la = ax1.get_legend_handles_labels()
ax2li, ax2la = ax2.get_legend_handles_labels()
- ax1.legend(ax1li + ax2li, ax1la + ax2la, loc='best', prop=fontP)
- ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
+ ax1.legend(ax1li + ax2li, ax1la + ax2la, loc="best", prop=fontP)
+ ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%H:%M"))
ax1.grid(True)
return fig
+
def main():
# Parse command-line arguments
usage = "%prog [options] <logfile>"
opts = optparse.OptionParser(usage)
- opts.add_option("-f", "--frequency", action="store_true",
- help="graph mcu frequency")
- opts.add_option("-s", "--system", action="store_true",
- help="graph system load")
- opts.add_option("-o", "--output", type="string", dest="output",
- default=None, help="filename of output graph")
- opts.add_option("-t", "--temperature", type="string", dest="heater",
- default=None, help="graph heater temperature")
- opts.add_option("-m", "--mcu", type="string", dest="mcu", default=None,
- help="limit stats to the given mcu")
+ opts.add_option(
+ "-f", "--frequency", action="store_true", help="graph mcu frequency"
+ )
+ opts.add_option("-s", "--system", action="store_true", help="graph system load")
+ opts.add_option(
+ "-o",
+ "--output",
+ type="string",
+ dest="output",
+ default=None,
+ help="filename of output graph",
+ )
+ opts.add_option(
+ "-t",
+ "--temperature",
+ type="string",
+ dest="heater",
+ default=None,
+ help="graph heater temperature",
+ )
+ opts.add_option(
+ "-m",
+ "--mcu",
+ type="string",
+ dest="mcu",
+ default=None,
+ help="limit stats to the given mcu",
+ )
options, args = opts.parse_args()
if len(args) != 1:
opts.error("Incorrect number of arguments")
@@ -333,5 +377,6 @@ def main():
fig.set_size_inches(8, 6)
fig.savefig(options.output)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/logextract.py b/scripts/logextract.py
index 592f6f8a..3050ff0b 100755
--- a/scripts/logextract.py
+++ b/scripts/logextract.py
@@ -6,6 +6,7 @@
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, re, collections, ast, itertools
+
def format_comment(line_num, line):
return "# %6d: %s" % (line_num, line)
@@ -14,6 +15,7 @@ def format_comment(line_num, line):
# Config file extraction
######################################################################
+
class GatherConfig:
def __init__(self, configs, line_num, recent_lines, logname):
self.configs = configs
@@ -22,12 +24,14 @@ class GatherConfig:
self.filename = "%s.config%04d.cfg" % (logname, self.config_num)
self.config_lines = []
self.comments = []
+
def add_line(self, line_num, line):
- if line != '=======================':
+ if line != "=======================":
self.config_lines.append(line)
return True
self.finalize()
return False
+
def finalize(self):
lines = tuple(self.config_lines)
ch = self.configs.get(lines)
@@ -36,13 +40,15 @@ class GatherConfig:
else:
ch.comments.extend(self.comments)
ch.comments.append(format_comment(self.line_num, "config file"))
+
def add_comment(self, comment):
if comment is not None:
self.comments.append(comment)
+
def write_file(self):
lines = itertools.chain(self.comments, self.config_lines)
- lines = ('%s\n' % l for l in lines)
- with open(self.filename, 'wt') as f:
+ lines = ("%s\n" % l for l in lines)
+ with open(self.filename, "wt") as f:
f.writelines(lines)
@@ -52,6 +58,7 @@ class GatherConfig:
uart_r = re.compile(r"tmcuart_(?:send|response) oid=[0-9]+ (?:read|write)=")
+
class TMCUartHelper:
def _calc_crc8(self, data):
# Generate a CRC8-ATM value for a bytearray
@@ -61,33 +68,46 @@ class TMCUartHelper:
if (crc >> 7) ^ (b & 0x01):
crc = (crc << 1) ^ 0x07
else:
- crc = (crc << 1)
- crc &= 0xff
+ crc = crc << 1
+ crc &= 0xFF
b >>= 1
return crc
+
def _add_serial_bits(self, data):
# Add serial start and stop bits to a message in a bytearray
out = 0
pos = 0
for d in data:
b = (d << 1) | 0x200
- out |= (b << pos)
+ out |= b << pos
pos += 10
res = bytearray()
- for i in range((pos+7)//8):
- res.append((out >> (i*8)) & 0xff)
+ for i in range((pos + 7) // 8):
+ res.append((out >> (i * 8)) & 0xFF)
return res
+
def _encode_read(self, sync, addr, reg):
# Generate a uart read register message
msg = bytearray([sync, addr, reg])
msg.append(self._calc_crc8(msg))
return self._add_serial_bits(msg)
+
def _encode_write(self, sync, addr, reg, val):
# Generate a uart write register message
- msg = bytearray([sync, addr, reg, (val >> 24) & 0xff,
- (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff])
+ msg = bytearray(
+ [
+ sync,
+ addr,
+ reg,
+ (val >> 24) & 0xFF,
+ (val >> 16) & 0xFF,
+ (val >> 8) & 0xFF,
+ val & 0xFF,
+ ]
+ )
msg.append(self._calc_crc8(msg))
return self._add_serial_bits(msg)
+
def _decode_read(self, data):
# Extract a uart read request message
if len(data) != 5:
@@ -98,13 +118,14 @@ class TMCUartHelper:
mval |= d << pos
pos += 8
# Extract register value
- addr = (mval >> 11) & 0xff
- reg = (mval >> 21) & 0xff
+ addr = (mval >> 11) & 0xFF
+ reg = (mval >> 21) & 0xFF
# Verify start/stop bits and crc
- encoded_data = self._encode_read(0xf5, addr, reg)
+ encoded_data = self._encode_read(0xF5, addr, reg)
if data != encoded_data:
return "Invalid: %s" % (self.pretty_print(addr, reg),)
return self.pretty_print(addr, reg)
+
def _decode_reg(self, data):
# Extract a uart read response message
if len(data) != 10:
@@ -115,25 +136,31 @@ class TMCUartHelper:
mval |= d << pos
pos += 8
# Extract register value
- addr = (mval >> 11) & 0xff
- reg = (mval >> 21) & 0xff
- val = ((((mval >> 31) & 0xff) << 24) | (((mval >> 41) & 0xff) << 16)
- | (((mval >> 51) & 0xff) << 8) | ((mval >> 61) & 0xff))
- sync = 0xf5
- if addr == 0xff:
+ addr = (mval >> 11) & 0xFF
+ reg = (mval >> 21) & 0xFF
+ val = (
+ (((mval >> 31) & 0xFF) << 24)
+ | (((mval >> 41) & 0xFF) << 16)
+ | (((mval >> 51) & 0xFF) << 8)
+ | ((mval >> 61) & 0xFF)
+ )
+ sync = 0xF5
+ if addr == 0xFF:
sync = 0x05
# Verify start/stop bits and crc
encoded_data = self._encode_write(sync, addr, reg, val)
if data != encoded_data:
- #print("Got %s vs %s" % (repr(data), repr(encoded_data)))
+ # print("Got %s vs %s" % (repr(data), repr(encoded_data)))
return "Invalid:%s" % (self.pretty_print(addr, reg, val),)
return self.pretty_print(addr, reg, val)
+
def pretty_print(self, addr, reg, val=None):
if val is None:
return "(%x@%x)" % (reg, addr)
if reg & 0x80:
return "(%x@%x=%08x)" % (reg & ~0x80, addr, val)
return "(%x@%x==%08x)" % (reg, addr, val)
+
def parse_msg(self, msg):
data = bytearray(msg)
if len(data) == 10:
@@ -149,16 +176,28 @@ class TMCUartHelper:
# Shutdown extraction
######################################################################
+
def add_high_bits(val, ref, mask):
half = (mask + 1) // 2
return ref + ((val - (ref & mask) + half) & mask) - half
+
count_s = r"(?P<count>[0-9]+)"
time_s = r"(?P<time>[0-9]+[.][0-9]+)"
esttime_s = r"(?P<esttime>[0-9]+[.][0-9]+)"
shortseq_s = r"(?P<shortseq>[0-9a-f])"
-sent_r = re.compile(r"^Sent " + count_s + " " + esttime_s + " " + time_s
- + " [0-9]+: seq: 1" + shortseq_s + ",")
+sent_r = re.compile(
+ r"^Sent "
+ + count_s
+ + " "
+ + esttime_s
+ + " "
+ + time_s
+ + " [0-9]+: seq: 1"
+ + shortseq_s
+ + ","
+)
+
# MCU "Sent" shutdown message parsing
class MCUSentStream:
@@ -166,61 +205,81 @@ class MCUSentStream:
self.mcu = mcu
self.sent_stream = []
self.send_count = count
+
def parse_line(self, line_num, line):
m = sent_r.match(line)
if m is not None:
- shortseq = int(m.group('shortseq'), 16)
- seq = (self.mcu.shutdown_seq + int(m.group('count'))
- - self.send_count)
- seq = add_high_bits(shortseq, seq, 0xf)
- ts = float(m.group('time'))
- esttime = float(m.group('esttime'))
- self.mcu.sent_time_to_seq[(esttime, seq & 0xf)] = seq
+ shortseq = int(m.group("shortseq"), 16)
+ seq = self.mcu.shutdown_seq + int(m.group("count")) - self.send_count
+ seq = add_high_bits(shortseq, seq, 0xF)
+ ts = float(m.group("time"))
+ esttime = float(m.group("esttime"))
+ self.mcu.sent_time_to_seq[(esttime, seq & 0xF)] = seq
self.mcu.sent_seq_to_time[seq] = ts
line = self.mcu.annotate(line, seq, ts)
self.sent_stream.append((ts, line_num, line))
return True, None
return self.mcu.parse_line(line_num, line)
+
def get_lines(self):
return self.sent_stream
-receive_r = re.compile(r"^Receive: " + count_s + " " + time_s + " " + esttime_s
- + " [0-9]+: seq: 1" + shortseq_s + ",")
+
+receive_r = re.compile(
+ r"^Receive: "
+ + count_s
+ + " "
+ + time_s
+ + " "
+ + esttime_s
+ + " [0-9]+: seq: 1"
+ + shortseq_s
+ + ","
+)
+
# MCU "Receive" shutdown message parsing
class MCUReceiveStream:
def __init__(self, mcu):
self.mcu = mcu
self.receive_stream = []
+
def parse_line(self, line_num, line):
m = receive_r.match(line)
if m is not None:
- shortseq = int(m.group('shortseq'), 16)
- ts = float(m.group('time'))
- esttime = float(m.group('esttime'))
- seq = self.mcu.sent_time_to_seq.get((esttime, (shortseq - 1) & 0xf))
+ shortseq = int(m.group("shortseq"), 16)
+ ts = float(m.group("time"))
+ esttime = float(m.group("esttime"))
+ seq = self.mcu.sent_time_to_seq.get((esttime, (shortseq - 1) & 0xF))
if seq is not None:
self.mcu.receive_seq_to_time[seq + 1] = ts
line = self.mcu.annotate(line, seq, ts)
self.receive_stream.append((ts, line_num, line))
return True, None
return self.mcu.parse_line(line_num, line)
+
def get_lines(self):
return self.receive_stream
+
stats_seq_s = r" send_seq=(?P<sseq>[0-9]+) receive_seq=(?P<rseq>[0-9]+) "
serial_dump_r = re.compile(r"^Dumping serial stats: .*" + stats_seq_s)
send_dump_r = re.compile(r"^Dumping send queue " + count_s + " messages$")
receive_dump_r = re.compile(r"^Dumping receive queue " + count_s + " messages$")
-clock_r = re.compile(r"^clocksync state: mcu_freq=(?P<freq>[0-9]+) .*"
- + r" clock_est=\((?P<st>[^ ]+)"
- + r" (?P<sc>[0-9]+) (?P<f>[^ ]+)\)")
+clock_r = re.compile(
+ r"^clocksync state: mcu_freq=(?P<freq>[0-9]+) .*"
+ + r" clock_est=\((?P<st>[^ ]+)"
+ + r" (?P<sc>[0-9]+) (?P<f>[^ ]+)\)"
+)
repl_seq_r = re.compile(r": seq: 1" + shortseq_s)
clock_s = r"(?P<clock>[0-9]+)"
repl_clock_r = re.compile(r"clock=" + clock_s)
-repl_uart_r = re.compile(r"tmcuart_(?:response|send) oid=[0-9]+"
- + r" (?:read|write)=b?(?P<msg>(?:'[^']*'"
- + r'|"[^"]*"))')
+repl_uart_r = re.compile(
+ r"tmcuart_(?:response|send) oid=[0-9]+"
+ + r" (?:read|write)=b?(?P<msg>(?:'[^']*'"
+ + r'|"[^"]*"))'
+)
+
# MCU shutdown message parsing
class MCUStream:
@@ -230,191 +289,223 @@ class MCUStream:
self.sent_seq_to_time = {}
self.receive_seq_to_time = {}
self.mcu_freq = 1
- self.clock_est = (0., 0., 1.)
+ self.clock_est = (0.0, 0.0, 1.0)
self.shutdown_seq = None
+
def trans_clock(self, clock, ts):
sample_time, sample_clock, freq = self.clock_est
exp_clock = int(sample_clock + (ts - sample_time) * freq)
- ext_clock = add_high_bits(clock, exp_clock, 0xffffffff)
+ ext_clock = add_high_bits(clock, exp_clock, 0xFFFFFFFF)
return sample_time + (ext_clock - sample_clock) / freq
+
def annotate(self, line, seq, ts):
if seq is not None:
line = repl_seq_r.sub(r"\g<0>(%d)" % (seq,), line)
+
def clock_update(m):
return m.group(0).rstrip() + "(%.6f)" % (
- self.trans_clock(int(m.group('clock')), ts),)
+ self.trans_clock(int(m.group("clock")), ts),
+ )
+
line = repl_clock_r.sub(clock_update, line)
+
def uart_update(m):
- msg = ast.literal_eval('b' + m.group('msg'))
+ msg = ast.literal_eval("b" + m.group("msg"))
msg = TMCUartHelper().parse_msg(msg)
return m.group(0).rstrip() + msg
+
line = repl_uart_r.sub(uart_update, line)
- if self.name != 'mcu':
+ if self.name != "mcu":
line = "mcu '%s': %s" % (self.name, line)
return line
+
def parse_line(self, line_num, line):
m = clock_r.match(line)
if m is not None:
- self.mcu_freq = int(m.group('freq'))
- st = float(m.group('st'))
- sc = int(m.group('sc'))
- f = float(m.group('f'))
+ self.mcu_freq = int(m.group("freq"))
+ st = float(m.group("st"))
+ sc = int(m.group("sc"))
+ f = float(m.group("f"))
self.clock_est = (st, sc, f)
m = serial_dump_r.match(line)
if m is not None:
- self.shutdown_seq = int(m.group('rseq'))
+ self.shutdown_seq = int(m.group("rseq"))
m = send_dump_r.match(line)
if m is not None:
- return True, MCUSentStream(self, int(m.group('count')))
+ return True, MCUSentStream(self, int(m.group("count")))
m = receive_dump_r.match(line)
if m is not None:
return True, MCUReceiveStream(self)
return False, None
+
def get_lines(self):
return []
-stepper_move_r = re.compile(r"^queue_step " + count_s + r": t=" + clock_s
- + r" ")
+
+stepper_move_r = re.compile(r"^queue_step " + count_s + r": t=" + clock_s + r" ")
+
# Kinematic "trapq" shutdown message parsing
class StepperStream:
def __init__(self, name, mcu_name, mcus):
self.name = name
self.stepper_stream = []
- self.clock_est = (0., 0., 1.)
+ self.clock_est = (0.0, 0.0, 1.0)
mcu = mcus.get(mcu_name)
if mcu is not None:
self.clock_est = mcu.clock_est
+
def parse_line(self, line_num, line):
m = stepper_move_r.match(line)
if m is not None:
# Convert clock to systime
- clock = int(m.group('clock'))
+ clock = int(m.group("clock"))
sample_time, sample_clock, freq = self.clock_est
ts = sample_time + (clock - sample_clock) / freq
# Add systime to log
- parts = line.split(' ', 4)
+ parts = line.split(" ", 4)
parts[0] = "%s queue_step" % (self.name,)
- parts[2] += '(%.6f)' % (ts,)
- self.stepper_stream.append((ts, line_num, ' '.join(parts)))
+ parts[2] += "(%.6f)" % (ts,)
+ self.stepper_stream.append((ts, line_num, " ".join(parts)))
return True, None
return False, None
+
def get_lines(self):
return self.stepper_stream
+
trapq_move_r = re.compile(r"^move " + count_s + r": pt=" + time_s)
+
# Kinematic "trapq" shutdown message parsing
class TrapQStream:
def __init__(self, name, mcus):
self.name = name
self.trapq_stream = []
self.mcu_freq = 1
- self.clock_est = (0., 0., 1.)
+ self.clock_est = (0.0, 0.0, 1.0)
mcu = mcus.get("mcu")
if mcu is not None:
self.mcu_freq = mcu.mcu_freq
self.clock_est = mcu.clock_est
+
def parse_line(self, line_num, line):
m = trapq_move_r.match(line)
if m is not None:
# Convert print_time to systime
- pt = float(m.group('time'))
+ pt = float(m.group("time"))
clock = pt * self.mcu_freq
sample_time, sample_clock, freq = self.clock_est
ts = sample_time + (clock - sample_clock) / freq
# Add systime to log
- parts = line.split(' ', 4)
+ parts = line.split(" ", 4)
parts[0] = "%s move" % (self.name,)
- parts[2] += '(%.6f)' % (ts,)
- self.trapq_stream.append((ts, line_num, ' '.join(parts)))
+ parts[2] += "(%.6f)" % (ts,)
+ self.trapq_stream.append((ts, line_num, " ".join(parts)))
return True, None
return False, None
+
def get_lines(self):
return self.trapq_stream
+
gcode_cmd_r = re.compile(r"^Read " + time_s + r": (?P<gcode>['\"].*)$")
varlist_split_r = re.compile(r"([^ ]+)=")
+
# G-Code shutdown message parsing
class GCodeStream:
def __init__(self, shutdown_line_num, logname):
self.gcode_stream = []
self.gcode_commands = []
- self.gcode_state = ''
+ self.gcode_state = ""
self.gcode_filename = "%s.gcode%05d" % (logname, shutdown_line_num)
+
def extract_params(self, line):
parts = varlist_split_r.split(line)
try:
- return { parts[i]: ast.literal_eval(parts[i+1].strip())
- for i in range(1, len(parts), 2) }
+ return {
+ parts[i]: ast.literal_eval(parts[i + 1].strip())
+ for i in range(1, len(parts), 2)
+ }
except:
return {}
+
def handle_gcode_state(self, line):
kv = self.extract_params(line)
- out = ['; Start g-code state restore', 'G28']
- if not kv.get('absolute_coord', kv.get('absolutecoord')):
- out.append('G91')
- if not kv.get('absolute_extrude', kv.get('absoluteextrude')):
- out.append('M83')
- lp = kv['last_position']
- out.append('G1 X%f Y%f Z%f F%f' % (
- lp[0], lp[1], lp[2], kv['speed'] * 60.))
- bp = kv['base_position']
- if bp[:3] != [0., 0., 0.]:
- out.append('; Must manually set base position...')
- out.append('G92 E%f' % (lp[3] - bp[3],))
- hp = kv['homing_position']
- if hp != [0., 0., 0., 0.]:
- out.append('; Must manually set homing position...')
- if abs(kv['speed_factor'] - 1. / 60.) > .000001:
- out.append('M220 S%f' % (kv['speed_factor'] * 60. * 100.,))
- if kv['extrude_factor'] != 1.:
- out.append('M221 S%f' % (kv['extrude_factor'] * 100.,))
- out.extend(['; End of state restore', '', ''])
- self.gcode_state = '\n'.join(out)
+ out = ["; Start g-code state restore", "G28"]
+ if not kv.get("absolute_coord", kv.get("absolutecoord")):
+ out.append("G91")
+ if not kv.get("absolute_extrude", kv.get("absoluteextrude")):
+ out.append("M83")
+ lp = kv["last_position"]
+ out.append("G1 X%f Y%f Z%f F%f" % (lp[0], lp[1], lp[2], kv["speed"] * 60.0))
+ bp = kv["base_position"]
+ if bp[:3] != [0.0, 0.0, 0.0]:
+ out.append("; Must manually set base position...")
+ out.append("G92 E%f" % (lp[3] - bp[3],))
+ hp = kv["homing_position"]
+ if hp != [0.0, 0.0, 0.0, 0.0]:
+ out.append("; Must manually set homing position...")
+ if abs(kv["speed_factor"] - 1.0 / 60.0) > 0.000001:
+ out.append("M220 S%f" % (kv["speed_factor"] * 60.0 * 100.0,))
+ if kv["extrude_factor"] != 1.0:
+ out.append("M221 S%f" % (kv["extrude_factor"] * 100.0,))
+ out.extend(["; End of state restore", "", ""])
+ self.gcode_state = "\n".join(out)
+
def parse_line(self, line_num, line):
m = gcode_cmd_r.match(line)
if m is not None:
- ts = float(m.group('time'))
+ ts = float(m.group("time"))
self.gcode_stream.append((ts, line_num, line))
- self.gcode_commands.append(m.group('gcode'))
+ self.gcode_commands.append(m.group("gcode"))
return True, None
return False, None
+
def get_lines(self):
# Produce output gcode stream
if self.gcode_stream:
data = (ast.literal_eval(gc) for gc in self.gcode_commands)
- with open(self.gcode_filename, 'wt') as f:
- f.write(self.gcode_state + ''.join(data))
+ with open(self.gcode_filename, "wt") as f:
+ f.write(self.gcode_state + "".join(data))
return self.gcode_stream
+
api_cmd_r = re.compile(r"^Received " + time_s + r": \{.*\}$")
+
# API server shutdowm message parsing
class APIStream:
def __init__(self):
self.api_stream = []
+
def parse_line(self, line_num, line):
m = api_cmd_r.match(line)
if m is not None:
- ts = float(m.group('time'))
+ ts = float(m.group("time"))
self.api_stream.append((ts, line_num, line))
return True, None
return False, None
+
def get_lines(self):
return self.api_stream
+
stats_r = re.compile(r"^Stats " + time_s + ": ")
mcu_r = re.compile(r"MCU '(?P<mcu>[^']+)' (is_)?shutdown: (?P<reason>.*)$")
-stepper_r = re.compile(r"^Dumping stepper '(?P<name>[^']*)' \((?P<mcu>[^)]+)\) "
- + count_s + r" queue_step:$")
-trapq_r = re.compile(r"^Dumping trapq '(?P<name>[^']*)' " + count_s
- + r" moves:$")
+stepper_r = re.compile(
+ r"^Dumping stepper '(?P<name>[^']*)' \((?P<mcu>[^)]+)\) "
+ + count_s
+ + r" queue_step:$"
+)
+trapq_r = re.compile(r"^Dumping trapq '(?P<name>[^']*)' " + count_s + r" moves:$")
gcode_r = re.compile(r"Dumping gcode input " + count_s + r" blocks$")
gcode_state_r = re.compile(r"^gcode state: ")
-api_r = re.compile(r"Dumping " + count_s + r" requests for client "
- + r"(?P<client>[0-9]+)" + r"$")
+api_r = re.compile(
+ r"Dumping " + count_s + r" requests for client " + r"(?P<client>[0-9]+)" + r"$"
+)
+
# Stats message parsing and high-level message dispatch
class StatsStream:
@@ -424,39 +515,49 @@ class StatsStream:
self.mcus = {}
self.first_stat_time = self.last_stat_time = None
self.stats_stream = []
+
def reset_first_stat_time(self):
self.first_stat_time = self.last_stat_time
+
def get_stat_times(self):
return self.first_stat_time, self.last_stat_time
+
def check_stats_seq(self, ts, line):
# Parse stats
parts = line.split()
mcu = ""
keyparts = {}
for p in parts[2:]:
- if '=' not in p:
+ if "=" not in p:
mcu = p
continue
- name, val = p.split('=', 1)
+ name, val = p.split("=", 1)
keyparts[mcu + name] = val
min_ts = 0
max_ts = 999999999999
for mcu_name, mcu in self.mcus.items():
- sname = '%s:send_seq' % (mcu_name,)
- rname = '%s:receive_seq' % (mcu_name,)
+ sname = "%s:send_seq" % (mcu_name,)
+ rname = "%s:receive_seq" % (mcu_name,)
if sname not in keyparts:
continue
sseq = int(keyparts[sname])
rseq = int(keyparts[rname])
- min_ts = max(min_ts, mcu.sent_seq_to_time.get(sseq-1, 0),
- mcu.receive_seq_to_time.get(rseq, 0))
- max_ts = min(max_ts, mcu.sent_seq_to_time.get(sseq, 999999999999),
- mcu.receive_seq_to_time.get(rseq+1, 999999999999))
+ min_ts = max(
+ min_ts,
+ mcu.sent_seq_to_time.get(sseq - 1, 0),
+ mcu.receive_seq_to_time.get(rseq, 0),
+ )
+ max_ts = min(
+ max_ts,
+ mcu.sent_seq_to_time.get(sseq, 999999999999),
+ mcu.receive_seq_to_time.get(rseq + 1, 999999999999),
+ )
return min(max(ts, min_ts + 0.00000001), max_ts - 0.00000001)
+
def parse_line(self, line_num, line):
m = stats_r.match(line)
if m is not None:
- ts = float(m.group('time'))
+ ts = float(m.group("time"))
self.last_stat_time = ts
if self.first_stat_time is None:
self.first_stat_time = ts
@@ -465,17 +566,16 @@ class StatsStream:
self.stats_stream.append((None, line_num, line))
m = mcu_r.match(line)
if m is not None:
- mcu_name = m.group('mcu')
+ mcu_name = m.group("mcu")
mcu_stream = MCUStream(mcu_name)
self.mcus[mcu_name] = mcu_stream
return True, mcu_stream
m = stepper_r.match(line)
if m is not None:
- return True, StepperStream(m.group('name'), m.group('mcu'),
- self.mcus)
+ return True, StepperStream(m.group("name"), m.group("mcu"), self.mcus)
m = trapq_r.match(line)
if m is not None:
- return True, TrapQStream(m.group('name'), self.mcus)
+ return True, TrapQStream(m.group("name"), self.mcus)
m = gcode_r.match(line)
if m is not None:
return True, self.gcode_stream
@@ -487,6 +587,7 @@ class StatsStream:
if m is not None:
return True, APIStream()
return False, None
+
def get_lines(self):
# Ignore old stats
all_ts = []
@@ -498,7 +599,7 @@ class StatsStream:
min_stream_ts = min(all_ts)
max_stream_ts = max(all_ts)
for i, info in enumerate(self.stats_stream):
- if info[0] is not None and info[0] >= min_stream_ts - 5.:
+ if info[0] is not None and info[0] >= min_stream_ts - 5.0:
del self.stats_stream[:i]
break
# Find the first stats timestamp
@@ -511,12 +612,12 @@ class StatsStream:
for i, (ts, line_num, line) in enumerate(self.stats_stream):
if ts is not None:
last_ts = self.check_stats_seq(ts, line)
- elif (line_num >= self.shutdown_line_num
- and last_ts <= max_stream_ts):
+ elif line_num >= self.shutdown_line_num and last_ts <= max_stream_ts:
last_ts = max_stream_ts + 0.00000001
self.stats_stream[i] = (last_ts, line_num, line)
return self.stats_stream
+
# Main handler for creating shutdown diagnostics file
class GatherShutdown:
def __init__(self, configs, line_num, recent_lines, logname):
@@ -533,21 +634,26 @@ class GatherShutdown:
for line_num, line in recent_lines:
self.parse_line(line_num, line)
self.stats_stream.reset_first_stat_time()
+
def add_comment(self, comment):
if comment is not None:
self.comments.append(comment)
+
def add_line(self, line_num, line):
self.parse_line(line_num, line)
first, last = self.stats_stream.get_stat_times()
- if first is not None and last > first + 5.:
+ if first is not None and last > first + 5.0:
self.finalize()
return False
- if (line.startswith('Git version')
- or line.startswith('Start printer at')
- or line == '===== Config file ====='):
+ if (
+ line.startswith("Git version")
+ or line.startswith("Start printer at")
+ or line == "===== Config file ====="
+ ):
self.finalize()
return False
return True
+
def parse_line(self, line_num, line):
for s in self.active_streams:
did_parse, new_stream = s.parse_line(line_num, line)
@@ -556,19 +662,20 @@ class GatherShutdown:
self.all_streams.append(new_stream)
self.active_streams = [new_stream, self.stats_stream]
break
+
def finalize(self):
# Make sure no timestamp goes backwards
streams = [p.get_lines() for p in self.all_streams]
for s in streams:
for i in range(1, len(s)):
- if s[i-1][0] > s[i][0]:
- s[i] = (s[i-1][0], s[i][1], s[i][2])
+ if s[i - 1][0] > s[i][0]:
+ s[i] = (s[i - 1][0], s[i][1], s[i][2])
# Produce output sorted by timestamp
out = [i for s in streams for i in s]
out.sort()
lines = itertools.chain(self.comments, (i[2] for i in out))
- lines = ('%s\n' % l for l in lines)
- with open(self.filename, 'wt') as f:
+ lines = ("%s\n" % l for l in lines)
+ with open(self.filename, "wt") as f:
f.writelines(lines)
@@ -576,6 +683,7 @@ class GatherShutdown:
# Startup
######################################################################
+
def main():
logname = sys.argv[1]
last_git = last_start = None
@@ -583,7 +691,7 @@ def main():
handler = None
recent_lines = collections.deque([], 200)
# Parse log file
- with open(logname, 'rt') as f:
+ with open(logname, "rt") as f:
for line_num, line in enumerate(f):
line = line.rstrip()
line_num += 1
@@ -594,18 +702,16 @@ def main():
continue
recent_lines.clear()
handler = None
- if line.startswith('Git version'):
+ if line.startswith("Git version"):
last_git = format_comment(line_num, line)
- elif line.startswith('Start printer at'):
+ elif line.startswith("Start printer at"):
last_start = format_comment(line_num, line)
- elif line == '===== Config file =====':
- handler = GatherConfig(configs, line_num,
- recent_lines, logname)
+ elif line == "===== Config file =====":
+ handler = GatherConfig(configs, line_num, recent_lines, logname)
handler.add_comment(last_git)
handler.add_comment(last_start)
- elif 'shutdown: ' in line or line.startswith('Dumping '):
- handler = GatherShutdown(configs, line_num,
- recent_lines, logname)
+ elif "shutdown: " in line or line.startswith("Dumping "):
+ handler = GatherShutdown(configs, line_num, recent_lines, logname)
handler.add_comment(last_git)
handler.add_comment(last_start)
if handler is not None:
@@ -614,5 +720,6 @@ def main():
for cfg in configs.values():
cfg.write_file()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/make_version.py b/scripts/make_version.py
index e64ef7d6..7a48018a 100644
--- a/scripts/make_version.py
+++ b/scripts/make_version.py
@@ -11,21 +11,21 @@ import argparse
import os
import sys
-sys.path.append(os.path.join(os.path.dirname(__file__), '../klippy'))
+sys.path.append(os.path.join(os.path.dirname(__file__), "../klippy"))
import util
def main(argv):
p = argparse.ArgumentParser()
- p.add_argument(
- 'distroname',
- help='Name of distro this package is intended for'
- )
+ p.add_argument("distroname", help="Name of distro this package is intended for")
args = p.parse_args()
- print(util.get_git_version(from_file=False)["version"],
- args.distroname.replace(' ', ''), sep='-')
+ print(
+ util.get_git_version(from_file=False)["version"],
+ args.distroname.replace(" ", ""),
+ sep="-",
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main(sys.argv[1:])
diff --git a/scripts/motan/analyzers.py b/scripts/motan/analyzers.py
index 2796362f..917cb032 100644
--- a/scripts/motan/analyzers.py
+++ b/scripts/motan/analyzers.py
@@ -14,49 +14,57 @@ import readlog
# Analyzer handlers: {name: class, ...}
AHandlers = {}
+
# Calculate a derivative (position to velocity, or velocity to accel)
class GenDerivative:
ParametersMin = ParametersMax = 1
DataSets = [
- ('derivative(<dataset>)', 'Derivative of the given dataset'),
+ ("derivative(<dataset>)", "Derivative of the given dataset"),
]
+
def __init__(self, amanager, name_parts):
self.amanager = amanager
self.source = name_parts[1]
amanager.setup_dataset(self.source)
+
def get_label(self):
label = self.amanager.get_label(self.source)
- lname = label['label']
- units = label['units']
- if '(mm)' in units:
- rep = [('Position', 'Velocity'), ('(mm)', '(mm/s)')]
- elif '(mm/s)' in units:
- rep = [('Velocity', 'Acceleration'), ('(mm/s)', '(mm/s^2)')]
+ lname = label["label"]
+ units = label["units"]
+ if "(mm)" in units:
+ rep = [("Position", "Velocity"), ("(mm)", "(mm/s)")]
+ elif "(mm/s)" in units:
+ rep = [("Velocity", "Acceleration"), ("(mm/s)", "(mm/s^2)")]
else:
- return {'label': 'Derivative', 'units': 'Unknown'}
+ return {"label": "Derivative", "units": "Unknown"}
for old, new in rep:
lname = lname.replace(old, new).replace(old.lower(), new.lower())
units = units.replace(old, new).replace(old.lower(), new.lower())
- return {'label': lname, 'units': units}
+ return {"label": lname, "units": units}
+
def generate_data(self):
- inv_seg_time = 1. / self.amanager.get_segment_time()
+ inv_seg_time = 1.0 / self.amanager.get_segment_time()
data = self.amanager.get_datasets()[self.source]
- deriv = [(data[i+1] - data[i]) * inv_seg_time
- for i in range(len(data)-1)]
+ deriv = [(data[i + 1] - data[i]) * inv_seg_time for i in range(len(data) - 1)]
return [deriv[0]] + deriv
+
+
AHandlers["derivative"] = GenDerivative
+
# Calculate an integral (accel to velocity, or velocity to position)
class GenIntegral:
ParametersMin = 1
ParametersMax = 3
DataSets = [
- ('integral(<dataset>)', 'Integral of the given dataset'),
- ('integral(<dataset1>,<dataset2>)',
- 'Integral with dataset2 as reference'),
- ('integral(<dataset1>,<dataset2>,<half_life>)',
- 'Integral with weighted half-life time'),
+ ("integral(<dataset>)", "Integral of the given dataset"),
+ ("integral(<dataset1>,<dataset2>)", "Integral with dataset2 as reference"),
+ (
+ "integral(<dataset1>,<dataset2>,<half_life>)",
+ "Integral with weighted half-life time",
+ ),
]
+
def __init__(self, amanager, name_parts):
self.amanager = amanager
self.source = name_parts[1]
@@ -68,54 +76,58 @@ class GenIntegral:
amanager.setup_dataset(self.ref)
if len(name_parts) == 4:
self.half_life = float(name_parts[3])
+
def get_label(self):
label = self.amanager.get_label(self.source)
- lname = label['label']
- units = label['units']
- if '(mm/s)' in units:
- rep = [('Velocity', 'Position'), ('(mm/s)', '(mm)')]
- elif '(mm/s^2)' in units:
- rep = [('Acceleration', 'Velocity'), ('(mm/s^2)', '(mm/s)')]
+ lname = label["label"]
+ units = label["units"]
+ if "(mm/s)" in units:
+ rep = [("Velocity", "Position"), ("(mm/s)", "(mm)")]
+ elif "(mm/s^2)" in units:
+ rep = [("Acceleration", "Velocity"), ("(mm/s^2)", "(mm/s)")]
else:
- return {'label': 'Integral', 'units': 'Unknown'}
+ return {"label": "Integral", "units": "Unknown"}
for old, new in rep:
lname = lname.replace(old, new).replace(old.lower(), new.lower())
units = units.replace(old, new).replace(old.lower(), new.lower())
- return {'label': lname, 'units': units}
+ return {"label": lname, "units": units}
+
def generate_data(self):
seg_time = self.amanager.get_segment_time()
src = self.amanager.get_datasets()[self.source]
offset = sum(src) / len(src)
- total = 0.
+ total = 0.0
ref = None
if self.ref is not None:
ref = self.amanager.get_datasets()[self.ref]
offset -= (ref[-1] - ref[0]) / (len(src) * seg_time)
total = ref[0]
- src_weight = 1.
+ src_weight = 1.0
if self.half_life:
- src_weight = math.exp(math.log(.5) * seg_time / self.half_life)
- ref_weight = 1. - src_weight
- data = [0.] * len(src)
+ src_weight = math.exp(math.log(0.5) * seg_time / self.half_life)
+ ref_weight = 1.0 - src_weight
+ data = [0.0] * len(src)
for i, v in enumerate(src):
total += (v - offset) * seg_time
if ref is not None:
total = src_weight * total + ref_weight * ref[i]
data[i] = total
return data
+
+
AHandlers["integral"] = GenIntegral
+
# Calculate a pointwise 2-norm of several datasets (e.g. compute velocity or
# accel from its x, y,... components)
class GenNorm2:
ParametersMin = 2
ParametersMax = 3
DataSets = [
- ('norm2(<dataset1>,<dataset2>)',
- 'pointwise 2-norm of dataset1 and dataset2'),
- ('norm2(<dataset1>,<dataset2>,<dataset3>)',
- 'pointwise 2-norm of 3 datasets'),
+ ("norm2(<dataset1>,<dataset2>)", "pointwise 2-norm of dataset1 and dataset2"),
+ ("norm2(<dataset1>,<dataset2>,<dataset3>)", "pointwise 2-norm of 3 datasets"),
]
+
def __init__(self, amanager, name_parts):
self.amanager = amanager
self.datasets = []
@@ -125,48 +137,56 @@ class GenNorm2:
self.datasets.append(name_parts[3])
for dataset in self.datasets:
amanager.setup_dataset(dataset)
+
def get_label(self):
label = self.amanager.get_label(self.datasets[0])
- units = label['units']
- datas = ['position', 'velocity', 'acceleration']
- data_name = ''
+ units = label["units"]
+ datas = ["position", "velocity", "acceleration"]
+ data_name = ""
for d in datas:
- if d in label['label']:
+ if d in label["label"]:
data_name = d
break
- lname = ''
+ lname = ""
for d in self.datasets:
- l = self.amanager.get_label(d)['label']
+ l = self.amanager.get_label(d)["label"]
for r in datas:
- l = l.replace(r, '').strip()
+ l = l.replace(r, "").strip()
if lname:
- lname += '+'
+ lname += "+"
lname += l
- lname += ' ' + data_name + ' norm2'
- return {'label': lname, 'units': units}
+ lname += " " + data_name + " norm2"
+ return {"label": lname, "units": units}
+
def generate_data(self):
seg_time = self.amanager.get_segment_time()
data = []
for dataset in self.datasets:
data.append(self.amanager.get_datasets()[dataset])
- res = [0.] * len(data[0])
+ res = [0.0] * len(data[0])
for i in range(len(data[0])):
- norm2 = 0.
+ norm2 = 0.0
for dataset in data:
norm2 += dataset[i] * dataset[i]
res[i] = math.sqrt(norm2)
return res
+
+
AHandlers["norm2"] = GenNorm2
+
class GenSmoothed:
ParametersMin = 1
ParametersMax = 2
DataSets = [
- ('smooth(<dataset>)', 'Generate moving weighted average of a dataset'),
- ('smooth(<dataset>,<smooth_time>)',
- 'Generate moving weighted average of a dataset with a given'
- ' smoothing time that defines the window size'),
+ ("smooth(<dataset>)", "Generate moving weighted average of a dataset"),
+ (
+ "smooth(<dataset>,<smooth_time>)",
+ "Generate moving weighted average of a dataset with a given"
+ " smoothing time that defines the window size",
+ ),
]
+
def __init__(self, amanager, name_parts):
self.amanager = amanager
self.source = name_parts[1]
@@ -174,125 +194,152 @@ class GenSmoothed:
self.smooth_time = 0.01
if len(name_parts) > 2:
self.smooth_time = float(name_parts[2])
+
def get_label(self):
label = self.amanager.get_label(self.source)
- return {'label': 'Smoothed ' + label['label'], 'units': label['units']}
+ return {"label": "Smoothed " + label["label"], "units": label["units"]}
+
def generate_data(self):
seg_time = self.amanager.get_segment_time()
src = self.amanager.get_datasets()[self.source]
n = len(src)
- data = [0.] * n
+ data = [0.0] * n
hst = 0.5 * self.smooth_time
seg_half_len = round(hst / seg_time)
- inv_norm = 1. / sum([min(k + 1, seg_half_len + seg_half_len - k)
- for k in range(2 * seg_half_len)])
+ inv_norm = 1.0 / sum(
+ [
+ min(k + 1, seg_half_len + seg_half_len - k)
+ for k in range(2 * seg_half_len)
+ ]
+ )
for i in range(n):
j = max(0, i - seg_half_len)
je = min(n, i + seg_half_len)
- avg_val = 0.
+ avg_val = 0.0
for k, v in enumerate(src[j:je]):
avg_val += v * min(k + 1, seg_half_len + seg_half_len - k)
data[i] = avg_val * inv_norm
return data
+
+
AHandlers["smooth"] = GenSmoothed
+
# Calculate a kinematic stepper position from the toolhead requested position
class GenKinematicPosition:
ParametersMin = ParametersMax = 1
DataSets = [
- ('kin(<stepper>)', 'Stepper position derived from toolhead kinematics'),
+ ("kin(<stepper>)", "Stepper position derived from toolhead kinematics"),
]
+
def __init__(self, amanager, name_parts):
self.amanager = amanager
stepper = name_parts[1]
status = self.amanager.get_initial_status()
- kin = status['configfile']['settings']['printer']['kinematics']
- if kin not in ['cartesian', 'corexy']:
+ kin = status["configfile"]["settings"]["printer"]["kinematics"]
+ if kin not in ["cartesian", "corexy"]:
raise amanager.error("Unsupported kinematics '%s'" % (kin,))
- if stepper not in ['stepper_x', 'stepper_y', 'stepper_z']:
+ if stepper not in ["stepper_x", "stepper_y", "stepper_z"]:
raise amanager.error("Unknown stepper '%s'" % (stepper,))
- if kin == 'corexy' and stepper in ['stepper_x', 'stepper_y']:
- self.source1 = 'trapq(toolhead,x)'
- self.source2 = 'trapq(toolhead,y)'
- if stepper == 'stepper_x':
+ if kin == "corexy" and stepper in ["stepper_x", "stepper_y"]:
+ self.source1 = "trapq(toolhead,x)"
+ self.source2 = "trapq(toolhead,y)"
+ if stepper == "stepper_x":
self.generate_data = self.generate_data_corexy_plus
else:
self.generate_data = self.generate_data_corexy_minus
amanager.setup_dataset(self.source1)
amanager.setup_dataset(self.source2)
else:
- self.source1 = 'trapq(toolhead,%s)' % (stepper[-1:],)
+ self.source1 = "trapq(toolhead,%s)" % (stepper[-1:],)
self.source2 = None
self.generate_data = self.generate_data_passthrough
amanager.setup_dataset(self.source1)
+
def get_label(self):
- return {'label': 'Position', 'units': 'Position\n(mm)'}
+ return {"label": "Position", "units": "Position\n(mm)"}
+
def generate_data_corexy_plus(self):
datasets = self.amanager.get_datasets()
data1 = datasets[self.source1]
data2 = datasets[self.source2]
return [d1 + d2 for d1, d2 in zip(data1, data2)]
+
def generate_data_corexy_minus(self):
datasets = self.amanager.get_datasets()
data1 = datasets[self.source1]
data2 = datasets[self.source2]
return [d1 - d2 for d1, d2 in zip(data1, data2)]
+
def generate_data_passthrough(self):
return self.amanager.get_datasets()[self.source1]
+
+
AHandlers["kin"] = GenKinematicPosition
+
# Calculate a toolhead x/y position from corexy stepper positions
class GenCorexyPosition:
ParametersMin = ParametersMax = 3
DataSets = [
- ('corexy(x,<stepper>,<stepper>)', 'Toolhead x position from steppers'),
- ('corexy(y,<stepper>,<stepper>)', 'Toolhead y position from steppers'),
+ ("corexy(x,<stepper>,<stepper>)", "Toolhead x position from steppers"),
+ ("corexy(y,<stepper>,<stepper>)", "Toolhead y position from steppers"),
]
+
def __init__(self, amanager, name_parts):
self.amanager = amanager
- self.is_plus = name_parts[1] == 'x'
+ self.is_plus = name_parts[1] == "x"
self.source1, self.source2 = name_parts[2:]
amanager.setup_dataset(self.source1)
amanager.setup_dataset(self.source2)
+
def get_label(self):
- axis = 'x'
+ axis = "x"
if not self.is_plus:
- axis = 'y'
- return {'label': 'Derived %s position' % (axis,),
- 'units': 'Position\n(mm)'}
+ axis = "y"
+ return {"label": "Derived %s position" % (axis,), "units": "Position\n(mm)"}
+
def generate_data(self):
datasets = self.amanager.get_datasets()
data1 = datasets[self.source1]
data2 = datasets[self.source2]
if self.is_plus:
- return [.5 * (d1 + d2) for d1, d2 in zip(data1, data2)]
- return [.5 * (d1 - d2) for d1, d2 in zip(data1, data2)]
+ return [0.5 * (d1 + d2) for d1, d2 in zip(data1, data2)]
+ return [0.5 * (d1 - d2) for d1, d2 in zip(data1, data2)]
+
+
AHandlers["corexy"] = GenCorexyPosition
+
# Calculate a position deviation
class GenDeviation:
ParametersMin = ParametersMax = 2
DataSets = [
- ('deviation(<dataset1>,<dataset2>)', 'Difference between datasets'),
+ ("deviation(<dataset1>,<dataset2>)", "Difference between datasets"),
]
+
def __init__(self, amanager, name_parts):
self.amanager = amanager
self.source1, self.source2 = name_parts[1:]
amanager.setup_dataset(self.source1)
amanager.setup_dataset(self.source2)
+
def get_label(self):
label1 = self.amanager.get_label(self.source1)
label2 = self.amanager.get_label(self.source2)
- if label1['units'] != label2['units']:
- return {'label': 'Deviation', 'units': 'Unknown'}
- parts = label1['units'].split('\n')
- units = '\n'.join([parts[0]] + ['Deviation'] + parts[1:])
- return {'label': label1['label'] + ' deviation', 'units': units}
+ if label1["units"] != label2["units"]:
+ return {"label": "Deviation", "units": "Unknown"}
+ parts = label1["units"].split("\n")
+ units = "\n".join([parts[0]] + ["Deviation"] + parts[1:])
+ return {"label": label1["label"] + " deviation", "units": units}
+
def generate_data(self):
datasets = self.amanager.get_datasets()
data1 = datasets[self.source1]
data2 = datasets[self.source2]
return [d1 - d2 for d1, d2 in zip(data1, data2)]
+
+
AHandlers["deviation"] = GenDeviation
@@ -300,6 +347,7 @@ AHandlers["deviation"] = GenDeviation
# Analyzer management and data generation
######################################################################
+
# Return a description of available analyzers
def list_datasets():
datasets = []
@@ -307,9 +355,11 @@ def list_datasets():
datasets += AHandlers[ah].DataSets
return datasets
+
# Manage raw and generated data samples
class AnalyzerManager:
error = None
+
def __init__(self, lmanager, segment_time):
self.lmanager = lmanager
self.error = lmanager.error
@@ -318,17 +368,23 @@ class AnalyzerManager:
self.gen_datasets = collections.OrderedDict()
self.datasets = {}
self.dataset_times = []
- self.duration = 5.
+ self.duration = 5.0
+
def set_duration(self, duration):
self.duration = duration
+
def get_segment_time(self):
return self.segment_time
+
def get_datasets(self):
return self.datasets
+
def get_dataset_times(self):
return self.dataset_times
+
def get_initial_status(self):
return self.lmanager.get_initial_status()
+
def setup_dataset(self, name):
name = name.strip()
if name in self.raw_datasets:
@@ -350,6 +406,7 @@ class AnalyzerManager:
self.gen_datasets[name] = hdl
self.datasets[name] = []
return hdl
+
def get_label(self, dataset):
hdl = self.raw_datasets.get(dataset)
if hdl is None:
@@ -357,10 +414,12 @@ class AnalyzerManager:
if hdl is None:
raise self.error("Unknown dataset '%s'" % (dataset,))
return hdl.get_label()
+
def generate_datasets(self):
# Generate raw data
- list_hdls = [(self.datasets[name], hdl)
- for name, hdl in self.raw_datasets.items()]
+ list_hdls = [
+ (self.datasets[name], hdl) for name, hdl in self.raw_datasets.items()
+ ]
initial_start_time = self.lmanager.get_initial_start_time()
start_time = t = self.lmanager.get_start_time()
end_time = start_time + self.duration
diff --git a/scripts/motan/data_logger.py b/scripts/motan/data_logger.py
index fd4de7a5..00023c2f 100755
--- a/scripts/motan/data_logger.py
+++ b/scripts/motan/data_logger.py
@@ -7,7 +7,8 @@
import sys, os, optparse, socket, select, json, errno, time, zlib
INDEX_UPDATE_TIME = 5.0
-ClientInfo = {'program': 'motan_data_logger', 'version': 'v0.1'}
+ClientInfo = {"program": "motan_data_logger", "version": "v0.1"}
+
def webhook_socket_create(uds_filename):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -20,25 +21,28 @@ def webhook_socket_create(uds_filename):
if e.errno == errno.ECONNREFUSED:
time.sleep(0.1)
continue
- sys.stderr.write("Unable to connect socket %s [%d,%s]\n"
- % (uds_filename, e.errno,
- errno.errorcode[e.errno]))
+ sys.stderr.write(
+ "Unable to connect socket %s [%d,%s]\n"
+ % (uds_filename, e.errno, errno.errorcode[e.errno])
+ )
sys.exit(-1)
break
sys.stderr.write("Connection.\n")
return sock
+
class LogWriter:
def __init__(self, filename):
self.file = open(filename, "wb")
- self.comp = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
- zlib.DEFLATED, 31)
+ self.comp = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, 31)
self.raw_pos = self.file_pos = 0
+
def add_data(self, data):
d = self.comp.compress(data + b"\x03")
self.file.write(d)
self.file_pos += len(d)
self.raw_pos += len(data) + 1
+
def flush(self, flag=zlib.Z_FULL_FLUSH):
if not self.raw_pos:
return self.file_pos
@@ -46,12 +50,14 @@ class LogWriter:
self.file.write(d)
self.file_pos += len(d)
return self.file_pos
+
def close(self):
self.flush(zlib.Z_FINISH)
self.file.close()
self.file = None
self.comp = None
+
class DataLogger:
def __init__(self, uds_filename, log_prefix):
# IO
@@ -67,23 +73,26 @@ class DataLogger:
self.async_handlers = {}
# get_status databasing
self.db = {}
- self.next_index_time = 0.
+ self.next_index_time = 0.0
# Start login process
- self.send_query("info", "info", {"client_info": ClientInfo},
- self.handle_info)
+ self.send_query("info", "info", {"client_info": ClientInfo}, self.handle_info)
+
def error(self, msg):
sys.stderr.write(msg + "\n")
+
def finish(self, msg):
self.error(msg)
self.logger.close()
self.index.close()
sys.exit(0)
+
# Unix Domain Socket IO
def send_query(self, msg_id, method, params, cb):
self.query_handlers[msg_id] = cb
msg = {"id": msg_id, "method": method, "params": params}
- cm = json.dumps(msg, separators=(',', ':')).encode()
+ cm = json.dumps(msg, separators=(",", ":")).encode()
self.webhook_socket.send(cm + b"\x03")
+
def process_socket(self):
data = self.webhook_socket.recv(4096)
if not data:
@@ -113,15 +122,17 @@ class DataLogger:
self.flush_index()
continue
self.error("ERROR: Message with unknown id")
+
def run(self):
try:
while 1:
- res = self.poll.poll(1000.)
+ res = self.poll.poll(1000.0)
for fd, event in res:
if fd == self.webhook_socket.fileno():
self.process_socket()
except KeyboardInterrupt as e:
self.finish("Keyboard Interrupt")
+
# Query response handlers
def send_subscribe(self, msg_id, method, params, cb=None, async_cb=None):
if cb is None:
@@ -130,14 +141,22 @@ class DataLogger:
self.async_handlers[msg_id] = async_cb
params["response_template"] = {"q": msg_id}
self.send_query(msg_id, method, params, cb)
+
def handle_info(self, msg, raw_msg):
if msg["result"]["state"] != "ready":
self.finish("Klipper not in ready state")
self.send_query("list", "objects/list", {}, self.handle_list)
+
def handle_list(self, msg, raw_msg):
subreq = {o: None for o in msg["result"]["objects"]}
- self.send_subscribe("status", "objects/subscribe", {"objects": subreq},
- self.handle_subscribe, self.handle_async_db)
+ self.send_subscribe(
+ "status",
+ "objects/subscribe",
+ {"objects": subreq},
+ self.handle_subscribe,
+ self.handle_async_db,
+ )
+
def handle_subscribe(self, msg, raw_msg):
result = msg["result"]
self.next_index_time = result["eventtime"] + INDEX_UPDATE_TIME
@@ -145,15 +164,17 @@ class DataLogger:
# Subscribe to trapq and stepper queue updates
motion_report = status.get("motion_report", {})
for trapq in motion_report.get("trapq", []):
- self.send_subscribe("trapq:" + trapq, "motion_report/dump_trapq",
- {"name": trapq})
+ self.send_subscribe(
+ "trapq:" + trapq, "motion_report/dump_trapq", {"name": trapq}
+ )
for stepper in motion_report.get("steppers", []):
- self.send_subscribe("stepq:" + stepper,
- "motion_report/dump_stepper", {"name": stepper})
+ self.send_subscribe(
+ "stepq:" + stepper, "motion_report/dump_stepper", {"name": stepper}
+ )
# Subscribe to additional sensor data
stypes = ["adxl345", "lis2dw", "mpu9250", "angle"]
- stypes = {st:st for st in stypes}
- stypes['probe_eddy_current'] = 'ldc1612'
+ stypes = {st: st for st in stypes}
+ stypes["probe_eddy_current"] = "ldc1612"
config = status["configfile"]["settings"]
for cfgname in config.keys():
for capprefix, st in sorted(stypes.items()):
@@ -163,30 +184,37 @@ class DataLogger:
qcmd = "%s/dump_%s" % (st, st)
self.send_subscribe(lname, qcmd, {"sensor": aname})
if cfgname.startswith("tmc"):
- driver = ' '.join(cfgname.split()[1:])
- self.send_subscribe("stallguard:" + driver,
- "tmc/stallguard_dump", {"name": driver})
+ driver = " ".join(cfgname.split()[1:])
+ self.send_subscribe(
+ "stallguard:" + driver, "tmc/stallguard_dump", {"name": driver}
+ )
+
def handle_dump(self, msg, raw_msg):
msg_id = msg["id"]
if "result" not in msg:
- self.error("Unable to subscribe to '%s': %s"
- % (msg_id, msg.get("error", {}).get("message", "")))
+ self.error(
+ "Unable to subscribe to '%s': %s"
+ % (msg_id, msg.get("error", {}).get("message", ""))
+ )
return
self.db.setdefault("subscriptions", {})[msg_id] = msg["result"]
+
def flush_index(self):
- self.db['file_position'] = self.logger.flush()
- self.index.add_data(json.dumps(self.db, separators=(',', ':')).encode())
+ self.db["file_position"] = self.logger.flush()
+ self.index.add_data(json.dumps(self.db, separators=(",", ":")).encode())
self.db = {"status": {}}
+
def handle_async_db(self, msg, raw_msg):
params = msg["params"]
- db_status = self.db['status']
+ db_status = self.db["status"]
for k, v in params.get("status", {}).items():
db_status.setdefault(k, {}).update(v)
- eventtime = params['eventtime']
+ eventtime = params["eventtime"]
if eventtime >= self.next_index_time:
self.next_index_time = eventtime + INDEX_UPDATE_TIME
self.flush_index()
+
def nice():
try:
# Try to re-nice writing process
@@ -194,6 +222,7 @@ def nice():
except:
pass
+
def main():
usage = "%prog [options] <socket filename> <log name>"
opts = optparse.OptionParser(usage)
@@ -205,5 +234,6 @@ def main():
dl = DataLogger(args[0], args[1])
dl.run()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/motan/motan_graph.py b/scripts/motan/motan_graph.py
index fc1dee17..8d551340 100755
--- a/scripts/motan/motan_graph.py
+++ b/scripts/motan/motan_graph.py
@@ -7,6 +7,7 @@
import sys, optparse, ast
import matplotlib
import readlog, analyzers
+
try:
import urlparse
except:
@@ -17,6 +18,7 @@ except:
# Graphing
######################################################################
+
def plot_motion(amanager, graphs, log_prefix):
# Generate data
for graph in graphs:
@@ -27,7 +29,7 @@ def plot_motion(amanager, graphs, log_prefix):
times = amanager.get_dataset_times()
# Build plot
fontP = matplotlib.font_manager.FontProperties()
- fontP.set_size('x-small')
+ fontP.set_size("x-small")
fig, rows = matplotlib.pyplot.subplots(nrows=len(graphs), sharex=True)
if len(graphs) == 1:
rows = [rows]
@@ -38,29 +40,29 @@ def plot_motion(amanager, graphs, log_prefix):
label = amanager.get_label(dataset)
ax = graph_ax
if graph_units is None:
- graph_units = label['units']
+ graph_units = label["units"]
ax.set_ylabel(graph_units)
- elif label['units'] != graph_units:
+ elif label["units"] != graph_units:
if graph_twin_units is None:
ax = twin_ax = graph_ax.twinx()
- graph_twin_units = label['units']
+ graph_twin_units = label["units"]
ax.set_ylabel(graph_twin_units)
- elif label['units'] == graph_twin_units:
+ elif label["units"] == graph_twin_units:
ax = twin_ax
else:
graph_units = "Unknown"
ax.set_ylabel(graph_units)
- pparams = {'label': label['label'], 'alpha': 0.8}
+ pparams = {"label": label["label"], "alpha": 0.8}
pparams.update(plot_params)
ax.plot(times, datasets[dataset], **pparams)
if twin_ax is not None:
li1, la1 = graph_ax.get_legend_handles_labels()
li2, la2 = twin_ax.get_legend_handles_labels()
- twin_ax.legend(li1 + li2, la1 + la2, loc='best', prop=fontP)
+ twin_ax.legend(li1 + li2, la1 + la2, loc="best", prop=fontP)
else:
- graph_ax.legend(loc='best', prop=fontP)
+ graph_ax.legend(loc="best", prop=fontP)
graph_ax.grid(True)
- rows[-1].set_xlabel('Time (s)')
+ rows[-1].set_xlabel("Time (s)")
return fig
@@ -68,23 +70,26 @@ def plot_motion(amanager, graphs, log_prefix):
# Startup
######################################################################
+
def setup_matplotlib(output_to_file):
global matplotlib
if output_to_file:
- matplotlib.use('Agg')
+ matplotlib.use("Agg")
import matplotlib.pyplot, matplotlib.dates, matplotlib.font_manager
import matplotlib.ticker
+
def parse_graph_description(desc):
- if '?' not in desc:
+ if "?" not in desc:
return (desc, {})
- dataset, params = desc.split('?', 1)
+ dataset, params = desc.split("?", 1)
params = {k: v for k, v in urlparse.parse_qsl(params)}
- for fkey in ['alpha']:
+ for fkey in ["alpha"]:
if fkey in params:
params[fkey] = float(params[fkey])
return (dataset, params)
+
def list_datasets():
datasets = readlog.list_datasets() + analyzers.list_datasets()
out = ["\nAvailable datasets:\n"]
@@ -94,21 +99,35 @@ def list_datasets():
sys.stdout.write("".join(out))
sys.exit(0)
+
def main():
# Parse command-line arguments
usage = "%prog [options] <logname>"
opts = optparse.OptionParser(usage)
- opts.add_option("-o", "--output", type="string", dest="output",
- default=None, help="filename of output graph")
- opts.add_option("-s", "--skip", type="float", default=0.,
- help="Set the start time to graph")
- opts.add_option("-d", "--duration", type="float", default=5.,
- help="Number of seconds to graph")
- opts.add_option("--segment-time", type="float", default=0.000100,
- help="Analysis segment time (default 0.000100 seconds)")
+ opts.add_option(
+ "-o",
+ "--output",
+ type="string",
+ dest="output",
+ default=None,
+ help="filename of output graph",
+ )
+ opts.add_option(
+ "-s", "--skip", type="float", default=0.0, help="Set the start time to graph"
+ )
+ opts.add_option(
+ "-d", "--duration", type="float", default=5.0, help="Number of seconds to graph"
+ )
+ opts.add_option(
+ "--segment-time",
+ type="float",
+ default=0.000100,
+ help="Analysis segment time (default 0.000100 seconds)",
+ )
opts.add_option("-g", "--graph", help="Graph to generate (python literal)")
- opts.add_option("-l", "--list-datasets", action="store_true",
- help="List available datasets")
+ opts.add_option(
+ "-l", "--list-datasets", action="store_true", help="List available datasets"
+ )
options, args = opts.parse_args()
if options.list_datasets:
list_datasets()
@@ -131,8 +150,9 @@ def main():
]
if options.graph is not None:
graph_descs = ast.literal_eval(options.graph)
- graphs = [[parse_graph_description(g) for g in graph_row]
- for graph_row in graph_descs]
+ graphs = [
+ [parse_graph_description(g) for g in graph_row] for graph_row in graph_descs
+ ]
# Draw graph
setup_matplotlib(options.output is not None)
@@ -145,5 +165,6 @@ def main():
fig.set_size_inches(8, 6)
fig.savefig(options.output)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/motan/readlog.py b/scripts/motan/readlog.py
index 43c01619..8f86d4e2 100644
--- a/scripts/motan/readlog.py
+++ b/scripts/motan/readlog.py
@@ -5,6 +5,7 @@
# This file may be distributed under the terms of the GNU GPLv3 license.
import json, zlib
+
class error(Exception):
pass
@@ -16,81 +17,98 @@ class error(Exception):
# Log data handlers: {name: class, ...}
LogHandlers = {}
+
# Extract status fields from log
class HandleStatusField:
SubscriptionIdParts = 0
ParametersMin = ParametersMax = 1
DataSets = [
- ('status(<field>)', 'A get_status field name (separate by periods)'),
+ ("status(<field>)", "A get_status field name (separate by periods)"),
]
+
def __init__(self, lmanager, name, name_parts):
self.status_tracker = lmanager.get_status_tracker()
self.field_name = name_parts[1]
- self.field_parts = name_parts[1].split('.')
- self.next_update_time = 0.
+ self.field_parts = name_parts[1].split(".")
+ self.next_update_time = 0.0
self.result = None
+
def get_label(self):
- label = '%s field' % (self.field_name,)
- return {'label': label, 'units': 'Unknown'}
+ label = "%s field" % (self.field_name,)
+ return {"label": label, "units": "Unknown"}
+
def pull_data(self, req_time):
if req_time < self.next_update_time:
return self.result
db, next_update_time = self.status_tracker.pull_status(req_time)
for fp in self.field_parts[:-1]:
db = db.get(fp, {})
- self.result = db.get(self.field_parts[-1], 0.)
+ self.result = db.get(self.field_parts[-1], 0.0)
self.next_update_time = next_update_time
return self.result
+
+
LogHandlers["status"] = HandleStatusField
+
# Extract requested position, velocity, and accel from a trapq log
class HandleTrapQ:
SubscriptionIdParts = 2
ParametersMin = ParametersMax = 2
DataSets = [
- ('trapq(<name>,velocity)', 'Requested velocity for the given trapq'),
- ('trapq(<name>,accel)', 'Requested acceleration for the given trapq'),
- ('trapq(<name>,<axis>)', 'Requested axis (x, y, or z) position'),
- ('trapq(<name>,<axis>_velocity)', 'Requested axis velocity'),
- ('trapq(<name>,<axis>_accel)', 'Requested axis acceleration'),
+ ("trapq(<name>,velocity)", "Requested velocity for the given trapq"),
+ ("trapq(<name>,accel)", "Requested acceleration for the given trapq"),
+ ("trapq(<name>,<axis>)", "Requested axis (x, y, or z) position"),
+ ("trapq(<name>,<axis>_velocity)", "Requested axis velocity"),
+ ("trapq(<name>,<axis>_accel)", "Requested axis acceleration"),
]
+
def __init__(self, lmanager, name, name_parts):
self.name = name
self.jdispatch = lmanager.get_jdispatch()
- self.cur_data = [(0., 0., 0., 0., (0., 0., 0.), (0., 0., 0.))]
+ self.cur_data = [(0.0, 0.0, 0.0, 0.0, (0.0, 0.0, 0.0), (0.0, 0.0, 0.0))]
self.data_pos = 0
tq, trapq_name, datasel = name_parts
ptypes = {}
- ptypes['velocity'] = {
- 'label': '%s velocity' % (trapq_name,),
- 'units': 'Velocity\n(mm/s)', 'func': self._pull_velocity
+ ptypes["velocity"] = {
+ "label": "%s velocity" % (trapq_name,),
+ "units": "Velocity\n(mm/s)",
+ "func": self._pull_velocity,
}
- ptypes['accel'] = {
- 'label': '%s acceleration' % (trapq_name,),
- 'units': 'Acceleration\n(mm/s^2)', 'func': self._pull_accel
+ ptypes["accel"] = {
+ "label": "%s acceleration" % (trapq_name,),
+ "units": "Acceleration\n(mm/s^2)",
+ "func": self._pull_accel,
}
for axis, name in enumerate("xyz"):
- ptypes['%s' % (name,)] = {
- 'label': '%s %s position' % (trapq_name, name), 'axis': axis,
- 'units': 'Position\n(mm)', 'func': self._pull_axis_position
+ ptypes["%s" % (name,)] = {
+ "label": "%s %s position" % (trapq_name, name),
+ "axis": axis,
+ "units": "Position\n(mm)",
+ "func": self._pull_axis_position,
}
- ptypes['%s_velocity' % (name,)] = {
- 'label': '%s %s velocity' % (trapq_name, name), 'axis': axis,
- 'units': 'Velocity\n(mm/s)', 'func': self._pull_axis_velocity
+ ptypes["%s_velocity" % (name,)] = {
+ "label": "%s %s velocity" % (trapq_name, name),
+ "axis": axis,
+ "units": "Velocity\n(mm/s)",
+ "func": self._pull_axis_velocity,
}
- ptypes['%s_accel' % (name,)] = {
- 'label': '%s %s acceleration' % (trapq_name, name),
- 'axis': axis, 'units': 'Acceleration\n(mm/s^2)',
- 'func': self._pull_axis_accel
+ ptypes["%s_accel" % (name,)] = {
+ "label": "%s %s acceleration" % (trapq_name, name),
+ "axis": axis,
+ "units": "Acceleration\n(mm/s^2)",
+ "func": self._pull_axis_accel,
}
pinfo = ptypes.get(datasel)
if pinfo is None:
raise error("Unknown trapq data selection '%s'" % (datasel,))
- self.label = {'label': pinfo['label'], 'units': pinfo['units']}
- self.axis = pinfo.get('axis')
- self.pull_data = pinfo['func']
+ self.label = {"label": pinfo["label"], "units": pinfo["units"]}
+ self.axis = pinfo.get("axis")
+ self.pull_data = pinfo["func"]
+
def get_label(self):
return self.label
+
def _find_move(self, req_time):
data_pos = self.data_pos
while 1:
@@ -105,54 +123,63 @@ class HandleTrapQ:
jmsg = self.jdispatch.pull_msg(req_time, self.name)
if jmsg is None:
return move, False
- self.cur_data = jmsg['data']
+ self.cur_data = jmsg["data"]
self.data_pos = data_pos = 0
+
def _pull_axis_position(self, req_time):
move, in_range = self._find_move(req_time)
print_time, move_t, start_v, accel, start_pos, axes_r = move
- mtime = max(0., min(move_t, req_time - print_time))
- dist = (start_v + .5 * accel * mtime) * mtime;
+ mtime = max(0.0, min(move_t, req_time - print_time))
+ dist = (start_v + 0.5 * accel * mtime) * mtime
return start_pos[self.axis] + axes_r[self.axis] * dist
+
def _pull_axis_velocity(self, req_time):
move, in_range = self._find_move(req_time)
if not in_range:
- return 0.
+ return 0.0
print_time, move_t, start_v, accel, start_pos, axes_r = move
return (start_v + accel * (req_time - print_time)) * axes_r[self.axis]
+
def _pull_axis_accel(self, req_time):
move, in_range = self._find_move(req_time)
if not in_range:
- return 0.
+ return 0.0
print_time, move_t, start_v, accel, start_pos, axes_r = move
return accel * axes_r[self.axis]
+
def _pull_velocity(self, req_time):
move, in_range = self._find_move(req_time)
if not in_range:
- return 0.
+ return 0.0
print_time, move_t, start_v, accel, start_pos, axes_r = move
return start_v + accel * (req_time - print_time)
+
def _pull_accel(self, req_time):
move, in_range = self._find_move(req_time)
if not in_range:
- return 0.
+ return 0.0
print_time, move_t, start_v, accel, start_pos, axes_r = move
return accel
+
+
LogHandlers["trapq"] = HandleTrapQ
+
# Extract positions from queue_step log
class HandleStepQ:
SubscriptionIdParts = 2
ParametersMin = 1
ParametersMax = 2
DataSets = [
- ('stepq(<stepper>)', 'Commanded position of the given stepper'),
- ('stepq(<stepper>,<time>)', 'Commanded position with smooth time'),
+ ("stepq(<stepper>)", "Commanded position of the given stepper"),
+ ("stepq(<stepper>,<time>)", "Commanded position with smooth time"),
]
+
def __init__(self, lmanager, name, name_parts):
self.name = name
self.stepper_name = name_parts[1]
self.jdispatch = lmanager.get_jdispatch()
- self.step_data = [(0., 0., 0.), (0., 0., 0.)] # [(time, half_pos, pos)]
+ self.step_data = [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0)] # [(time, half_pos, pos)]
self.data_pos = 0
self.smooth_time = 0.010
if len(name_parts) == 3:
@@ -160,9 +187,11 @@ class HandleStepQ:
self.smooth_time = float(name_parts[2])
except ValueError:
raise error("Invalid stepq smooth time '%s'" % (name_parts[2],))
+
def get_label(self):
- label = '%s position' % (self.stepper_name,)
- return {'label': label, 'units': 'Position\n(mm)'}
+ label = "%s position" % (self.stepper_name,)
+ return {"label": label, "units": "Position\n(mm)"}
+
def pull_data(self, req_time):
smooth_time = self.smooth_time
while 1:
@@ -183,7 +212,7 @@ class HandleStepQ:
if stime <= smooth_time:
pdiff = next_halfpos - last_halfpos
return last_halfpos + rtdiff * pdiff / stime
- stime = .5 * smooth_time
+ stime = 0.5 * smooth_time
if rtdiff < stime:
pdiff = last_pos - last_halfpos
return last_halfpos + rtdiff * pdiff / stime
@@ -192,6 +221,7 @@ class HandleStepQ:
pdiff = last_pos - next_halfpos
return next_halfpos + rtdiff * pdiff / stime
return last_pos
+
def _pull_block(self, req_time):
step_data = self.step_data
del step_data[:-1]
@@ -201,25 +231,25 @@ class HandleStepQ:
jmsg = self.jdispatch.pull_msg(req_time, self.name)
if jmsg is None:
last_time, last_halfpos, last_pos = step_data[0]
- self.step_data.append((req_time + .1, last_pos, last_pos))
+ self.step_data.append((req_time + 0.1, last_pos, last_pos))
return
- last_time = jmsg['last_step_time']
+ last_time = jmsg["last_step_time"]
if req_time <= last_time:
break
# Process block into (time, half_position, position) 3-tuples
- first_time = step_time = jmsg['first_step_time']
- first_clock = jmsg['first_clock']
- step_clock = first_clock - jmsg['data'][0][0]
- cdiff = jmsg['last_clock'] - first_clock
+ first_time = step_time = jmsg["first_step_time"]
+ first_clock = jmsg["first_clock"]
+ step_clock = first_clock - jmsg["data"][0][0]
+ cdiff = jmsg["last_clock"] - first_clock
tdiff = last_time - first_time
- inv_freq = 0.
+ inv_freq = 0.0
if cdiff:
inv_freq = tdiff / cdiff
- step_dist = jmsg['step_distance']
- step_pos = jmsg['start_position']
+ step_dist = jmsg["step_distance"]
+ step_pos = jmsg["start_position"]
if not step_data[0][0]:
- step_data[0] = (0., step_pos, step_pos)
- for interval, raw_count, add in jmsg['data']:
+ step_data[0] = (0.0, step_pos, step_pos)
+ for interval, raw_count, add in jmsg["data"]:
qs_dist = step_dist
count = raw_count
if count < 0:
@@ -229,22 +259,30 @@ class HandleStepQ:
step_clock += interval
interval += add
step_time = first_time + (step_clock - first_clock) * inv_freq
- step_halfpos = step_pos + .5 * qs_dist
+ step_halfpos = step_pos + 0.5 * qs_dist
step_pos += qs_dist
step_data.append((step_time, step_halfpos, step_pos))
+
+
LogHandlers["stepq"] = HandleStepQ
+
# Extract tmc current and stallguard data from the log
class HandleStallguard:
SubscriptionIdParts = 2
ParametersMin = 2
ParametersMax = 2
DataSets = [
- ('stallguard(<stepper>,sg_result)',
- 'Stallguard result of the given stepper driver'),
- ('stallguard(<stepper>,cs_actual)',
- 'Current level result of the given stepper driver'),
+ (
+ "stallguard(<stepper>,sg_result)",
+ "Stallguard result of the given stepper driver",
+ ),
+ (
+ "stallguard(<stepper>,cs_actual)",
+ "Current level result of the given stepper driver",
+ ),
]
+
def __init__(self, lmanager, name, name_parts):
self.name = name
self.stepper_name = name_parts[1]
@@ -253,7 +291,7 @@ class HandleStallguard:
self.data = []
self.ret = None
self.driver_name = ""
- for k in lmanager.get_initial_status()['configfile']['settings']:
+ for k in lmanager.get_initial_status()["configfile"]["settings"]:
if not k.startswith("tmc"):
continue
if k.endswith(self.stepper_name):
@@ -261,15 +299,16 @@ class HandleStallguard:
break
# Current decode
self.status_tracker = lmanager.get_status_tracker()
- self.next_status_time = 0.
+ self.next_status_time = 0.0
self.irun = 0
+
def get_label(self):
- label = '%s %s %s' % (self.driver_name, self.stepper_name,
- self.filter)
+ label = "%s %s %s" % (self.driver_name, self.stepper_name, self.filter)
if self.filter == "sg_result":
- return {'label': label, 'units': 'Stallguard'}
+ return {"label": label, "units": "Stallguard"}
elif self.filter == "cs_actual":
- return {'label': label, 'units': 'CS Actual'}
+ return {"label": label, "units": "CS Actual"}
+
# Search datapoint in dataset extrapolate in between
def pull_data(self, req_time):
while 1:
@@ -290,25 +329,30 @@ class HandleStallguard:
if req_time <= time:
return self.ret[self.filter]
self.ret = None
+
+
LogHandlers["stallguard"] = HandleStallguard
+
# Extract stepper motor phase position
class HandleStepPhase:
SubscriptionIdParts = 0
ParametersMin = 1
ParametersMax = 2
DataSets = [
- ('step_phase(<driver>)', 'Stepper motor phase of the given stepper'),
- ('step_phase(<driver>,microstep)', 'Microstep position for stepper'),
+ ("step_phase(<driver>)", "Stepper motor phase of the given stepper"),
+ ("step_phase(<driver>,microstep)", "Microstep position for stepper"),
]
+
def __init__(self, lmanager, name, name_parts):
self.name = name
self.driver_name = name_parts[1]
self.stepper_name = " ".join(self.driver_name.split()[1:])
- config = lmanager.get_initial_status()['configfile']['settings']
+ config = lmanager.get_initial_status()["configfile"]["settings"]
if self.driver_name not in config or self.stepper_name not in config:
- raise error("Unable to find stepper driver '%s' config"
- % (self.driver_name,))
+ raise error(
+ "Unable to find stepper driver '%s' config" % (self.driver_name,)
+ )
if len(name_parts) == 3 and name_parts[2] != "microstep":
raise error("Unknown step_phase selection '%s'" % (name_parts[2],))
self.report_microsteps = len(name_parts) == 3
@@ -319,23 +363,28 @@ class HandleStepPhase:
self.jdispatch = lmanager.get_jdispatch()
self.jdispatch.add_handler(name, "stepq:" + self.stepper_name)
# stepq tracking
- self.step_data = [(0., 0), (0., 0)] # [(time, mcu_pos)]
+ self.step_data = [(0.0, 0), (0.0, 0)] # [(time, mcu_pos)]
self.data_pos = 0
# driver phase tracking
self.status_tracker = lmanager.get_status_tracker()
- self.next_status_time = 0.
+ self.next_status_time = 0.0
self.mcu_phase_offset = 0
+
def get_label(self):
if self.report_microsteps:
- return {'label': '%s microstep' % (self.stepper_name,),
- 'units': 'Microstep'}
- return {'label': '%s phase' % (self.stepper_name,), 'units': 'Phase'}
+ return {
+ "label": "%s microstep" % (self.stepper_name,),
+ "units": "Microstep",
+ }
+ return {"label": "%s phase" % (self.stepper_name,), "units": "Phase"}
+
def _pull_phase_offset(self, req_time):
db, self.next_status_time = self.status_tracker.pull_status(req_time)
- mcu_phase_offset = db.get(self.driver_name, {}).get('mcu_phase_offset')
+ mcu_phase_offset = db.get(self.driver_name, {}).get("mcu_phase_offset")
if mcu_phase_offset is None:
mcu_phase_offset = 0
self.mcu_phase_offset = mcu_phase_offset
+
def pull_data(self, req_time):
if req_time >= self.next_status_time:
self._pull_phase_offset(req_time)
@@ -352,6 +401,7 @@ class HandleStepPhase:
continue
step_pos = step_data[data_pos][1]
return (step_pos + self.mcu_phase_offset) % self.phases
+
def _pull_block(self, req_time):
step_data = self.step_data
del step_data[:-1]
@@ -361,24 +411,24 @@ class HandleStepPhase:
jmsg = self.jdispatch.pull_msg(req_time, self.name)
if jmsg is None:
last_time, last_pos = step_data[0]
- self.step_data.append((req_time + .1, last_pos))
+ self.step_data.append((req_time + 0.1, last_pos))
return
- last_time = jmsg['last_step_time']
+ last_time = jmsg["last_step_time"]
if req_time <= last_time:
break
# Process block into (time, position) 2-tuples
- first_time = step_time = jmsg['first_step_time']
- first_clock = jmsg['first_clock']
- step_clock = first_clock - jmsg['data'][0][0]
- cdiff = jmsg['last_clock'] - first_clock
+ first_time = step_time = jmsg["first_step_time"]
+ first_clock = jmsg["first_clock"]
+ step_clock = first_clock - jmsg["data"][0][0]
+ cdiff = jmsg["last_clock"] - first_clock
tdiff = last_time - first_time
- inv_freq = 0.
+ inv_freq = 0.0
if cdiff:
inv_freq = tdiff / cdiff
- step_pos = jmsg['start_mcu_position']
+ step_pos = jmsg["start_mcu_position"]
if not step_data[0][0]:
- step_data[0] = (0., step_pos)
- for interval, raw_count, add in jmsg['data']:
+ step_data[0] = (0.0, step_pos)
+ for interval, raw_count, add in jmsg["data"]:
qs_dist = 1
count = raw_count
if count < 0:
@@ -390,29 +440,35 @@ class HandleStepPhase:
step_time = first_time + (step_clock - first_clock) * inv_freq
step_pos += qs_dist
step_data.append((step_time, step_pos))
+
+
LogHandlers["step_phase"] = HandleStepPhase
+
# Extract accelerometer data
class HandleADXL345:
SubscriptionIdParts = 2
ParametersMin = ParametersMax = 2
DataSets = [
- ('adxl345(<name>,<axis>)', 'Accelerometer for given axis (x, y, or z)'),
+ ("adxl345(<name>,<axis>)", "Accelerometer for given axis (x, y, or z)"),
]
+
def __init__(self, lmanager, name, name_parts):
self.name = name
self.adxl_name = name_parts[1]
self.jdispatch = lmanager.get_jdispatch()
- self.next_accel_time = self.last_accel_time = 0.
- self.next_accel = self.last_accel = (0., 0., 0.)
+ self.next_accel_time = self.last_accel_time = 0.0
+ self.next_accel = self.last_accel = (0.0, 0.0, 0.0)
self.cur_data = []
self.data_pos = 0
- if name_parts[2] not in 'xyz':
+ if name_parts[2] not in "xyz":
raise error("Unknown adxl345 data selection '%s'" % (name,))
- self.axis = 'xyz'.index(name_parts[2])
+ self.axis = "xyz".index(name_parts[2])
+
def get_label(self):
- label = '%s %s acceleration' % (self.adxl_name, 'xyz'[self.axis])
- return {'label': label, 'units': 'Acceleration\n(mm/s^2)'}
+ label = "%s %s acceleration" % (self.adxl_name, "xyz"[self.axis])
+ return {"label": label, "units": "Acceleration\n(mm/s^2)"}
+
def pull_data(self, req_time):
axis = self.axis
while 1:
@@ -425,8 +481,8 @@ class HandleADXL345:
# Read next data block
jmsg = self.jdispatch.pull_msg(req_time, self.name)
if jmsg is None:
- return 0.
- self.cur_data = jmsg['data']
+ return 0.0
+ self.cur_data = jmsg["data"]
self.data_pos = 0
continue
self.last_accel = self.next_accel
@@ -434,42 +490,50 @@ class HandleADXL345:
self.next_accel_time, x, y, z = self.cur_data[self.data_pos]
self.next_accel = (x, y, z)
self.data_pos += 1
+
+
LogHandlers["adxl345"] = HandleADXL345
+
# Extract positions from magnetic angle sensor
class HandleAngle:
SubscriptionIdParts = 2
ParametersMin = ParametersMax = 1
DataSets = [
- ('angle(<name>)', 'Angle sensor position'),
+ ("angle(<name>)", "Angle sensor position"),
]
+
def __init__(self, lmanager, name, name_parts):
self.name = name
self.angle_name = name_parts[1]
self.jdispatch = lmanager.get_jdispatch()
- self.next_angle_time = self.last_angle_time = 0.
- self.next_angle = self.last_angle = 0.
+ self.next_angle_time = self.last_angle_time = 0.0
+ self.next_angle = self.last_angle = 0.0
self.cur_data = []
self.data_pos = 0
- self.position_offset = 0.
- self.angle_dist = 1.
+ self.position_offset = 0.0
+ self.angle_dist = 1.0
# Determine angle distance from associated stepper's rotation_distance
- config = lmanager.get_initial_status()['configfile']['settings']
- aname = 'angle %s' % (self.angle_name,)
- stepper_name = config.get(aname, {}).get('stepper')
+ config = lmanager.get_initial_status()["configfile"]["settings"]
+ aname = "angle %s" % (self.angle_name,)
+ stepper_name = config.get(aname, {}).get("stepper")
if stepper_name is not None:
sconfig = config.get(stepper_name, {})
- rotation_distance = sconfig.get('rotation_distance', 1.)
- gear_ratio = sconfig.get('gear_ratio', ())
- if type(gear_ratio) == str: # XXX
- gear_ratio = [[float(v.strip()) for v in gr.split(':')]
- for gr in gear_ratio.split(',')]
+ rotation_distance = sconfig.get("rotation_distance", 1.0)
+ gear_ratio = sconfig.get("gear_ratio", ())
+ if type(gear_ratio) == str: # XXX
+ gear_ratio = [
+ [float(v.strip()) for v in gr.split(":")]
+ for gr in gear_ratio.split(",")
+ ]
for n, d in gear_ratio:
rotation_distance *= d / n
- self.angle_dist = rotation_distance / 65536.
+ self.angle_dist = rotation_distance / 65536.0
+
def get_label(self):
- label = '%s position' % (self.angle_name,)
- return {'label': label, 'units': 'Position\n(mm)'}
+ label = "%s position" % (self.angle_name,)
+ return {"label": label, "units": "Position\n(mm)"}
+
def pull_data(self, req_time):
while 1:
if req_time <= self.next_angle_time:
@@ -477,16 +541,14 @@ class HandleAngle:
tdiff = self.next_angle_time - self.last_angle_time
rtdiff = req_time - self.last_angle_time
po = rtdiff * pdiff / tdiff
- return ((self.last_angle + po) * self.angle_dist
- + self.position_offset)
+ return (self.last_angle + po) * self.angle_dist + self.position_offset
if self.data_pos >= len(self.cur_data):
# Read next data block
jmsg = self.jdispatch.pull_msg(req_time, self.name)
if jmsg is None:
- return (self.next_angle * self.angle_dist
- + self.position_offset)
- self.cur_data = jmsg['data']
- position_offset = jmsg.get('position_offset')
+ return self.next_angle * self.angle_dist + self.position_offset
+ self.cur_data = jmsg["data"]
+ position_offset = jmsg.get("position_offset")
if position_offset is not None:
self.position_offset = position_offset
self.data_pos = 0
@@ -495,24 +557,29 @@ class HandleAngle:
self.last_angle_time = self.next_angle_time
self.next_angle_time, self.next_angle = self.cur_data[self.data_pos]
self.data_pos += 1
+
+
LogHandlers["angle"] = HandleAngle
+
def interpolate(next_val, prev_val, next_time, prev_time, req_time):
vdiff = next_val - prev_val
tdiff = next_time - prev_time
rtdiff = req_time - prev_time
return prev_val + rtdiff * vdiff / tdiff
+
# Extract eddy current data
class HandleEddyCurrent:
SubscriptionIdParts = 2
ParametersMin = 1
ParametersMax = 2
DataSets = [
- ('ldc1612(<name>)', 'Coil resonant frequency'),
- ('ldc1612(<name>,period)', 'Coil resonant period'),
- ('ldc1612(<name>,z)', 'Estimated Z height'),
+ ("ldc1612(<name>)", "Coil resonant frequency"),
+ ("ldc1612(<name>,period)", "Coil resonant period"),
+ ("ldc1612(<name>,z)", "Estimated Z height"),
]
+
def __init__(self, lmanager, name, name_parts):
self.name = name
self.sensor_name = name_parts[1]
@@ -521,18 +588,20 @@ class HandleEddyCurrent:
self.report_frequency = len(name_parts) == 2
self.report_z = len(name_parts) == 3 and name_parts[2] == "z"
self.jdispatch = lmanager.get_jdispatch()
- self.next_samp = self.prev_samp = [0., 0., 0.]
+ self.next_samp = self.prev_samp = [0.0, 0.0, 0.0]
self.cur_data = []
self.data_pos = 0
+
def get_label(self):
if self.report_frequency:
- label = '%s frequency' % (self.sensor_name,)
- return {'label': label, 'units': 'Frequency\n(Hz)'}
+ label = "%s frequency" % (self.sensor_name,)
+ return {"label": label, "units": "Frequency\n(Hz)"}
if self.report_z:
- label = '%s height' % (self.sensor_name,)
- return {'label': label, 'units': 'Position\n(mm)'}
- label = '%s period' % (self.sensor_name,)
- return {'label': label, 'units': 'Period\n(s)'}
+ label = "%s height" % (self.sensor_name,)
+ return {"label": label, "units": "Position\n(mm)"}
+ label = "%s period" % (self.sensor_name,)
+ return {"label": label, "units": "Period\n(s)"}
+
def pull_data(self, req_time):
while 1:
next_time, next_freq, next_z = self.next_samp
@@ -545,21 +614,22 @@ class HandleEddyCurrent:
next_val = next_z
prev_val = prev_z
else:
- next_val = 1. / next_freq
- prev_val = 1. / prev_freq
- return interpolate(next_val, prev_val, next_time, prev_time,
- req_time)
+ next_val = 1.0 / next_freq
+ prev_val = 1.0 / prev_freq
+ return interpolate(next_val, prev_val, next_time, prev_time, req_time)
if self.data_pos >= len(self.cur_data):
# Read next data block
jmsg = self.jdispatch.pull_msg(req_time, self.name)
if jmsg is None:
- return 0.
- self.cur_data = jmsg['data']
+ return 0.0
+ self.cur_data = jmsg["data"]
self.data_pos = 0
continue
self.prev_samp = self.next_samp
self.next_samp = self.cur_data[self.data_pos]
self.data_pos += 1
+
+
LogHandlers["ldc1612"] = HandleEddyCurrent
@@ -567,15 +637,18 @@ LogHandlers["ldc1612"] = HandleEddyCurrent
# Log reading
######################################################################
+
# Read, uncompress, and parse messages in a log built by data_logger.py
class JsonLogReader:
def __init__(self, filename):
self.file = open(filename, "rb")
self.comp = zlib.decompressobj(31)
self.msgs = [b""]
+
def seek(self, pos):
self.file.seek(pos)
self.comp = zlib.decompressobj(-15)
+
def pull_msg(self):
msgs = self.msgs
while 1:
@@ -591,55 +664,61 @@ class JsonLogReader:
if not raw_data:
return None
data = self.comp.decompress(raw_data)
- parts = data.split(b'\x03')
+ parts = data.split(b"\x03")
parts[0] = msgs[0] + parts[0]
self.msgs = msgs = parts
+
# Store messages in per-subscription queues until handlers are ready for them
class JsonDispatcher:
def __init__(self, log_prefix):
self.names = {}
self.queues = {}
- self.last_read_time = 0.
+ self.last_read_time = 0.0
self.log_reader = JsonLogReader(log_prefix + ".json.gz")
self.is_eof = False
+
def check_end_of_data(self):
return self.is_eof and not any(self.queues.values())
+
def add_handler(self, name, subscription_id):
self.names[name] = q = []
self.queues.setdefault(subscription_id, []).append(q)
+
def pull_msg(self, req_time, name):
q = self.names[name]
while 1:
if q:
return q.pop(0)
- if req_time + 1. < self.last_read_time:
+ if req_time + 1.0 < self.last_read_time:
return None
json_msg = self.log_reader.pull_msg()
if json_msg is None:
self.is_eof = True
return None
- qid = json_msg.get('q')
- if qid == 'status':
- pt = json_msg.get('toolhead', {}).get('estimated_print_time')
+ qid = json_msg.get("q")
+ if qid == "status":
+ pt = json_msg.get("toolhead", {}).get("estimated_print_time")
if pt is not None:
self.last_read_time = pt
for mq in self.queues.get(qid, []):
- mq.append(json_msg['params'])
+ mq.append(json_msg["params"])
######################################################################
# Dataset and log tracking
######################################################################
+
# Tracking of get_status messages
class TrackStatus:
def __init__(self, lmanager, name, start_status):
self.name = name
self.jdispatch = lmanager.get_jdispatch()
- self.next_status_time = 0.
+ self.next_status_time = 0.0
self.status = dict(start_status)
self.next_update = {}
+
def pull_status(self, req_time):
status = self.status
while 1:
@@ -652,32 +731,35 @@ class TrackStatus:
self.next_status_time = req_time + 0.100
self.next_update = {}
return status, self.next_status_time
- self.next_update = jmsg['status']
- th = self.next_update.get('toolhead', {})
- self.next_status_time = th.get('estimated_print_time', 0.)
+ self.next_update = jmsg["status"]
+ th = self.next_update.get("toolhead", {})
+ self.next_status_time = th.get("estimated_print_time", 0.0)
+
# Split a string by commas while keeping parenthesis intact
def param_split(line):
out = []
level = prev = 0
for i, c in enumerate(line):
- if not level and c == ',':
+ if not level and c == ",":
out.append(line[prev:i])
- prev = i+1
- elif c == '(':
+ prev = i + 1
+ elif c == "(":
level += 1
- elif level and c== ')':
+ elif level and c == ")":
level -= 1
out.append(line[prev:])
return out
+
# Split a dataset name (eg, "abc(def,ghi)") into parts
def name_split(name):
- if '(' not in name or not name.endswith(')'):
+ if "(" not in name or not name.endswith(")"):
raise error("Malformed dataset name '%s'" % (name,))
- aname, aparams = name.split('(', 1)
+ aname, aparams = name.split("(", 1)
return [aname] + param_split(aparams[:-1])
+
# Return a description of possible datasets
def list_datasets():
datasets = []
@@ -685,58 +767,69 @@ def list_datasets():
datasets += LogHandlers[lh].DataSets
return datasets
+
# Main log access management
class LogManager:
error = error
+
def __init__(self, log_prefix):
self.index_reader = JsonLogReader(log_prefix + ".index.gz")
self.jdispatch = JsonDispatcher(log_prefix)
- self.initial_start_time = self.start_time = 0.
+ self.initial_start_time = self.start_time = 0.0
self.datasets = {}
self.initial_status = {}
self.start_status = {}
self.log_subscriptions = {}
self.status_tracker = None
+
def setup_index(self):
fmsg = self.index_reader.pull_msg()
- self.initial_status = status = fmsg['status']
+ self.initial_status = status = fmsg["status"]
self.start_status = dict(status)
- start_time = status['toolhead']['estimated_print_time']
+ start_time = status["toolhead"]["estimated_print_time"]
self.initial_start_time = self.start_time = start_time
- self.log_subscriptions = fmsg.get('subscriptions', {})
+ self.log_subscriptions = fmsg.get("subscriptions", {})
+
def get_initial_status(self):
return self.initial_status
+
def available_dataset_types(self):
return {name: None for name in LogHandlers}
+
def get_jdispatch(self):
return self.jdispatch
+
def seek_time(self, req_time):
self.start_time = req_start_time = self.initial_start_time + req_time
start_status = self.start_status
- seek_time = max(self.initial_start_time, req_start_time - 1.)
+ seek_time = max(self.initial_start_time, req_start_time - 1.0)
file_position = 0
while 1:
fmsg = self.index_reader.pull_msg()
if fmsg is None:
break
- th = fmsg['status']['toolhead']
- ptime = max(th['estimated_print_time'], th.get('print_time', 0.))
+ th = fmsg["status"]["toolhead"]
+ ptime = max(th["estimated_print_time"], th.get("print_time", 0.0))
if ptime > seek_time:
break
for k, v in fmsg["status"].items():
start_status.setdefault(k, {}).update(v)
- file_position = fmsg['file_position']
+ file_position = fmsg["file_position"]
if file_position:
self.jdispatch.log_reader.seek(file_position)
+
def get_initial_start_time(self):
return self.initial_start_time
+
def get_start_time(self):
return self.start_time
+
def get_status_tracker(self):
if self.status_tracker is None:
self.status_tracker = TrackStatus(self, "status", self.start_status)
self.jdispatch.add_handler("status", "status")
return self.status_tracker
+
def setup_dataset(self, name):
if name in self.datasets:
return self.datasets[name]
@@ -748,7 +841,7 @@ class LogManager:
if len_pp < cls.ParametersMin or len_pp > cls.ParametersMax:
raise error("Invalid number of parameters for '%s'" % (name,))
if cls.SubscriptionIdParts:
- subscription_id = ":".join(name_parts[:cls.SubscriptionIdParts])
+ subscription_id = ":".join(name_parts[: cls.SubscriptionIdParts])
if subscription_id not in self.log_subscriptions:
raise error("Dataset '%s' not in capture" % (subscription_id,))
self.jdispatch.add_handler(name, subscription_id)
diff --git a/scripts/parsecandump.py b/scripts/parsecandump.py
index d575dc5a..4ae862dc 100755
--- a/scripts/parsecandump.py
+++ b/scripts/parsecandump.py
@@ -6,26 +6,31 @@
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os, optparse
+
def import_msgproto():
global msgproto
# Load msgproto.py module
- kdir = os.path.join(os.path.dirname(__file__), '..', 'klippy')
+ kdir = os.path.join(os.path.dirname(__file__), "..", "klippy")
sys.path.append(kdir)
import msgproto
+
def read_dictionary(filename):
- dfile = open(filename, 'rb')
+ dfile = open(filename, "rb")
dictionary = dfile.read()
dfile.close()
return dictionary
+
def report(msg, line_info, name="", is_err=False):
line_number, line_time = line_info
warn = ""
if is_err:
warn = " WARN"
- sys.stdout.write("%04d:%010.6f:%s%s %s\n"
- % (line_number, line_time, name, warn, msg))
+ sys.stdout.write(
+ "%04d:%010.6f:%s%s %s\n" % (line_number, line_time, name, warn, msg)
+ )
+
class canscan:
def __init__(self, name, mp):
@@ -33,6 +38,7 @@ class canscan:
self.mp = mp
self.data = bytearray()
self.need_scan = False
+
def handle_data(self, line_info, line, newdata):
data = self.data
data += bytearray(newdata)
@@ -44,8 +50,12 @@ class canscan:
drop = syncpos + 1
self.need_scan = False
disc = ["%02X" % (d,) for d in data[:drop]]
- report("Discarding %d (%s)" % (drop, " ".join(disc)),
- line_info, self.name, is_err=True)
+ report(
+ "Discarding %d (%s)" % (drop, " ".join(disc)),
+ line_info,
+ self.name,
+ is_err=True,
+ )
data[:drop] = []
if not data:
break
@@ -53,17 +63,22 @@ class canscan:
if l == 0:
break
if l < 0:
- report("Invalid data: %s" % (line.strip(),),
- line_info, self.name, is_err=True)
+ report(
+ "Invalid data: %s" % (line.strip(),),
+ line_info,
+ self.name,
+ is_err=True,
+ )
self.need_scan = True
continue
if l == 5:
report("Ack %02x" % (data[1],), line_info, self.name)
else:
msgs = self.mp.dump(data[:l])
- report("%d: %s" % (l, ', '.join(msgs)), line_info, self.name)
+ report("%d: %s" % (l, ", ".join(msgs)), line_info, self.name)
data[:l] = []
+
def read_candump(canfile, canid, dictionary):
mp = msgproto.MessageParser()
mp.process_identify(dictionary, decompress=False)
@@ -71,7 +86,7 @@ def read_candump(canfile, canid, dictionary):
txid = "%03X" % (canid & ~1,)
handlers = {rxid: canscan("RX", mp), txid: canscan("TX", mp)}
- last_time = -1.
+ last_time = -1.0
line_number = 0
must_scan = False
data = bytearray()
@@ -80,24 +95,34 @@ def read_candump(canfile, canid, dictionary):
parts = line.split()
if len(parts) < 7:
if parts:
- report("Ignoring line: %s" % (line.strip(),),
- (line_number, 0.), is_err=True)
+ report(
+ "Ignoring line: %s" % (line.strip(),),
+ (line_number, 0.0),
+ is_err=True,
+ )
continue
p_ts = parts[0]
p_canid = parts[5]
p_len = parts[6]
p_data = parts[7:]
- if (not p_ts.startswith('(') or not p_ts.endswith(')')
- or not p_len.startswith('[') or not p_len.endswith(']')):
- report("Ignoring line: %s" % (line.strip(),),
- (line_number, 0.), is_err=True)
+ if (
+ not p_ts.startswith("(")
+ or not p_ts.endswith(")")
+ or not p_len.startswith("[")
+ or not p_len.endswith("]")
+ ):
+ report(
+ "Ignoring line: %s" % (line.strip(),), (line_number, 0.0), is_err=True
+ )
continue
new_time = float(p_ts[1:-1])
line_info = (line_number, new_time)
if new_time < last_time:
- report("Backwards time %.6f vs %.6f: %s"
- % (new_time, last_time, line.strip()),
- line_info, is_err=True)
+ report(
+ "Backwards time %.6f vs %.6f: %s" % (new_time, last_time, line.strip()),
+ line_info,
+ is_err=True,
+ )
last_time = new_time
hdlr = handlers.get(p_canid)
@@ -105,6 +130,7 @@ def read_candump(canfile, canid, dictionary):
newdata = [int(i, 16) for i in p_data]
hdlr.handle_data(line_info, line, newdata)
+
def main():
usage = "%prog <candump.log> <canid> <mcu.dict>"
opts = optparse.OptionParser(usage)
@@ -121,5 +147,6 @@ def main():
read_candump(canfile, canid, dictionary)
canfile.close()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/spi_flash/board_defs.py b/scripts/spi_flash/board_defs.py
index 44eefa4f..e6e50a72 100644
--- a/scripts/spi_flash/board_defs.py
+++ b/scripts/spi_flash/board_defs.py
@@ -11,165 +11,125 @@
###########################################################
BOARD_DEFS = {
- 'generic-lpc1768': {
- 'mcu': "lpc1768",
- 'spi_bus': "ssp1",
- "cs_pin": "P0.6"
- },
- 'generic-lpc1769': {
- 'mcu': "lpc1769",
- 'spi_bus': "ssp1",
- "cs_pin": "P0.6"
- },
- 'btt-skr-mini': {
- 'mcu': "stm32f103xe",
- 'spi_bus': "spi1",
- "cs_pin": "PA4"
- },
- 'btt-skr-mini-v3': {
- 'mcu': "stm32g0b1xx",
- 'spi_bus': "spi1",
- "cs_pin": "PA4"
- },
- 'btt-skr-mini-v3-b0': {
- 'mcu': "stm32g0b0xx",
- 'spi_bus': "spi1",
- "cs_pin": "PA4"
- },
- 'flyboard-mini': {
- 'mcu': "stm32f103xe",
- 'spi_bus': "spi2",
+ "generic-lpc1768": {"mcu": "lpc1768", "spi_bus": "ssp1", "cs_pin": "P0.6"},
+ "generic-lpc1769": {"mcu": "lpc1769", "spi_bus": "ssp1", "cs_pin": "P0.6"},
+ "btt-skr-mini": {"mcu": "stm32f103xe", "spi_bus": "spi1", "cs_pin": "PA4"},
+ "btt-skr-mini-v3": {"mcu": "stm32g0b1xx", "spi_bus": "spi1", "cs_pin": "PA4"},
+ "btt-skr-mini-v3-b0": {"mcu": "stm32g0b0xx", "spi_bus": "spi1", "cs_pin": "PA4"},
+ "flyboard-mini": {
+ "mcu": "stm32f103xe",
+ "spi_bus": "spi2",
"cs_pin": "PB12",
- "current_firmware_path": "FLY.CUR"
+ "current_firmware_path": "FLY.CUR",
},
- 'mks-robin-e3': {
- 'mcu': "stm32f103xe",
- 'spi_bus': "spi2",
+ "mks-robin-e3": {
+ "mcu": "stm32f103xe",
+ "spi_bus": "spi2",
"cs_pin": "PA15",
"conversion_script": "scripts/update_mks_robin.py",
"firmware_path": "Robin_e3.bin",
- "current_firmware_path": "Robin_e3.cur"
+ "current_firmware_path": "Robin_e3.cur",
},
# twotrees sapphire 5 v1.1 using mks robin nano 1.2 board
- 'mks-robin-v12': {
- 'mcu': "stm32f103xe",
- 'spi_bus': "swspi",
- 'spi_pins': "PC8,PD2,PC12",
- 'cs_pin': "PC11",
- 'skip_verify': True,
+ "mks-robin-v12": {
+ "mcu": "stm32f103xe",
+ "spi_bus": "swspi",
+ "spi_pins": "PC8,PD2,PC12",
+ "cs_pin": "PC11",
+ "skip_verify": True,
"conversion_script": "scripts/update_mks_robin.py",
"firmware_path": "ROBIN_NANO35.BIN",
- "current_firmware_path": "ROBIN_NANO35.BIN"
+ "current_firmware_path": "ROBIN_NANO35.BIN",
},
- 'btt-octopus-f407-v1': {
- 'mcu': "stm32f407xx",
- 'spi_bus': "swspi",
- 'spi_pins': "PC8,PD2,PC12",
- 'cs_pin': "PC11",
- 'skip_verify': True
- },
- 'btt-octopus-f429-v1': {
- 'mcu': "stm32f429xx",
- 'spi_bus': "swspi",
- 'spi_pins': "PC8,PD2,PC12",
- 'cs_pin': "PC11",
- 'skip_verify': True
- },
- 'btt-octopus-f446-v1': {
- 'mcu': "stm32f446xx",
- 'spi_bus': "swspi",
- 'spi_pins': "PC8,PD2,PC12",
- 'cs_pin': "PC11",
- 'skip_verify': True
+ "btt-octopus-f407-v1": {
+ "mcu": "stm32f407xx",
+ "spi_bus": "swspi",
+ "spi_pins": "PC8,PD2,PC12",
+ "cs_pin": "PC11",
+ "skip_verify": True,
},
- 'btt-skr-pro': {
- 'mcu': "stm32f407xx",
- 'spi_bus': "swspi",
- 'spi_pins': "PA6,PB5,PA5",
- "cs_pin": "PA4"
+ "btt-octopus-f429-v1": {
+ "mcu": "stm32f429xx",
+ "spi_bus": "swspi",
+ "spi_pins": "PC8,PD2,PC12",
+ "cs_pin": "PC11",
+ "skip_verify": True,
},
- 'btt-gtr': {
- 'mcu': "stm32f407xx",
- 'spi_bus': "spi1",
- "cs_pin": "PA4"
+ "btt-octopus-f446-v1": {
+ "mcu": "stm32f446xx",
+ "spi_bus": "swspi",
+ "spi_pins": "PC8,PD2,PC12",
+ "cs_pin": "PC11",
+ "skip_verify": True,
},
- 'fysetc-spider': {
- 'mcu': "stm32f446xx",
- 'spi_bus': "spi1",
+ "btt-skr-pro": {
+ "mcu": "stm32f407xx",
+ "spi_bus": "swspi",
+ "spi_pins": "PA6,PB5,PA5",
"cs_pin": "PA4",
- "current_firmware_path": "OLD.BIN"
},
- 'btt-skr-se-bx': {
- 'mcu': 'stm32h743xx',
- 'spi_bus': 'spi3a',
- 'cs_pin': 'PA15'
- },
- 'btt-skr-3-h743': {
- 'mcu': 'stm32h743xx',
- 'spi_bus': 'swspi',
- 'spi_pins': "PC8,PD2,PC12",
- 'cs_pin': 'PC11',
- 'skip_verify': True
- },
- 'btt-skr-3-h723': {
- 'mcu': 'stm32h723xx',
- 'spi_bus': 'swspi',
- 'spi_pins': "PC8,PD2,PC12",
- 'cs_pin': 'PC11',
- 'skip_verify': True
+ "btt-gtr": {"mcu": "stm32f407xx", "spi_bus": "spi1", "cs_pin": "PA4"},
+ "fysetc-spider": {
+ "mcu": "stm32f446xx",
+ "spi_bus": "spi1",
+ "cs_pin": "PA4",
+ "current_firmware_path": "OLD.BIN",
},
- 'creality-v4.2.2': {
- 'mcu': "stm32f103xe",
- 'spi_bus': "swspi",
- 'spi_pins': "PC8,PD2,PC12",
- 'cs_pin': "PC11",
- 'skip_verify': True
+ "btt-skr-se-bx": {"mcu": "stm32h743xx", "spi_bus": "spi3a", "cs_pin": "PA15"},
+ "btt-skr-3-h743": {
+ "mcu": "stm32h743xx",
+ "spi_bus": "swspi",
+ "spi_pins": "PC8,PD2,PC12",
+ "cs_pin": "PC11",
+ "skip_verify": True,
},
- 'monster8': {
- 'mcu': "stm32f407xx",
- 'spi_bus': "spi3a",
- "cs_pin": "PC9"
+ "btt-skr-3-h723": {
+ "mcu": "stm32h723xx",
+ "spi_bus": "swspi",
+ "spi_pins": "PC8,PD2,PC12",
+ "cs_pin": "PC11",
+ "skip_verify": True,
},
- 'fly-gemini-v2': {
- 'mcu': "stm32f405xx",
- 'spi_bus': "spi1",
- "cs_pin": "PA4"
+ "creality-v4.2.2": {
+ "mcu": "stm32f103xe",
+ "spi_bus": "swspi",
+ "spi_pins": "PC8,PD2,PC12",
+ "cs_pin": "PC11",
+ "skip_verify": True,
},
- 'fysetc-cheetah': {
- 'mcu': "stm32f401xc",
- 'spi_bus': "spi1",
+ "monster8": {"mcu": "stm32f407xx", "spi_bus": "spi3a", "cs_pin": "PC9"},
+ "fly-gemini-v2": {"mcu": "stm32f405xx", "spi_bus": "spi1", "cs_pin": "PA4"},
+ "fysetc-cheetah": {
+ "mcu": "stm32f401xc",
+ "spi_bus": "spi1",
"cs_pin": "PA4",
- "current_firmware_path": "OLD.BIN"
+ "current_firmware_path": "OLD.BIN",
},
- 'btt-octopus-max-ez': {
- 'mcu': "stm32h723xx",
- 'spi_bus': "swspi",
- 'spi_pins': "PE13,PE14,PE12",
- 'cs_pin': "PB12",
- 'skip_verify': True
- },
- 'btt-skrat': {
- 'mcu': "stm32g0b1xx",
- 'spi_bus': "spi1",
- "cs_pin": "PB8"
+ "btt-octopus-max-ez": {
+ "mcu": "stm32h723xx",
+ "spi_bus": "swspi",
+ "spi_pins": "PE13,PE14,PE12",
+ "cs_pin": "PB12",
+ "skip_verify": True,
},
- 'chitu-v6': {
- 'mcu': "stm32f103xe",
- 'spi_bus': "swspi",
- 'spi_pins': "PC8,PD2,PC12",
+ "btt-skrat": {"mcu": "stm32g0b1xx", "spi_bus": "spi1", "cs_pin": "PB8"},
+ "chitu-v6": {
+ "mcu": "stm32f103xe",
+ "spi_bus": "swspi",
+ "spi_pins": "PC8,PD2,PC12",
"cs_pin": "PC11",
#'sdio_bus': 'sdio',
"conversion_script": "scripts/update_chitu.py",
"firmware_path": "update.cbd",
- 'skip_verify': True
+ "skip_verify": True,
},
- 'znp-robin-nano-dw-v2.2': {
- 'mcu': "stm32f401xc",
- 'spi_bus': "spi2",
+ "znp-robin-nano-dw-v2.2": {
+ "mcu": "stm32f401xc",
+ "spi_bus": "spi2",
"cs_pin": "PB12",
"firmware_path": "ZNP_ROBIN_NANO.bin",
- "current_firmware_path": "ZNP_ROBIN_NANO.CUR"
- }
+ "current_firmware_path": "ZNP_ROBIN_NANO.CUR",
+ },
}
###########################################################
@@ -179,52 +139,54 @@ BOARD_DEFS = {
###########################################################
BOARD_ALIASES = {
- 'btt-skr-v1.1': BOARD_DEFS['generic-lpc1768'],
- 'btt-skr-v1.3': BOARD_DEFS['generic-lpc1768'],
- 'btt-skr-v1.4': BOARD_DEFS['generic-lpc1768'],
- 'mks-sgenl-v1': BOARD_DEFS['generic-lpc1768'],
- 'mks-sbase': BOARD_DEFS['generic-lpc1768'],
- 'smoothieboard-v1': BOARD_DEFS['generic-lpc1769'],
- 'btt-skr-turbo-v1.4': BOARD_DEFS['generic-lpc1769'],
- 'btt-skr-e3-turbo': BOARD_DEFS['generic-lpc1769'],
- 'mks-sgenl-v2': BOARD_DEFS['generic-lpc1769'],
- 'btt-skr-mini-v1.1': BOARD_DEFS['btt-skr-mini'],
- 'btt-skr-mini-e3-v1': BOARD_DEFS['btt-skr-mini'],
- 'btt-skr-mini-e3-v1.2': BOARD_DEFS['btt-skr-mini'],
- 'btt-skr-mini-e3-v2': BOARD_DEFS['btt-skr-mini'],
- 'btt-skr-mini-e3-v3': BOARD_DEFS['btt-skr-mini-v3'],
- 'btt-skr-mini-e3-v3-b0': BOARD_DEFS['btt-skr-mini-v3-b0'],
- 'btt-skr-mini-mz': BOARD_DEFS['btt-skr-mini'],
- 'btt-skr-e3-dip': BOARD_DEFS['btt-skr-mini'],
- 'btt002-v1': BOARD_DEFS['btt-skr-mini'],
- 'creality-v4.2.7': BOARD_DEFS['creality-v4.2.2'],
- 'btt-skr-2-f407': BOARD_DEFS['btt-octopus-f407-v1'],
- 'btt-skr-2-f429': BOARD_DEFS['btt-octopus-f429-v1'],
- 'btt-octopus-f407-v1.0': BOARD_DEFS['btt-octopus-f407-v1'],
- 'btt-octopus-f407-v1.1': BOARD_DEFS['btt-octopus-f407-v1'],
- 'btt-octopus-f429-v1.0': BOARD_DEFS['btt-octopus-f429-v1'],
- 'btt-octopus-f429-v1.1': BOARD_DEFS['btt-octopus-f429-v1'],
- 'btt-octopus-f446-v1.0': BOARD_DEFS['btt-octopus-f446-v1'],
- 'btt-octopus-f446-v1.1': BOARD_DEFS['btt-octopus-f446-v1'],
- 'btt-octopus-pro-f429-v1.0': BOARD_DEFS['btt-octopus-f429-v1'],
- 'btt-octopus-pro-f446-v1.0': BOARD_DEFS['btt-octopus-f446-v1'],
- 'btt-octopus-pro-h723-v1.1': BOARD_DEFS['btt-skr-3-h723'],
- 'btt-skr-pro-v1.1': BOARD_DEFS['btt-skr-pro'],
- 'btt-skr-pro-v1.2': BOARD_DEFS['btt-skr-pro'],
- 'btt-gtr-v1': BOARD_DEFS['btt-gtr'],
- 'mks-robin-e3d': BOARD_DEFS['mks-robin-e3'],
- 'fysetc-cheetah-v2': BOARD_DEFS['fysetc-cheetah'],
- 'fysetc-spider-v1': BOARD_DEFS['fysetc-spider'],
- 'fysetc-s6-v1.2': BOARD_DEFS['fysetc-spider'],
- 'fysetc-s6-v2': BOARD_DEFS['fysetc-spider'],
- 'robin_v3': BOARD_DEFS['monster8'],
- 'btt-skrat-v1.0': BOARD_DEFS['btt-skrat'],
- 'chitu-v6': BOARD_DEFS['chitu-v6']
+ "btt-skr-v1.1": BOARD_DEFS["generic-lpc1768"],
+ "btt-skr-v1.3": BOARD_DEFS["generic-lpc1768"],
+ "btt-skr-v1.4": BOARD_DEFS["generic-lpc1768"],
+ "mks-sgenl-v1": BOARD_DEFS["generic-lpc1768"],
+ "mks-sbase": BOARD_DEFS["generic-lpc1768"],
+ "smoothieboard-v1": BOARD_DEFS["generic-lpc1769"],
+ "btt-skr-turbo-v1.4": BOARD_DEFS["generic-lpc1769"],
+ "btt-skr-e3-turbo": BOARD_DEFS["generic-lpc1769"],
+ "mks-sgenl-v2": BOARD_DEFS["generic-lpc1769"],
+ "btt-skr-mini-v1.1": BOARD_DEFS["btt-skr-mini"],
+ "btt-skr-mini-e3-v1": BOARD_DEFS["btt-skr-mini"],
+ "btt-skr-mini-e3-v1.2": BOARD_DEFS["btt-skr-mini"],
+ "btt-skr-mini-e3-v2": BOARD_DEFS["btt-skr-mini"],
+ "btt-skr-mini-e3-v3": BOARD_DEFS["btt-skr-mini-v3"],
+ "btt-skr-mini-e3-v3-b0": BOARD_DEFS["btt-skr-mini-v3-b0"],
+ "btt-skr-mini-mz": BOARD_DEFS["btt-skr-mini"],
+ "btt-skr-e3-dip": BOARD_DEFS["btt-skr-mini"],
+ "btt002-v1": BOARD_DEFS["btt-skr-mini"],
+ "creality-v4.2.7": BOARD_DEFS["creality-v4.2.2"],
+ "btt-skr-2-f407": BOARD_DEFS["btt-octopus-f407-v1"],
+ "btt-skr-2-f429": BOARD_DEFS["btt-octopus-f429-v1"],
+ "btt-octopus-f407-v1.0": BOARD_DEFS["btt-octopus-f407-v1"],
+ "btt-octopus-f407-v1.1": BOARD_DEFS["btt-octopus-f407-v1"],
+ "btt-octopus-f429-v1.0": BOARD_DEFS["btt-octopus-f429-v1"],
+ "btt-octopus-f429-v1.1": BOARD_DEFS["btt-octopus-f429-v1"],
+ "btt-octopus-f446-v1.0": BOARD_DEFS["btt-octopus-f446-v1"],
+ "btt-octopus-f446-v1.1": BOARD_DEFS["btt-octopus-f446-v1"],
+ "btt-octopus-pro-f429-v1.0": BOARD_DEFS["btt-octopus-f429-v1"],
+ "btt-octopus-pro-f446-v1.0": BOARD_DEFS["btt-octopus-f446-v1"],
+ "btt-octopus-pro-h723-v1.1": BOARD_DEFS["btt-skr-3-h723"],
+ "btt-skr-pro-v1.1": BOARD_DEFS["btt-skr-pro"],
+ "btt-skr-pro-v1.2": BOARD_DEFS["btt-skr-pro"],
+ "btt-gtr-v1": BOARD_DEFS["btt-gtr"],
+ "mks-robin-e3d": BOARD_DEFS["mks-robin-e3"],
+ "fysetc-cheetah-v2": BOARD_DEFS["fysetc-cheetah"],
+ "fysetc-spider-v1": BOARD_DEFS["fysetc-spider"],
+ "fysetc-s6-v1.2": BOARD_DEFS["fysetc-spider"],
+ "fysetc-s6-v2": BOARD_DEFS["fysetc-spider"],
+ "robin_v3": BOARD_DEFS["monster8"],
+ "btt-skrat-v1.0": BOARD_DEFS["btt-skrat"],
+ "chitu-v6": BOARD_DEFS["chitu-v6"],
}
+
def list_boards():
return sorted(list(BOARD_DEFS.keys()) + list(BOARD_ALIASES.keys()))
+
def lookup_board(name):
name = name.lower()
bdef = BOARD_ALIASES.get(name, BOARD_DEFS.get(name, None))
diff --git a/scripts/spi_flash/fatfs_lib.py b/scripts/spi_flash/fatfs_lib.py
index ef92070a..6ad37e66 100644
--- a/scripts/spi_flash/fatfs_lib.py
+++ b/scripts/spi_flash/fatfs_lib.py
@@ -5,8 +5,8 @@
# This file may be distributed under the terms of the GNU GPLv3 license.
import os
import sys
-KLIPPER_DIR = os.path.abspath(os.path.join(
- os.path.dirname(__file__), "../../"))
+
+KLIPPER_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
sys.path.append(os.path.join(KLIPPER_DIR, "klippy"))
import chelper
@@ -58,6 +58,7 @@ FATFS_CDEFS = """
fatfs_ffi_main = None
fatfs_ffi_lib = None
+
def check_fatfs_build(printfunc=lambda o: o):
printfunc("Checking FatFS CFFI Build...\n")
ffi_main, ffi_lib = chelper.get_ffi()
@@ -67,20 +68,24 @@ def check_fatfs_build(printfunc=lambda o: o):
ofiles = chelper.get_abs_files(FATFS_DIR, FATFS_HEADERS)
ofiles.extend(chelper.get_abs_files(srcdir, SPI_FLASH_HEADERS))
destlib = os.path.join(srcdir, DEST_LIB)
- if chelper.check_build_code(srcfiles+ofiles+[__file__], destlib):
+ if chelper.check_build_code(srcfiles + ofiles + [__file__], destlib):
if chelper.check_gcc_option(chelper.SSE_FLAGS):
- cmd = "%s %s %s" % (chelper.GCC_CMD, chelper.SSE_FLAGS,
- chelper.COMPILE_ARGS)
+ cmd = "%s %s %s" % (
+ chelper.GCC_CMD,
+ chelper.SSE_FLAGS,
+ chelper.COMPILE_ARGS,
+ )
else:
cmd = "%s %s" % (chelper.GCC_CMD, chelper.COMPILE_ARGS)
printfunc("Building FatFS shared library...")
- chelper.do_build_code(cmd % (destlib, ' '.join(srcfiles)))
+ chelper.do_build_code(cmd % (destlib, " ".join(srcfiles)))
printfunc("Done\n")
global fatfs_ffi_main, fatfs_ffi_lib
ffi_main.cdef(FATFS_CDEFS)
fatfs_ffi_lib = ffi_main.dlopen(destlib)
fatfs_ffi_main = ffi_main
+
def get_fatfs_ffi():
global fatfs_ffi_main, fatfs_ffi_lib
if fatfs_ffi_main is None:
diff --git a/scripts/spi_flash/spi_flash.py b/scripts/spi_flash/spi_flash.py
index 729dd2bb..f784f0a4 100644
--- a/scripts/spi_flash/spi_flash.py
+++ b/scripts/spi_flash/spi_flash.py
@@ -29,14 +29,17 @@ import mcu
#
###########################################################
+
def output_line(msg):
sys.stdout.write("%s\n" % (msg,))
sys.stdout.flush()
+
def output(msg):
sys.stdout.write("%s" % (msg,))
sys.stdout.flush()
+
def calc_crc7(data, with_padding=True):
# G(x) = x^7 + x^3 + 1
# Shift left as we are only calculating a 7 bit CRC
@@ -52,6 +55,7 @@ def calc_crc7(data, with_padding=True):
return crc
return crc | 1
+
def calc_crc16(data):
poly = 0b10001000000100001
crc = 0
@@ -61,32 +65,34 @@ def calc_crc16(data):
crc = (crc << 1) ^ poly if crc & 0x8000 else crc << 1
return crc & 0xFFFF
+
# Translate a serial device name to a stable serial name in
# /dev/serial/by-path/
# Borrowed from klipper/scripts/flash_usb.py
def translate_serial_to_tty(device):
ttyname = os.path.realpath(device)
- if os.path.exists('/dev/serial/by-path/'):
- for fname in os.listdir('/dev/serial/by-path/'):
- fname = '/dev/serial/by-path/' + fname
+ if os.path.exists("/dev/serial/by-path/"):
+ for fname in os.listdir("/dev/serial/by-path/"):
+ fname = "/dev/serial/by-path/" + fname
if os.path.realpath(fname) == ttyname:
return ttyname, fname
return ttyname, ttyname
+
def check_need_convert(board_name, config):
conv_script = config.get("conversion_script")
if conv_script is None:
return
conv_util = os.path.join(fatfs_lib.KLIPPER_DIR, conv_script)
- klipper_bin = config['klipper_bin_path']
+ klipper_bin = config["klipper_bin_path"]
dest_bin = os.path.join(
- os.path.dirname(klipper_bin),
- os.path.basename(config['firmware_path']))
+ os.path.dirname(klipper_bin), os.path.basename(config["firmware_path"])
+ )
cmd = "%s %s %s %s" % (sys.executable, conv_util, klipper_bin, dest_bin)
output("Converting Klipper binary to custom format...")
os.system(cmd)
output_line("Done")
- config['klipper_bin_path'] = dest_bin
+ config["klipper_bin_path"] = dest_bin
###########################################################
@@ -102,14 +108,14 @@ SD_SPI_SPEED = 400000
# MCU Command Constants
RESET_CMD = "reset"
GET_CFG_CMD = "get_config"
-GET_CFG_RESPONSES = ( # Supported responses (sorted by newer revisions first).
- "config is_config=%c crc=%u is_shutdown=%c move_count=%hu", # d4aee4f
- "config is_config=%c crc=%u move_count=%hu is_shutdown=%c" # Original
+GET_CFG_RESPONSES = ( # Supported responses (sorted by newer revisions first).
+ "config is_config=%c crc=%u is_shutdown=%c move_count=%hu", # d4aee4f
+ "config is_config=%c crc=%u move_count=%hu is_shutdown=%c", # Original
)
ALLOC_OIDS_CMD = "allocate_oids count=%d"
SPI_CFG_CMDS = (
- "config_spi oid=%d pin=%s cs_active_high=%d", # 7793784
- "config_spi oid=%d pin=%s" # Original
+ "config_spi oid=%d pin=%s cs_active_high=%d", # 7793784
+ "config_spi oid=%d pin=%s", # Original
)
SPI_BUS_CMD = "spi_set_bus oid=%d spi_bus=%s mode=%d rate=%d"
SW_SPI_BUS_CMDS = (
@@ -124,31 +130,34 @@ SPI_XFER_RESPONSE = "spi_transfer_response oid=%c response=%*s"
SDIO_CFG_CMD = "config_sdio oid=%d blocksize=%u"
SDIO_BUS_CMD = "sdio_set_bus oid=%d sdio_bus=%s"
SDIO_SEND_CMD = "sdio_send_command oid=%c cmd=%c argument=%u wait=%c"
-SDIO_SEND_CMD_RESPONSE = "sdio_send_command_response oid=%c error=%c " \
- "response=%*s"
-SDIO_READ_DATA="sdio_read_data oid=%c cmd=%c argument=%u"
-SDIO_READ_DATA_RESPONSE="sdio_read_data_response oid=%c error=%c read=%u"
-SDIO_WRITE_DATA="sdio_write_data oid=%c cmd=%c argument=%u"
-SDIO_WRITE_DATA_RESPONSE="sdio_write_data_response oid=%c error=%c write=%u"
-SDIO_READ_DATA_BUFFER="sdio_read_data_buffer oid=%c offset=%u len=%c"
-SDIO_READ_DATA_BUFFER_RESPONSE="sdio_read_data_buffer_response oid=%c data=%*s"
-SDIO_WRITE_DATA_BUFFER="sdio_write_data_buffer oid=%c offset=%u data=%*s"
-SDIO_SET_SPEED="sdio_set_speed oid=%c speed=%u"
+SDIO_SEND_CMD_RESPONSE = "sdio_send_command_response oid=%c error=%c " "response=%*s"
+SDIO_READ_DATA = "sdio_read_data oid=%c cmd=%c argument=%u"
+SDIO_READ_DATA_RESPONSE = "sdio_read_data_response oid=%c error=%c read=%u"
+SDIO_WRITE_DATA = "sdio_write_data oid=%c cmd=%c argument=%u"
+SDIO_WRITE_DATA_RESPONSE = "sdio_write_data_response oid=%c error=%c write=%u"
+SDIO_READ_DATA_BUFFER = "sdio_read_data_buffer oid=%c offset=%u len=%c"
+SDIO_READ_DATA_BUFFER_RESPONSE = "sdio_read_data_buffer_response oid=%c data=%*s"
+SDIO_WRITE_DATA_BUFFER = "sdio_write_data_buffer oid=%c offset=%u data=%*s"
+SDIO_SET_SPEED = "sdio_set_speed oid=%c speed=%u"
FINALIZE_CFG_CMD = "finalize_config crc=%d"
+
class SPIFlashError(Exception):
pass
+
class MCUConfigError(SPIFlashError):
pass
+
class SPIDirect:
def __init__(self, ser):
self.oid = SPI_OID
self._spi_send_cmd = mcu.CommandWrapper(ser, SPI_SEND_CMD)
self._spi_transfer_cmd = mcu.CommandQueryWrapper(
- ser, SPI_XFER_CMD, SPI_XFER_RESPONSE, self.oid)
+ ser, SPI_XFER_CMD, SPI_XFER_RESPONSE, self.oid
+ )
def spi_send(self, data):
self._spi_send_cmd.send([self.oid, data])
@@ -156,20 +165,23 @@ class SPIDirect:
def spi_transfer(self, data):
return self._spi_transfer_cmd.send([self.oid, data])
+
class SDIODirect:
def __init__(self, ser):
self.oid = SDIO_OID
self._sdio_send_cmd = mcu.CommandQueryWrapper(
- ser, SDIO_SEND_CMD, SDIO_SEND_CMD_RESPONSE, self.oid)
+ ser, SDIO_SEND_CMD, SDIO_SEND_CMD_RESPONSE, self.oid
+ )
self._sdio_read_data = mcu.CommandQueryWrapper(
- ser, SDIO_READ_DATA, SDIO_READ_DATA_RESPONSE, self.oid)
+ ser, SDIO_READ_DATA, SDIO_READ_DATA_RESPONSE, self.oid
+ )
self._sdio_write_data = mcu.CommandQueryWrapper(
- ser, SDIO_WRITE_DATA, SDIO_WRITE_DATA_RESPONSE, self.oid)
+ ser, SDIO_WRITE_DATA, SDIO_WRITE_DATA_RESPONSE, self.oid
+ )
self._sdio_read_data_buffer = mcu.CommandQueryWrapper(
- ser, SDIO_READ_DATA_BUFFER, SDIO_READ_DATA_BUFFER_RESPONSE,
- self.oid)
- self._sdio_write_data_buffer = mcu.CommandWrapper(ser,
- SDIO_WRITE_DATA_BUFFER)
+ ser, SDIO_READ_DATA_BUFFER, SDIO_READ_DATA_BUFFER_RESPONSE, self.oid
+ )
+ self._sdio_write_data_buffer = mcu.CommandWrapper(ser, SDIO_WRITE_DATA_BUFFER)
self._sdio_set_speed = mcu.CommandWrapper(ser, SDIO_SET_SPEED)
def sdio_send_cmd(self, cmd, argument, wait):
@@ -192,14 +204,28 @@ class SDIODirect:
# FatFs Constants. Enums are implemented as lists. The item's index is its value
-DRESULT = ['RES_OK', 'RES_ERROR', 'RES_WRPRT', 'RES_NOTRDY', 'RES_PARERR']
+DRESULT = ["RES_OK", "RES_ERROR", "RES_WRPRT", "RES_NOTRDY", "RES_PARERR"]
FRESULT = [
- 'FR_OK', 'FR_DISK_ERR', 'FR_INT_ERR', 'FR_NOT_READY', 'FR_NO_FILE',
- 'FR_NO_PATH', 'FR_INVALID_NAME', 'FR_DENIED', 'FR_EXIST',
- 'FR_INVALID_OBJECT', 'FR_WRITE_PROTECTED', 'FR_INVALID_DRIVE',
- 'FR_NOT_ENABLED', 'FR_NO_FILESYSTEM', 'FR_MKFS_ABORTED', 'FR_TIMEOUT',
- 'FR_LOCKED', 'FR_NOT_ENOUGH_CORE', 'FR_TOO_MANY_OPEN_FILES',
- 'FR_INVALID_PARAMETER'
+ "FR_OK",
+ "FR_DISK_ERR",
+ "FR_INT_ERR",
+ "FR_NOT_READY",
+ "FR_NO_FILE",
+ "FR_NO_PATH",
+ "FR_INVALID_NAME",
+ "FR_DENIED",
+ "FR_EXIST",
+ "FR_INVALID_OBJECT",
+ "FR_WRITE_PROTECTED",
+ "FR_INVALID_DRIVE",
+ "FR_NOT_ENABLED",
+ "FR_NO_FILESYSTEM",
+ "FR_MKFS_ABORTED",
+ "FR_TIMEOUT",
+ "FR_LOCKED",
+ "FR_NOT_ENOUGH_CORE",
+ "FR_TOO_MANY_OPEN_FILES",
+ "FR_INVALID_PARAMETER",
]
FS_TYPES = {1: "FAT12", 2: "FAT16", 3: "FAT32", 4: "EXFAT"}
STA_NO_INIT = 1 << 0
@@ -207,6 +233,7 @@ STA_NO_DISK = 1 << 1
STA_WRITE_PROTECT = 1 << 2
SECTOR_SIZE = 512
+
# FAT16/32 File System Support
class FatFS:
def __init__(self, ser, spi=True):
@@ -220,25 +247,23 @@ class FatFS:
self._register_callbacks()
def _register_callbacks(self):
- status_cb = self.ffi_main.callback(
- "uint8_t(void)", self._fatfs_cb_status)
- init_cb = self.ffi_main.callback(
- "uint8_t(void)", self._fatfs_cb_initialize)
+ status_cb = self.ffi_main.callback("uint8_t(void)", self._fatfs_cb_status)
+ init_cb = self.ffi_main.callback("uint8_t(void)", self._fatfs_cb_initialize)
read_cb = self.ffi_main.callback(
- "uint8_t(uint8_t*, uint32_t, unsigned int)",
- self._fatfs_cb_disk_read)
+ "uint8_t(uint8_t*, uint32_t, unsigned int)", self._fatfs_cb_disk_read
+ )
write_cb = self.ffi_main.callback(
- "uint8_t(const uint8_t*, uint32_t, unsigned int)",
- self._fatfs_cb_disk_write)
+ "uint8_t(const uint8_t*, uint32_t, unsigned int)", self._fatfs_cb_disk_write
+ )
ioctl_cb = self.ffi_main.callback(
- "uint8_t(uint8_t, void*)", self._fatfs_cb_disk_ioctl)
- ftime_cb = self.ffi_main.callback(
- "uint32_t(void)", self._fatfs_cb_get_fattime)
+ "uint8_t(uint8_t, void*)", self._fatfs_cb_disk_ioctl
+ )
+ ftime_cb = self.ffi_main.callback("uint32_t(void)", self._fatfs_cb_get_fattime)
# Keep a reference to the callbacks so they don't get gc'ed
- self.ffi_callbacks = [status_cb, init_cb, read_cb, write_cb,
- ioctl_cb, ftime_cb]
+ self.ffi_callbacks = [status_cb, init_cb, read_cb, write_cb, ioctl_cb, ftime_cb]
self.ffi_lib.fatfs_set_callbacks(
- status_cb, init_cb, read_cb, write_cb, ioctl_cb, ftime_cb)
+ status_cb, init_cb, read_cb, write_cb, ioctl_cb, ftime_cb
+ )
def clear_callbacks(self):
self.ffi_lib.fatfs_clear_callbacks()
@@ -306,18 +331,27 @@ class FatFS:
# this module to take any actions for incoming IOCTL
# commands.
ioctl_cmds = [
- 'CTRL_SYNC', 'GET_SECTOR_COUNT', 'GET_SECTOR_SIZE',
- 'GET_BLOCK_SIZE', 'CTRL_TRIM']
- logging.debug("flash_sdcard: Received IOCTL command %s"
- % (ioctl_cmds[cmd]))
+ "CTRL_SYNC",
+ "GET_SECTOR_COUNT",
+ "GET_SECTOR_SIZE",
+ "GET_BLOCK_SIZE",
+ "CTRL_TRIM",
+ ]
+ logging.debug("flash_sdcard: Received IOCTL command %s" % (ioctl_cmds[cmd]))
return 0
def _fatfs_cb_get_fattime(self):
tobj = time.localtime()
year = tobj[0] - 1980
sec = min(tobj[5], 59) // 2
- return (year << 25) | (tobj[1] << 21) | (tobj[2] << 16) \
- | (tobj[3] << 11) | (tobj[4] << 5) | sec
+ return (
+ (year << 25)
+ | (tobj[1] << 21)
+ | (tobj[2] << 16)
+ | (tobj[3] << 11)
+ | (tobj[4] << 5)
+ | sec
+ )
def mount(self, print_func=logging.info):
ret = self.ffi_lib.fatfs_mount()
@@ -327,8 +361,9 @@ class FatFS:
for key, val in sorted(dinfo.items(), key=lambda x: x[0]):
print_func("%s: %s" % (key, val))
else:
- raise OSError("flash_sdcard: failed to mount SD Card, returned %s"
- % (FRESULT[ret]))
+ raise OSError(
+ "flash_sdcard: failed to mount SD Card, returned %s" % (FRESULT[ret])
+ )
def unmount(self):
self.ffi_lib.fatfs_unmount()
@@ -344,9 +379,10 @@ class FatFS:
# Can be path to directory or file
ret = self.ffi_lib.fatfs_remove(sd_path.encode())
if ret != 0:
- raise OSError("flash_sdcard: Error deleting item at path '%s',"
- " result: %s"
- % (sd_path, FRESULT[ret]))
+ raise OSError(
+ "flash_sdcard: Error deleting item at path '%s',"
+ " result: %s" % (sd_path, FRESULT[ret])
+ )
def get_file_info(self, sd_file_path):
finfo = self.ffi_main.new("struct ff_file_info *")
@@ -355,16 +391,25 @@ class FatFS:
raise OSError(
"flash_sdcard: Failed to retreive file info for path '%s',"
" result: %s"
- % (sd_file_path, FRESULT[ret],))
+ % (
+ sd_file_path,
+ FRESULT[ret],
+ )
+ )
return self._parse_ff_info(finfo)
def list_sd_directory(self, sd_dir_path):
flist = self.ffi_main.new("struct ff_file_info[128]")
ret = self.ffi_lib.fatfs_list_dir(flist, 128, sd_dir_path.encode())
if ret != 0:
- raise OSError("flash_sdcard: Failed to retreive file list at path"
- " '%s', result: %s"
- % (sd_dir_path, FRESULT[ret],))
+ raise OSError(
+ "flash_sdcard: Failed to retreive file list at path"
+ " '%s', result: %s"
+ % (
+ sd_dir_path,
+ FRESULT[ret],
+ )
+ )
convlist = []
for f in flist:
if f.size == 0 and f.modified_date == 0 and f.modified_time == 0:
@@ -377,37 +422,49 @@ class FatFS:
fdate = finfo.modified_date
ftime = finfo.modified_time
dstr = "%d-%d-%d %d:%d:%d" % (
- (fdate >> 5) & 0xF, fdate & 0x1F, ((fdate >> 9) & 0x7F) + 1980,
- (ftime >> 11) & 0x1F, (ftime >> 5) & 0x3F, ftime & 0x1F)
+ (fdate >> 5) & 0xF,
+ fdate & 0x1F,
+ ((fdate >> 9) & 0x7F) + 1980,
+ (ftime >> 11) & 0x1F,
+ (ftime >> 5) & 0x3F,
+ ftime & 0x1F,
+ )
return {
- 'name': self.ffi_main.string(finfo.name, 256),
- 'size': finfo.size,
- 'modified': dstr,
- 'is_dir': bool(finfo.attrs & 0x10),
- 'is_read_only': bool(finfo.attrs & 0x01),
- 'is_hidden': bool(finfo.attrs & 0x02),
- 'is_system': bool(finfo.attrs & 0x04)
+ "name": self.ffi_main.string(finfo.name, 256),
+ "size": finfo.size,
+ "modified": dstr,
+ "is_dir": bool(finfo.attrs & 0x10),
+ "is_read_only": bool(finfo.attrs & 0x01),
+ "is_hidden": bool(finfo.attrs & 0x02),
+ "is_system": bool(finfo.attrs & 0x04),
}
def get_disk_info(self):
disk_info = self.ffi_main.new("struct ff_disk_info *")
ret = self.ffi_lib.fatfs_get_disk_info(disk_info)
if ret != 0:
- logging.info("flash_sdcard: Failed to retreive disk info: %s"
- % (FRESULT[ret],))
+ logging.info(
+ "flash_sdcard: Failed to retreive disk info: %s" % (FRESULT[ret],)
+ )
return {}
return {
- 'volume_label': self.ffi_main.string(disk_info.label, 12),
- 'volume_serial': disk_info.serial_number,
- 'fs_type': FS_TYPES.get(disk_info.fs_type, "UNKNOWN")
+ "volume_label": self.ffi_main.string(disk_info.label, 12),
+ "volume_serial": disk_info.serial_number,
+ "fs_type": FS_TYPES.get(disk_info.fs_type, "UNKNOWN"),
}
SD_FILE_MODES = {
- 'w+x': 0x01 | 0x02 | 0x04, 'wx': 0x02 | 0x04,
- 'r+': 0x01 | 0x02, 'w+': 0x01 | 0x02 | 0x08,
- 'a+': 0x01 | 0x02 | 0x30, 'r': 0x01,
- 'w': 0x02 | 0x08, 'a': 0x02 | 0x30}
+ "w+x": 0x01 | 0x02 | 0x04,
+ "wx": 0x02 | 0x04,
+ "r+": 0x01 | 0x02,
+ "w+": 0x01 | 0x02 | 0x08,
+ "a+": 0x01 | 0x02 | 0x30,
+ "r": 0x01,
+ "w": 0x02 | 0x08,
+ "a": 0x02 | 0x30,
+}
+
class SDCardFile:
def __init__(self, ffi_main, ffi_lib, sd_path, mode="r"):
@@ -415,7 +472,7 @@ class SDCardFile:
self.ffi_lib = ffi_lib
self.path = sd_path
mode = mode.lower()
- if mode[-1] == 'b':
+ if mode[-1] == "b":
mode = mode[:-1]
self.mode = SD_FILE_MODES.get(mode, 0)
self.fhdl = None
@@ -429,8 +486,7 @@ class SDCardFile:
self.eof = False
if self.fhdl == self.ffi_main.NULL:
self.fhdl = None
- raise OSError("flash_sdcard: Could not open file '%s':"
- % (self.path))
+ raise OSError("flash_sdcard: Could not open file '%s':" % (self.path))
def read(self, length=None):
if self.fhdl is None:
@@ -448,9 +504,8 @@ class SDCardFile:
while True:
bytes_read = self.ffi_lib.fatfs_read(self.fhdl, cdata_buf, length)
if bytes_read < 0:
- raise OSError("flash_sdcard: Error Reading file '%s'"
- % (self.path))
- self.eof = (bytes_read < length)
+ raise OSError("flash_sdcard: Error Reading file '%s'" % (self.path))
+ self.eof = bytes_read < length
ret_buf += byte_buf[0:bytes_read]
if self.eof or not full_read:
break
@@ -473,9 +528,10 @@ class SDCardFile:
ret = self.ffi_lib.fatfs_close(self.fhdl)
self.fhdl = None
if ret != 0:
- logging.info("flash_sdcard: Error closing sd file '%s', "
- "returned %d"
- % (self.path, FRESULT[ret]))
+ logging.info(
+ "flash_sdcard: Error closing sd file '%s', "
+ "returned %d" % (self.path, FRESULT[ret])
+ )
def __enter__(self):
self.open()
@@ -486,24 +542,25 @@ class SDCardFile:
SD_COMMANDS = {
- 'GO_IDLE_STATE': 0,
- 'ALL_SEND_CID': 2,
- 'SET_REL_ADDR': 3,
- 'SET_BUS_WIDTH': 6,
- 'SEL_DESEL_CARD': 7,
- 'SEND_IF_COND': 8,
- 'SEND_CSD': 9,
- 'SEND_CID': 10,
- 'SD_SEND_OP_COND': 41,
- 'SEND_STATUS': 13,
- 'SET_BLOCKLEN': 16,
- 'READ_SINGLE_BLOCK': 17,
- 'WRITE_BLOCK': 24,
- 'APP_CMD': 55,
- 'READ_OCR': 58,
- 'CRC_ON_OFF': 59,
+ "GO_IDLE_STATE": 0,
+ "ALL_SEND_CID": 2,
+ "SET_REL_ADDR": 3,
+ "SET_BUS_WIDTH": 6,
+ "SEL_DESEL_CARD": 7,
+ "SEND_IF_COND": 8,
+ "SEND_CSD": 9,
+ "SEND_CID": 10,
+ "SD_SEND_OP_COND": 41,
+ "SEND_STATUS": 13,
+ "SET_BLOCKLEN": 16,
+ "READ_SINGLE_BLOCK": 17,
+ "WRITE_BLOCK": 24,
+ "APP_CMD": 55,
+ "READ_OCR": 58,
+ "CRC_ON_OFF": 59,
}
+
class SDCardSPI:
def __init__(self, ser):
self.spi = SPIDirect(ser)
@@ -522,18 +579,20 @@ class SDCardSPI:
if self.initialized:
return
# Send reset command (CMD0)
- if not self._check_command(1, 'GO_IDLE_STATE', 0):
+ if not self._check_command(1, "GO_IDLE_STATE", 0):
raise OSError(
"flash_sdcard: failed to reset SD Card\n"
"Note that older (Version 1.0) SD cards can not be\n"
"hot swapped. Execute FIRMWARE_RESTART with the card\n"
- "inserted for successful initialization.")
+ "inserted for successful initialization."
+ )
# Check Voltage Range (CMD8). Only Cards meeting the v2.0 spec
# support this. V1.0 cards (and MMC) will return illegal command.
check_pattern = 0b1010
resp = self._send_command_with_response(
- 'SEND_IF_COND', (1 << 8) | check_pattern)
- resp = resp.strip(b'\xFF')
+ "SEND_IF_COND", (1 << 8) | check_pattern
+ )
+ resp = resp.strip(b"\xff")
if resp and resp[0] & (1 << 2):
# CMD8 is illegal, this is a version 1.0 card
self.sd_version = 1
@@ -541,54 +600,57 @@ class SDCardSPI:
if resp[0] == 1:
self.sd_version = 2
if not (resp[-2] == 1 and resp[-1] == check_pattern):
- raise OSError("flash_sdcard: SD Card not running in a "
- "compatible voltage range")
+ raise OSError(
+ "flash_sdcard: SD Card not running in a "
+ "compatible voltage range"
+ )
else:
raise OSError("flash_sdcard: CMD8 Error 0x%X" % (resp[0],))
else:
- raise OSError("flash_sdcard: Invalid CMD8 response: %s"
- % (repr(resp)))
+ raise OSError("flash_sdcard: Invalid CMD8 response: %s" % (repr(resp)))
if self.enable_crc:
# Enable SD crc checks (CMD59)
- if not self._check_command(1, 'CRC_ON_OFF', 1):
+ if not self._check_command(1, "CRC_ON_OFF", 1):
logging.info("flash_sdcard: failed to enable CRC checks")
if self.sd_version == 2:
# Init card and come out of idle (ACMD41)
# Version 2 Cards may init before checking the OCR
- if not self._check_command(0, 'SD_SEND_OP_COND', 1 << 30,
- is_app_cmd=True):
- raise OSError("flash_sdcard: SD Card did not come"
- " out of IDLE after reset")
+ if not self._check_command(
+ 0, "SD_SEND_OP_COND", 1 << 30, is_app_cmd=True
+ ):
+ raise OSError(
+ "flash_sdcard: SD Card did not come" " out of IDLE after reset"
+ )
# Read OCR Register (CMD58)
- resp = self._send_command_with_response('READ_OCR', 0)
- resp = resp.strip(b'\xFF')
+ resp = self._send_command_with_response("READ_OCR", 0)
+ resp = resp.strip(b"\xff")
# If 'READ_OCR' is illegal then this is likely MMC.
# At this time MMC is not supported
if len(resp) == 5:
if self.sd_version == 1 and resp[0] == 1:
# Check acceptable voltage range for V1 cards
if resp[2] != 0xFF:
- raise OSError("flash_sdcard: card does not support"
- " 3.3v range")
+ raise OSError(
+ "flash_sdcard: card does not support" " 3.3v range"
+ )
elif self.sd_version == 2 and resp[0] == 0:
# Determine if this is a high capacity sdcard
if resp[1] & 0x40:
self.high_capacity = True
else:
- raise OSError("flash_sdcard: READ_OCR Error 0x%X"
- % (resp[0],))
+ raise OSError("flash_sdcard: READ_OCR Error 0x%X" % (resp[0],))
else:
raise OSError("flash_sdcard: Invalid OCR Response")
if self.sd_version == 1:
# Init card and come out of idle (ACMD41)
# Version 1 Cards do this after checking the OCR
- if not self._check_command(0, 'SD_SEND_OP_COND', 0,
- is_app_cmd=True):
- raise OSError("flash_sdcard: SD Card did not come"
- " out of IDLE after reset")
+ if not self._check_command(0, "SD_SEND_OP_COND", 0, is_app_cmd=True):
+ raise OSError(
+ "flash_sdcard: SD Card did not come" " out of IDLE after reset"
+ )
# Set block size to 512 (CMD16)
- if self._check_command(0, 'SET_BLOCKLEN', SECTOR_SIZE, tries=5):
+ if self._check_command(0, "SET_BLOCKLEN", SECTOR_SIZE, tries=5):
self.initialized = True
else:
raise OSError("flash_sdcard: failed to set block size")
@@ -601,10 +663,10 @@ class SDCardSPI:
if self.initialized:
# Reset the SD Card
try:
- if not self._check_command(1, 'GO_IDLE_STATE', 0):
+ if not self._check_command(1, "GO_IDLE_STATE", 0):
logging.info("flash_sdcard: failed to reset SD Card")
# Disable CRC Checks
- if not self._check_command(1, 'CRC_ON_OFF', 0):
+ if not self._check_command(1, "CRC_ON_OFF", 0):
logging.info("flash_sdcard: failed to disable CRC")
except Exception:
logging.exception("Error resetting SD Card")
@@ -615,26 +677,29 @@ class SDCardSPI:
self.card_info.clear()
def _check_command(self, expected, cmd, args, is_app_cmd=False, tries=15):
- func = self._send_app_cmd_with_response if is_app_cmd else \
- self._send_command_with_response
+ func = (
+ self._send_app_cmd_with_response
+ if is_app_cmd
+ else self._send_command_with_response
+ )
while True:
resp, rt = func(cmd, args, get_rt=True)
# logging.info("flash_sdcard: Check cmd %s, response: %s"
# % (cmd, repr(resp)))
- resp = resp.strip(b'\xFF')
+ resp = resp.strip(b"\xff")
if resp and expected == resp[0]:
return True
tries -= 1
if tries < 1:
return False
- self.reactor.pause(rt + .1)
+ self.reactor.pause(rt + 0.1)
def _send_command(self, cmd, args):
command = SD_COMMANDS[cmd] | 0x40
request = [command]
if isinstance(args, int):
for i in range(3, -1, -1):
- request.append((args >> (8*i)) & 0xFF)
+ request.append((args >> (8 * i)) & 0xFF)
elif isinstance(args, list) and len(args) == 4:
request += args
else:
@@ -645,22 +710,22 @@ class SDCardSPI:
def _send_command_with_response(self, cmd, args, get_rt=False):
self._send_command(cmd, args)
- params = self.spi.spi_transfer([0xFF]*8)
+ params = self.spi.spi_transfer([0xFF] * 8)
if get_rt:
- return bytearray(params['response']), params['#receive_time']
+ return bytearray(params["response"]), params["#receive_time"]
else:
- return bytearray(params['response'])
+ return bytearray(params["response"])
def _send_app_cmd_with_response(self, cmd, args, get_rt=False):
# CMD55 tells the SD Card that the next command is an
# Application Specific Command.
- self._send_command_with_response('APP_CMD', 0)
+ self._send_command_with_response("APP_CMD", 0)
return self._send_command_with_response(cmd, args, get_rt)
def _find_sd_token(self, token, tries=10):
while tries:
params = self.spi.spi_transfer([0xFF])
- resp = bytearray(params['response'])
+ resp = bytearray(params["response"])
if resp[0] == token:
return True
tries -= 1
@@ -669,36 +734,35 @@ class SDCardSPI:
def _find_sd_response(self, tries=10):
while tries:
params = self.spi.spi_transfer([0xFF])
- resp = bytearray(params['response'])
+ resp = bytearray(params["response"])
if resp[0] != 0xFF:
return resp[0]
tries -= 1
return 0xFF
def _process_cid_reg(self):
- self._send_command('SEND_CID', 0)
+ self._send_command("SEND_CID", 0)
reg = self._do_block_read(size=16)
if reg is None:
raise OSError("flash_sdcard: Error reading CID register")
cid = collections.OrderedDict()
- cid['manufacturer_id'] = reg[0]
- cid['oem_id'] = reg[1:3].decode(encoding='ascii', errors='ignore')
- cid['product_name'] = reg[3:8].decode(
- encoding='ascii', errors='ignore')
- cid['product_revision'] = str(reg[8] >> 4 & 0xFF) + "." \
- + str(reg[8] & 0xFF)
- cid['serial_number'] = "".join(["%02X" % (c,) for c in reg[9:13]])
+ cid["manufacturer_id"] = reg[0]
+ cid["oem_id"] = reg[1:3].decode(encoding="ascii", errors="ignore")
+ cid["product_name"] = reg[3:8].decode(encoding="ascii", errors="ignore")
+ cid["product_revision"] = str(reg[8] >> 4 & 0xFF) + "." + str(reg[8] & 0xFF)
+ cid["serial_number"] = "".join(["%02X" % (c,) for c in reg[9:13]])
mfg_year = (((reg[13] & 0xF) << 4) | ((reg[14] >> 4) & 0xF)) + 2000
mfg_month = reg[14] & 0xF
- cid['manufacturing_date'] = "%d/%d" % (mfg_month, mfg_year)
+ cid["manufacturing_date"] = "%d/%d" % (mfg_month, mfg_year)
crc = calc_crc7(reg[:15])
if crc != reg[15]:
- raise OSError("flash_sdcard: CID crc mismatch: 0x%02X, recd: 0x%02X"
- % (crc, reg[15]))
+ raise OSError(
+ "flash_sdcard: CID crc mismatch: 0x%02X, recd: 0x%02X" % (crc, reg[15])
+ )
self.card_info.update(cid)
def _process_csd_reg(self):
- self._send_command('SEND_CSD', 0)
+ self._send_command("SEND_CSD", 0)
reg = self._do_block_read(size=16)
if reg is None:
raise OSError("flash_sdcard: Error reading CSD register")
@@ -707,10 +771,9 @@ class SDCardSPI:
csd_type = (reg[0] >> 6) & 0x3
if csd_type == 0:
# Standard Capacity (CSD Version 1.0)
- max_block_len = 2**(reg[5] & 0xF)
- c_size = ((reg[6] & 0x3) << 10) | (reg[7] << 2) | \
- ((reg[8] >> 6) & 0x3)
- c_mult = 2**((((reg[9] & 0x3) << 1) | (reg[10] >> 7)) + 2)
+ max_block_len = 2 ** (reg[5] & 0xF)
+ c_size = ((reg[6] & 0x3) << 10) | (reg[7] << 2) | ((reg[8] >> 6) & 0x3)
+ c_mult = 2 ** ((((reg[9] & 0x3) << 1) | (reg[10] >> 7)) + 2)
max_capacity = (c_size + 1) * c_mult * max_block_len
str_capacity = "%.1f MiB" % (max_capacity / (1024.0**2))
elif csd_type == 1:
@@ -723,9 +786,10 @@ class SDCardSPI:
self.write_protected = (reg[14] & 0x30) != 0
crc = calc_crc7(reg[:15])
if crc != reg[15]:
- raise OSError("flash_sdcard: CSD crc mismatch: 0x%02X, recd: 0x%02X"
- % (crc, reg[15]))
- self.card_info['capacity'] = str_capacity
+ raise OSError(
+ "flash_sdcard: CSD crc mismatch: 0x%02X, recd: 0x%02X" % (crc, reg[15])
+ )
+ self.card_info["capacity"] = str_capacity
self.total_sectors = max_capacity // SECTOR_SIZE
def print_card_info(self, print_func=logging.info):
@@ -749,7 +813,7 @@ class SDCardSPI:
offset = sector
if not self.high_capacity:
offset = sector * SECTOR_SIZE
- self._send_command('READ_SINGLE_BLOCK', offset)
+ self._send_command("READ_SINGLE_BLOCK", offset)
buf = self._do_block_read()
if buf is None:
raise OSError(err_msg)
@@ -759,12 +823,12 @@ class SDCardSPI:
valid_response = True
sd_resp = self._find_sd_response()
if sd_resp != 0:
- logging.info("flash_sdcard: invalid read block response: 0x%02X"
- % (sd_resp))
+ logging.info(
+ "flash_sdcard: invalid read block response: 0x%02X" % (sd_resp)
+ )
valid_response = False
if not self._find_sd_token(0xFE):
- logging.info("flash_sdcard: read error, unable to find "
- "start token")
+ logging.info("flash_sdcard: read error, unable to find " "start token")
valid_response = False
if not valid_response:
# In the event of an invalid response we will still
@@ -773,26 +837,27 @@ class SDCardSPI:
bcount = size + 2
while bcount:
sent = min(32, bcount)
- self.spi.spi_send([0xFF]*sent)
+ self.spi.spi_send([0xFF] * sent)
bcount -= sent
self._find_sd_token(0xFF)
return None
buf = bytearray()
while len(buf) < size:
count = min(32, size - len(buf))
- params = self.spi.spi_transfer([0xFF]*count)
- buf += bytearray(params['response'])
+ params = self.spi.spi_transfer([0xFF] * count)
+ buf += bytearray(params["response"])
# Get the CRC
params = self.spi.spi_transfer([0xFF, 0xFF])
# Make sure we leave the busy state
self._find_sd_token(0xFF)
- crc = bytearray(params['response'])
+ crc = bytearray(params["response"])
crc_int = (crc[0] << 8) | crc[1]
calculated_crc = calc_crc16(buf)
if calculated_crc != crc_int:
logging.info(
"flash_sdcard: CRC Mismatch, Received: %04X, Calculated: %04X"
- % (crc_int, calculated_crc))
+ % (crc_int, calculated_crc)
+ )
return None
return buf
@@ -800,24 +865,23 @@ class SDCardSPI:
with self.mutex:
if not 0 <= sector < self.total_sectors:
raise OSError(
- "flash_sdcard: write error, sector number %d invalid"
- % (sector))
+ "flash_sdcard: write error, sector number %d invalid" % (sector)
+ )
if not self.initialized:
- raise OSError("flash_sdcard: write error, SD Card not"
- " initialized")
+ raise OSError("flash_sdcard: write error, SD Card not" " initialized")
outbuf = bytearray(data)
if len(outbuf) > SECTOR_SIZE:
- raise OSError("sd_card: Cannot write sector larger"
- " than %d bytes"
- % (SECTOR_SIZE))
+ raise OSError(
+ "sd_card: Cannot write sector larger"
+ " than %d bytes" % (SECTOR_SIZE)
+ )
elif len(outbuf) < SECTOR_SIZE:
outbuf += bytearray([0] * (SECTOR_SIZE - len(outbuf)))
offset = sector
if not self.high_capacity:
offset = sector * SECTOR_SIZE
- if not self._check_command(0, 'WRITE_BLOCK', offset, tries=2):
- raise OSError("flash_sdcard: Error writing to sector %d"
- % (sector,))
+ if not self._check_command(0, "WRITE_BLOCK", offset, tries=2):
+ raise OSError("flash_sdcard: Error writing to sector %d" % (sector,))
crc = calc_crc16(outbuf)
outbuf.insert(0, 0xFE)
outbuf.append((crc >> 8) & 0xFF)
@@ -827,26 +891,30 @@ class SDCardSPI:
outbuf = outbuf[32:]
resp = self._find_sd_response()
err_msgs = []
- if (resp & 0x1f) != 5:
+ if (resp & 0x1F) != 5:
err_msgs.append("flash_sdcard: write error 0x%02X" % (resp,))
# wait until the card leaves the busy state
if not self._find_sd_token(0xFF, tries=128):
- err_msgs.append("flash_sdcard: could not leave busy"
- " state after write")
+ err_msgs.append(
+ "flash_sdcard: could not leave busy" " state after write"
+ )
else:
- status = self._send_command_with_response('SEND_STATUS', 0)
- status = status.strip(b'\xFF')
+ status = self._send_command_with_response("SEND_STATUS", 0)
+ status = status.strip(b"\xff")
if len(status) == 2:
if status[1] != 0:
err_msgs.append(
- "flash_sdcard: write error 0x%02X"
- % (status[1],))
+ "flash_sdcard: write error 0x%02X" % (status[1],)
+ )
else:
- err_msgs.append("flash_sdcard: Invalid status response"
- " after write: %s" % (repr(status),))
+ err_msgs.append(
+ "flash_sdcard: Invalid status response"
+ " after write: %s" % (repr(status),)
+ )
if err_msgs:
raise OSError("\n".join(err_msgs))
+
class SDCardSDIO:
def __init__(self, ser):
self.sdio = SDIODirect(ser)
@@ -864,49 +932,61 @@ class SDCardSDIO:
def init_sd(self):
def check_for_ocr_errors(reg):
# returns False if an error flag is set
- return ((reg[0]&0xFD) | (reg[1]&0xFF) |
- (reg[2]&0xE0) | (reg[3]&0x08)) == 0
+ return (
+ (reg[0] & 0xFD) | (reg[1] & 0xFF) | (reg[2] & 0xE0) | (reg[3] & 0x08)
+ ) == 0
+
with self.mutex:
if self.initialized:
return
# Send reset command (CMD0)
- if not self._send_command('GO_IDLE_STATE', 0):
+ if not self._send_command("GO_IDLE_STATE", 0):
raise OSError(
"flash_sdcard: failed to reset SD Card\n"
"Note that older (Version 1.0) SD cards can not be\n"
"hot swapped. Execute FIRMWARE_RESTART with the card\n"
- "inserted for successful initialization.")
+ "inserted for successful initialization."
+ )
# Check Voltage Range (CMD8). Only Cards meeting the v2.0 spec
# support this. V1.0 cards (and MMC) will return illegal command.
check_pattern = 0b1010
resp = self._send_command_with_response(
- 'SEND_IF_COND', (1 << 8) | check_pattern)
- resp = resp.strip(b'\xFF')
+ "SEND_IF_COND", (1 << 8) | check_pattern
+ )
+ resp = resp.strip(b"\xff")
if len(resp) != 4:
# CMD8 is illegal, this is a version 1.0 card
self.sd_version = 1
else:
self.sd_version = 2
if not (resp[-2] == 1 and resp[-1] == check_pattern):
- raise OSError("flash_sdcard: SD Card not running in a "
- "compatible voltage range")
+ raise OSError(
+ "flash_sdcard: SD Card not running in a "
+ "compatible voltage range"
+ )
if self.sd_version == 2:
# Init card and come out of idle (ACMD41)
# Version 2 Cards may init before checking the OCR
# Allow vor LVDS card with 1.8v, too.
- resp = self._check_command(lambda x: x[0]>>7 == 1,
- 'SD_SEND_OP_COND', 0xC1100000, is_app_cmd=True,
- ignoreCRC=True)
+ resp = self._check_command(
+ lambda x: x[0] >> 7 == 1,
+ "SD_SEND_OP_COND",
+ 0xC1100000,
+ is_app_cmd=True,
+ ignoreCRC=True,
+ )
if resp is None:
- raise OSError("flash_sdcard: SD Card did not come"
- " out of IDLE after reset")
+ raise OSError(
+ "flash_sdcard: SD Card did not come" " out of IDLE after reset"
+ )
if len(resp) == 4:
if self.sd_version == 1:
# Check acceptable voltage range for V1 cards
if resp[1] != 0xFF:
- raise OSError("flash_sdcard: card does not support"
- " 3.3v range")
+ raise OSError(
+ "flash_sdcard: card does not support" " 3.3v range"
+ )
elif self.sd_version == 2:
# Determine if this is a high capacity sdcard
if resp[0] & 0x40:
@@ -916,46 +996,46 @@ class SDCardSDIO:
if self.sd_version == 1:
# Init card and come out of idle (ACMD41)
# Version 1 Cards do this after checking the OCR
- if not self._check_command(0, 'SD_SEND_OP_COND', 0,
- is_app_cmd=True):
- raise OSError("flash_sdcard: SD Card did not come"
- " out of IDLE after reset")
+ if not self._check_command(0, "SD_SEND_OP_COND", 0, is_app_cmd=True):
+ raise OSError(
+ "flash_sdcard: SD Card did not come" " out of IDLE after reset"
+ )
# Read out CID information register
self._process_cid_reg()
# Get card's relative address (RCA)
- resp = self._send_command_with_response('SET_REL_ADDR', 0)
+ resp = self._send_command_with_response("SET_REL_ADDR", 0)
# Check if bits 15:13 have some error set
- if (resp[-2] & 0xe0) != 0:
- raise OSError("flash_sdcard: set card's "
- "relative address failed")
- self.rca = resp[0]<<8 | resp[1]
+ if (resp[-2] & 0xE0) != 0:
+ raise OSError("flash_sdcard: set card's " "relative address failed")
+ self.rca = resp[0] << 8 | resp[1]
# Read out CSD information register
self._process_csd_reg()
# Select the current card
- if not self._check_command(check_for_ocr_errors, 'SEL_DESEL_CARD',
- self.rca << 16, tries=1):
+ if not self._check_command(
+ check_for_ocr_errors, "SEL_DESEL_CARD", self.rca << 16, tries=1
+ ):
raise OSError("flash_sdcard: failed to select the card")
# Set SDIO clk speed to approx. 1 MHz
self.sdio.sdio_set_speed(1000000)
- if self._check_command(check_for_ocr_errors, 'SET_BLOCKLEN',
- SECTOR_SIZE, tries=5):
+ if self._check_command(
+ check_for_ocr_errors, "SET_BLOCKLEN", SECTOR_SIZE, tries=5
+ ):
self.initialized = True
else:
raise OSError("flash_sdcard: failed to set block size")
-
def deinit(self):
with self.mutex:
if self.initialized:
# Reset the SD Card
try:
- if not self._send_command('GO_IDLE_STATE', 0):
+ if not self._send_command("GO_IDLE_STATE", 0):
logging.info("flash_sdcard: failed to reset SD Card")
except Exception:
logging.exception("Error resetting SD Card")
@@ -965,20 +1045,24 @@ class SDCardSDIO:
self.total_sectors = 0
self.card_info.clear()
- def _check_command(self, check_func, cmd, args, is_app_cmd=False, tries=15,
- ignoreCRC=False):
- func = self._send_app_cmd_with_response if is_app_cmd else \
- self._send_command_with_response
+ def _check_command(
+ self, check_func, cmd, args, is_app_cmd=False, tries=15, ignoreCRC=False
+ ):
+ func = (
+ self._send_app_cmd_with_response
+ if is_app_cmd
+ else self._send_command_with_response
+ )
while True:
resp, rt = func(cmd, args, get_rt=True, ignoreCRC=ignoreCRC)
- #logging.info("flash_sdcard: Check cmd %s, response: %s"
+ # logging.info("flash_sdcard: Check cmd %s, response: %s"
# % (cmd, repr(resp)))
if resp and check_func(resp):
return resp
tries -= 1
if tries < 1:
return None
- self.reactor.pause(rt + .1)
+ self.reactor.pause(rt + 0.1)
def _send_command(self, cmd, args, wait=0):
cmd_code = SD_COMMANDS[cmd]
@@ -986,68 +1070,70 @@ class SDCardSDIO:
if isinstance(args, int) or isinstance(args, long):
argument = args & 0xFFFFFFFF
elif isinstance(args, list) and len(args) == 4:
- argument = ((args[0] << 24) & 0xFF000000) | \
- ((args[1] << 16) & 0x00FF0000) | \
- ((args[2] << 8) & 0x0000FF00) | \
- ((args[3] << 0) & 0x000000FF)
+ argument = (
+ ((args[0] << 24) & 0xFF000000)
+ | ((args[1] << 16) & 0x00FF0000)
+ | ((args[2] << 8) & 0x0000FF00)
+ | ((args[3] << 0) & 0x000000FF)
+ )
else:
raise OSError("flash_sdcard: Invalid SD Card Command argument")
params = self.sdio.sdio_send_cmd(cmd_code, argument, wait)
- #logging.debug(f'_send_command({cmd=}, {args=}, {wait=}) -> '
+ # logging.debug(f'_send_command({cmd=}, {args=}, {wait=}) -> '
# 'CMD: {cmd_code} ARG: {argument} -> {params=}')
- if (wait == 0):
+ if wait == 0:
# Just return the error code if no response was requested
- return params['error'] == 0
+ return params["error"] == 0
return params
- def _send_command_with_response(self, cmd, args, check_error=True,
- ignoreCRC=False, get_rt=False):
+ def _send_command_with_response(
+ self, cmd, args, check_error=True, ignoreCRC=False, get_rt=False
+ ):
# Wait for a short response
params = self._send_command(cmd, args, wait=1)
- response = params['response']
+ response = params["response"]
if check_error:
- if params['error'] != 0:
- if ignoreCRC and params['error'] != 4:
+ if params["error"] != 0:
+ if ignoreCRC and params["error"] != 4:
response = []
if get_rt:
- return bytearray(response), params['#receive_time']
+ return bytearray(response), params["#receive_time"]
else:
return bytearray(response)
- def _send_app_cmd_with_response(self, cmd, args,
- ignoreCRC=False, get_rt=False):
+ def _send_app_cmd_with_response(self, cmd, args, ignoreCRC=False, get_rt=False):
# CMD55 tells the SD Card that the next command is an
# Application Specific Command.
- self._send_command_with_response('APP_CMD', self.rca << 16)
+ self._send_command_with_response("APP_CMD", self.rca << 16)
return self._send_command_with_response(
- cmd, args, ignoreCRC=ignoreCRC, get_rt=get_rt)
+ cmd, args, ignoreCRC=ignoreCRC, get_rt=get_rt
+ )
def _process_cid_reg(self):
- params = self._send_command('ALL_SEND_CID', 0, wait=2)
- reg = bytearray(params['response'])
+ params = self._send_command("ALL_SEND_CID", 0, wait=2)
+ reg = bytearray(params["response"])
if reg is None:
raise OSError("flash_sdcard: Error reading CID register")
cid = collections.OrderedDict()
- cid['manufacturer_id'] = reg[0]
- cid['oem_id'] = reg[1:3].decode(encoding='ascii', errors='ignore')
- cid['product_name'] = reg[3:8].decode(
- encoding='ascii', errors='ignore')
- cid['product_revision'] = str(reg[8] >> 4 & 0xFF) + "." \
- + str(reg[8] & 0xFF)
- cid['serial_number'] = "".join(["%02X" % (c,) for c in reg[9:13]])
+ cid["manufacturer_id"] = reg[0]
+ cid["oem_id"] = reg[1:3].decode(encoding="ascii", errors="ignore")
+ cid["product_name"] = reg[3:8].decode(encoding="ascii", errors="ignore")
+ cid["product_revision"] = str(reg[8] >> 4 & 0xFF) + "." + str(reg[8] & 0xFF)
+ cid["serial_number"] = "".join(["%02X" % (c,) for c in reg[9:13]])
mfg_year = (((reg[13] & 0xF) << 4) | ((reg[14] >> 4) & 0xF)) + 2000
mfg_month = reg[14] & 0xF
- cid['manufacturing_date'] = "%d/%d" % (mfg_month, mfg_year)
+ cid["manufacturing_date"] = "%d/%d" % (mfg_month, mfg_year)
crc = calc_crc7(reg[:15], with_padding=False)
if crc != reg[15]:
- raise OSError("flash_sdcard: CID crc mismatch: 0x%02X, recd: 0x%02X"
- % (crc, reg[15]))
+ raise OSError(
+ "flash_sdcard: CID crc mismatch: 0x%02X, recd: 0x%02X" % (crc, reg[15])
+ )
self.card_info.update(cid)
def _process_csd_reg(self):
- params = self._send_command('SEND_CSD', self.rca << 16, wait=2)
- reg = bytearray(params['response'])
+ params = self._send_command("SEND_CSD", self.rca << 16, wait=2)
+ reg = bytearray(params["response"])
if reg is None:
raise OSError("flash_sdcard: Error reading CSD register")
str_capacity = "Invalid"
@@ -1055,10 +1141,9 @@ class SDCardSDIO:
csd_type = (reg[0] >> 6) & 0x3
if csd_type == 0:
# Standard Capacity (CSD Version 1.0)
- max_block_len = 2**(reg[5] & 0xF)
- c_size = ((reg[6] & 0x3) << 10) | (reg[7] << 2) | \
- ((reg[8] >> 6) & 0x3)
- c_mult = 2**((((reg[9] & 0x3) << 1) | (reg[10] >> 7)) + 2)
+ max_block_len = 2 ** (reg[5] & 0xF)
+ c_size = ((reg[6] & 0x3) << 10) | (reg[7] << 2) | ((reg[8] >> 6) & 0x3)
+ c_mult = 2 ** ((((reg[9] & 0x3) << 1) | (reg[10] >> 7)) + 2)
max_capacity = (c_size + 1) * c_mult * max_block_len
str_capacity = "%.1f MiB" % (max_capacity / (1024.0**2))
elif csd_type == 1:
@@ -1071,9 +1156,10 @@ class SDCardSDIO:
self.write_protected = (reg[14] & 0x30) != 0
crc = calc_crc7(reg[:15], with_padding=False)
if crc != reg[15]:
- raise OSError("flash_sdcard: CSD crc mismatch: 0x%02X, recd: 0x%02X"
- % (crc, reg[15]))
- self.card_info['capacity'] = str_capacity
+ raise OSError(
+ "flash_sdcard: CSD crc mismatch: 0x%02X, recd: 0x%02X" % (crc, reg[15])
+ )
+ self.card_info["capacity"] = str_capacity
self.total_sectors = max_capacity // SECTOR_SIZE
def print_card_info(self, print_func=logging.info):
@@ -1099,22 +1185,24 @@ class SDCardSDIO:
offset = sector * SECTOR_SIZE
params = self.sdio.sdio_read_data(
- SD_COMMANDS['READ_SINGLE_BLOCK'], offset)
- if params['error'] != 0:
+ SD_COMMANDS["READ_SINGLE_BLOCK"], offset
+ )
+ if params["error"] != 0:
raise OSError(
- 'Read data failed. Error code=%d' %(params['error'],) )
- if params['read'] != SECTOR_SIZE:
+ "Read data failed. Error code=%d" % (params["error"],)
+ )
+ if params["read"] != SECTOR_SIZE:
raise OSError(
- 'Read data failed. Expected %d bytes but got %d.' %
- (SECTOR_SIZE, params['read']) )
+ "Read data failed. Expected %d bytes but got %d."
+ % (SECTOR_SIZE, params["read"])
+ )
buf = bytearray()
offset = 0
- while SECTOR_SIZE-len(buf)>0:
- rest = min(SECTOR_SIZE-len(buf), 32)
- params = self.sdio.sdio_read_data_buffer(
- offset, length=rest)
- temp = bytearray(params['data'])
+ while SECTOR_SIZE - len(buf) > 0:
+ rest = min(SECTOR_SIZE - len(buf), 32)
+ params = self.sdio.sdio_read_data_buffer(offset, length=rest)
+ temp = bytearray(params["data"])
if len(temp) == 0:
raise OSError("Read zero bytes from buffer")
buf += temp
@@ -1127,16 +1215,16 @@ class SDCardSDIO:
with self.mutex:
if not 0 <= sector < self.total_sectors:
raise OSError(
- "flash_sdcard: write error, sector number %d invalid"
- % (sector))
+ "flash_sdcard: write error, sector number %d invalid" % (sector)
+ )
if not self.initialized:
- raise OSError("flash_sdcard: write error, SD Card not"
- " initialized")
+ raise OSError("flash_sdcard: write error, SD Card not" " initialized")
outbuf = bytearray(data)
if len(outbuf) > SECTOR_SIZE:
- raise OSError("sd_card: Cannot write sector larger"
- " than %d bytes"
- % (SECTOR_SIZE))
+ raise OSError(
+ "sd_card: Cannot write sector larger"
+ " than %d bytes" % (SECTOR_SIZE)
+ )
elif len(outbuf) < SECTOR_SIZE:
outbuf += bytearray([0] * (SECTOR_SIZE - len(outbuf)))
offset = sector
@@ -1145,23 +1233,25 @@ class SDCardSDIO:
CHUNKSIZE = 32
for i in range(0, SECTOR_SIZE, CHUNKSIZE):
- self.sdio.sdio_write_data_buffer(i, outbuf[i:i+CHUNKSIZE])
- params = self.sdio.sdio_write_data(
- SD_COMMANDS['WRITE_BLOCK'], offset)
- if (params['error'] != 0) or (params['write'] != SECTOR_SIZE):
- raise OSError(
- "flash_sdcard: Error writing to sector %d"% (sector,))
+ self.sdio.sdio_write_data_buffer(i, outbuf[i : i + CHUNKSIZE])
+ params = self.sdio.sdio_write_data(SD_COMMANDS["WRITE_BLOCK"], offset)
+ if (params["error"] != 0) or (params["write"] != SECTOR_SIZE):
+ raise OSError("flash_sdcard: Error writing to sector %d" % (sector,))
- status = self._send_command_with_response(
- 'SEND_STATUS', self.rca << 16)
+ status = self._send_command_with_response("SEND_STATUS", self.rca << 16)
if len(status) != 4:
- raise OSError("flash_sdcard: Failed to get status response"
- " after write: %s" % (repr(status),))
- if ((status[3]>>1) & 0x0F) != 0:
+ raise OSError(
+ "flash_sdcard: Failed to get status response"
+ " after write: %s" % (repr(status),)
+ )
+ if ((status[3] >> 1) & 0x0F) != 0:
# Bit 12:9 are not "0" (card is in idle)
- raise OSError("flash_sdcard: Write error."
- " Card is not in transfer state: 0x%02X"
- % (((status[3]>>1) & 0x0F)))
+ raise OSError(
+ "flash_sdcard: Write error."
+ " Card is not in transfer state: 0x%02X"
+ % (((status[3] >> 1) & 0x0F))
+ )
+
SDIO_WARNING = """
This board requires a manual reboot to complete the flash process.
@@ -1170,6 +1260,7 @@ power cycle is required. Please perform the power cycle now and then
rerun this utility with the 'check' option to verify flash.
"""
+
class MCUConnection:
def __init__(self, k_reactor, device, baud, board_cfg):
self.reactor = k_reactor
@@ -1194,7 +1285,7 @@ class MCUConnection:
self.reactor.register_callback(self._do_serial_connect)
curtime = self.reactor.monotonic()
while True:
- curtime = self.reactor.pause(curtime + 1.)
+ curtime = self.reactor.pause(curtime + 1.0)
output(".")
if self.connect_completion.test():
self.connected = self.connect_completion.wait()
@@ -1205,18 +1296,19 @@ class MCUConnection:
raise SPIFlashError("Unable to connect to MCU")
output_line("Connected")
msgparser = self._serial.get_msgparser()
- mcu_type = msgparser.get_constant('MCU')
- build_mcu_type = self.board_config['mcu']
+ mcu_type = msgparser.get_constant("MCU")
+ build_mcu_type = self.board_config["mcu"]
if mcu_type != build_mcu_type:
raise SPIFlashError(
"MCU Type mismatch: Build MCU = %s, Connected MCU = %s"
- % (build_mcu_type, mcu_type))
+ % (build_mcu_type, mcu_type)
+ )
self.enumerations = msgparser.get_enumerations()
self.raw_dictionary = msgparser.get_raw_data_dictionary()
self.proto_error = msgparser.error
def _do_serial_connect(self, eventtime):
- endtime = eventtime + 60.
+ endtime = eventtime + 60.0
while True:
try:
self._serial.connect_uart(self.serial_device, self.baud)
@@ -1228,7 +1320,7 @@ class MCUConnection:
return
output("Connection Error, retrying..")
self._serial.disconnect()
- self.reactor.pause(curtime + 2.)
+ self.reactor.pause(curtime + 2.0)
else:
break
self.connect_completion.complete(True)
@@ -1255,7 +1347,8 @@ class MCUConnection:
for response in GET_CFG_RESPONSES:
try:
get_cfg_cmd = mcu.CommandQueryWrapper(
- self._serial, GET_CFG_CMD, response)
+ self._serial, GET_CFG_CMD, response
+ )
break
except Exception as err:
# Raise an exception if we hit the end of the list.
@@ -1268,25 +1361,26 @@ class MCUConnection:
output("Checking Current MCU Configuration...")
params = self.get_mcu_config()
output_line("Done")
- if params['is_config'] or params['is_shutdown']:
- output_line("MCU needs restart: is_config=%d, is_shutdown=%d"
- % (params['is_config'], params['is_shutdown']))
+ if params["is_config"] or params["is_shutdown"]:
+ output_line(
+ "MCU needs restart: is_config=%d, is_shutdown=%d"
+ % (params["is_config"], params["is_shutdown"])
+ )
return True
return False
def _configure_mcu_spibus(self, printfunc=logging.info):
# TODO: add commands for buttons? Or perhaps an endstop? We
# just need to be able to query the status of the detect pin
- cs_pin = self.board_config['cs_pin'].upper()
- bus = self.board_config['spi_bus']
- bus_enums = self.enumerations.get(
- 'spi_bus', self.enumerations.get('bus'))
- pin_enums = self.enumerations.get('pin')
+ cs_pin = self.board_config["cs_pin"].upper()
+ bus = self.board_config["spi_bus"]
+ bus_enums = self.enumerations.get("spi_bus", self.enumerations.get("bus"))
+ pin_enums = self.enumerations.get("pin")
if bus == "swspi":
mcu_freq = self.clocksync.print_time_to_clock(1)
- pulse_ticks = mcu_freq//SD_SPI_SPEED
- cfgpins = self.board_config['spi_pins']
- pins = [p.strip().upper() for p in cfgpins.split(',') if p.strip()]
+ pulse_ticks = mcu_freq // SD_SPI_SPEED
+ cfgpins = self.board_config["spi_pins"]
+ pins = [p.strip().upper() for p in cfgpins.split(",") if p.strip()]
pin_err_msg = "Invalid Software SPI Pins: %s" % (cfgpins,)
if len(pins) != 3:
raise SPIFlashError(pin_err_msg)
@@ -1294,10 +1388,10 @@ class MCUConnection:
if p not in pin_enums:
raise SPIFlashError(pin_err_msg)
bus_cmds = [
- SW_SPI_BUS_CMDS[0] % (SPI_OID, pins[0], pins[1], pins[2],
- SPI_MODE, pulse_ticks),
- SW_SPI_BUS_CMDS[1] % (SPI_OID, pins[0], pins[1], pins[2],
- SPI_MODE, SD_SPI_SPEED)
+ SW_SPI_BUS_CMDS[0]
+ % (SPI_OID, pins[0], pins[1], pins[2], SPI_MODE, pulse_ticks),
+ SW_SPI_BUS_CMDS[1]
+ % (SPI_OID, pins[0], pins[1], pins[2], SPI_MODE, SD_SPI_SPEED),
]
else:
if bus not in bus_enums:
@@ -1307,7 +1401,9 @@ class MCUConnection:
]
if cs_pin not in pin_enums:
raise SPIFlashError("Invalid CS Pin: %s" % (cs_pin,))
- cfg_cmds = [ALLOC_OIDS_CMD % (1,),]
+ cfg_cmds = [
+ ALLOC_OIDS_CMD % (1,),
+ ]
self._serial.send(cfg_cmds[0])
spi_cfg_cmds = [
SPI_CFG_CMDS[0] % (SPI_OID, cs_pin, False),
@@ -1315,20 +1411,19 @@ class MCUConnection:
]
cfg_cmds.append(self._try_send_command(spi_cfg_cmds))
cfg_cmds.append(self._try_send_command(bus_cmds))
- config_crc = zlib.crc32('\n'.join(cfg_cmds).encode()) & 0xffffffff
+ config_crc = zlib.crc32("\n".join(cfg_cmds).encode()) & 0xFFFFFFFF
self._serial.send(FINALIZE_CFG_CMD % (config_crc,))
config = self.get_mcu_config()
if not config["is_config"] or config["is_shutdown"]:
raise MCUConfigError("Failed to configure MCU")
printfunc("Initializing SD Card and Mounting file system...")
self.fatfs = FatFS(self._serial)
- self.reactor.pause(self.reactor.monotonic() + .5)
+ self.reactor.pause(self.reactor.monotonic() + 0.5)
try:
self.fatfs.mount(printfunc)
except OSError:
logging.exception("SD Card Mount Failure")
- raise SPIFlashError(
- "Failed to Initialize SD Card. Is it inserted?")
+ raise SPIFlashError("Failed to Initialize SD Card. Is it inserted?")
def _try_send_command(self, cmd_list):
for cmd in cmd_list:
@@ -1341,10 +1436,9 @@ class MCUConnection:
return cmd
def _configure_mcu_sdiobus(self, printfunc=logging.info):
- bus = self.board_config['sdio_bus']
- bus_enums = self.enumerations.get(
- 'sdio_bus', self.enumerations.get('bus'))
- pin_enums = self.enumerations.get('pin')
+ bus = self.board_config["sdio_bus"]
+ bus_enums = self.enumerations.get("sdio_bus", self.enumerations.get("bus"))
+ pin_enums = self.enumerations.get("pin")
if bus not in bus_enums:
raise SPIFlashError("Invalid SDIO Bus: %s" % (bus,))
bus_cmd = SDIO_BUS_CMD % (SDIO_OID, bus)
@@ -1352,25 +1446,24 @@ class MCUConnection:
cfg_cmds = [ALLOC_OIDS_CMD % (1,), sdio_cfg_cmd, bus_cmd]
for cmd in cfg_cmds:
self._serial.send(cmd)
- config_crc = zlib.crc32('\n'.join(cfg_cmds).encode()) & 0xffffffff
+ config_crc = zlib.crc32("\n".join(cfg_cmds).encode()) & 0xFFFFFFFF
self._serial.send(FINALIZE_CFG_CMD % (config_crc,))
config = self.get_mcu_config()
if not config["is_config"] or config["is_shutdown"]:
raise MCUConfigError("Failed to configure MCU")
printfunc("Initializing SD Card and Mounting file system...")
- self.fatfs = FatFS(self._serial,spi=False)
- self.reactor.pause(self.reactor.monotonic() + .5)
+ self.fatfs = FatFS(self._serial, spi=False)
+ self.reactor.pause(self.reactor.monotonic() + 0.5)
try:
self.fatfs.mount(printfunc)
except OSError:
logging.exception("SD Card Mount Failure")
- raise SPIFlashError(
- "Failed to Initialize SD Card. Is it inserted?")
+ raise SPIFlashError("Failed to Initialize SD Card. Is it inserted?")
def configure_mcu(self, printfunc=logging.info):
- if 'spi_bus' in self.board_config:
+ if "spi_bus" in self.board_config:
self._configure_mcu_spibus(printfunc=printfunc)
- elif 'sdio_bus' in self.board_config:
+ elif "sdio_bus" in self.board_config:
self._configure_mcu_sdiobus(printfunc=printfunc)
else:
raise SPIFlashError("Unknown bus defined in board_defs.py.")
@@ -1379,10 +1472,10 @@ class MCUConnection:
output("Uploading Klipper Firmware to SD Card...")
input_sha = hashlib.sha1()
sd_sha = hashlib.sha1()
- klipper_bin_path = self.board_config['klipper_bin_path']
- fw_path = self.board_config.get('firmware_path', "firmware.bin")
+ klipper_bin_path = self.board_config["klipper_bin_path"]
+ fw_path = self.board_config.get("firmware_path", "firmware.bin")
try:
- with open(klipper_bin_path, 'rb') as local_f:
+ with open(klipper_bin_path, "rb") as local_f:
with self.fatfs.open_file(fw_path, "wb") as sd_f:
while True:
buf = local_f.read(4096)
@@ -1397,7 +1490,7 @@ class MCUConnection:
output("Validating Upload...")
try:
finfo = self.fatfs.get_file_info(fw_path)
- with self.fatfs.open_file(fw_path, 'r') as sd_f:
+ with self.fatfs.open_file(fw_path, "r") as sd_f:
while True:
buf = sd_f.read(4096)
if not buf:
@@ -1406,20 +1499,22 @@ class MCUConnection:
except Exception:
logging.exception("SD Card Download Error")
raise SPIFlashError("Error reading %s from SD" % (fw_path))
- sd_size = finfo.get('size', -1)
+ sd_size = finfo.get("size", -1)
input_chksm = input_sha.hexdigest().upper()
sd_chksm = sd_sha.hexdigest().upper()
if input_chksm != sd_chksm:
- raise SPIFlashError("Checksum Mismatch: Got '%s', expected '%s'"
- % (sd_chksm, input_chksm))
+ raise SPIFlashError(
+ "Checksum Mismatch: Got '%s', expected '%s'" % (sd_chksm, input_chksm)
+ )
output_line("Done")
output_line(
"Firmware Upload Complete: %s, Size: %d, Checksum (SHA1): %s"
- % (fw_path, sd_size, sd_chksm))
+ % (fw_path, sd_size, sd_chksm)
+ )
return sd_chksm
def verify_flash(self, req_chksm, old_dictionary, req_dictionary):
- if bool(self.board_config.get('skip_verify', False)):
+ if bool(self.board_config.get("skip_verify", False)):
output_line(SDIO_WARNING)
return
output("Verifying Flash...")
@@ -1429,10 +1524,14 @@ class MCUConnection:
# If we have a dictionary, check that it matches.
if req_dictionary:
if cur_dictionary != req_dictionary:
- raise SPIFlashError("Version Mismatch: Got '%s...', "
- "expected '%s...'"
- % (msgparser.get_version_info()[0],
- json.loads(req_dictionary)['version']))
+ raise SPIFlashError(
+ "Version Mismatch: Got '%s...', "
+ "expected '%s...'"
+ % (
+ msgparser.get_version_info()[0],
+ json.loads(req_dictionary)["version"],
+ )
+ )
output("Version matched...")
validation_passed = True
# Otherwise check that the MCU dictionary changed
@@ -1444,10 +1543,9 @@ class MCUConnection:
# If the version didn't change, look for current firmware to checksum
cur_fw_sha = None
if not validation_passed:
- cur_fw_path = self.board_config.get('current_firmware_path',
- "FIRMWARE.CUR")
+ cur_fw_path = self.board_config.get("current_firmware_path", "FIRMWARE.CUR")
try:
- with self.fatfs.open_file(cur_fw_path, 'r') as sd_f:
+ with self.fatfs.open_file(cur_fw_path, "r") as sd_f:
cur_fw_sha = hashlib.sha1()
while True:
buf = sd_f.read(4096)
@@ -1464,16 +1562,17 @@ class MCUConnection:
validation_passed = True
output("Checksum matched...")
else:
- raise SPIFlashError("Checksum Mismatch: Got '%s', "
- "expected '%s'"
- % (cur_fw_chksm, req_chksm))
+ raise SPIFlashError(
+ "Checksum Mismatch: Got '%s', "
+ "expected '%s'" % (cur_fw_chksm, req_chksm)
+ )
if not validation_passed:
raise SPIFlashError("Validation failure.")
output_line("Done")
# Remove firmware file if MCU bootloader failed to rename.
if cur_fw_sha is None:
try:
- fw_path = self.board_config.get('firmware_path', "firmware.bin")
+ fw_path = self.board_config.get("firmware_path", "firmware.bin")
self.fatfs.remove_item(fw_path)
output_line("Found and deleted %s after reset" % (fw_path,))
except Exception:
@@ -1481,17 +1580,19 @@ class MCUConnection:
output_line("Firmware Flash Successful")
output_line("Current Firmware: %s" % (msgparser.get_version_info()[0],))
+
class SPIFlash:
def __init__(self, args):
self.board_config = args
- if not os.path.exists(args['device']):
- raise SPIFlashError("No device found at '%s'" % (args['device'],))
- if not os.path.isfile(args['klipper_bin_path']):
- raise SPIFlashError("Unable to locate klipper binary at '%s'"
- % (args['klipper_bin_path'],))
- tty_name, dev_by_path = translate_serial_to_tty(args['device'])
+ if not os.path.exists(args["device"]):
+ raise SPIFlashError("No device found at '%s'" % (args["device"],))
+ if not os.path.isfile(args["klipper_bin_path"]):
+ raise SPIFlashError(
+ "Unable to locate klipper binary at '%s'" % (args["klipper_bin_path"],)
+ )
+ tty_name, dev_by_path = translate_serial_to_tty(args["device"])
self.device_path = dev_by_path
- self.baud_rate = args['baud']
+ self.baud_rate = args["baud"]
self.mcu_conn = None
self.firmware_checksum = None
self.task_complete = False
@@ -1499,24 +1600,26 @@ class SPIFlash:
self.need_verify = True
self.old_dictionary = None
self.new_dictionary = None
- if args['klipper_dict_path'] is not None:
+ if args["klipper_dict_path"] is not None:
try:
- with open(args['klipper_dict_path'], 'rb') as dict_f:
- self.new_dictionary = dict_f.read(32*1024)
+ with open(args["klipper_dict_path"], "rb") as dict_f:
+ self.new_dictionary = dict_f.read(32 * 1024)
except Exception:
- raise SPIFlashError("Missing or invalid dictionary at '%s'"
- % (args['klipper_dict_path'],))
+ raise SPIFlashError(
+ "Missing or invalid dictionary at '%s'"
+ % (args["klipper_dict_path"],)
+ )
def _wait_for_reconnect(self):
output("Waiting for device to reconnect...")
- time.sleep(1.)
+ time.sleep(1.0)
if os.path.exists(self.device_path):
# device is already available, this could be a UART
- time.sleep(2.)
+ time.sleep(2.0)
else:
wait_left = 30
while wait_left:
- time.sleep(1.)
+ time.sleep(1.0)
output(".")
if os.path.exists(self.device_path):
break
@@ -1568,16 +1671,18 @@ class SPIFlash:
if not self.mcu_conn.connected:
self.mcu_conn.connect()
self.mcu_conn.configure_mcu()
- self.mcu_conn.verify_flash(self.firmware_checksum, self.old_dictionary,
- self.new_dictionary)
+ self.mcu_conn.verify_flash(
+ self.firmware_checksum, self.old_dictionary, self.new_dictionary
+ )
self.mcu_conn.reset()
self.task_complete = True
def run_reactor_task(self, run_cb):
self.task_complete = False
k_reactor = reactor.Reactor()
- self.mcu_conn = MCUConnection(k_reactor, self.device_path,
- self.baud_rate, self.board_config)
+ self.mcu_conn = MCUConnection(
+ k_reactor, self.device_path, self.baud_rate, self.board_config
+ )
k_reactor.register_callback(run_cb)
try:
k_reactor.run()
@@ -1593,7 +1698,7 @@ class SPIFlash:
self.mcu_conn = k_reactor = None
def run(self):
- if not bool(self.board_config.get('verify_only', False)):
+ if not bool(self.board_config.get("verify_only", False)):
self.run_reactor_task(self.run_reset_upload)
self._wait_for_reconnect()
if self.need_upload:
@@ -1606,12 +1711,12 @@ class SPIFlash:
self._wait_for_reconnect()
self.run_reactor_task(self.run_verify)
+
def main():
- parser = argparse.ArgumentParser(
- description="SD Card Firmware Upload Utility")
+ parser = argparse.ArgumentParser(description="SD Card Firmware Upload Utility")
parser.add_argument(
- "-l", "--list", action="store_true",
- help="List Supported Boards")
+ "-l", "--list", action="store_true", help="List Supported Boards"
+ )
args = parser.parse_known_args()
if args[0].list:
blist = board_defs.list_boards()
@@ -1620,24 +1725,32 @@ def main():
output_line(board)
return
parser.add_argument(
- "-b", "--baud", metavar="<baud rate>", type=int,
- default=250000, help="Serial Baud Rate")
- parser.add_argument(
- "-v", "--verbose", action="store_true",
- help="Enable verbose output")
- parser.add_argument(
- "-d", "--dict_path", metavar="<klipper.dict>", type=str,
- default=None, help="Klipper firmware dictionary")
+ "-b",
+ "--baud",
+ metavar="<baud rate>",
+ type=int,
+ default=250000,
+ help="Serial Baud Rate",
+ )
parser.add_argument(
- "-c","--check", action="store_true",
- help="Perform flash check/verify only")
+ "-v", "--verbose", action="store_true", help="Enable verbose output"
+ )
parser.add_argument(
- "device", metavar="<device>", help="Device Serial Port")
+ "-d",
+ "--dict_path",
+ metavar="<klipper.dict>",
+ type=str,
+ default=None,
+ help="Klipper firmware dictionary",
+ )
parser.add_argument(
- "board", metavar="<board>", help="Board Type")
+ "-c", "--check", action="store_true", help="Perform flash check/verify only"
+ )
+ parser.add_argument("device", metavar="<device>", help="Device Serial Port")
+ parser.add_argument("board", metavar="<board>", help="Board Type")
parser.add_argument(
- "klipper_bin_path", metavar="<klipper.bin>",
- help="Klipper firmware binary")
+ "klipper_bin_path", metavar="<klipper.bin>", help="Klipper firmware binary"
+ )
args = parser.parse_args()
log_level = logging.DEBUG if args.verbose else logging.CRITICAL
logging.basicConfig(level=log_level)
@@ -1645,14 +1758,14 @@ def main():
if flash_args is None:
output_line("Unable to find definition for board: %s" % (args.board,))
sys.exit(-1)
- flash_args['device'] = args.device
- flash_args['baud'] = args.baud
- flash_args['klipper_bin_path'] = args.klipper_bin_path
- flash_args['klipper_dict_path'] = args.dict_path
- flash_args['verify_only'] = args.check
+ flash_args["device"] = args.device
+ flash_args["baud"] = args.baud
+ flash_args["klipper_bin_path"] = args.klipper_bin_path
+ flash_args["klipper_dict_path"] = args.dict_path
+ flash_args["verify_only"] = args.check
if args.check:
# override board_defs setting when doing verify-only:
- flash_args['skip_verify'] = False
+ flash_args["skip_verify"] = False
check_need_convert(args.board, flash_args)
fatfs_lib.check_fatfs_build(output)
try:
diff --git a/scripts/test_klippy.py b/scripts/test_klippy.py
index 8363ca0b..93ea2f34 100644
--- a/scripts/test_klippy.py
+++ b/scripts/test_klippy.py
@@ -14,9 +14,11 @@ TEMP_OUTPUT_FILE = "_test_output"
# Test cases
######################################################################
+
class error(Exception):
pass
+
class TestCase:
def __init__(self, fname, dictdir, tempdir, verbose, keepfiles):
self.fname = fname
@@ -24,22 +26,24 @@ class TestCase:
self.tempdir = tempdir
self.verbose = verbose
self.keepfiles = keepfiles
- def relpath(self, fname, rel='test'):
- if rel == 'dict':
+
+ def relpath(self, fname, rel="test"):
+ if rel == "dict":
reldir = self.dictdir
- elif rel == 'temp':
+ elif rel == "temp":
reldir = self.tempdir
else:
reldir = os.path.dirname(self.fname)
return os.path.join(reldir, fname)
+
def parse_test(self):
# Parse file into test cases
config_fname = gcode_fname = dict_fnames = None
should_fail = multi_tests = False
gcode = []
- f = open(self.fname, 'r')
+ f = open(self.fname, "r")
for line in f:
- cpos = line.find('#')
+ cpos = line.find("#")
if cpos >= 0:
line = line[:cpos]
parts = line.strip().split()
@@ -50,18 +54,21 @@ class TestCase:
# Multiple tests in same file
if not multi_tests:
multi_tests = True
- self.launch_test(config_fname, dict_fnames,
- gcode_fname, gcode, should_fail)
+ self.launch_test(
+ config_fname, dict_fnames, gcode_fname, gcode, should_fail
+ )
config_fname = self.relpath(parts[1])
if multi_tests:
- self.launch_test(config_fname, dict_fnames,
- gcode_fname, gcode, should_fail)
+ self.launch_test(
+ config_fname, dict_fnames, gcode_fname, gcode, should_fail
+ )
elif parts[0] == "DICTIONARY":
- dict_fnames = [self.relpath(parts[1], 'dict')]
+ dict_fnames = [self.relpath(parts[1], "dict")]
for mcu_dict in parts[2:]:
- mcu, fname = mcu_dict.split('=', 1)
- dict_fnames.append('%s=%s' % (
- mcu.strip(), self.relpath(fname.strip(), 'dict')))
+ mcu, fname = mcu_dict.split("=", 1)
+ dict_fnames.append(
+ "%s=%s" % (mcu.strip(), self.relpath(fname.strip(), "dict"))
+ )
elif parts[0] == "GCODE":
gcode_fname = self.relpath(parts[1])
elif parts[0] == "SHOULD_FAIL":
@@ -70,16 +77,15 @@ class TestCase:
gcode.append(line.strip())
f.close()
if not multi_tests:
- self.launch_test(config_fname, dict_fnames,
- gcode_fname, gcode, should_fail)
- def launch_test(self, config_fname, dict_fnames, gcode_fname, gcode,
- should_fail):
+ self.launch_test(config_fname, dict_fnames, gcode_fname, gcode, should_fail)
+
+ def launch_test(self, config_fname, dict_fnames, gcode_fname, gcode, should_fail):
gcode_is_temp = False
if gcode_fname is None:
- gcode_fname = self.relpath(TEMP_GCODE_FILE, 'temp')
+ gcode_fname = self.relpath(TEMP_GCODE_FILE, "temp")
gcode_is_temp = True
- f = open(gcode_fname, 'w')
- f.write('\n'.join(gcode + ['']))
+ f = open(gcode_fname, "w")
+ f.write("\n".join(gcode + [""]))
f.close()
elif gcode:
raise error("Can't specify both a gcode file and gcode commands")
@@ -88,14 +94,23 @@ class TestCase:
if dict_fnames is None:
raise error("data dictionary file not specified")
# Call klippy
- sys.stderr.write(" Starting %s (%s)\n" % (
- self.fname, os.path.basename(config_fname)))
- args = [ sys.executable, './klippy/klippy.py', config_fname,
- '-i', gcode_fname, '-o', TEMP_OUTPUT_FILE, '-v' ]
+ sys.stderr.write(
+ " Starting %s (%s)\n" % (self.fname, os.path.basename(config_fname))
+ )
+ args = [
+ sys.executable,
+ "./klippy/klippy.py",
+ config_fname,
+ "-i",
+ gcode_fname,
+ "-o",
+ TEMP_OUTPUT_FILE,
+ "-v",
+ ]
for df in dict_fnames:
- args += ['-d', df]
+ args += ["-d", df]
if not self.verbose:
- args += ['-l', TEMP_LOG_FILE]
+ args += ["-l", TEMP_LOG_FILE]
res = subprocess.call(args)
is_fail = (should_fail and not res) or (not should_fail and res)
if is_fail:
@@ -113,9 +128,10 @@ class TestCase:
if not self.verbose:
os.unlink(TEMP_LOG_FILE)
else:
- sys.stderr.write('\n')
+ sys.stderr.write("\n")
if gcode_is_temp:
os.unlink(gcode_fname)
+
def run(self):
try:
self.parse_test()
@@ -125,8 +141,9 @@ class TestCase:
logging.exception("Unhandled exception during test run")
return "internal error"
return "success"
+
def show_log(self):
- f = open(TEMP_LOG_FILE, 'r')
+ f = open(TEMP_LOG_FILE, "r")
data = f.read()
f.close()
sys.stdout.write(data)
@@ -136,18 +153,34 @@ class TestCase:
# Startup
######################################################################
+
def main():
# Parse args
usage = "%prog [options] <test cases>"
opts = optparse.OptionParser(usage)
- opts.add_option("-d", "--dictdir", dest="dictdir", default=".",
- help="directory for dictionary files")
- opts.add_option("-t", "--tempdir", dest="tempdir", default=".",
- help="directory for temporary files")
- opts.add_option("-k", action="store_true", dest="keepfiles",
- help="do not remove temporary files")
- opts.add_option("-v", action="store_true", dest="verbose",
- help="show all output from tests")
+ opts.add_option(
+ "-d",
+ "--dictdir",
+ dest="dictdir",
+ default=".",
+ help="directory for dictionary files",
+ )
+ opts.add_option(
+ "-t",
+ "--tempdir",
+ dest="tempdir",
+ default=".",
+ help="directory for temporary files",
+ )
+ opts.add_option(
+ "-k",
+ action="store_true",
+ dest="keepfiles",
+ help="do not remove temporary files",
+ )
+ opts.add_option(
+ "-v", action="store_true", dest="verbose", help="show all output from tests"
+ )
options, args = opts.parse_args()
if len(args) < 1:
opts.error("Incorrect number of arguments")
@@ -155,14 +188,16 @@ def main():
# Run each test
for fname in args:
- tc = TestCase(fname, options.dictdir, options.tempdir, options.verbose,
- options.keepfiles)
+ tc = TestCase(
+ fname, options.dictdir, options.tempdir, options.verbose, options.keepfiles
+ )
res = tc.run()
- if res != 'success':
+ if res != "success":
sys.stderr.write("\n\nTest case %s FAILED (%s)!\n\n" % (fname, res))
sys.exit(-1)
sys.stderr.write("\n All %d test cases passed\n" % (len(args),))
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/update_chitu.py b/scripts/update_chitu.py
index cf7fcfe9..62145371 100755
--- a/scripts/update_chitu.py
+++ b/scripts/update_chitu.py
@@ -11,14 +11,16 @@ import uuid
import sys
import hashlib
+
def calculate_crc(contents, seed):
- accumulating_xor_value = seed;
+ accumulating_xor_value = seed
for i in range(0, len(contents), 4):
- value = struct.unpack('<I', contents[ i : i + 4])[0]
+ value = struct.unpack("<I", contents[i : i + 4])[0]
accumulating_xor_value = accumulating_xor_value ^ value
return accumulating_xor_value
+
def xor_block(r0, r1, block_number, block_size, file_key):
# This is the loop counter
loop_counter = 0x0
@@ -27,17 +29,17 @@ def xor_block(r0, r1, block_number, block_size, file_key):
key_length = 0x18
# This is an initial seed
- xor_seed = 0x4bad
+ xor_seed = 0x4BAD
# This is the block counter
block_number = xor_seed * block_number
- #load the xor key from the file
- r7 = file_key
+ # load the xor key from the file
+ r7 = file_key
for loop_counter in range(0, block_size):
# meant to make sure different bits of the key are used.
- xor_seed = int(loop_counter/key_length)
+ xor_seed = int(loop_counter / key_length)
# IP is a scratch register / R12
ip = loop_counter - (key_length * xor_seed)
@@ -57,10 +59,10 @@ def xor_block(r0, r1, block_number, block_size, file_key):
# and then with IP
xor_seed = xor_seed ^ ip
- #Now store the byte back
+ # Now store the byte back
r1[loop_counter] = xor_seed & 0xFF
- #increment the loop_counter
+ # increment the loop_counter
loop_counter = loop_counter + 1
@@ -74,12 +76,12 @@ def encode_file(input, output_file, file_length):
print("Update UUID ", uid_value)
file_key = int(uid_value.hex[0:8], 16)
- xor_crc = 0xef3d4323;
+ xor_crc = 0xEF3D4323
# the input file is expected to be in chunks of 0x800
# so round the size
while len(input_file) % block_size != 0:
- input_file.extend(b'0x0')
+ input_file.extend(b"0x0")
# write the file header
output_file.write(struct.pack(">I", 0x443D2D3F))
@@ -88,15 +90,15 @@ def encode_file(input, output_file, file_length):
# write the file_key
output_file.write(struct.pack("<I", file_key))
- #TODO - how to enforce that the firmware aligns to block boundaries?
+ # TODO - how to enforce that the firmware aligns to block boundaries?
block_count = int(len(input_file) / block_size)
print("Block Count is ", block_count)
for block_number in range(0, block_count):
- block_offset = (block_number * block_size)
+ block_offset = block_number * block_size
block_end = block_offset + block_size
- block_array = bytearray(input_file[block_offset: block_end])
+ block_array = bytearray(input_file[block_offset:block_end])
xor_block(block_array, block_array, block_number, block_size, file_key)
- for n in range (0, block_size):
+ for n in range(0, block_size):
input_file[block_offset + n] = block_array[n]
# update the expected CRC value.
@@ -109,6 +111,7 @@ def encode_file(input, output_file, file_length):
output_file.write(input_file)
return
+
def main():
if len(sys.argv) != 3:
print("Usage: update_chitu <input_file> <output_file>")
@@ -132,5 +135,6 @@ def main():
print("Encoding complete.")
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/update_mks_robin.py b/scripts/update_mks_robin.py
index fab9faa1..e4127648 100755
--- a/scripts/update_mks_robin.py
+++ b/scripts/update_mks_robin.py
@@ -7,12 +7,41 @@
import optparse
XOR_PATTERN = [
- 0xA3, 0xBD, 0xAD, 0x0D, 0x41, 0x11, 0xBB, 0x8D, 0xDC, 0x80,
- 0x2D, 0xD0, 0xD2, 0xC4, 0x9B, 0x1E, 0x26, 0xEB, 0xE3, 0x33,
- 0x4A, 0x15, 0xE4, 0x0A, 0xB3, 0xB1, 0x3C, 0x93, 0xBB, 0xAF,
- 0xF7, 0x3E
+ 0xA3,
+ 0xBD,
+ 0xAD,
+ 0x0D,
+ 0x41,
+ 0x11,
+ 0xBB,
+ 0x8D,
+ 0xDC,
+ 0x80,
+ 0x2D,
+ 0xD0,
+ 0xD2,
+ 0xC4,
+ 0x9B,
+ 0x1E,
+ 0x26,
+ 0xEB,
+ 0xE3,
+ 0x33,
+ 0x4A,
+ 0x15,
+ 0xE4,
+ 0x0A,
+ 0xB3,
+ 0xB1,
+ 0x3C,
+ 0x93,
+ 0xBB,
+ 0xAF,
+ 0xF7,
+ 0x3E,
]
+
def main():
# Parse command-line arguments
usage = "%prog <input_file> <output_file>"
@@ -34,5 +63,6 @@ def main():
f.write(firmware)
f.close()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
diff --git a/scripts/whconsole.py b/scripts/whconsole.py
index 5e76b3bc..59e726f1 100755
--- a/scripts/whconsole.py
+++ b/scripts/whconsole.py
@@ -6,10 +6,11 @@
# This file may be distributed under the terms of the GNU GPLv3 license.
import sys, os, optparse, socket, fcntl, select, json, errno, time
+
# Set a file-descriptor as non-blocking
def set_nonblock(fd):
- fcntl.fcntl(fd, fcntl.F_SETFL
- , fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
def webhook_socket_create(uds_filename):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
@@ -22,14 +23,16 @@ def webhook_socket_create(uds_filename):
if e.errno == errno.ECONNREFUSED:
time.sleep(0.1)
continue
- sys.stderr.write("Unable to connect socket %s [%d,%s]\n"
- % (uds_filename, e.errno,
- errno.errorcode[e.errno]))
+ sys.stderr.write(
+ "Unable to connect socket %s [%d,%s]\n"
+ % (uds_filename, e.errno, errno.errorcode[e.errno])
+ )
sys.exit(-1)
break
sys.stderr.write("Connection.\n")
return sock
+
class KeyboardReader:
def __init__(self, uds_filename):
self.kbd_fd = sys.stdin.fileno()
@@ -39,42 +42,46 @@ class KeyboardReader:
self.poll.register(sys.stdin, select.POLLIN | select.POLLHUP)
self.poll.register(self.webhook_socket, select.POLLIN | select.POLLHUP)
self.kbd_data = self.socket_data = b""
+
def process_socket(self):
data = self.webhook_socket.recv(4096)
if not data:
sys.stderr.write("Socket closed\n")
sys.exit(0)
- parts = data.split(b'\x03')
+ parts = data.split(b"\x03")
parts[0] = self.socket_data + parts[0]
self.socket_data = parts.pop()
for line in parts:
sys.stdout.write("GOT: %s\n" % (line,))
+
def process_kbd(self):
data = os.read(self.kbd_fd, 4096)
- parts = data.split(b'\n')
+ parts = data.split(b"\n")
parts[0] = self.kbd_data + parts[0]
self.kbd_data = parts.pop()
for line in parts:
line = line.strip()
- if not line or line.startswith(b'#'):
+ if not line or line.startswith(b"#"):
continue
try:
m = json.loads(line)
except:
sys.stderr.write("ERROR: Unable to parse line\n")
continue
- cm = json.dumps(m, separators=(',', ':'))
+ cm = json.dumps(m, separators=(",", ":"))
sys.stdout.write("SEND: %s\n" % (cm,))
self.webhook_socket.send(cm.encode() + b"\x03")
+
def run(self):
while 1:
- res = self.poll.poll(1000.)
+ res = self.poll.poll(1000.0)
for fd, event in res:
if fd == self.kbd_fd:
self.process_kbd()
else:
self.process_socket()
+
def main():
usage = "%prog [options] <socket filename>"
opts = optparse.OptionParser(usage)
@@ -85,5 +92,6 @@ def main():
ml = KeyboardReader(args[0])
ml.run()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()