Diffstat (limited to 'klippy')
-rw-r--r--  klippy/extras/adxl345.py      | 28
-rw-r--r--  klippy/extras/angle.py        |  4
-rw-r--r--  klippy/extras/bulk_sensor.py  | 34
-rw-r--r--  klippy/extras/lis2dw.py       | 21
-rw-r--r--  klippy/extras/mpu9250.py      | 22
5 files changed, 40 insertions(+), 69 deletions(-)
diff --git a/klippy/extras/adxl345.py b/klippy/extras/adxl345.py
index 76fd4ca4..b91224d5 100644
--- a/klippy/extras/adxl345.py
+++ b/klippy/extras/adxl345.py
@@ -187,7 +187,7 @@ def read_axes_map(config):
MIN_MSG_TIME = 0.100
BYTES_PER_SAMPLE = 5
-SAMPLES_PER_BLOCK = 10
+SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE
BATCH_UPDATES = 0.100
@@ -205,13 +205,12 @@ class ADXL345:
self.mcu = mcu = self.spi.get_mcu()
self.oid = oid = mcu.create_oid()
self.query_adxl345_cmd = None
- self.query_adxl345_status_cmd = None
mcu.add_config_cmd("config_adxl345 oid=%d spi_oid=%d"
% (oid, self.spi.get_oid()))
mcu.add_config_cmd("query_adxl345 oid=%d clock=0 rest_ticks=0"
% (oid,), on_restart=True)
mcu.register_config_callback(self._build_config)
- self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "adxl345_data", oid)
+ self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid)
# Clock tracking
chip_smooth = self.data_rate * BATCH_UPDATES * 2
self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth)
@@ -230,10 +229,8 @@ class ADXL345:
cmdqueue = self.spi.get_command_queue()
self.query_adxl345_cmd = self.mcu.lookup_command(
"query_adxl345 oid=%c clock=%u rest_ticks=%u", cq=cmdqueue)
- self.query_adxl345_status_cmd = self.mcu.lookup_query_command(
- "query_adxl345_status oid=%c",
- "adxl345_status oid=%c clock=%u query_ticks=%u next_sequence=%hu"
- " buffered=%c fifo=%c limit_count=%hu", oid=self.oid, cq=cmdqueue)
+ self.clock_updater.setup_query_command(
+ self.mcu, "query_adxl345_status oid=%c", oid=self.oid, cq=cmdqueue)
def read_reg(self, reg):
params = self.spi.spi_transfer([reg | REG_MOD_READ, 0x00])
response = bytearray(params['response'])
@@ -286,17 +283,6 @@ class ADXL345:
self.clock_sync.set_last_chip_clock(seq * SAMPLES_PER_BLOCK + i)
del samples[count:]
return samples
- def _update_clock(self, minclock=0):
- # Query current state
- for retry in range(5):
- params = self.query_adxl345_status_cmd.send([self.oid],
- minclock=minclock)
- fifo = params['fifo'] & 0x7f
- if fifo <= 32:
- break
- else:
- raise self.printer.command_error("Unable to query adxl345 fifo")
- self.clock_updater.update_clock(params)
# Start, stop, and process message batches
def _start_measurements(self):
# In case of miswiring, testing ADXL345 device ID prevents treating
@@ -325,8 +311,6 @@ class ADXL345:
logging.info("ADXL345 starting '%s' measurements", self.name)
# Initialize clock tracking
self.clock_updater.note_start(reqclock)
- self._update_clock(minclock=reqclock)
- self.clock_updater.clear_duration_filter()
self.last_error_count = 0
def _finish_measurements(self):
# Halt bulk reading
@@ -334,7 +318,7 @@ class ADXL345:
self.bulk_queue.clear_samples()
logging.info("ADXL345 finished '%s' measurements", self.name)
def _process_batch(self, eventtime):
- self._update_clock()
+ self.clock_updater.update_clock()
raw_samples = self.bulk_queue.pull_samples()
if not raw_samples:
return {}
@@ -342,7 +326,7 @@ class ADXL345:
if not samples:
return {}
return {'data': samples, 'errors': self.last_error_count,
- 'overflows': self.clock_updater.get_last_limit_count()}
+ 'overflows': self.clock_updater.get_last_overflows()}
def load_config(config):
return ADXL345(config)
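
For reference, the derived constant reproduces the old hard-coded block size. A quick check, assuming MAX_BULK_MSG_SIZE in bulk_sensor.py is 52 bytes (its definition is not shown in this diff):

    # ADXL345 samples are 5 bytes each (assumed 52-byte bulk message)
    BYTES_PER_SAMPLE = 5
    SAMPLES_PER_BLOCK = 52 // BYTES_PER_SAMPLE   # = 10, same as the removed literal
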
diff --git a/klippy/extras/angle.py b/klippy/extras/angle.py
index 163168d0..23f402a7 100644
--- a/klippy/extras/angle.py
+++ b/klippy/extras/angle.py
@@ -412,7 +412,7 @@ class HelperTLE5012B:
self._write_reg(reg, val)
BYTES_PER_SAMPLE = 3
-SAMPLES_PER_BLOCK = 16
+SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE
SAMPLE_PERIOD = 0.000400
BATCH_UPDATES = 0.100
@@ -445,7 +445,7 @@ class Angle:
"query_spi_angle oid=%d clock=0 rest_ticks=0 time_shift=0"
% (oid,), on_restart=True)
mcu.register_config_callback(self._build_config)
- self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "spi_angle_data", oid)
+ self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid)
# Process messages in batches
self.batch_bulk = bulk_sensor.BatchBulkHelper(
self.printer, self._process_batch,
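
The shortened BulkDataQueue call above relies on the new default message name; per the constructor signature in the bulk_sensor.py diff below, the two constructions sketched here are equivalent (mcu and oid stand in for the driver's existing objects):

    # Explicit form, spelling out the shared response name
    bulk_queue = bulk_sensor.BulkDataQueue(mcu, "sensor_bulk_data", oid)
    # New call sites pass only the oid and accept the default
    bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid)
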
diff --git a/klippy/extras/bulk_sensor.py b/klippy/extras/bulk_sensor.py
index 8d0c0541..df5a5da2 100644
--- a/klippy/extras/bulk_sensor.py
+++ b/klippy/extras/bulk_sensor.py
@@ -114,7 +114,7 @@ class BatchWebhooksClient:
# Helper class to store incoming messages in a queue
class BulkDataQueue:
- def __init__(self, mcu, msg_name, oid):
+ def __init__(self, mcu, msg_name="sensor_bulk_data", oid=None):
# Measurement storage (accessed from background thread)
self.lock = threading.Lock()
self.raw_samples = []
@@ -206,31 +206,37 @@ class ChipClockUpdater:
self.clock_sync = clock_sync
self.bytes_per_sample = bytes_per_sample
self.samples_per_block = MAX_BULK_MSG_SIZE // bytes_per_sample
- self.mcu = clock_sync.mcu
self.last_sequence = self.max_query_duration = 0
- self.last_limit_count = 0
+ self.last_overflows = 0
+ self.mcu = self.oid = self.query_status_cmd = None
+ def setup_query_command(self, mcu, msgformat, oid, cq):
+ self.mcu = mcu
+ self.oid = oid
+ self.query_status_cmd = self.mcu.lookup_query_command(
+ msgformat, "sensor_bulk_status oid=%c clock=%u query_ticks=%u"
+ " next_sequence=%hu buffered=%u possible_overflows=%hu",
+ oid=oid, cq=cq)
def get_last_sequence(self):
return self.last_sequence
- def get_last_limit_count(self):
- return self.last_limit_count
+ def get_last_overflows(self):
+ return self.last_overflows
def clear_duration_filter(self):
self.max_query_duration = 1 << 31
def note_start(self, reqclock):
self.last_sequence = 0
- self.last_limit_count = 0
+ self.last_overflows = 0
self.clock_sync.reset(reqclock, 0)
self.clear_duration_filter()
- def update_clock(self, params):
- # Handle a status response message of the form:
- # adxl345_status oid=x clock=x query_ticks=x next_sequence=x
- # buffered=x fifo=x limit_count=x
- fifo = params['fifo']
+ self.update_clock(minclock=reqclock)
+ self.clear_duration_filter()
+ def update_clock(self, minclock=0):
+ params = self.query_status_cmd.send([self.oid], minclock=minclock)
mcu_clock = self.mcu.clock32_to_clock64(params['clock'])
seq_diff = (params['next_sequence'] - self.last_sequence) & 0xffff
self.last_sequence += seq_diff
buffered = params['buffered']
- lc_diff = (params['limit_count'] - self.last_limit_count) & 0xffff
- self.last_limit_count += lc_diff
+ po_diff = (params['possible_overflows'] - self.last_overflows) & 0xffff
+ self.last_overflows += po_diff
duration = params['query_ticks']
if duration > self.max_query_duration:
# Skip measurement as a high query time could skew clock tracking
@@ -239,7 +245,7 @@ class ChipClockUpdater:
return
self.max_query_duration = 2 * duration
msg_count = (self.last_sequence * self.samples_per_block
- + buffered // self.bytes_per_sample + fifo)
+ + buffered // self.bytes_per_sample)
# The "chip clock" is the message counter plus .5 for average
# inaccuracy of query responses and plus .5 for assumed offset
# of hardware processing time.
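
With the status query owned by ChipClockUpdater, a sensor driver now wires the command up once and lets the helper do the clock bookkeeping. A minimal driver-side sketch of the new flow; the command name query_example_status is a placeholder, while the method and attribute names follow the drivers in this diff:

    # In _build_config(): register the sensor's status query with the helper
    self.clock_updater.setup_query_command(
        self.mcu, "query_example_status oid=%c", oid=self.oid, cq=cmdqueue)

    # When measurements start: note_start() now issues the initial query
    # (minclock=reqclock) and resets the duration filter on its own
    self.clock_updater.note_start(reqclock)

    # In _process_batch(): refresh clock tracking, then report overflows
    self.clock_updater.update_clock()
    return {'data': samples, 'errors': self.last_error_count,
            'overflows': self.clock_updater.get_last_overflows()}
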
diff --git a/klippy/extras/lis2dw.py b/klippy/extras/lis2dw.py
index a7fe54d7..74911e6f 100644
--- a/klippy/extras/lis2dw.py
+++ b/klippy/extras/lis2dw.py
@@ -33,7 +33,7 @@ SCALE = FREEFALL_ACCEL * 1.952 / 4
MIN_MSG_TIME = 0.100
BYTES_PER_SAMPLE = 6
-SAMPLES_PER_BLOCK = 8
+SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE
BATCH_UPDATES = 0.100
@@ -49,13 +49,12 @@ class LIS2DW:
self.mcu = mcu = self.spi.get_mcu()
self.oid = oid = mcu.create_oid()
self.query_lis2dw_cmd = None
- self.query_lis2dw_status_cmd = None
mcu.add_config_cmd("config_lis2dw oid=%d spi_oid=%d"
% (oid, self.spi.get_oid()))
mcu.add_config_cmd("query_lis2dw oid=%d clock=0 rest_ticks=0"
% (oid,), on_restart=True)
mcu.register_config_callback(self._build_config)
- self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "lis2dw_data", oid)
+ self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid)
# Clock tracking
chip_smooth = self.data_rate * BATCH_UPDATES * 2
self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth)
@@ -75,10 +74,8 @@ class LIS2DW:
cmdqueue = self.spi.get_command_queue()
self.query_lis2dw_cmd = self.mcu.lookup_command(
"query_lis2dw oid=%c clock=%u rest_ticks=%u", cq=cmdqueue)
- self.query_lis2dw_status_cmd = self.mcu.lookup_query_command(
- "query_lis2dw_status oid=%c",
- "lis2dw_status oid=%c clock=%u query_ticks=%u next_sequence=%hu"
- " buffered=%c fifo=%c limit_count=%hu", oid=self.oid, cq=cmdqueue)
+ self.clock_updater.setup_query_command(
+ self.mcu, "query_lis2dw_status oid=%c", oid=self.oid, cq=cmdqueue)
def read_reg(self, reg):
params = self.spi.spi_transfer([reg | REG_MOD_READ, 0x00])
response = bytearray(params['response'])
@@ -133,10 +130,6 @@ class LIS2DW:
self.clock_sync.set_last_chip_clock(seq * SAMPLES_PER_BLOCK + i)
del samples[count:]
return samples
- def _update_clock(self, minclock=0):
- params = self.query_lis2dw_status_cmd.send([self.oid],
- minclock=minclock)
- self.clock_updater.update_clock(params)
# Start, stop, and process message batches
def _start_measurements(self):
# In case of miswiring, testing LIS2DW device ID prevents treating
@@ -170,8 +163,6 @@ class LIS2DW:
logging.info("LIS2DW starting '%s' measurements", self.name)
# Initialize clock tracking
self.clock_updater.note_start(reqclock)
- self._update_clock(minclock=reqclock)
- self.clock_updater.clear_duration_filter()
self.last_error_count = 0
def _finish_measurements(self):
# Halt bulk reading
@@ -180,7 +171,7 @@ class LIS2DW:
logging.info("LIS2DW finished '%s' measurements", self.name)
self.set_reg(REG_LIS2DW_FIFO_CTRL, 0x00)
def _process_batch(self, eventtime):
- self._update_clock()
+ self.clock_updater.update_clock()
raw_samples = self.bulk_queue.pull_samples()
if not raw_samples:
return {}
@@ -188,7 +179,7 @@ class LIS2DW:
if not samples:
return {}
return {'data': samples, 'errors': self.last_error_count,
- 'overflows': self.clock_updater.get_last_limit_count()}
+ 'overflows': self.clock_updater.get_last_overflows()}
def load_config(config):
return LIS2DW(config)
diff --git a/klippy/extras/mpu9250.py b/klippy/extras/mpu9250.py
index 41376dc3..4626e1c0 100644
--- a/klippy/extras/mpu9250.py
+++ b/klippy/extras/mpu9250.py
@@ -50,7 +50,7 @@ FIFO_SIZE = 512
MIN_MSG_TIME = 0.100
BYTES_PER_SAMPLE = 6
-SAMPLES_PER_BLOCK = 8
+SAMPLES_PER_BLOCK = bulk_sensor.MAX_BULK_MSG_SIZE // BYTES_PER_SAMPLE
BATCH_UPDATES = 0.100
@@ -70,9 +70,8 @@ class MPU9250:
self.mcu = mcu = self.i2c.get_mcu()
self.oid = oid = mcu.create_oid()
self.query_mpu9250_cmd = None
- self.query_mpu9250_status_cmd = None
mcu.register_config_callback(self._build_config)
- self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, "mpu9250_data", oid)
+ self.bulk_queue = bulk_sensor.BulkDataQueue(mcu, oid=oid)
# Clock tracking
chip_smooth = self.data_rate * BATCH_UPDATES * 2
self.clock_sync = bulk_sensor.ClockSyncRegression(mcu, chip_smooth)
@@ -95,10 +94,8 @@ class MPU9250:
% (self.oid,), on_restart=True)
self.query_mpu9250_cmd = self.mcu.lookup_command(
"query_mpu9250 oid=%c clock=%u rest_ticks=%u", cq=cmdqueue)
- self.query_mpu9250_status_cmd = self.mcu.lookup_query_command(
- "query_mpu9250_status oid=%c",
- "mpu9250_status oid=%c clock=%u query_ticks=%u next_sequence=%hu"
- " buffered=%c fifo=%u limit_count=%hu", oid=self.oid, cq=cmdqueue)
+ self.clock_updater.setup_query_command(
+ self.mcu, "query_mpu9250_status oid=%c", oid=self.oid, cq=cmdqueue)
def read_reg(self, reg):
params = self.i2c.i2c_read([reg], 1)
return bytearray(params['response'])[0]
@@ -142,11 +139,6 @@ class MPU9250:
self.clock_sync.set_last_chip_clock(seq * SAMPLES_PER_BLOCK + i)
del samples[count:]
return samples
-
- def _update_clock(self, minclock=0):
- params = self.query_mpu9250_status_cmd.send([self.oid],
- minclock=minclock)
- self.clock_updater.update_clock(params)
# Start, stop, and process message batches
def _start_measurements(self):
# In case of miswiring, testing MPU9250 device ID prevents treating
@@ -184,8 +176,6 @@ class MPU9250:
logging.info("MPU9250 starting '%s' measurements", self.name)
# Initialize clock tracking
self.clock_updater.note_start(reqclock)
- self._update_clock(minclock=reqclock)
- self.clock_updater.clear_duration_filter()
self.last_error_count = 0
def _finish_measurements(self):
# Halt bulk reading
@@ -195,7 +185,7 @@ class MPU9250:
self.set_reg(REG_PWR_MGMT_1, SET_PWR_MGMT_1_SLEEP)
self.set_reg(REG_PWR_MGMT_2, SET_PWR_MGMT_2_OFF)
def _process_batch(self, eventtime):
- self._update_clock()
+ self.clock_updater.update_clock()
raw_samples = self.bulk_queue.pull_samples()
if not raw_samples:
return {}
@@ -203,7 +193,7 @@ class MPU9250:
if not samples:
return {}
return {'data': samples, 'errors': self.last_error_count,
- 'overflows': self.clock_updater.get_last_limit_count()}
+ 'overflows': self.clock_updater.get_last_overflows()}
def load_config(config):
return MPU9250(config)
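
As a cross-check of the shared block-size and message-count math, a worked example for a 6-byte sample (the LIS2DW and MPU9250 case), again assuming MAX_BULK_MSG_SIZE is 52 and using made-up status values:

    bytes_per_sample = 6
    samples_per_block = 52 // bytes_per_sample    # = 8, same as the removed literals
    last_sequence = 3        # assumed value after folding in next_sequence
    buffered = 12            # assumed bytes still queued on the mcu
    msg_count = (last_sequence * samples_per_block
                 + buffered // bytes_per_sample)  # = 26
    # Per the comment in bulk_sensor.py, the chip clock is msg_count plus .5 + .5
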