author    Kevin O'Connor <kevin@koconnor.net>  2020-02-25 12:54:55 -0500
committer Kevin O'Connor <kevin@koconnor.net>  2020-03-13 21:53:48 -0400
commit    acd165cbea2f48812b7fd96c28b81622e5c13665
tree      0496d4905d40c7cfe8a164a7c8cffd1eab53e8b1 /klippy/chelper/stepcompress.c
parent    d86bf0b927b0a56bcef5a2fee98f610884c0d4a5
stepcompress: Implement a step+dir+step filter
Some stepper motor drivers do not respond well to rapid "step + direction change + step" events. In particular, it is believed this can cause "over current" events on the tmc2208 drivers when they are in "stealthchop" mode. Detect these events and remove them from the generated step times.

Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
Diffstat (limited to 'klippy/chelper/stepcompress.c')
-rw-r--r--  klippy/chelper/stepcompress.c  113
1 file changed, 86 insertions(+), 27 deletions(-)
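The diff below implements the filter inside stepcompress_append() by buffering one pending step per stepper. For illustration only, here is a minimal self-contained sketch of the same idea: two opposite steps issued within SDS_FILTER_TIME of each other cancel to a net no-op, so both are dropped rather than sent to the driver as a rapid step+dir+step sequence. The struct filter, filter_append(), and emit_step() names are hypothetical stand-ins for the patch's sc->next_step_clock / sc->next_step_dir fields and queueing code.

#include <stdint.h>
#include <stdio.h>

#define SDS_FILTER_TIME .000750  // 750us window, same value as the patch

struct filter {
    double mcu_freq;         // mcu clock ticks per second
    uint64_t pending_clock;  // 0 means no step is pending
    int pending_dir;
};

static void emit_step(uint64_t clock, int dir) {
    printf("step clock=%llu dir=%d\n", (unsigned long long)clock, dir);
}

// Feed one step; emits the previously pending step unless this step
// reverses direction quickly enough to cancel it
static void filter_append(struct filter *f, uint64_t clock, int dir) {
    if (f->pending_clock) {
        if (dir != f->pending_dir
            && clock - f->pending_clock < SDS_FILTER_TIME * f->mcu_freq) {
            // Rapid step+dir+step: the pending step and this step are
            // a net no-op, so drop both
            f->pending_clock = 0;
            f->pending_dir = dir;
            return;
        }
        emit_step(f->pending_clock, f->pending_dir);
    }
    f->pending_clock = clock;
    f->pending_dir = dir;
}

int main(void) {
    struct filter f = { .mcu_freq = 16000000. };  // assume a 16MHz mcu
    filter_append(&f, 1000000, 1);
    filter_append(&f, 1000100, 0);  // reversal 100 ticks later: cancelled
    filter_append(&f, 2000000, 0);  // survives as the new pending step
    if (f.pending_clock)  // final flush, like stepcompress_commit() below
        emit_step(f.pending_clock, f.pending_dir);
    return 0;
}

Note that a cancelled pair never reaches the queue, so no direction-change command is emitted for the transient reversal either.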
diff --git a/klippy/chelper/stepcompress.c b/klippy/chelper/stepcompress.c
index 7b0fdfd7..812a3b50 100644
--- a/klippy/chelper/stepcompress.c
+++ b/klippy/chelper/stepcompress.c
@@ -38,6 +38,9 @@ struct stepcompress {
struct list_head msg_queue;
uint32_t queue_step_msgid, set_next_step_dir_msgid, oid;
int sdir, invert_sdir;
+ // Step+dir+step filter
+ uint64_t next_step_clock;
+ int next_step_dir;
};
@@ -270,7 +273,7 @@ stepcompress_get_oid(struct stepcompress *sc)
int
stepcompress_get_step_dir(struct stepcompress *sc)
{
- return sc->sdir;
+ return sc->next_step_dir;
}
// Determine the "print time" of the last_step_clock
@@ -293,7 +296,7 @@ stepcompress_set_time(struct stepcompress *sc
// Convert previously scheduled steps into commands for the mcu
static int
-stepcompress_flush(struct stepcompress *sc, uint64_t move_clock)
+queue_flush(struct stepcompress *sc, uint64_t move_clock)
{
if (sc->queue_pos >= sc->queue_next)
return 0;
@@ -346,7 +349,7 @@ set_next_step_dir(struct stepcompress *sc, int sdir)
if (sc->sdir == sdir)
return 0;
sc->sdir = sdir;
- int ret = stepcompress_flush(sc, UINT64_MAX);
+ int ret = queue_flush(sc, UINT64_MAX);
if (ret)
return ret;
uint32_t msg[3] = {
@@ -361,26 +364,30 @@ set_next_step_dir(struct stepcompress *sc, int sdir)
// Maximum clock delta between messages in the queue
#define CLOCK_DIFF_MAX (3<<28)
-// Slow path for stepcompress_append()
+// Slow path for queue_append() - handle next step far in future
static int
-queue_append_slow(struct stepcompress *sc, double rel_sc)
+queue_append_far(struct stepcompress *sc)
{
- uint64_t abs_step_clock = (uint64_t)rel_sc + sc->last_step_clock;
- if (abs_step_clock >= sc->last_step_clock + CLOCK_DIFF_MAX) {
- // Avoid integer overflow on steps far in the future
- int ret = stepcompress_flush(sc, abs_step_clock - CLOCK_DIFF_MAX + 1);
- if (ret)
- return ret;
-
- if (abs_step_clock >= sc->last_step_clock + CLOCK_DIFF_MAX)
- return stepcompress_flush_far(sc, abs_step_clock);
- }
+ uint64_t step_clock = sc->next_step_clock;
+ sc->next_step_clock = 0;
+ int ret = queue_flush(sc, step_clock - CLOCK_DIFF_MAX + 1);
+ if (ret)
+ return ret;
+ if (step_clock >= sc->last_step_clock + CLOCK_DIFF_MAX)
+ return stepcompress_flush_far(sc, step_clock);
+ *sc->queue_next++ = step_clock;
+ return 0;
+}
+// Slow path for queue_append() - expand the internal queue storage
+static int
+queue_append_extend(struct stepcompress *sc)
+{
if (sc->queue_next - sc->queue_pos > 65535 + 2000) {
// No point in keeping more than 64K steps in memory
uint32_t flush = (*(sc->queue_next-65535)
- (uint32_t)sc->last_step_clock);
- int ret = stepcompress_flush(sc, sc->last_step_clock + flush);
+ int ret = queue_flush(sc, sc->last_step_clock + flush);
if (ret)
return ret;
}
@@ -405,30 +412,82 @@ queue_append_slow(struct stepcompress *sc, double rel_sc)
sc->queue_next = sc->queue + in_use;
}
- *sc->queue_next++ = abs_step_clock;
+ *sc->queue_next++ = sc->next_step_clock;
+ sc->next_step_clock = 0;
return 0;
}
// Add a step time to the queue (flushing the queue if needed)
-inline int
-stepcompress_append(struct stepcompress *sc, int sdir
- , double print_time, double step_time)
+static int
+queue_append(struct stepcompress *sc)
{
- if (unlikely(sdir != sc->sdir)) {
- int ret = set_next_step_dir(sc, sdir);
+ if (unlikely(sc->next_step_dir != sc->sdir)) {
+ int ret = set_next_step_dir(sc, sc->next_step_dir);
if (ret)
return ret;
}
+ if (unlikely(sc->next_step_clock >= sc->last_step_clock + CLOCK_DIFF_MAX))
+ return queue_append_far(sc);
+ if (unlikely(sc->queue_next >= sc->queue_end))
+ return queue_append_extend(sc);
+ *sc->queue_next++ = sc->next_step_clock;
+ sc->next_step_clock = 0;
+ return 0;
+}
+
+#define SDS_FILTER_TIME .000750
+
+// Add next step time
+int
+stepcompress_append(struct stepcompress *sc, int sdir
+ , double print_time, double step_time)
+{
+ // Calculate step clock
double offset = print_time - sc->last_step_print_time;
double rel_sc = (step_time + offset) * sc->mcu_freq;
- if (unlikely(sc->queue_next >= sc->queue_end
- || rel_sc >= (double)CLOCK_DIFF_MAX))
- // Slow path to handle queue expansion and integer overflow
- return queue_append_slow(sc, rel_sc);
- *sc->queue_next++ = (uint32_t)sc->last_step_clock + (uint32_t)rel_sc;
+ uint64_t step_clock = sc->last_step_clock + (uint64_t)rel_sc;
+ // Flush previous pending step (if any)
+ if (sc->next_step_clock) {
+ if (unlikely(sdir != sc->next_step_dir)) {
+ double diff = step_clock - sc->next_step_clock;
+ if (diff < SDS_FILTER_TIME * sc->mcu_freq) {
+ // Rollback last step to avoid rapid step+dir+step
+ sc->next_step_clock = 0;
+ sc->next_step_dir = sdir;
+ return 0;
+ }
+ }
+ int ret = queue_append(sc);
+ if (ret)
+ return ret;
+ }
+ // Store this step as the next pending step
+ sc->next_step_clock = step_clock;
+ sc->next_step_dir = sdir;
+ return 0;
+}
+
+// Commit next pending step (ie, do not allow a rollback)
+int
+stepcompress_commit(struct stepcompress *sc)
+{
+ if (sc->next_step_clock)
+ return queue_append(sc);
return 0;
}
+// Flush pending steps
+static int
+stepcompress_flush(struct stepcompress *sc, uint64_t move_clock)
+{
+ if (sc->next_step_clock && move_clock >= sc->next_step_clock) {
+ int ret = queue_append(sc);
+ if (ret)
+ return ret;
+ }
+ return queue_flush(sc, move_clock);
+}
+
// Reset the internal state of the stepcompress object
int __visible
stepcompress_reset(struct stepcompress *sc, uint64_t last_step_clock)
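For context beyond this file: the patch changes stepcompress_append() to leave the most recent step "pending" so the filter can still roll it back, and adds stepcompress_commit() to finalize it once a move is fully generated. A hypothetical caller (the real callers live elsewhere in klippy/chelper and are outside this diff) would drive the pair roughly as below; struct step_event and flush_move() are illustrative names, not part of the patch.

#include "stepcompress.h"  // struct stepcompress, append/commit prototypes

struct step_event {
    int sdir;
    double print_time, step_time;
};

// Hypothetical helper: queue every step of a fully generated move,
// then commit so the filter can no longer roll back the final step
int
flush_move(struct stepcompress *sc, const struct step_event *steps, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        int ret = stepcompress_append(sc, steps[i].sdir,
                                      steps[i].print_time,
                                      steps[i].step_time);
        if (ret)
            return ret;
    }
    return stepcompress_commit(sc);
}

Committing at move boundaries is what bounds the rollback window: only the single most recent step is ever eligible for cancellation.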