From cd1e5ce0c2fc10990c9672b86c83c5bdd26b5a65 Mon Sep 17 00:00:00 2001
From: Scott Lahteine <thinkyhead@users.noreply.github.com>
Date: Thu, 20 Apr 2023 15:07:10 -0500
Subject: [PATCH] Trajectory array-per-axis

---
 Marlin/src/core/types.h         |  97 +++++++++
 Marlin/src/module/ft_motion.cpp | 352 ++++++++++++++++----------------
 Marlin/src/module/ft_motion.h   |  13 +-
 Marlin/src/module/ft_types.h    |   3 +
 4 files changed, 282 insertions(+), 183 deletions(-)

diff --git a/Marlin/src/core/types.h b/Marlin/src/core/types.h
index 15d572af60226..c6c64c0e5856e 100644
--- a/Marlin/src/core/types.h
+++ b/Marlin/src/core/types.h
@@ -787,6 +787,103 @@ struct XYZEval {
   FI bool operator!=(const XYZEval &rs) const { return !operator==(rs); }
 };
 
+#include <string.h> // for memset
+
+template<typename T, int SIZE>
+struct XYZarray {
+  typedef T el[SIZE];
+  union {
+    el data[NUM_AXES];
+    struct { el NUM_AXIS_ARGS(); };
+    struct { el NUM_AXIS_LIST(a, b, c, _i, _j, _k, _u, _v, _w); };
+  };
+  FI void reset() { ZERO(data); }
+
+  FI void set(const int n, const XYval<T> p)   { NUM_AXIS_CODE(x[n]=p.x, y[n]=p.y,,,,,,,); }
+  FI void set(const int n, const XYZval<T> p)  { NUM_AXIS_CODE(x[n]=p.x, y[n]=p.y, z[n]=p.z, i[n]=p.i, j[n]=p.j, k[n]=p.k, u[n]=p.u, v[n]=p.v, w[n]=p.w ); }
+  FI void set(const int n, const XYZEval<T> p) { NUM_AXIS_CODE(x[n]=p.x, y[n]=p.y, z[n]=p.z, i[n]=p.i, j[n]=p.j, k[n]=p.k, u[n]=p.u, v[n]=p.v, w[n]=p.w ); }
+
+  // Setter for all individual args
+  FI void set(const int n, NUM_AXIS_ARGS(const T)) { NUM_AXIS_CODE(a[n] = x, b[n] = y, c[n] = z, _i[n] = i, _j[n] = j, _k[n] = k, _u[n] = u, _v[n] = v, _w[n] = w); }
+
+  // Setters with fewer elements leave the rest untouched
+  #if HAS_Y_AXIS
+    FI void set(const int n, const T px) { x[n] = px; }
+  #endif
+  #if HAS_Z_AXIS
+    FI void set(const int n, const T px, const T py) { x[n] = px; y[n] = py; }
+  #endif
+  #if HAS_I_AXIS
+    FI void set(const int n, const T px, const T py, const T pz) { x[n] = px; y[n] = py; z[n] = pz; }
+  #endif
+  #if HAS_J_AXIS
+    FI void set(const int n, const T px, const T py, const T pz, const T pi) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; }
+  #endif
+  #if HAS_K_AXIS
+    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; }
+  #endif
+  #if HAS_U_AXIS
+    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; }
+  #endif
+  #if HAS_V_AXIS
+    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk, const T pu) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; u[n] = pu; }
+  #endif
+  #if HAS_W_AXIS
+    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk, const T pu, const T pv) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; u[n] = pu; v[n] = pv; }
+  #endif
+
+  FI XYZval<T> operator[](const int n) const { return XYZval<T>(NUM_AXIS_ARRAY(x[n], y[n], z[n], i[n], j[n], k[n], u[n], v[n], w[n])); }
+};
+
+template<typename T, int SIZE>
+struct XYZEarray {
+  typedef T el[SIZE];
+  union {
+    el data[LOGICAL_AXES];
+    struct { el LOGICAL_AXIS_ARGS(); };
+    struct { el LOGICAL_AXIS_LIST(_e, a, b, c, _i, _j, _k, _u, _v, _w); };
+  };
+  FI void reset() { ZERO(data); }
+
+  FI void set(const int n, const XYval<T> p)   { NUM_AXIS_CODE(x[n]=p.x, y[n]=p.y,,,,,,,); }
+  FI void set(const int n, const XYZval<T> p)  { NUM_AXIS_CODE(x[n]=p.x, y[n]=p.y, z[n]=p.z, i[n]=p.i, j[n]=p.j, k[n]=p.k, u[n]=p.u, v[n]=p.v, w[n]=p.w ); }
+  FI void set(const int n, const XYZEval<T> p) { LOGICAL_AXIS_CODE(e[n]=p.e, x[n]=p.x, y[n]=p.y, z[n]=p.z, i[n]=p.i, j[n]=p.j, k[n]=p.k, u[n]=p.u, v[n]=p.v, w[n]=p.w ); }
+
+  // Setter for all individual args
+  FI void set(const int n, NUM_AXIS_ARGS(const T)) { NUM_AXIS_CODE(a[n] = x, b[n] = y, c[n] = z, _i[n] = i, _j[n] = j, _k[n] = k, _u[n] = u, _v[n] = v, _w[n] = w); }
+  #if LOGICAL_AXES > NUM_AXES
+    FI void set(const int n, LOGICAL_AXIS_ARGS(const T)) { LOGICAL_AXIS_CODE(_e[n] = e, a[n] = x, b[n] = y, c[n] = z, _i[n] = i, _j[n] = j, _k[n] = k, _u[n] = u, _v[n] = v, _w[n] = w); }
+  #endif
+
+  // Setters with fewer elements leave the rest untouched
+  #if HAS_Y_AXIS
+    FI void set(const int n, const T px) { x[n] = px; }
+  #endif
+  #if HAS_Z_AXIS
+    FI void set(const int n, const T px, const T py) { x[n] = px; y[n] = py; }
+  #endif
+  #if HAS_I_AXIS
+    FI void set(const int n, const T px, const T py, const T pz) { x[n] = px; y[n] = py; z[n] = pz; }
+  #endif
+  #if HAS_J_AXIS
+    FI void set(const int n, const T px, const T py, const T pz, const T pi) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; }
+  #endif
+  #if HAS_K_AXIS
+    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; }
+  #endif
+  #if HAS_U_AXIS
+    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; }
+  #endif
+  #if HAS_V_AXIS
+    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk, const T pu) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; u[n] = pu; }
+  #endif
+  #if HAS_W_AXIS
+    FI void set(const int n, const T px, const T py, const T pz, const T pi, const T pj, const T pk, const T pu, const T pv) { x[n] = px; y[n] = py; z[n] = pz; i[n] = pi; j[n] = pj; k[n] = pk; u[n] = pu; v[n] = pv; }
+  #endif
+
+  FI XYZEval<T> operator[](const int n) const { return XYZEval<T>(LOGICAL_AXIS_ARRAY(e[n], x[n], y[n], z[n], i[n], j[n], k[n], u[n], v[n], w[n])); }
+};
+
 #undef _RECIP
 #undef _ABS
 #undef _LS
diff --git a/Marlin/src/module/ft_motion.cpp b/Marlin/src/module/ft_motion.cpp
index cc567eb12de6f..c35f6d47f66cf 100644
--- a/Marlin/src/module/ft_motion.cpp
+++ b/Marlin/src/module/ft_motion.cpp
@@ -65,8 +65,8 @@ bool FxdTiCtrl::sts_stepperBusy = false;         // The stepper buffer has item
 
 // Private variables.
 // NOTE: These are sized for Ulendo FBS use.
-xyze_float_t FxdTiCtrl::traj[2 * (FTM_BATCH_SIZE)], // = {0.0f} Storage for fixed-time-based trajectory. - FxdTiCtrl::trajMod[FTM_BATCH_SIZE]; // = {0.0f} Storage for modified fixed-time-based trajectory. +xyze_trajectory_t FxdTiCtrl::traj; // = {0.0f} Storage for fixed-time-based trajectory. +xyze_trajectoryMod_t FxdTiCtrl::trajMod; // = {0.0f} Storage for modified fixed-time-based trajectory. block_t* FxdTiCtrl::current_block_cpy = nullptr; // Pointer to current block being processed. bool FxdTiCtrl::blockProcRdy = false, // Indicates a block is ready to be processed. @@ -74,7 +74,7 @@ bool FxdTiCtrl::blockProcRdy = false, // Indicates a block is ready FxdTiCtrl::blockProcDn = false; // Indicates current block is done being processed. bool FxdTiCtrl::batchRdy = false; // Indicates a batch of the fixed time trajectory // has been generated, is now available in the upper - - // half of traj[].x, y, z ... e vectors, and is ready to be + // half of traj.x[], y, z ... e vectors, and is ready to be // post processed, if applicable, then interpolated. bool FxdTiCtrl::batchRdyForInterp = false; // Indicates the batch is done being post processed, // if applicable, and is ready to be converted to step commands. @@ -112,9 +112,8 @@ hal_timer_t FxdTiCtrl::nextStepTicks = FTM_MIN_TICKS; // Accumulator for the nex // Shaping variables. #if HAS_X_AXIS - uint32_t FxdTiCtrl::Shaping::zi_idx = 0, // Index of storage in the data point delay vectors. - FxdTiCtrl::Shaping::max_i = 0; // Vector length for the selected shaper. FxdTiCtrl::shaping_t FxdTiCtrl::shaping = { + 0, 0, x:{ { 0.0f }, { 0.0f }, { 0 } }, // d_zi, Ai, Ni #if HAS_Y_AXIS y:{ { 0.0f }, { 0.0f }, { 0 } } // d_zi, Ai, Ni @@ -149,8 +148,20 @@ void FxdTiCtrl::runoutBlock() { // the batch size), or runout is not enabled, no runout is needed. // Fill out the trajectory window with the last position calculated. 
if (makeVector_batchIdx > FTM_BATCH_SIZE) - for (uint32_t i = makeVector_batchIdx; i < 2 * (FTM_BATCH_SIZE); i++) - traj[i] = traj[makeVector_batchIdx - 1]; + for (uint32_t i = makeVector_batchIdx; i < 2 * (FTM_BATCH_SIZE); i++) { + LOGICAL_AXIS_CODE( + traj.e[i] = traj.e[makeVector_batchIdx - 1], + traj.x[i] = traj.x[makeVector_batchIdx - 1], + traj.y[i] = traj.y[makeVector_batchIdx - 1], + traj.z[i] = traj.z[makeVector_batchIdx - 1], + traj.i[i] = traj.i[makeVector_batchIdx - 1], + traj.j[i] = traj.j[makeVector_batchIdx - 1], + traj.k[i] = traj.k[makeVector_batchIdx - 1], + traj.u[i] = traj.u[makeVector_batchIdx - 1], + traj.v[i] = traj.v[makeVector_batchIdx - 1], + traj.w[i] = traj.w[makeVector_batchIdx - 1] + ); + } makeVector_batchIdx = FTM_BATCH_SIZE; batchRdy = true; @@ -193,26 +204,26 @@ void FxdTiCtrl::loop() { // Call Ulendo FBS here. - // Copy the uncompensated vectors. + // Copy the uncompensated vectors. (XY done, other axes uncompensated) LOGICAL_AXIS_CODE( - memcpy(&trajMod[0].e, &traj[FTM_BATCH_SIZE].e, sizeof(trajMod[0].e)), // E uncompensated - memcpy(&trajMod[0].x, &traj[FTM_BATCH_SIZE].x, sizeof(trajMod[0].x)), // Done compensating X - memcpy(&trajMod[0].y, &traj[FTM_BATCH_SIZE].y, sizeof(trajMod[0].y)), // Done compensating Y - memcpy(&trajMod[0].z, &traj[FTM_BATCH_SIZE].z, sizeof(trajMod[0].z)), // Z...W uncompensated - memcpy(&trajMod[0].i, &traj[FTM_BATCH_SIZE].i, sizeof(trajMod[0].i)), - memcpy(&trajMod[0].j, &traj[FTM_BATCH_SIZE].j, sizeof(trajMod[0].j)), - memcpy(&trajMod[0].k, &traj[FTM_BATCH_SIZE].k, sizeof(trajMod[0].k)), - memcpy(&trajMod[0].u, &traj[FTM_BATCH_SIZE].u, sizeof(trajMod[0].u)), - memcpy(&trajMod[0].v, &traj[FTM_BATCH_SIZE].v, sizeof(trajMod[0].v)), - memcpy(&trajMod[0].w, &traj[FTM_BATCH_SIZE].w, sizeof(trajMod[0].w)) + memcpy(trajMod.e, &traj.e[FTM_BATCH_SIZE], sizeof(trajMod.e)), + memcpy(trajMod.x, &traj.x[FTM_BATCH_SIZE], sizeof(trajMod.x)), + memcpy(trajMod.y, &traj.y[FTM_BATCH_SIZE], sizeof(trajMod.y)), + 
memcpy(trajMod.z, &traj.z[FTM_BATCH_SIZE], sizeof(trajMod.z)), + memcpy(trajMod.i, &traj.i[FTM_BATCH_SIZE], sizeof(trajMod.i)), + memcpy(trajMod.j, &traj.j[FTM_BATCH_SIZE], sizeof(trajMod.j)), + memcpy(trajMod.k, &traj.k[FTM_BATCH_SIZE], sizeof(trajMod.k)), + memcpy(trajMod.u, &traj.u[FTM_BATCH_SIZE], sizeof(trajMod.u)), + memcpy(trajMod.v, &traj.v[FTM_BATCH_SIZE], sizeof(trajMod.v)), + memcpy(trajMod.w, &traj.w[FTM_BATCH_SIZE], sizeof(trajMod.w)) ); // Shift the time series back in the window for (shaped) X and Y - TERN_(HAS_X_AXIS, memcpy(&traj[0].x, &traj[FTM_BATCH_SIZE].x, sizeof(traj[0].x) / 2)); - TERN_(HAS_Y_AXIS, memcpy(&traj[0].y, &traj[FTM_BATCH_SIZE].y, sizeof(traj[0].y) / 2)); + TERN_(HAS_X_AXIS, memcpy(traj.x, &traj.x[FTM_BATCH_SIZE], sizeof(traj.x) / 2)); + TERN_(HAS_Y_AXIS, memcpy(traj.y, &traj.y[FTM_BATCH_SIZE], sizeof(traj.y) / 2)); // Z...W and E Disabled! Uncompensated so the lower half is not used. - //TERN_(HAS_Z_AXIS, memcpy(&traj[0].z, &traj[FTM_BATCH_SIZE].z, sizeof(traj[0].z) / 2)); + //TERN_(HAS_Z_AXIS, memcpy(&traj.z[0], &traj.z[FTM_BATCH_SIZE], sizeof(traj.z) / 2)); // ... data is ready in trajMod. batchRdyForInterp = true; @@ -246,134 +257,119 @@ void FxdTiCtrl::loop() { // Refresh the gains used by shaping functions. // To be called on init or mode or zeta change. 
- void FxdTiCtrl::updateShapingA(const_float_t zeta/*=FTM_SHAPING_ZETA*/, const_float_t vtol/*=FTM_SHAPING_V_TOL*/) { - - const float K = exp( -zeta * PI / sqrt(1.0f - sq(zeta)) ), - K2 = sq(K); - switch (cfg_mode) { - - case ftMotionMode_ZV: - shaping.max_i = 1U; - shaping.x.Ai[0] = 1.0f / (1.0f + K); - shaping.x.Ai[1] = shaping.x.Ai[0] * K; - break; - - case ftMotionMode_ZVD: - shaping.max_i = 2U; - shaping.x.Ai[0] = 1.0f / ( 1.0f + 2.0f * K + K2 ); - shaping.x.Ai[1] = shaping.x.Ai[0] * 2.0f * K; - shaping.x.Ai[2] = shaping.x.Ai[0] * K2; - break; + void FxdTiCtrl::Shaping::updateShapingA(const_float_t zeta/*=FTM_SHAPING_ZETA*/, const_float_t vtol/*=FTM_SHAPING_V_TOL*/) { + + const float K = exp(-zeta * PI / sqrt(1.0f - sq(zeta))), + K2 = sq(K); + + switch (cfg_mode) { + + case ftMotionMode_ZV: + max_i = 1U; + x.Ai[0] = 1.0f / (1.0f + K); + x.Ai[1] = x.Ai[0] * K; + break; + + case ftMotionMode_ZVD: + max_i = 2U; + x.Ai[0] = 1.0f / ( 1.0f + 2.0f * K + K2 ); + x.Ai[1] = x.Ai[0] * 2.0f * K; + x.Ai[2] = x.Ai[0] * K2; + break; + + case ftMotionMode_EI: { + max_i = 2U; + x.Ai[0] = 0.25f * (1.0f + vtol); + x.Ai[1] = 0.50f * (1.0f - vtol) * K; + x.Ai[2] = x.Ai[0] * K2; + const float A_adj = 1.0f / (x.Ai[0] + x.Ai[1] + x.Ai[2]); + for (uint32_t i = 0U; i < 3U; i++) { x.Ai[i] *= A_adj; } + } break; + + case ftMotionMode_2HEI: { + max_i = 3U; + const float vtol2 = sq(vtol); + const float X = pow(vtol2 * (sqrt(1.0f - vtol2) + 1.0f), 1.0f / 3.0f); + x.Ai[0] = ( 3.0f * sq(X) + 2.0f * X + 3.0f * vtol2 ) / (16.0f * X); + x.Ai[1] = ( 0.5f - x.Ai[0] ) * K; + x.Ai[2] = x.Ai[1] * K; + x.Ai[3] = x.Ai[0] * cu(K); + const float A_adj = 1.0f / (x.Ai[0] + x.Ai[1] + x.Ai[2] + x.Ai[3]); + for (uint32_t i = 0U; i < 4U; i++) { x.Ai[i] *= A_adj; } + } break; + + case ftMotionMode_3HEI: { + max_i = 4U; + x.Ai[0] = 0.0625f * ( 1.0f + 3.0f * vtol + 2.0f * sqrt( 2.0f * ( vtol + 1.0f ) * vtol ) ); + x.Ai[1] = 0.25f * ( 1.0f - vtol ) * K; + x.Ai[2] = ( 0.5f * ( 1.0f + vtol ) - 2.0f * x.Ai[0] ) * K2; 
+ x.Ai[3] = x.Ai[1] * K2; + x.Ai[4] = x.Ai[0] * sq(K2); + const float A_adj = 1.0f / (x.Ai[0] + x.Ai[1] + x.Ai[2] + x.Ai[3] + x.Ai[4]); + for (uint32_t i = 0U; i < 5U; i++) { x.Ai[i] *= A_adj; } + } break; + + case ftMotionMode_MZV: { + max_i = 2U; + const float B = 1.4142135623730950488016887242097f * K; + x.Ai[0] = 1.0f / (1.0f + B + K2); + x.Ai[1] = x.Ai[0] * B; + x.Ai[2] = x.Ai[0] * K2; + } break; + + default: + for (uint32_t i = 0U; i < 5U; i++) x.Ai[i] = 0.0f; + max_i = 0; + } + #if HAS_Y_AXIS + memcpy(y.Ai, x.Ai, sizeof(x.Ai)); // For now, zeta and vtol are shared across x and y. + #endif + } - case ftMotionMode_EI: { - shaping.max_i = 2U; - shaping.x.Ai[0] = 0.25f * (1.0f + vtol); - shaping.x.Ai[1] = 0.50f * (1.0f - vtol) * K; - shaping.x.Ai[2] = shaping.x.Ai[0] * K2; - const float A_adj = 1.0f / (shaping.x.Ai[0] + shaping.x.Ai[1] + shaping.x.Ai[2]); - for (uint32_t i = 0U; i < 3U; i++) { shaping.x.Ai[i] *= A_adj; } - } break; - - case ftMotionMode_2HEI: { - shaping.max_i = 3U; - const float vtol2 = sq(vtol); - const float X = pow(vtol2 * (sqrt(1.0f - vtol2) + 1.0f), 1.0f / 3.0f); - shaping.x.Ai[0] = ( 3.0f * sq(X) + 2.0f * X + 3.0f * vtol2 ) / (16.0f * X); - shaping.x.Ai[1] = ( 0.5f - shaping.x.Ai[0] ) * K; - shaping.x.Ai[2] = shaping.x.Ai[1] * K; - shaping.x.Ai[3] = shaping.x.Ai[0] * cu(K); - const float A_adj = 1.0f / (shaping.x.Ai[0] + shaping.x.Ai[1] + shaping.x.Ai[2] + shaping.x.Ai[3]); - for (uint32_t i = 0U; i < 4U; i++) { shaping.x.Ai[i] *= A_adj; } - } break; - - case ftMotionMode_3HEI: { - shaping.max_i = 4U; - shaping.x.Ai[0] = 0.0625f * ( 1.0f + 3.0f * vtol + 2.0f * sqrt( 2.0f * ( vtol + 1.0f ) * vtol ) ); - shaping.x.Ai[1] = 0.25f * ( 1.0f - vtol ) * K; - shaping.x.Ai[2] = ( 0.5f * ( 1.0f + vtol ) - 2.0f * shaping.x.Ai[0] ) * K2; - shaping.x.Ai[3] = shaping.x.Ai[1] * K2; - shaping.x.Ai[4] = shaping.x.Ai[0] * sq(K2); - const float A_adj = 1.0f / (shaping.x.Ai[0] + shaping.x.Ai[1] + shaping.x.Ai[2] + shaping.x.Ai[3] + shaping.x.Ai[4]); - for 
(uint32_t i = 0U; i < 5U; i++) { shaping.x.Ai[i] *= A_adj; } - } break; - - case ftMotionMode_MZV: { - shaping.max_i = 2U; - const float B = 1.4142135623730950488016887242097f * K; - shaping.x.Ai[0] = 1.0f / (1.0f + B + K2); - shaping.x.Ai[1] = shaping.x.Ai[0] * B; - shaping.x.Ai[2] = shaping.x.Ai[0] * K2; - } break; - - default: - for (uint32_t i = 0U; i < 5U; i++) shaping.x.Ai[i] = 0.0f; - shaping.max_i = 0; - } - #if HAS_Y_AXIS - memcpy(shaping.y.Ai, shaping.x.Ai, sizeof(shaping.x.Ai)); // For now, zeta and vtol are shared across x and y. - #endif + void FxdTiCtrl::updateShapingA(const_float_t zeta/*=FTM_SHAPING_ZETA*/, const_float_t vtol/*=FTM_SHAPING_V_TOL*/) { + shaping.updateShapingA(zeta, vtol); } // Refresh the indices used by shaping functions. // To be called when frequencies change. - void FxdTiCtrl::updateShapingN(const_float_t xf OPTARG(HAS_Y_AXIS, const_float_t yf), const_float_t zeta/*=FTM_SHAPING_ZETA*/) { + void FxdTiCtrl::AxisShaping::updateShapingN(const_float_t f, const_float_t df) { // Protections omitted for DBZ and for index exceeding array length. 
- - const float df = sqrt(1.0f - sq(zeta)); - switch (cfg_mode) { case ftMotionMode_ZV: - shaping.x.Ni[1] = round((0.5f / xf / df) * (FTM_FS)); - #if HAS_Y_AXIS - shaping.y.Ni[1] = round((0.5f / yf / df) * (FTM_FS)); - #endif + Ni[1] = round((0.5f / f / df) * (FTM_FS)); break; case ftMotionMode_ZVD: case ftMotionMode_EI: - shaping.x.Ni[1] = round((0.5f / xf / df) * (FTM_FS)); - shaping.x.Ni[2] = 2 * shaping.x.Ni[1]; - #if HAS_Y_AXIS - shaping.y.Ni[1] = round((0.5f / yf / df) * (FTM_FS)); - shaping.y.Ni[2] = 2 * shaping.y.Ni[1]; - #endif + Ni[1] = round((0.5f / f / df) * (FTM_FS)); + Ni[2] = Ni[1] + Ni[1]; break; case ftMotionMode_2HEI: - shaping.x.Ni[1] = round((0.5f / xf / df) * (FTM_FS)); - shaping.x.Ni[2] = 2 * shaping.x.Ni[1]; - shaping.x.Ni[3] = 3 * shaping.x.Ni[1]; - #if HAS_Y_AXIS - shaping.y.Ni[1] = round((0.5f / yf / df) * (FTM_FS)); - shaping.y.Ni[2] = 2 * shaping.y.Ni[1]; - shaping.y.Ni[3] = 3 * shaping.y.Ni[1]; - #endif + Ni[1] = round((0.5f / f / df) * (FTM_FS)); + Ni[2] = Ni[1] + Ni[1]; + Ni[3] = Ni[2] + Ni[1]; break; case ftMotionMode_3HEI: - shaping.x.Ni[1] = round((0.5f / xf / df) * (FTM_FS)); - shaping.x.Ni[2] = 2 * shaping.x.Ni[1]; - shaping.x.Ni[3] = 3 * shaping.x.Ni[1]; - shaping.x.Ni[4] = 4 * shaping.x.Ni[1]; - #if HAS_Y_AXIS - shaping.y.Ni[1] = round((0.5f / yf / df) * (FTM_FS)); - shaping.y.Ni[2] = 2 * shaping.y.Ni[1]; - shaping.y.Ni[3] = 3 * shaping.y.Ni[1]; - shaping.y.Ni[4] = 4 * shaping.y.Ni[1]; - #endif + Ni[1] = round((0.5f / f / df) * (FTM_FS)); + Ni[2] = Ni[1] + Ni[1]; + Ni[3] = Ni[2] + Ni[1]; + Ni[4] = Ni[3] + Ni[1]; break; case ftMotionMode_MZV: - shaping.x.Ni[1] = round((0.375f / xf / df) * (FTM_FS)); - shaping.x.Ni[2] = 2 * shaping.x.Ni[1]; - #if HAS_Y_AXIS - shaping.y.Ni[1] = round((0.375f / yf / df) * (FTM_FS)); - shaping.y.Ni[2] = 2 * shaping.y.Ni[1]; - #endif + Ni[1] = round((0.375f / f / df) * (FTM_FS)); + Ni[2] = Ni[1] + Ni[1]; break; - default: - for (uint32_t i = 0U; i < 5U; i++) { shaping.x.Ni[i] = 0; TERN_(HAS_Y_AXIS, 
shaping.y.Ni[i] = 0); } + default: ZERO(Ni); } } + void FxdTiCtrl::updateShapingN(const_float_t xf OPTARG(HAS_Y_AXIS, const_float_t yf), const_float_t zeta/*=FTM_SHAPING_ZETA*/) { + const float df = sqrt(1.0f - sq(zeta)); + shaping.x.updateShapingN(xf, df); + TERN_(HAS_Y_AXIS, shaping.y.updateShapingN(yf, df)); + } + #endif // HAS_X_AXIS // Reset all trajectory processing variables. @@ -381,8 +377,7 @@ void FxdTiCtrl::reset() { stepperCmdBuff_produceIdx = stepperCmdBuff_consumeIdx = 0; - for (uint32_t i = 0U; i < (FTM_BATCH_SIZE); i++) // Reset trajectory history - traj[i].reset(); + traj.reset(); // Reset trajectory history blockProcRdy = blockProcRdy_z1 = blockProcDn = false; batchRdy = batchRdyForInterp = false; @@ -399,9 +394,8 @@ void FxdTiCtrl::reset() { nextStepTicks = FTM_MIN_TICKS; #if HAS_X_AXIS - for (uint32_t i = 0U; i < (FTM_ZMAX); i++) { - shaping.x.d_zi[i] = 0.0f; TERN_(HAS_Y_AXIS, shaping.y.d_zi[i] = 0.0f); - } + for (uint32_t i = 0U; i < (FTM_ZMAX); i++) + shaping.x.d_zi[i] = TERN_(HAS_Y_AXIS, shaping.y.d_zi[i] =) 0.0f; shaping.zi_idx = 0; #endif @@ -476,7 +470,7 @@ void FxdTiCtrl::loadBlockData(block_t * const current_block) { odiff = oneby2a - oneby2d, // (i.e., oneby2a * 2) (mm/s) Change in speed for one second of acceleration ldiff = totalLength - fdiff; // (mm) Distance to travel if nominal speed is reached float T2 = (1.0f / F_n) * (ldiff - odiff * sq(F_n)); // (s) Coasting duration after nominal speed reached - if (T2 < 0.0f) { + if (T2 < 0.0f) { T2 = 0.0f; F_n = SQRT(ldiff / odiff); // Clip by intersection if nominal speed can't be reached. 
} @@ -544,37 +538,35 @@ void FxdTiCtrl::makeVector() { accel_k = decel_P; // (mm/s^2) Acceleration K factor from Decel phase } + NUM_AXIS_CODE( + traj.x[makeVector_batchIdx] = startPosn.x + ratio.x * dist, + traj.y[makeVector_batchIdx] = startPosn.y + ratio.y * dist, + traj.z[makeVector_batchIdx] = startPosn.z + ratio.z * dist, + traj.i[makeVector_batchIdx] = startPosn.i + ratio.i * dist, + traj.j[makeVector_batchIdx] = startPosn.j + ratio.j * dist, + traj.k[makeVector_batchIdx] = startPosn.k + ratio.k * dist, + traj.u[makeVector_batchIdx] = startPosn.u + ratio.u * dist, + traj.v[makeVector_batchIdx] = startPosn.v + ratio.v * dist, + traj.w[makeVector_batchIdx] = startPosn.w + ratio.w * dist + ); + #if HAS_EXTRUDERS const float new_raw_z1 = startPosn.e + ratio.e * dist; - float eTraj; if (cfg_linearAdvEna) { float dedt_adj = (new_raw_z1 - e_raw_z1) * (FTM_FS); if (ratio.e > 0.0f) dedt_adj += accel_k * cfg_linearAdvK; e_advanced_z1 += dedt_adj * (FTM_TS); - eTraj = e_advanced_z1; + traj.e[makeVector_batchIdx] = e_advanced_z1; e_raw_z1 = new_raw_z1; } else { - eTraj = new_raw_z1; + traj.e[makeVector_batchIdx] = new_raw_z1; // Alternatively: coordArray_e[makeVector_batchIdx] = e_startDist + extrusion / (N1 + N2 + N3); } #endif - LOGICAL_AXIS_CODE( - traj[makeVector_batchIdx].e = eTraj, - traj[makeVector_batchIdx].x = startPosn.x + ratio.x * dist, - traj[makeVector_batchIdx].y = startPosn.y + ratio.y * dist, - traj[makeVector_batchIdx].z = startPosn.z + ratio.z * dist, - traj[makeVector_batchIdx].i = startPosn.i + ratio.i * dist, - traj[makeVector_batchIdx].j = startPosn.j + ratio.j * dist, - traj[makeVector_batchIdx].k = startPosn.k + ratio.k * dist, - traj[makeVector_batchIdx].u = startPosn.u + ratio.u * dist, - traj[makeVector_batchIdx].v = startPosn.v + ratio.v * dist, - traj[makeVector_batchIdx].w = startPosn.w + ratio.w * dist - ); - // Update shaping parameters if needed. 
#if HAS_Z_AXIS static float zd_z1 = 0.0f; @@ -583,11 +575,11 @@ void FxdTiCtrl::makeVector() { #if HAS_Z_AXIS case dynFreqMode_Z_BASED: - if (traj[makeVector_batchIdx].z != zd_z1) { // Only update if Z changed. - const float xf = cfg_baseFreq[0] + cfg_dynFreqK[0] * traj[makeVector_batchIdx].z, - yf = cfg_baseFreq[1] + cfg_dynFreqK[1] * traj[makeVector_batchIdx].z; + if (traj.z[makeVector_batchIdx] != zd_z1) { // Only update if Z changed. + const float xf = cfg_baseFreq[0] + cfg_dynFreqK[0] * traj.z[makeVector_batchIdx], + yf = cfg_baseFreq[1] + cfg_dynFreqK[1] * traj.z[makeVector_batchIdx]; updateShapingN(_MAX(xf, FTM_MIN_SHAPE_FREQ), _MAX(yf, FTM_MIN_SHAPE_FREQ)); - zd_z1 = traj[makeVector_batchIdx].z; + zd_z1 = traj.z[makeVector_batchIdx]; } break; #endif @@ -596,8 +588,8 @@ void FxdTiCtrl::makeVector() { case dynFreqMode_MASS_BASED: // Update constantly. The optimization done for Z value makes // less sense for E, as E is expected to constantly change. - updateShapingN( cfg_baseFreq[0] + cfg_dynFreqK[0] * traj[makeVector_batchIdx].e - OPTARG(HAS_Y_AXIS, cfg_baseFreq[1] + cfg_dynFreqK[1] * traj[makeVector_batchIdx].e) ); + updateShapingN( cfg_baseFreq[0] + cfg_dynFreqK[0] * traj.e[makeVector_batchIdx] + OPTARG(HAS_Y_AXIS, cfg_baseFreq[1] + cfg_dynFreqK[1] * traj.e[makeVector_batchIdx]) ); break; #endif @@ -607,18 +599,18 @@ void FxdTiCtrl::makeVector() { // Apply shaping if in mode. 
#if HAS_X_AXIS if (WITHIN(cfg_mode, 10U, 19U)) { - shaping.x.d_zi[shaping.zi_idx] = traj[makeVector_batchIdx].x; - traj[makeVector_batchIdx].x *= shaping.x.Ai[0]; + shaping.x.d_zi[shaping.zi_idx] = traj.x[makeVector_batchIdx]; + traj.x[makeVector_batchIdx] *= shaping.x.Ai[0]; #if HAS_Y_AXIS - shaping.y.d_zi[shaping.zi_idx] = traj[makeVector_batchIdx].y; - traj[makeVector_batchIdx].y *= shaping.y.Ai[0]; + shaping.y.d_zi[shaping.zi_idx] = traj.y[makeVector_batchIdx]; + traj.y[makeVector_batchIdx] *= shaping.y.Ai[0]; #endif for (uint32_t i = 1U; i <= shaping.max_i; i++) { const uint32_t udiffx = shaping.zi_idx - shaping.x.Ni[i]; - traj[makeVector_batchIdx].x += shaping.x.Ai[i] * shaping.x.d_zi[shaping.x.Ni[i] > shaping.zi_idx ? (FTM_ZMAX) + udiffx : udiffx]; + traj.x[makeVector_batchIdx] += shaping.x.Ai[i] * shaping.x.d_zi[shaping.x.Ni[i] > shaping.zi_idx ? (FTM_ZMAX) + udiffx : udiffx]; #if HAS_Y_AXIS const uint32_t udiffy = shaping.zi_idx - shaping.y.Ni[i]; - traj[makeVector_batchIdx].y += shaping.y.Ai[i] * shaping.y.d_zi[shaping.y.Ni[i] > shaping.zi_idx ? (FTM_ZMAX) + udiffy : udiffy]; + traj.y[makeVector_batchIdx] += shaping.y.Ai[i] * shaping.y.d_zi[shaping.y.Ni[i] > shaping.zi_idx ? (FTM_ZMAX) + udiffy : udiffy]; #endif } if (++shaping.zi_idx == (FTM_ZMAX)) shaping.zi_idx = 0; @@ -647,16 +639,16 @@ void FxdTiCtrl::convertToSteps(const uint32_t idx) { //#define STEPS_ROUNDING #if ENABLED(STEPS_ROUNDING) const xyze_float_t steps_tar = LOGICAL_AXIS_ARRAY( - trajMod[idx].e * planner.settings.axis_steps_per_mm[E_AXIS_N(0)] + (trajMod[idx].e < 0.0f ? -0.5f : 0.5f), // May be eliminated if guaranteed positive. - trajMod[idx].x * planner.settings.axis_steps_per_mm[X_AXIS] + (trajMod[idx].x < 0.0f ? -0.5f : 0.5f), - trajMod[idx].y * planner.settings.axis_steps_per_mm[Y_AXIS] + (trajMod[idx].y < 0.0f ? -0.5f : 0.5f), - trajMod[idx].z * planner.settings.axis_steps_per_mm[Z_AXIS] + (trajMod[idx].z < 0.0f ? 
-0.5f : 0.5f), - trajMod[idx].i * planner.settings.axis_steps_per_mm[I_AXIS] + (trajMod[idx].i < 0.0f ? -0.5f : 0.5f), - trajMod[idx].j * planner.settings.axis_steps_per_mm[J_AXIS] + (trajMod[idx].j < 0.0f ? -0.5f : 0.5f), - trajMod[idx].k * planner.settings.axis_steps_per_mm[K_AXIS] + (trajMod[idx].k < 0.0f ? -0.5f : 0.5f), - trajMod[idx].u * planner.settings.axis_steps_per_mm[U_AXIS] + (trajMod[idx].u < 0.0f ? -0.5f : 0.5f), - trajMod[idx].v * planner.settings.axis_steps_per_mm[V_AXIS] + (trajMod[idx].v < 0.0f ? -0.5f : 0.5f), - trajMod[idx].w * planner.settings.axis_steps_per_mm[W_AXIS] + (trajMod[idx].w < 0.0f ? -0.5f : 0.5f), + trajMod.e[idx] * planner.settings.axis_steps_per_mm[E_AXIS_N(0)] + (trajMod.e[idx] < 0.0f ? -0.5f : 0.5f), // May be eliminated if guaranteed positive. + trajMod.x[idx] * planner.settings.axis_steps_per_mm[X_AXIS] + (trajMod.x[idx] < 0.0f ? -0.5f : 0.5f), + trajMod.y[idx] * planner.settings.axis_steps_per_mm[Y_AXIS] + (trajMod.y[idx] < 0.0f ? -0.5f : 0.5f), + trajMod.z[idx] * planner.settings.axis_steps_per_mm[Z_AXIS] + (trajMod.z[idx] < 0.0f ? -0.5f : 0.5f), + trajMod.i[idx] * planner.settings.axis_steps_per_mm[I_AXIS] + (trajMod.i[idx] < 0.0f ? -0.5f : 0.5f), + trajMod.j[idx] * planner.settings.axis_steps_per_mm[J_AXIS] + (trajMod.j[idx] < 0.0f ? -0.5f : 0.5f), + trajMod.k[idx] * planner.settings.axis_steps_per_mm[K_AXIS] + (trajMod.k[idx] < 0.0f ? -0.5f : 0.5f), + trajMod.u[idx] * planner.settings.axis_steps_per_mm[U_AXIS] + (trajMod.u[idx] < 0.0f ? -0.5f : 0.5f), + trajMod.v[idx] * planner.settings.axis_steps_per_mm[V_AXIS] + (trajMod.v[idx] < 0.0f ? -0.5f : 0.5f), + trajMod.w[idx] * planner.settings.axis_steps_per_mm[W_AXIS] + (trajMod.w[idx] < 0.0f ? 
-0.5f : 0.5f), ); xyze_long_t delta = xyze_long_t(steps_tar) - steps; //const xyze_long_t delta = LOGICAL_AXIS_ARRAY( @@ -673,16 +665,16 @@ void FxdTiCtrl::convertToSteps(const uint32_t idx) { //); #else xyze_long_t delta = LOGICAL_AXIS_ARRAY( - delta.e = int32_t(trajMod[idx].e * planner.settings.axis_steps_per_mm[E_AXIS_N(0)]) - steps.e, - delta.x = int32_t(trajMod[idx].x * planner.settings.axis_steps_per_mm[X_AXIS]) - steps.x, - delta.y = int32_t(trajMod[idx].y * planner.settings.axis_steps_per_mm[Y_AXIS]) - steps.y, - delta.z = int32_t(trajMod[idx].z * planner.settings.axis_steps_per_mm[Z_AXIS]) - steps.z, - delta.i = int32_t(trajMod[idx].i * planner.settings.axis_steps_per_mm[I_AXIS]) - steps.i, - delta.j = int32_t(trajMod[idx].j * planner.settings.axis_steps_per_mm[J_AXIS]) - steps.j, - delta.k = int32_t(trajMod[idx].k * planner.settings.axis_steps_per_mm[K_AXIS]) - steps.k, - delta.u = int32_t(trajMod[idx].u * planner.settings.axis_steps_per_mm[U_AXIS]) - steps.u, - delta.v = int32_t(trajMod[idx].v * planner.settings.axis_steps_per_mm[V_AXIS]) - steps.v, - delta.w = int32_t(trajMod[idx].w * planner.settings.axis_steps_per_mm[W_AXIS]) - steps.w + delta.e = int32_t(trajMod.e[idx] * planner.settings.axis_steps_per_mm[E_AXIS_N(0)]) - steps.e, + delta.x = int32_t(trajMod.x[idx] * planner.settings.axis_steps_per_mm[X_AXIS]) - steps.x, + delta.y = int32_t(trajMod.y[idx] * planner.settings.axis_steps_per_mm[Y_AXIS]) - steps.y, + delta.z = int32_t(trajMod.z[idx] * planner.settings.axis_steps_per_mm[Z_AXIS]) - steps.z, + delta.i = int32_t(trajMod.i[idx] * planner.settings.axis_steps_per_mm[I_AXIS]) - steps.i, + delta.j = int32_t(trajMod.j[idx] * planner.settings.axis_steps_per_mm[J_AXIS]) - steps.j, + delta.k = int32_t(trajMod.k[idx] * planner.settings.axis_steps_per_mm[K_AXIS]) - steps.k, + delta.u = int32_t(trajMod.u[idx] * planner.settings.axis_steps_per_mm[U_AXIS]) - steps.u, + delta.v = int32_t(trajMod.v[idx] * planner.settings.axis_steps_per_mm[V_AXIS]) - 
steps.v,
+      delta.w = int32_t(trajMod.w[idx] * planner.settings.axis_steps_per_mm[W_AXIS]) - steps.w
     );
   #endif
diff --git a/Marlin/src/module/ft_motion.h b/Marlin/src/module/ft_motion.h
index 1ff7e49992cb1..bce6ae5029310 100644
--- a/Marlin/src/module/ft_motion.h
+++ b/Marlin/src/module/ft_motion.h
@@ -73,7 +73,8 @@ class FxdTiCtrl {
 
   private:
 
-    static xyze_float_t traj[2 * (FTM_BATCH_SIZE)], trajMod[FTM_BATCH_SIZE];
+    static xyze_trajectory_t traj;
+    static xyze_trajectoryMod_t trajMod;
 
     static block_t *current_block_cpy;
     static bool blockProcRdy, blockProcRdy_z1, blockProcDn;
@@ -113,15 +114,21 @@ class FxdTiCtrl {
       float d_zi[FTM_ZMAX] = { 0.0f };  // Data point delay vector.
       float Ai[5];                      // Shaping gain vector.
       uint32_t Ni[5];                   // Shaping time index vector.
+
+      void updateShapingN(const_float_t f, const_float_t df);
+
     } axis_shaping_t;
 
     typedef struct Shaping {
-      static uint32_t zi_idx,           // Index of storage in the data point delay vectors.
-                      max_i;            // Vector length for the selected shaper.
+      uint32_t zi_idx,                  // Index of storage in the data point delay vectors.
+               max_i;                   // Vector length for the selected shaper.
       axis_shaping_t x;
       #if HAS_Y_AXIS
        axis_shaping_t y;
       #endif
+
+      void updateShapingA(const_float_t zeta=FTM_SHAPING_ZETA, const_float_t vtol=FTM_SHAPING_V_TOL);
+
     } shaping_t;
 
     static shaping_t shaping; // Shaping data
diff --git a/Marlin/src/module/ft_types.h b/Marlin/src/module/ft_types.h
index bf3cbd86feadc..870d486bcc72e 100644
--- a/Marlin/src/module/ft_types.h
+++ b/Marlin/src/module/ft_types.h
@@ -48,6 +48,9 @@ enum stepDirState_t {
   stepDirState_NEG = 2U
 };
 
+typedef struct XYZEarray<float, 2 * (FTM_BATCH_SIZE)> xyze_trajectory_t;
+typedef struct XYZEarray<float, FTM_BATCH_SIZE> xyze_trajectoryMod_t;
+
 typedef struct XYZEval<stepDirState_t> xyze_stepDir_t;
 
 enum {