diff --git a/.github/workflows/flucoma-core-ci.yml b/.github/workflows/flucoma-core-ci.yml
index d56dda8ef..8cfd964e8 100644
--- a/.github/workflows/flucoma-core-ci.yml
+++ b/.github/workflows/flucoma-core-ci.yml
@@ -35,5 +35,5 @@ jobs:
     - name: Test
       working-directory: ${{github.workspace}}/build
-      run: ctest -C ${{env.BUILD_TYPE}} -j3
+      run: ctest -C ${{env.BUILD_TYPE}} -j3 --output-on-failure
diff --git a/include/algorithms/public/TransientExtraction.hpp b/include/algorithms/public/TransientExtraction.hpp
index 8bb39fb02..7773db1ad 100644
--- a/include/algorithms/public/TransientExtraction.hpp
+++ b/include/algorithms/public/TransientExtraction.hpp
@@ -82,6 +82,9 @@ class TransientExtraction

   index detect(const double* input, index inSize)
   {
+    assert(mInitialized && "Call init() before processing");
+    assert(modelOrder() >= mDetectHalfWindow &&
+           "model order needs to be >= half filter size");
     frame(input, inSize);
     analyze();
     detection();
@@ -91,7 +94,9 @@ class TransientExtraction
   void process(const RealVectorView input, RealVectorView transients,
                RealVectorView residual)
   {
-    assert(mInitialized);
+    assert(mInitialized && "Call init() before processing");
+    assert(modelOrder() >= mDetectHalfWindow &&
+           "model order needs to be >= half filter size");
     index inSize = input.extent(0);
     frame(input.data(), inSize);
     analyze();
@@ -102,6 +107,9 @@ class TransientExtraction
   void process(const RealVectorView input, const RealVectorView unknowns,
                RealVectorView transients, RealVectorView residual)
   {
+    assert(mInitialized && "Call init() before processing");
+    assert(modelOrder() >= mDetectHalfWindow &&
+           "model order needs to be >= half filter size");
     index inSize = input.extent(0);
     std::copy(unknowns.data(), unknowns.data() + hopSize(), mDetect.data());
     mCount = 0;
@@ -130,7 +138,8 @@ class TransientExtraction
   void analyze()
   {
     mModel.setMinVariance(0.0000001);
-    mModel.estimate(mInput.data() + modelOrder(), analysisSize());
+    mModel.estimate(FluidTensorView<const double, 1>(
+        mInput.data(), modelOrder(), analysisSize()));
   }

   void detection()
@@ -139,18 +148,27 @@ class TransientExtraction
   {
     // Forward and backward error
     const double normFactor = 1.0 / sqrt(mModel.variance());

-    errorCalculation<&ARModel::forwardErrorArray>(
-        mForwardError.data(), input, blockSize() + mDetectHalfWindow + 1,
-        normFactor);
-    errorCalculation<&ARModel::backwardErrorArray>(
-        mBackwardError.data(), input, blockSize() + mDetectHalfWindow + 1,
-        normFactor);
+
+    auto inputView = FluidTensorView<const double, 1>(
+        mInput.data(), modelOrder() + padSize(), blockSize() + modelOrder());
+    auto fwdError = FluidTensorView<double, 1>(mForwardError.data(), 0,
+                                               blockSize() + mDetectHalfWindow);
+    auto backError = FluidTensorView<double, 1>(
+        mBackwardError.data(), 0, blockSize() + mDetectHalfWindow);
+
+    errorCalculation<&ARModel::forwardErrorArray>(inputView, fwdError,
+                                                  normFactor);
+    errorCalculation<&ARModel::backwardErrorArray>(inputView, backError,
+                                                   normFactor);

     // Window error functions (brute force convolution)
-    windowError(mForwardWindowedError.data(),
-                mForwardError.data() + modelOrder(), hopSize());
-    windowError(mBackwardWindowedError.data(),
-                mBackwardError.data() + modelOrder(), hopSize());
+    auto fwdWindowedError =
+        FluidTensorView<double, 1>(mForwardWindowedError.data(), 0, hopSize());
+    auto backWindowedError =
+        FluidTensorView<double, 1>(mBackwardWindowedError.data(), 0, hopSize());
+
+    windowError(fwdError(Slice(modelOrder())), fwdWindowedError);
+    windowError(backError(Slice(modelOrder())), backWindowedError);

     // Detection
     index count = 0;
@@ -164,7 +182,9 @@ class TransientExtraction
     {
       if (!click && (mBackwardWindowedError[asUnsigned(i)] > loThresh) &&
          (mForwardWindowedError[asUnsigned(i)] > hiThresh))
-      { click = true; }
+      {
+        click = true;
+      }
      else if (click && (mBackwardWindowedError[asUnsigned(i)] < loThresh))
      {
        click = false;
@@ -284,7 +304,7 @@ class TransientExtraction
       residual[i] = input[i + order];
     }

-    if (mRefine) refine(residual, size, Au, u);
+    if (mRefine) refine(FluidTensorView<double, 1>(residual, 0, size), Au, u);

     for (index i = 0; i < (size - order); i++)
       transients[i] = input[i + order] - residual[i];
@@ -294,17 +314,19 @@ class TransientExtraction
               mInput.data() + padSize() + order + order);
   }

-  void refine(double* io, index size, Eigen::MatrixXd& Au, Eigen::MatrixXd& ls)
+  void refine(FluidTensorView<double, 1> io, Eigen::MatrixXd& Au,
+              Eigen::MatrixXd& ls)
   {
     const double energy = mModel.variance() * mCount;
     double energyLS = 0.0;
     index order = modelOrder();
+    index size = io.size();

     for (index i = 0; i < (size - order); i++)
     {
       if (mDetect[asUnsigned(i)] != 0)
       {
-        const double error = mModel.forwardError(io + i);
+        const double error = mModel.forwardError(io(Slice(i)));
         energyLS += error * error;
       }
     }
@@ -341,44 +363,50 @@ class TransientExtraction
     return sum;
   }

-  template <void (ARModel::*Method)(double*, const double*, index)>
-  void errorCalculation(double* error, const double* input, index size,
-                        double normFactor)
+  template <void (ARModel::*Method)(FluidTensorView<const double, 1>,
+                                    FluidTensorView<double, 1>)>
+  void errorCalculation(FluidTensorView<const double, 1> input,
+                        FluidTensorView<double, 1> error, double normFactor)
   {
-    (mModel.*Method)(error, input, size);
+    (mModel.*Method)(input, error);

     // Take absolutes and normalise
-    for (index i = 0; i < size; i++)
+    for (index i = 0; i < error.size(); i++)
       error[i] = std::fabs(error[i]) * normFactor;
   }

   // Triangle window
   double calcWindow(double norm) { return std::min(norm, 1.0 - norm); }

-  void windowError(double* errorWindowed, const double* error, index size)
+  void windowError(FluidTensorView<double, 1> error,
+                   FluidTensorView<double, 1> errorWindowed)
   {
-    const index windowSize = mDetectHalfWindow * 2 + 1;
-    const index windowOffset = mDetectHalfWindow;
+    assert(error.descriptor().start >= mDetectHalfWindow &&
+           "insufficient offset for filter size");
+    assert(error.size() > errorWindowed.size() - 1 + (mDetectHalfWindow) &&
+           "insufficient input for filter size");
+
+    const index windowSize = mDetectHalfWindow * 2 + 3;
+    const index windowOffset = mDetectHalfWindow + 1;
     const double powFactor = mDetectPowerFactor;

     // Calculate window normalisation factor
     double windowNormFactor = 0.0;

     for (index j = 0; j < windowSize; j++)
-      windowNormFactor += calcWindow((double) j / windowSize);
+      windowNormFactor += calcWindow((double) j / (windowSize - 1));

     windowNormFactor = 1.0 / windowNormFactor;

     // Do window processing
-    for (index i = 0; i < size; i++)
+    for (index i = 0; i < errorWindowed.size(); i++)
     {
       double windowed = 0.0;

-      for (index j = 1; j < windowSize; j++)
+      for (index j = 1; j < windowSize - 1; j++)
       {
         const double value = pow(fabs(error[i - windowOffset + j]), powFactor);
-        windowed += value * calcWindow((double) j / windowSize);
-        ;
+        windowed += value * calcWindow((double) j / (windowSize - 1));
       }

       errorWindowed[i] = pow((windowed * windowNormFactor), 1.0 / powFactor);
diff --git a/include/algorithms/util/ARModel.hpp b/include/algorithms/util/ARModel.hpp
index 76296b350..6b8607782 100644
--- a/include/algorithms/util/ARModel.hpp
+++ b/include/algorithms/util/ARModel.hpp
@@ -11,9 +11,11 @@ under the European Union’s Horizon 2020 research and innovation programme
 #pragma once

 #include "ConvolutionTools.hpp"
+#include "FluidEigenMappings.hpp"
 #include "Toeplitz.hpp"
 #include "../public/WindowFuncs.hpp"
 #include "../../data/FluidIndex.hpp"
+#include "../../data/FluidTensor.hpp"
 #include
 #include
 #include
@@ -36,79 +38,121 @@ class ARModel

   double variance() const { return mVariance; }
   index order() const { return mParameters.size(); }

-  void estimate(const double* input, index size, index nIterations = 3,
+  void estimate(FluidTensorView<const double, 1> input, index nIterations = 3,
                 double robustFactor = 3.0)
   {
     if (nIterations > 0)
-      robustEstimate(input, size, nIterations, robustFactor);
+      robustEstimate(input, nIterations, robustFactor);
     else
-      directEstimate(input, size, true);
+      directEstimate(input, true);
   }

-  double fowardPrediction(const double* input)
+  double fowardPrediction(FluidTensorView<const double, 1> input)
   {
-    return modelPredict<std::negate<index>>(input);
+    double prediction;
+    modelPredict(input, FluidTensorView<double, 1>(&prediction, 0, 1),
+                 std::negate{}, Predict{});
+    return prediction;
   }

-  double backwardPrediction(const double* input)
+  double backwardPrediction(FluidTensorView<const double, 1> input)
   {
-    struct Identity
-    {
-      index operator()(index a) { return a; }
-    };
-    return modelPredict<Identity>(input);
+    double prediction;
+    modelPredict(input, FluidTensorView<double, 1>(&prediction, 0, 1),
+                 Identity{}, Predict{});
+    return prediction;
   }

-  double forwardError(const double* input)
+  double forwardError(FluidTensorView<const double, 1> input)
   {
-    return modelError<&ARModel::fowardPrediction>(input);
+    double error;
+    forwardErrorArray(input, FluidTensorView<double, 1>(&error, 0, 1));
+    return error;
   }

-  double backwardError(const double* input)
+  double backwardError(FluidTensorView<const double, 1> input)
   {
-    return modelError<&ARModel::backwardPrediction>(input);
+    double error;
+    backwardErrorArray(input, FluidTensorView<double, 1>(&error, 0, 1));
+    return error;
   }

-  void forwardErrorArray(double* errors, const double* input, index size)
+  void forwardErrorArray(FluidTensorView<const double, 1> input,
+                         FluidTensorView<double, 1> errors)
   {
-    modelErrorArray<&ARModel::forwardError>(errors, input, size);
+    modelPredict(input, errors, std::negate{}, Error{});
   }

-  void backwardErrorArray(double* errors, const double* input, index size)
+  void backwardErrorArray(FluidTensorView<const double, 1> input,
+                          FluidTensorView<double, 1> errors)
   {
-    modelErrorArray<&ARModel::backwardError>(errors, input, size);
+    modelPredict(input, errors, Identity{}, Error{});
   }

   void setMinVariance(double variance) { mMinVariance = variance; }

 private:

-  template <typename Op>
-  double modelPredict(const double* input)
+  struct Identity
   {
-    double estimate = 0.0;
-
-    for (index i = 0; i < mParameters.size(); i++)
-      estimate += mParameters(i) * input[Op()(i + 1)];
+    index operator()(index a) { return a; }
+  };

-    return estimate;
-  }
+  struct Predict
+  {
+    double operator()(double, double estimate) { return estimate; }
+  };

-  template <double (ARModel::*Method)(const double*)>
-  double modelError(const double* input)
+  struct Error
   {
-    return input[0] - (this->*Method)(input);
-  }
+    double operator()(double input, double estimate)
+    {
+      return input - estimate;
+    }
+  };

-  template <double (ARModel::*Method)(const double*)>
-  void modelErrorArray(double* errors, const double* input, index size)
+  template <typename Indexer, typename OutputFn>
+  void modelPredict(FluidTensorView<const double, 1> input,
+                    FluidTensorView<double, 1> output, Indexer fIdx,
+                    OutputFn fOut)
   {
-    for (index i = 0; i < size; i++) errors[i] = (this->*Method)(input + i);
+    index numPredictions = output.size();
+
+    assert(fIdx(1) >= -input.descriptor().start &&
+           "array bounds error in AR model prediction: input too short");
+    assert(fIdx(1) < input.size() &&
+           "array bounds error in AR model prediction: input too short");
+    assert(fIdx(mParameters.size()) >= -input.descriptor().start &&
+           "array bounds error in AR model prediction: input too short");
+    assert(fIdx(mParameters.size()) < input.size() &&
+           "array bounds error in AR model prediction: input too short");
+    assert(((numPredictions - 1) + fIdx(1)) >= -input.descriptor().start &&
+           "array bounds error in AR model prediction: input too short");
+    assert(((numPredictions - 1) + fIdx(1)) < input.size() &&
+           "array bounds error in AR model prediction: input too short");
+    assert(((numPredictions - 1) + fIdx(mParameters.size())) >=
+               -input.descriptor().start &&
+           "array bounds error in AR model prediction: input too short");
+    assert(((numPredictions - 1) + fIdx(mParameters.size())) < input.size() &&
+           "array bounds error in AR model prediction: input too short");
+
+    const double* input_ptr = input.data();
+
+    for (index p = 0; p < numPredictions; p++)
+    {
+      double estimate = 0;
+      for (index i = 0; i < mParameters.size(); i++)
+        estimate += mParameters(i) * (input_ptr + p)[fIdx(i + 1)];
+      output[p] = fOut(input_ptr[p], estimate);
+    }
   }

-  void directEstimate(const double* input, index size, bool updateVariance)
+  void directEstimate(FluidTensorView<const double, 1> input,
+                      bool updateVariance)
   {
+    index size = input.size();
+
     // copy input to a 32 byte aligned block (otherwise risk segfaults on Linux)
-    VectorXd frame = Eigen::Map<const VectorXd>(input, size);
+    VectorXd frame = _impl::asEigen<Eigen::Matrix>(input);

     if (mUseWindow)
     {
@@ -121,7 +165,6 @@ class ARModel
       frame.array() *= mWindow;
     }

-    VectorXd autocorrelation(size);
     algorithm::autocorrelateReal(autocorrelation.data(), frame.data(),
                                  asUnsigned(size));
@@ -154,25 +197,27 @@ class ARModel
     }
   }

-  void robustEstimate(const double* input, index size, index nIterations,
+  void robustEstimate(FluidTensorView<const double, 1> input, index nIterations,
                       double robustFactor)
   {
-    std::vector<double> estimates(asUnsigned(size + mParameters.size()));
+    FluidTensor<double, 1> estimates(input.size() + mParameters.size());

     // Calculate an initial estimate of parameters
-    directEstimate(input, size, true);
+    directEstimate(input, true);
+
+    assert(input.descriptor().start >= mParameters.size() &&
+           "too little offset into input data");

     // Initialise Estimates
-    for (index i = mParameters.size(); i < mParameters.size() + size; i++)
-      estimates[asUnsigned(i)] = input[i - mParameters.size()];
+    for (index i = 0; i < input.size() + mParameters.size(); i++)
+      estimates[i] = input.data()[i - mParameters.size()];

     // Variance
-    robustVariance(estimates.data() + mParameters.size(), input, size,
-                   robustFactor);
+    robustVariance(estimates(Slice(mParameters.size())), input, robustFactor);

     // Iterate
     for (index iterations = nIterations; iterations--;)
-      robustIteration(estimates.data() + mParameters.size(), input, size,
+      robustIteration(estimates(Slice(mParameters.size())), input,
                       robustFactor);
   }
@@ -181,38 +226,40 @@ class ARModel
     return cs * psiFunction((input - prediction) / cs);
   }

-  void robustVariance(double* estimates, const double* input, index size,
-                      double robustFactor)
+  void robustVariance(FluidTensorView<double, 1> estimates,
+                      FluidTensorView<const double, 1> input,
+                      double robustFactor)
   {
     double residualSqSum = 0.0;

     // Iterate to find new filtered input
-    for (index i = 0; i < size; i++)
+    for (index i = 0; i < input.size(); i++)
     {
       const double residual =
-          robustResidual(input[i], fowardPrediction(estimates + i),
+          robustResidual(input[i], fowardPrediction(estimates(Slice(i))),
                          robustFactor * sqrt(mVariance));
       residualSqSum += residual * residual;
     }

-    setVariance(residualSqSum / size);
+    setVariance(residualSqSum / input.size());
   }

-  void robustIteration(double* estimates, const double* input, index size,
-                       double robustFactor)
+  void robustIteration(FluidTensorView<double, 1> estimates,
+                       FluidTensorView<const double, 1> input,
+                       double robustFactor)
   {
     // Iterate to find new filtered input
-    for (index i = 0; i < size; i++)
+    for (index i = 0; i < input.size(); i++)
     {
-      const double prediction = fowardPrediction(estimates);
-      estimates[0] =
+      const double prediction = fowardPrediction(estimates(Slice(i)));
+      estimates[i] =
           prediction + robustResidual(input[i], prediction,
                                       robustFactor * sqrt(mVariance));
     }

     // New parameters
-    directEstimate(estimates, size, false);
-    robustVariance(estimates, input, size, robustFactor);
+    directEstimate(estimates(Slice(0, input.size())), false);
+    robustVariance(estimates(Slice(0, input.size())), input, robustFactor);
   }

   void setVariance(double variance)
diff --git a/tests/algorithms/public/TestTransientSlice.cpp b/tests/algorithms/public/TestTransientSlice.cpp
index b50f50104..55f9915e8 100644
--- a/tests/algorithms/public/TestTransientSlice.cpp
+++ b/tests/algorithms/public/TestTransientSlice.cpp
@@ -1,12 +1,16 @@
-#define CATCH_CONFIG_MAIN
+#define APPROVALS_CATCH
+#define CATCH_CONFIG_MAIN
+// #include
+#include
 #include "SlicerTestHarness.hpp"
+
 #include
-#include
 #include
 #include
 #include
 #include
+
 #include
 #include
 #include
@@ -24,6 +28,9 @@ using WindowSize = StrongType;
 using ClumpLength = StrongType;
 using MinSliceLength = StrongType;

+auto directoryDisposer =
+    ApprovalTests::Approvals::useApprovalsSubdirectory("approval_tests");
+
 struct TestParams
 {
   Order order;
@@ -107,20 +114,16 @@ TEST_CASE("TransientSlice is predictable on sharp sine bursts",
           "[TransientSlice][slicers]")
 {
   auto source = testsignals::sharpSines();
-  auto expected = std::vector<index>{1000, 22050, 33075};
-
+
   auto params = TestParams{Order(20),      BlockSize(256),  Padding(128),
                            Skew(0),        ThreshFwd(2),    ThreshBack(1.1),
                            WindowSize(14), ClumpLength(25), MinSliceLength(1000)};
-  auto matcher = Catch::Matchers::Approx(expected);
-  index margin = 8;
-  matcher.margin(margin);

   auto result = runTest(source, params);

-  CHECK_THAT(result, matcher);
+  ApprovalTests::Approvals::verifyAll("Detected", result);
 }

@@ -128,48 +131,31 @@ TEST_CASE("TransientSlice is predictable on real material",
           "[TransientSlice][slicers]")
 {
   auto source = testsignals::monoEurorackSynth();
-  auto expected = std::vector<index>{
-      144,    19188,  34706,  47223,  49465,  58299,  68185,  86942,  105689,
-      106751, 117438, 139521, 152879, 161525, 167573, 179045, 186295, 205049,
-      223795, 248985, 250356, 256304, 263609, 280169, 297483, 306502, 310674,
-      312505, 319114, 327659, 335217, 346778, 364673, 368356, 384718, 400937,
-      431226, 433295, 434501, 435764, 439536, 441625, 444028, 445795, 452031,
-      453392, 465467, 481514, 494518, 496119, 505754, 512477, 514270};

   auto params = TestParams{Order(20),      BlockSize(256),  Padding(128),
                            Skew(0),        ThreshFwd(2),    ThreshBack(1.1),
                            WindowSize(14), ClumpLength(25), MinSliceLength(1000)};
-  auto matcher = Catch::Matchers::Approx(expected);
-  index margin = 1;
-  matcher.margin(margin);

   auto result = runTest(source, params);

-  CHECK_THAT(result, matcher);
+  ApprovalTests::Approvals::verifyAll("Detected", result);
 }

 TEST_CASE("TransientSlice is predictable on real material with heavy settings",
           "[TransientSlice][slicers]")
 {
   auto source = testsignals::monoEurorackSynth();
-  auto expected = std::vector<index>{
-      140,    19182,  34704,  47217,  58297,  68182,  86941,  105688, 117356,
-      122134, 139498, 150485, 161516, 167571, 179043, 186293, 205047, 220493};

   auto params = TestParams{Order(200),     BlockSize(2048), Padding(1024),
                            Skew(1),        ThreshFwd(3),    ThreshBack(1),
                            WindowSize(15), ClumpLength(30), MinSliceLength(4410)};
-  auto matcher = Catch::Matchers::Approx(expected);
-  index margin = 1;
-  matcher.margin(margin);
-
   auto result = runTest(source(Slice(0, 220500)), params);

-  CHECK_THAT(result, matcher);
+  ApprovalTests::Approvals::verifyAll("Detected", result);
 }
diff --git a/tests/algorithms/public/approval_tests/TestTransientSlice.TransientSlice_is_predictable_on_real_material.approved.txt b/tests/algorithms/public/approval_tests/TestTransientSlice.TransientSlice_is_predictable_on_real_material.approved.txt
new file mode 100644
index 000000000..04615e8fb
--- /dev/null
+++ b/tests/algorithms/public/approval_tests/TestTransientSlice.TransientSlice_is_predictable_on_real_material.approved.txt
@@ -0,0 +1,50 @@
+Detected
+
+
+[0] = 146
+[1] = 34710
+[2] = 47223
+[3] = 58304
+[4] = 68190
+[5] = 86944
+[6] = 105695
+[7] = 106756
+[8] = 117800
+[9] = 122140
+[10] = 139529
+[11] = 152884
+[12] = 161527
+[13] = 167575
+[14] = 186297
+[15] = 205055
+[16] = 223797
+[17] = 227228
+[18] = 238074
+[19] = 248989
+[20] = 250361
+[21] = 256309
+[22] = 306846
+[23] = 310675
+[24] = 312508
+[25] = 319117
+[26] = 327660
+[27] = 335219
+[28] = 346780
+[29] = 364675
+[30] = 400940
+[31] = 429155
+[32] = 431273
+[33] = 433296
+[34] = 434505
+[35] = 439538
+[36] = 441627
+[37] = 452033
+[38] = 453397
+[39] = 465470
+[40] = 474529
+[41] = 481516
+[42] = 494519
+[43] = 496122
+[44] = 505753
+[45] = 514268
+
diff --git a/tests/algorithms/public/approval_tests/TestTransientSlice.TransientSlice_is_predictable_on_real_material_with_heavy_settings.approved.txt b/tests/algorithms/public/approval_tests/TestTransientSlice.TransientSlice_is_predictable_on_real_material_with_heavy_settings.approved.txt
new file mode 100644
index 000000000..77c003331
--- /dev/null
+++ b/tests/algorithms/public/approval_tests/TestTransientSlice.TransientSlice_is_predictable_on_real_material_with_heavy_settings.approved.txt
@@ -0,0 +1,22 @@
+Detected
+
+
+[0] = 149
+[1] = 19183
+[2] = 34706
+[3] = 47222
+[4] = 58300
+[5] = 68183
+[6] = 86942
+[7] = 105690
+[8] = 117439
+[9] = 122135
+[10] = 139500
+[11] = 150486
+[12] = 161522
+[13] = 167572
+[14] = 179048
+[15] = 186294
+[16] = 205050
+[17] = 220493
+
diff --git a/tests/algorithms/public/approval_tests/TestTransientSlice.TransientSlice_is_predictable_on_sharp_sine_bursts.approved.txt b/tests/algorithms/public/approval_tests/TestTransientSlice.TransientSlice_is_predictable_on_sharp_sine_bursts.approved.txt
new file mode 100644
index 000000000..b6568c5db
--- /dev/null
+++ b/tests/algorithms/public/approval_tests/TestTransientSlice.TransientSlice_is_predictable_on_sharp_sine_bursts.approved.txt
@@ -0,0 +1,8 @@
+Detected
+
+
+[0] = 995
+[1] = 11023
+[2] = 22045
+[3] = 33070
+
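
For context on the bounds convention that the new asserts encode: the forward predictor reads up to `order` samples *before* the first element of the view it is handed, so callers must pass a view whose `descriptor().start` offset leaves at least that much headroom in the underlying buffer (hence `assert(input.descriptor().start >= mParameters.size())` in `robustEstimate` and the `fIdx(...) >= -input.descriptor().start` checks in `modelPredict`). The standalone sketch below is not FluCoMa code — `MiniView` and `predictOne` are hypothetical stand-ins for `FluidTensorView` and the AR forward prediction — but it illustrates the same headroom rule under those assumptions.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a 1-D view: a pointer into a parent buffer, the
// offset ("start") at which the view begins, and the view's length.
struct MiniView
{
  const double* data;  // first element of the view
  long          start; // samples available in the parent before data[0]
  long          size;  // number of elements in the view
};

// Hypothetical stand-in for a forward AR prediction: estimate view[0] from the
// `order` samples that precede it in the parent buffer.
double predictOne(const MiniView& view, const std::vector<double>& coeffs)
{
  long order = static_cast<long>(coeffs.size());
  // Same idea as the new asserts: the view needs `order` samples of headroom.
  assert(view.start >= order && "too little offset into input data");

  double estimate = 0.0;
  for (long i = 0; i < order; ++i)
    estimate += coeffs[static_cast<std::size_t>(i)] * view.data[-(i + 1)];
  return estimate;
}

int main()
{
  std::vector<double> buffer{0.1, 0.2, 0.3, 0.4, 0.5, 0.6};
  std::vector<double> coeffs{0.5, 0.25, 0.125}; // toy AR(3) coefficients

  // A view starting 3 samples into the buffer has enough headroom for order 3.
  MiniView view{buffer.data() + 3, 3, 3};
  std::printf("prediction for buffer[3]: %f\n", predictOne(view, coeffs));
  return 0;
}
```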