diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..1854a3f
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,6 @@
+root = true
+
+[*.py]
+indent_style = tab
+indent_size = 4
+trim_trailing_whitespace = true
\ No newline at end of file
diff --git a/pynumdiff/finite_difference/_finite_difference.py b/pynumdiff/finite_difference/_finite_difference.py
index 90eba03..aa254f4 100644
--- a/pynumdiff/finite_difference/_finite_difference.py
+++ b/pynumdiff/finite_difference/_finite_difference.py
@@ -18,7 +18,7 @@ def first_order(x, dt, params=None, options={}, num_iterations=None):
     - **dxdt_hat** -- estimated derivative of x
     """
     if params != None and 'iterate' in options:
-        warn("""`params` and `options` parameters will be removed in a future version. Use `num_iterations` instead.""")
+        warn("""`params` and `options` parameters will be removed in a future version. Use `num_iterations` instead.""", DeprecationWarning)
         if isinstance(params, list): params = params[0]
         return _iterate_first_order(x, dt, params)
     elif num_iterations:
diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py
new file mode 100644
index 0000000..3b19f97
--- /dev/null
+++ b/pynumdiff/tests/test_diff_methods.py
@@ -0,0 +1,83 @@
+import numpy as np
+from pytest import mark
+from warnings import warn
+
+from ..linear_model import lineardiff, polydiff, savgoldiff, spectraldiff
+from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity
+from ..kalman_smooth import * # constant_velocity, constant_acceleration, constant_jerk, known_dynamics
+from ..smooth_finite_difference import * # mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff, splinediff
+from ..finite_difference import first_order, second_order
+
+
+dt = 0.01
+t = np.arange(0, 3, dt) # domain to test over
+np.random.seed(7) # for repeatability of the test, so we don't get random failures
+noise = 0.05*np.random.randn(*t.shape)
+
+diff_methods_and_params = [
+    #(lineardiff, {'order':3, 'gamma':5, 'window_size':10, 'solver':'CVXOPT'}),
+    (polydiff, {'polynomial_order':2, 'window_size':3}),
+    (savgoldiff, {'polynomial_order':2, 'window_size':4, 'smoothing_win':4}),
+    (spectraldiff, {'high_freq_cutoff':0.1})
+    ]
+
+# Analytic (function, derivative) pairs on which to test differentiation methods.
+test_funcs_and_derivs = [
+    (0, lambda t: np.ones(t.shape), lambda t: np.zeros(t.shape)), # x(t) = 1
+    (1, lambda t: t, lambda t: np.ones(t.shape)), # x(t) = t
+    (2, lambda t: 2*t + 1, lambda t: 2*np.ones(t.shape)), # x(t) = 2t+1
+    (3, lambda t: t**2 - t + 1, lambda t: 2*t - 1), # x(t) = t^2 - t + 1
+    (4, lambda t: np.sin(t) + 1/2, lambda t: np.cos(t))] # x(t) = sin(t) + 1/2
+
+# All the testing methodology follows the exact same pattern; the only thing that changes is the
+# closeness to the right answer various methods achieve with the given parameterizations. So index a
+# big ol' table by the method, then the test function, then the pair of quantities we're comparing.
+error_bounds = {
+    lineardiff: [[(1e-25, 1e-25)]*4]*len(test_funcs_and_derivs),
+    polydiff: [[(1e-14, 1e-15), (1e-12, 1e-13), (1, 0.1), (100, 100)],
+               [(1e-13, 1e-14), (1e-12, 1e-13), (1, 0.1), (100, 100)],
+               [(1e-13, 1e-14), (1e-11, 1e-12), (1, 0.1), (100, 100)],
+               [(1e-13, 1e-14), (1e-12, 1e-12), (1, 0.1), (100, 100)],
+               [(1e-6, 1e-7), (0.001, 0.0001), (1, 0.1), (100, 100)]],
+    savgoldiff: [[(1e-7, 1e-8), (1e-12, 1e-13), (1, 0.1), (100, 10)],
+                 [(1e-5, 1e-7), (1e-12, 1e-13), (1, 0.1), (100, 10)],
+                 [(1e-7, 1e-8), (1e-11, 1e-12), (1, 0.1), (100, 10)],
+                 [(0.1, 0.01), (0.1, 0.01), (1, 0.1), (100, 10)],
+                 [(0.01, 1e-3), (0.01, 1e-3), (1, 0.1), (100, 10)]],
+    spectraldiff: [[(1e-7, 1e-8), (1e-25, 1e-25), (1, 0.1), (100, 10)],
+                   [(0.1, 0.1), (10, 10), (1, 0.1), (100, 10)],
+                   [(0.1, 0.1), (10, 10), (1, 0.1), (100, 10)],
+                   [(1, 1), (100, 10), (1, 1), (100, 10)],
+                   [(0.1, 0.1), (10, 10), (1, 0.1), (100, 10)]]
+}
+
+
+@mark.parametrize("diff_method_and_params", diff_methods_and_params)
+@mark.parametrize("test_func_and_deriv", test_funcs_and_derivs)
+def test_diff_method(diff_method_and_params, test_func_and_deriv):
+    diff_method, params = diff_method_and_params # unpack
+    i, f, df = test_func_and_deriv
+
+    # some methods rely on cvxpy, and we'd like to allow use of pynumdiff without convex optimization
+    if diff_method in [lineardiff, velocity]:
+        try: import cvxpy
+        except: warn(f"Cannot import cvxpy, skipping {diff_method} test."); return
+
+    x = f(t) # sample the function
+    x_noisy = x + noise # add a little noise
+    dxdt = df(t) # true values of the derivative
+
+    # differentiate without and with noise
+    x_hat, dxdt_hat = diff_method(x, dt, **params) if isinstance(params, dict) else diff_method(x, dt, params)
+    x_hat_noisy, dxdt_hat_noisy = diff_method(x_noisy, dt, **params) if isinstance(params, dict) else diff_method(x_noisy, dt, params)
+
+    # check x_hat and x_hat_noisy are close to x and dxdt_hat and dxdt_hat_noisy are close to dxdt
+    #print("]\n[", end="")
+    for j,(a,b) in enumerate([(x,x_hat), (dxdt,dxdt_hat), (x,x_hat_noisy), (dxdt,dxdt_hat_noisy)]):
+        l2_error = np.linalg.norm(a - b)
+        linf_error = np.max(np.abs(a - b))
+
+        #print(f"({10 ** np.ceil(np.log10(l2_error)) if l2_error > 0 else 1e-25}, {10 ** np.ceil(np.log10(linf_error)) if linf_error > 0 else 1e-25})", end=", ")
+        l2_bound, linf_bound = error_bounds[diff_method][i][j]
+        assert np.linalg.norm(a - b) < l2_bound
+        assert np.max(np.abs(a - b)) < linf_bound
diff --git a/pynumdiff/tests/test_linear_model.py b/pynumdiff/tests/test_linear_model.py
deleted file mode 100644
index 0c8ed29..0000000
--- a/pynumdiff/tests/test_linear_model.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""
-Unit tests for linear (smoothing) model
-"""
-# pylint: skip-file
-
-import numpy as np
-import logging as _logging
-
-_logging.basicConfig(
-    level=_logging.INFO,
-    format="%(asctime)s [%(levelname)s] %(message)s",
-    handlers=[
-        _logging.FileHandler("debug.log"),
-        _logging.StreamHandler()
-    ]
-)
-
-
-from pynumdiff.linear_model import *
-
-
-x = np.array([1., 4., 9., 3., 20.,
-              8., 16., 2., 15., 10.,
-              15., 3., 5., 7., 4.])
-dt = 0.01
-
-
-def test_savgoldiff():
-    params = [2, 4, 4]
-    x_hat, dxdt_hat = savgoldiff(x, dt, params)
-    x_smooth = np.array([4.669816, 4.374363, 6.46848, 8.899164, 10.606681, 11.059424,
-                         10.519283, 10.058375, 10.191014, 10.193343, 9.208019, 7.445465,
-                         5.880869, 5.49672, 6.930156])
-    dxdt = np.array([-29.5453, 156.853147, 261.970245, 224.16657, 117.336993,
-                     -26.788542, -81.239512, -10.942197, 37.470096, -37.004311,
-                     -160.060586, -192.450136, -120.46908, 43.639278, 243.047964])
-    np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-    np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
-
-def test_spectraldiff():
-    params = [0.1]
-    x_hat, dxdt_hat = spectraldiff(x, dt, params)
-    x_smooth = np.array([3.99, 5.038, 6.635, 8.365, 9.971, 11.201, 11.86, 11.86,
-                         11.231, 10.113, 8.722, 7.296, 6.047, 5.116, 4.556])
-    dxdt = np.array([104.803, 147., 172.464, 173.547, 147.67, 98.194,
-                     33.754, -33.769, -92.105, -131.479, -146.761, -138.333,
-                     -111.508, -74.752, -37.276])
-    np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-    np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
-
-def test_polydiff():
-    params = [2, 3]
-    x_hat, dxdt_hat = polydiff(x, dt, params)
-    x_smooth = np.array([1.16153, 4.506877, 6.407802, 8.544663, 13.431766, 14.051294,
-                         10.115687, 7.674865, 10.471466, 13.612046, 11.363571, 5.68407,
-                         4.443968, 6.213507, 4.695931])
-    dxdt = np.array([330.730385, 284.267456, 299.891801, 305.441626, 205.475727,
-                     -145.229037, -279.41178, 15.428548, 244.252341, 20.343789,
-                     -326.727498, -288.988297, 33.647456, 27.861175, -344.695033])
-    np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-    np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
-
-# def test_chebydiff(self):
-#     try:
-#         import pychebfun
-#     except:
-#         __warning__ = '\nCannot import pychebfun, skipping chebydiff test.'
-#         _logging.info(__warning__)
-#         return
-
-#     params = [2, 3]
-#     x_hat, dxdt_hat = chebydiff(x, dt, params)
-#     x_smooth = np.array([1., 4.638844, 7.184256, 6.644655, 15.614775, 11.60484,
-#                          12.284141, 6.082226, 12.000615, 12.058705, 12.462283, 5.018101,
-#                          4.674378, 6.431201, 4.])
-#     dxdt = np.array([202.732652, 346.950235, -140.713336, 498.719617, 212.717775,
-#                      -185.13847, -266.604056, -51.792587, 377.969849, -0.749768,
-#                      -297.654931, -455.876155, 197.575692, -24.809441, -150.109487])
-#     np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-#     np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
-
-def test_lineardiff():
-    try:
-        import cvxpy
-    except:
-        __warning__ = '\nCannot import cvxpy, skipping lineardiff test.'
-        _logging.info(__warning__)
-        return
-
-    params = [3, 5, 10]
-    x_hat, dxdt_hat = lineardiff(x, dt, params, options={'solver': 'CVXOPT'})
-    x_smooth = np.array([3.070975, 3.435072, 6.363585, 10.276584, 12.033974, 10.594136,
-                         9.608228, 9.731326, 10.333255, 10.806791, 9.710448, 7.456045,
-                         5.70695, 4.856271, 5.685251])
-    dxdt = np.array([36.409751, 164.630545, 342.075623, 283.519415, 15.877598,
-                     -121.287252, -43.140514, 36.251305, 53.773231, -31.140351,
-                     -167.537258, -200.174883, -129.988725, -1.084955, 82.897991])
-    np.testing.assert_almost_equal(x_smooth, x_hat, decimal=2)
-    np.testing.assert_almost_equal(dxdt, dxdt_hat, decimal=2)
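
The error_bounds table added in test_diff_methods.py is indexed by method, then by the integer id of the analytic test function (0-4), then by which of the four comparisons the loop is making. A minimal sketch of that lookup, assuming pynumdiff and pytest are installed so the new test module imports cleanly (the snippet itself is illustrative and not part of the patch):

    # Illustrative only, not part of the committed test file.
    # Requires pynumdiff (with pytest) installed so the test module can be imported.
    from pynumdiff.linear_model import spectraldiff
    from pynumdiff.tests.test_diff_methods import error_bounds

    # error_bounds[method][test function id][comparison index], where the comparison
    # index follows the loop in test_diff_method:
    #   0: (x, x_hat)   1: (dxdt, dxdt_hat)   2: (x, x_hat_noisy)   3: (dxdt, dxdt_hat_noisy)
    l2_bound, linf_bound = error_bounds[spectraldiff][4][3]  # x(t) = sin(t) + 1/2, noisy derivative
    print(l2_bound, linf_bound)  # 100 10, per the table above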