diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 2ef3fed..6d6d143 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -5,6 +5,7 @@ on: branches: [main] pull_request: branches: [main] + workflow_dispatch: {} jobs: test: @@ -15,6 +16,7 @@ jobs: matrix: os: [macos-latest, ubuntu-latest] python-version: ["3.11", "3.12", "3.13"] + backend: [numpy, numba, jax, torch] steps: - uses: actions/checkout@v3 @@ -31,10 +33,15 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install the project - run: uv sync --all-extras --dev + run: | + if [ "${{ matrix.backend }}" = "numpy" ]; then + uv sync + else + uv sync --extra ${{ matrix.backend }} + fi - name: Run tests - run: uv run pytest tests/ + run: uv run pytest tests/ --backend ${{matrix.backend }} coverage: runs-on: ubuntu-latest diff --git a/tests/conftest.py b/tests/conftest.py index 94c91d4..2d03aad 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,8 +6,6 @@ from scoringrules.backend import backends DATA_DIR = Path(__file__).parent / "data" -RUN_TESTS = ["numpy", "numba", "jax", "torch"] -BACKENDS = [b for b in backends.available_backends if b in RUN_TESTS] if os.getenv("SR_TEST_OUTPUT", "False").lower() in ("true", "1", "t"): OUT_DIR = Path(__file__).parent / "output" @@ -15,8 +13,46 @@ else: OUT_DIR = None -for backend in RUN_TESTS: - backends.register_backend(backend) + +def pytest_addoption(parser): + """Add custom command-line options for pytest.""" + parser.addoption( + "--backend", + action="store", + default=None, + help="Specify backend to test", + ) + + +def get_test_backends(config): + """Determine which backends to test.""" + backend_option = config.getoption("--backend") + + if backend_option: + requested = backend_option.split(",") + else: + requested = ["numpy", "numba", "jax", "torch"] + + available = backends.available_backends + test_backends = [b for b in requested if b in available] + + # Register backends + for b in test_backends: + 
try: + backends.register_backend(b) + except Exception as e: + print(f"Warning: Could not register backend '{b}': {e}") + + return test_backends + + +# This generates the parametrization +def pytest_generate_tests(metafunc): + if "backend" in metafunc.fixturenames: + backends_to_test = get_test_backends(metafunc.config) + if not backends_to_test: + pytest.fail("No backends available for testing") + metafunc.parametrize("backend", backends_to_test) @@ -27,5 +63,5 @@ def probability_forecasts(): skip_header=1, usecols=(1, -1), ) return data diff --git a/tests/test_brier.py b/tests/test_brier.py index f92d6e1..65c348e 100644 --- a/tests/test_brier.py +++ b/tests/test_brier.py @@ -3,10 +3,7 @@ import scoringrules as sr -from .conftest import BACKENDS - -@pytest.mark.parametrize("backend", BACKENDS) def test_brier(backend): # test exceptions with pytest.raises(ValueError): @@ -25,7 +22,6 @@ def test_brier(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_rps(backend): # test exceptions with pytest.raises(ValueError): @@ -54,7 +50,6 @@ def test_rps(backend): assert np.allclose(res1, res2) -@pytest.mark.parametrize("backend", BACKENDS) def test_logs(backend): # test exceptions with pytest.raises(ValueError): @@ -73,7 +68,6 @@ def test_logs(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_rls(backend): # test exceptions with pytest.raises(ValueError): diff --git a/tests/test_crps.py b/tests/test_crps.py index bed261a..90c9e0c 100644 --- a/tests/test_crps.py +++ b/tests/test_crps.py @@ -3,8 +3,6 @@ import scipy.stats as st import scoringrules as sr -from .conftest import BACKENDS - ENSEMBLE_SIZE = 11 N = 20 @@ -12,7 +10,6 @@ @pytest.mark.parametrize("estimator", ESTIMATORS) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_ensemble(estimator, backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.3 @@ -49,7 +46,6 @@ def
test_crps_ensemble(estimator, backend): assert not np.any(res - 0.0 > 0.0001) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_estimators(backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.3 @@ -68,7 +64,6 @@ def test_crps_estimators(backend): assert np.allclose(res_fair, res_pwm) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_quantile(backend): # test shapes obs = np.random.randn(N) @@ -108,7 +103,6 @@ def test_crps_quantile(backend): return -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_beta(backend): if backend == "torch": pytest.skip("Not implemented in torch backend") @@ -148,7 +142,6 @@ def test_crps_beta(backend): assert np.allclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_binomial(backend): if backend == "torch": pytest.skip("Not implemented in torch backend") @@ -181,7 +174,6 @@ def test_crps_binomial(backend): assert np.isclose(s, np.array([0.6685115, 0.6685115])).all() -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_exponential(backend): # TODO: add and test exception handling @@ -192,7 +184,6 @@ def test_crps_exponential(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_exponentialM(backend): obs, mass, location, scale = 0.3, 0.1, 0.0, 1.0 res = sr.crps_exponentialM(obs, mass, location, scale, backend=backend) @@ -210,7 +201,6 @@ def test_crps_exponentialM(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_2pexponential(backend): obs, scale1, scale2, location = 0.3, 0.1, 4.3, 0.0 res = sr.crps_2pexponential(obs, scale1, scale2, location, backend=backend) @@ -223,7 +213,6 @@ def test_crps_2pexponential(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_gamma(backend): obs, shape, rate = 0.2, 1.1, 0.7 expected = 0.6343718 @@ -243,7 +232,6 @@ def test_crps_gamma(backend): return 
-@pytest.mark.parametrize("backend", BACKENDS) def test_crps_csg0(backend): obs, shape, rate, shift = 0.7, 0.5, 2.0, 0.3 expected = 0.5411044 @@ -269,7 +257,6 @@ def test_crps_csg0(backend): return -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_gev(backend): if backend == "torch": pytest.skip("`expi` not implemented in torch backend") @@ -314,7 +301,6 @@ def test_crps_gev(backend): ) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_gpd(backend): assert np.isclose(sr.crps_gpd(0.3, 0.9, backend=backend), 0.6849332) assert np.isclose(sr.crps_gpd(-0.3, 0.9, backend=backend), 1.209091) @@ -333,7 +319,6 @@ def test_crps_gpd(backend): ) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_gtclogis(backend): obs, location, scale, lower, upper, lmass, umass = ( 1.8, @@ -361,7 +346,6 @@ def test_crps_gtclogis(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_tlogis(backend): obs, location, scale, lower, upper = 4.9, 3.5, 2.3, 0.0, 20.0 expected = 0.7658979 @@ -374,7 +358,6 @@ def test_crps_tlogis(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_clogis(backend): obs, location, scale, lower, upper = -0.9, 0.4, 1.1, 0.0, 1.0 expected = 1.13237 @@ -387,7 +370,6 @@ def test_crps_clogis(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_gtcnormal(backend): obs, location, scale, lower, upper, lmass, umass = ( 0.9, @@ -415,7 +397,6 @@ def test_crps_gtcnormal(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_tnormal(backend): obs, location, scale, lower, upper = -1.0, 2.9, 2.2, 1.5, 17.3 expected = 3.982434 @@ -428,7 +409,6 @@ def test_crps_tnormal(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_cnormal(backend): obs, location, scale, lower, upper = 1.8, 0.4, 1.1, 0.0, 2.0 expected = 0.8296078 @@ -441,7 
+421,6 @@ def test_crps_cnormal(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_gtct(backend): if backend in ["jax", "torch", "tensorflow"]: pytest.skip("Not implemented in jax, torch or tensorflow backends") @@ -472,7 +451,6 @@ def test_crps_gtct(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_tt(backend): if backend in ["jax", "torch", "tensorflow"]: pytest.skip("Not implemented in jax, torch or tensorflow backends") @@ -488,7 +466,6 @@ def test_crps_tt(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_ct(backend): if backend in ["jax", "torch", "tensorflow"]: pytest.skip("Not implemented in jax, torch or tensorflow backends") @@ -504,7 +481,6 @@ def test_crps_ct(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_hypergeometric(backend): if backend == "torch": pytest.skip("Currently not working in torch backend") @@ -523,7 +499,6 @@ def test_crps_hypergeometric(backend): assert np.isclose(sr.crps_hypergeometric(5, 7, 13, 12), 0.4469742) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_laplace(backend): assert np.isclose(sr.crps_laplace(-3, backend=backend), 2.29978707) assert np.isclose( @@ -534,7 +509,6 @@ def test_crps_laplace(backend): ) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_logis(backend): obs, mu, sigma = 17.1, 13.8, 3.3 expected = 2.067527 @@ -547,12 +521,10 @@ def test_crps_logis(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_loglaplace(backend): assert np.isclose(sr.crps_loglaplace(3.0, 0.1, 0.9, backend=backend), 1.16202051) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_loglogistic(backend): if backend == "torch": pytest.skip("Not implemented in torch backend") @@ -564,7 +536,6 @@ def test_crps_loglogistic(backend): ) 
-@pytest.mark.parametrize("backend", BACKENDS) def test_crps_lognormal(backend): obs = np.exp(np.random.randn(N)) mulog = np.log(obs) + np.random.randn(N) * 0.1 @@ -585,7 +556,6 @@ def test_crps_lognormal(backend): assert not np.any(res - 0.0 > 0.0001) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_mixnorm(backend): obs, m, s, w = 0.3, [0.0, -2.9, 0.9], [0.5, 1.4, 0.7], [1 / 3, 1 / 3, 1 / 3] res = sr.crps_mixnorm(obs, m, s, w, backend=backend) @@ -611,7 +581,6 @@ def test_crps_mixnorm(backend): assert np.allclose(res1, res2) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_negbinom(backend): if backend in ["jax", "torch", "tensorflow"]: pytest.skip("Not implemented in jax, torch or tensorflow backends") @@ -642,7 +611,6 @@ def test_crps_negbinom(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_normal(backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.1 @@ -664,7 +632,6 @@ def test_crps_normal(backend): assert not np.any(res - 0.0 > 0.0001) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_2pnormal(backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.1 @@ -677,7 +644,6 @@ def test_crps_2pnormal(backend): assert not np.any(res < 0.0) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_poisson(backend): obs, mean = 1.0, 3.0 res = sr.crps_poisson(obs, mean, backend=backend) @@ -695,7 +661,6 @@ def test_crps_poisson(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_t(backend): if backend in ["jax", "torch", "tensorflow"]: pytest.skip("Not implemented in jax, torch or tensorflow backends") @@ -711,7 +676,6 @@ def test_crps_t(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_crps_uniform(backend): obs, min, max, lmass, umass = 0.3, -1.0, 2.1, 0.3, 0.1 res = sr.crps_uniform(obs, min, max, lmass, umass, backend=backend) diff --git 
a/tests/test_dss.py b/tests/test_dss.py index 41f69f6..57c6c45 100644 --- a/tests/test_dss.py +++ b/tests/test_dss.py @@ -2,14 +2,12 @@ import pytest import scoringrules as sr -from .conftest import BACKENDS ENSEMBLE_SIZE = 11 N = 20 N_VARS = 3 -@pytest.mark.parametrize("backend", BACKENDS) def test_ds_score_uv(backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.1 @@ -33,7 +31,6 @@ def test_ds_score_uv(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_ds_score_mv(backend): with pytest.raises(ValueError): obs = np.random.randn(N, N_VARS) diff --git a/tests/test_energy.py b/tests/test_energy.py index 8cc4848..8e9f4cb 100644 --- a/tests/test_energy.py +++ b/tests/test_energy.py @@ -2,7 +2,6 @@ import pytest import scoringrules as sr -from .conftest import BACKENDS ENSEMBLE_SIZE = 11 N = 20 @@ -12,7 +11,6 @@ @pytest.mark.parametrize("estimator", ESTIMATORS) -@pytest.mark.parametrize("backend", BACKENDS) def test_energy_score(estimator, backend): # test exceptions diff --git a/tests/test_error_spread.py b/tests/test_error_spread.py index 051ba1f..c9fb191 100644 --- a/tests/test_error_spread.py +++ b/tests/test_error_spread.py @@ -1,14 +1,11 @@ import numpy as np -import pytest import scoringrules as sr -from .conftest import BACKENDS ENSEMBLE_SIZE = 11 N = 5 -@pytest.mark.parametrize("backend", BACKENDS) def test_error_spread_score(backend): # test shape obs = np.random.randn(N) diff --git a/tests/test_interval.py b/tests/test_interval.py index c59a314..c33cf1b 100644 --- a/tests/test_interval.py +++ b/tests/test_interval.py @@ -3,12 +3,10 @@ import scipy.stats as st import scoringrules as sr -from .conftest import BACKENDS N = 100 -@pytest.mark.parametrize("backend", BACKENDS) def test_interval_score(backend): # basic functionality _ = sr.interval_score(0.1, 0.0, 0.4, 0.5) @@ -57,7 +55,6 @@ def test_interval_score(backend): ## We use Bracher et al (2021) Eq. 
(3) to test the WIS -@pytest.mark.parametrize("backend", BACKENDS) def test_weighted_interval_score(backend): obs = np.zeros(N) alpha = np.linspace(0.01, 0.99, 99) diff --git a/tests/test_kernels.py b/tests/test_kernels.py index 259204d..1fa32e7 100644 --- a/tests/test_kernels.py +++ b/tests/test_kernels.py @@ -3,7 +3,6 @@ import scoringrules as sr from scoringrules.backend import backends -from .conftest import BACKENDS ENSEMBLE_SIZE = 11 N = 10 @@ -13,7 +12,6 @@ @pytest.mark.parametrize("estimator", ESTIMATORS) -@pytest.mark.parametrize("backend", BACKENDS) def test_gksuv(estimator, backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.1 @@ -67,7 +65,6 @@ def test_gksuv(estimator, backend): @pytest.mark.parametrize("estimator", ESTIMATORS) -@pytest.mark.parametrize("backend", BACKENDS) def test_gksmv(estimator, backend): obs = np.random.randn(N, N_VARS) fct = np.expand_dims(obs, axis=-2) + np.random.randn(N, ENSEMBLE_SIZE, N_VARS) @@ -122,7 +119,6 @@ def test_gksmv(estimator, backend): @pytest.mark.parametrize("estimator", ESTIMATORS) -@pytest.mark.parametrize("backend", BACKENDS) def test_twgksuv(estimator, backend): if backend == "jax": pytest.skip("Not implemented in jax backend") @@ -224,7 +220,6 @@ def v_func2(x): np.testing.assert_allclose(res, 0.0089314, rtol=1e-6) -@pytest.mark.parametrize("backend", BACKENDS) def test_twgksmv(backend): if backend == "jax": pytest.skip("Not implemented in jax backend") @@ -253,7 +248,6 @@ def v_func(x): np.testing.assert_allclose(res, 0.1016436, rtol=1e-6) -@pytest.mark.parametrize("backend", BACKENDS) def test_owgksuv(backend): if backend == "jax": pytest.skip("Not implemented in jax backend") @@ -345,7 +339,6 @@ def w_func(x): np.testing.assert_allclose(res, 0.008905213, rtol=1e-5) -@pytest.mark.parametrize("backend", BACKENDS) def test_owgksmv(backend): if backend == "jax": pytest.skip("Not implemented in jax backend") @@ -376,7 +369,6 @@ def w_func(x): np.testing.assert_allclose(res, 0.1016436, 
rtol=1e-6) -@pytest.mark.parametrize("backend", BACKENDS) def test_vrgksuv(backend): if backend == "jax": pytest.skip("Not implemented in jax backend") @@ -468,7 +460,6 @@ def w_func(x): np.testing.assert_allclose(res, 0.04011836, rtol=1e-6) -@pytest.mark.parametrize("backend", BACKENDS) def test_vrgksmv(backend): if backend == "jax": pytest.skip("Not implemented in jax backend") diff --git a/tests/test_logs.py b/tests/test_logs.py index af32aba..53857ea 100644 --- a/tests/test_logs.py +++ b/tests/test_logs.py @@ -3,13 +3,11 @@ import scoringrules as sr -from .conftest import BACKENDS ENSEMBLE_SIZE = 51 N = 100 -@pytest.mark.parametrize("backend", BACKENDS) def test_ensemble(backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.1 @@ -34,7 +32,6 @@ def test_ensemble(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_clogs(backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.1 @@ -71,7 +68,6 @@ def test_clogs(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_beta(backend): if backend == "torch": pytest.skip("Not implemented in torch backend") @@ -103,7 +99,6 @@ def test_beta(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_binomial(backend): # test correctness res = sr.logs_binomial(8, 10, 0.9, backend=backend) @@ -119,7 +114,6 @@ def test_binomial(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_exponential(backend): obs, rate = 0.3, 0.1 res = sr.logs_exponential(obs, rate, backend=backend) @@ -137,7 +131,6 @@ def test_exponential(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_gamma(backend): obs, shape, rate = 0.2, 1.1, 0.7 expected = 0.6434138 @@ -157,7 +150,6 @@ def test_gamma(backend): return -@pytest.mark.parametrize("backend", BACKENDS) def test_gev(backend): obs, xi, mu, sigma = 0.3, 
0.7, 0.0, 1.0 res0 = sr.logs_gev(obs, xi, backend=backend) @@ -188,7 +180,6 @@ def test_gev(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_gpd(backend): obs, shape, location, scale = 0.8, 0.9, 0.0, 1.0 res0 = sr.logs_gpd(obs, shape, backend=backend) @@ -219,7 +210,6 @@ def test_gpd(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_hypergeometric(backend): res = sr.logs_hypergeometric(5, 7, 13, 12) expected = 1.251525 @@ -232,7 +222,6 @@ def test_hypergeometric(backend): assert res.shape == (2, 2) -@pytest.mark.parametrize("backend", BACKENDS) def test_logis(backend): obs, mu, sigma = 17.1, 13.8, 3.3 res = sr.logs_logistic(obs, mu, sigma, backend=backend) @@ -245,7 +234,6 @@ def test_logis(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_loglogistic(backend): obs, mulog, sigmalog = 3.0, 0.1, 0.9 res = sr.logs_loglogistic(obs, mulog, sigmalog, backend=backend) @@ -263,7 +251,6 @@ def test_loglogistic(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_lognormal(backend): obs, mulog, sigmalog = 3.0, 0.1, 0.9 res = sr.logs_lognormal(obs, mulog, sigmalog, backend=backend) @@ -281,7 +268,6 @@ def test_lognormal(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_exponential2(backend): obs, location, scale = 8.3, 7.0, 1.2 res = sr.logs_exponential2(obs, location, scale, backend=backend) @@ -303,7 +289,6 @@ def test_exponential2(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_2pexponential(backend): obs, scale1, scale2, location = 0.3, 0.1, 4.3, 0.0 res = sr.logs_2pexponential(obs, scale1, scale2, location, backend=backend) @@ -316,7 +301,6 @@ def test_2pexponential(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_laplace(backend): obs, 
location, scale = -3.0, 0.1, 0.9 res = sr.logs_laplace(obs, backend=backend) @@ -332,7 +316,6 @@ def test_laplace(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_loglaplace(backend): obs, locationlog, scalelog = 3.0, 0.1, 0.9 res = sr.logs_loglaplace(obs, locationlog, scalelog, backend=backend) @@ -350,7 +333,6 @@ def test_loglaplace(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_mixnorm(backend): obs, m, s, w = 0.3, [0.0, -2.9, 0.9], [0.5, 1.4, 0.7], [1 / 3, 1 / 3, 1 / 3] res = sr.logs_mixnorm(obs, m, s, w, backend=backend) @@ -376,7 +358,6 @@ def test_mixnorm(backend): assert np.allclose(res1, res2) -@pytest.mark.parametrize("backend", BACKENDS) def test_negbinom(backend): if backend in ["jax", "torch"]: pytest.skip("Not implemented in jax or torch backends") @@ -409,13 +390,11 @@ def test_negbinom(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_normal(backend): res = sr.logs_normal(0.0, 0.1, 0.1, backend=backend) assert np.isclose(res, -0.8836466, rtol=1e-5) -@pytest.mark.parametrize("backend", BACKENDS) def test_2pnormal(backend): obs, scale1, scale2, location = 29.1, 4.6, 1.3, 27.9 res = sr.logs_2pnormal(obs, scale1, scale2, location, backend=backend) @@ -442,7 +421,6 @@ def test_2pnormal(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_poisson(backend): obs, mean = 1.0, 3.0 res = sr.logs_poisson(obs, mean, backend=backend) @@ -460,7 +438,6 @@ def test_poisson(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_t(backend): if backend in ["jax", "torch", "tensorflow"]: pytest.skip("Not implemented in jax, torch or tensorflow backends") @@ -476,7 +453,6 @@ def test_t(backend): assert np.isclose(res, expected) -@pytest.mark.parametrize("backend", BACKENDS) def test_tlogis(backend): obs, location, scale, lower, upper 
= 4.9, 3.5, 2.3, 0.0, 20.0 res = sr.logs_tlogistic(obs, location, scale, lower, upper, backend=backend) @@ -489,7 +465,6 @@ def test_tlogis(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_tnormal(backend): obs, location, scale, lower, upper = 4.2, 2.9, 2.2, 1.5, 17.3 res = sr.logs_tnormal(obs, location, scale, lower, upper, backend=backend) @@ -507,7 +482,6 @@ def test_tnormal(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_tt(backend): if backend in ["jax", "torch", "tensorflow"]: pytest.skip("Not implemented in jax, torch or tensorflow backends") @@ -528,7 +502,6 @@ def test_tt(backend): assert np.isclose(res, res0) -@pytest.mark.parametrize("backend", BACKENDS) def test_uniform(backend): obs, min, max = 0.3, -1.0, 2.1 res = sr.logs_uniform(obs, min, max, backend=backend) diff --git a/tests/test_quantile.py b/tests/test_quantile.py index 506ba96..e0ed21d 100644 --- a/tests/test_quantile.py +++ b/tests/test_quantile.py @@ -3,10 +3,7 @@ import scoringrules as sr -from .conftest import BACKENDS - -@pytest.mark.parametrize("backend", BACKENDS) def test_quantile_score(backend): obs = np.array([0.1, 0.2, 0.3, 0.4, 0.5]) fct = np.array([0.1, 0.2, 0.3, 0.4, 0.5]) diff --git a/tests/test_variogram.py b/tests/test_variogram.py index fb4cbf1..5cf3985 100644 --- a/tests/test_variogram.py +++ b/tests/test_variogram.py @@ -3,7 +3,6 @@ import scoringrules as sr -from .conftest import BACKENDS ENSEMBLE_SIZE = 51 N = 100 @@ -13,7 +12,6 @@ @pytest.mark.parametrize("estimator", ESTIMATORS) -@pytest.mark.parametrize("backend", BACKENDS) def test_variogram_score(estimator, backend): obs = np.random.randn(N, N_VARS) fct = np.expand_dims(obs, axis=-2) + np.random.randn(N, ENSEMBLE_SIZE, N_VARS) @@ -27,7 +25,6 @@ def test_variogram_score(estimator, backend): @pytest.mark.parametrize("estimator", ESTIMATORS) -@pytest.mark.parametrize("backend", BACKENDS) def 
test_variogram_score_permuted_dims(estimator, backend): obs = np.random.randn(N, N_VARS) fct = np.expand_dims(obs, axis=-2) + np.random.randn(N, ENSEMBLE_SIZE, N_VARS) @@ -42,7 +39,6 @@ def test_variogram_score_permuted_dims(estimator, backend): assert "jax" in res.__module__ -@pytest.mark.parametrize("backend", BACKENDS) def test_variogram_score_correctness(backend): fct = np.array( [ diff --git a/tests/test_wcrps.py b/tests/test_wcrps.py index 5201a6d..2fd2c43 100644 --- a/tests/test_wcrps.py +++ b/tests/test_wcrps.py @@ -2,7 +2,6 @@ import pytest import scoringrules as sr -from .conftest import BACKENDS M = 11 N = 20 @@ -10,7 +9,6 @@ ESTIMATORS = ["nrg", "fair", "pwm", "qd", "akr", "akr_circperm"] -@pytest.mark.parametrize("backend", BACKENDS) def test_owcrps_ensemble(backend): # test shapes obs = np.random.randn(N) @@ -22,7 +20,6 @@ def test_owcrps_ensemble(backend): assert res.shape == (N,) -@pytest.mark.parametrize("backend", BACKENDS) def test_vrcrps_ensemble(backend): # test shapes obs = np.random.randn(N) @@ -35,7 +32,6 @@ def test_vrcrps_ensemble(backend): @pytest.mark.parametrize("estimator", ESTIMATORS) -@pytest.mark.parametrize("backend", BACKENDS) def test_twcrps_vs_crps(estimator, backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.1 @@ -61,7 +57,6 @@ def test_twcrps_vs_crps(estimator, backend): np.testing.assert_allclose(res, resw, rtol=1e-10) -@pytest.mark.parametrize("backend", BACKENDS) def test_owcrps_vs_crps(backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.1 @@ -85,7 +80,6 @@ def test_owcrps_vs_crps(backend): np.testing.assert_allclose(res, resw, rtol=1e-5) -@pytest.mark.parametrize("backend", BACKENDS) def test_vrcrps_vs_crps(backend): obs = np.random.randn(N) mu = obs + np.random.randn(N) * 0.1 @@ -109,7 +103,6 @@ def test_vrcrps_vs_crps(backend): np.testing.assert_allclose(res, resw, rtol=1e-5) -@pytest.mark.parametrize("backend", BACKENDS) def test_owcrps_score_correctness(backend): fct = np.array( [ @@ 
-164,7 +157,6 @@ def w_func(x): np.testing.assert_allclose(res, 0.09933139, rtol=1e-6) -@pytest.mark.parametrize("backend", BACKENDS) def test_twcrps_score_correctness(backend): fct = np.array( [ @@ -235,7 +227,6 @@ def v_func(x): np.testing.assert_allclose(res, 0.0994809, rtol=1e-6) -@pytest.mark.parametrize("backend", BACKENDS) def test_vrcrps_score_correctness(backend): fct = np.array( [ diff --git a/tests/test_wenergy.py b/tests/test_wenergy.py index 16f20aa..5f2f568 100644 --- a/tests/test_wenergy.py +++ b/tests/test_wenergy.py @@ -1,17 +1,14 @@ import numpy as np -import pytest import scoringrules as sr from scoringrules.backend import backends -from .conftest import BACKENDS ENSEMBLE_SIZE = 51 N = 100 N_VARS = 3 -@pytest.mark.parametrize("backend", BACKENDS) def test_owes_vs_es(backend): obs = np.random.randn(N, N_VARS) fct = np.expand_dims(obs, axis=-2) + np.random.randn(N, ENSEMBLE_SIZE, N_VARS) @@ -26,7 +23,6 @@ def test_owes_vs_es(backend): np.testing.assert_allclose(res, resw, atol=1e-7) -@pytest.mark.parametrize("backend", BACKENDS) def test_twes_vs_es(backend): obs = np.random.randn(N, N_VARS) fct = np.expand_dims(obs, axis=-2) + np.random.randn(N, ENSEMBLE_SIZE, N_VARS) @@ -36,7 +32,6 @@ def test_twes_vs_es(backend): np.testing.assert_allclose(res, resw, rtol=1e-10) -@pytest.mark.parametrize("backend", BACKENDS) def test_vres_vs_es(backend): obs = np.random.randn(N, N_VARS) fct = np.expand_dims(obs, axis=-2) + np.random.randn(N, ENSEMBLE_SIZE, N_VARS) @@ -51,7 +46,6 @@ def test_vres_vs_es(backend): np.testing.assert_allclose(res, resw, rtol=1e-6) -@pytest.mark.parametrize("backend", BACKENDS) def test_owenergy_score_correctness(backend): fct = np.array( [[0.79546742, 0.4777960, 0.2164079], [0.02461368, 0.7584595, 0.3181810]] @@ -71,7 +65,6 @@ def w_func(x): np.testing.assert_allclose(res, 0.3345418, rtol=1e-6) -@pytest.mark.parametrize("backend", BACKENDS) def test_twenergy_score_correctness(backend): fct = np.array( [[0.79546742, 0.4777960, 
0.2164079], [0.02461368, 0.7584595, 0.3181810]] diff --git a/tests/test_wvariogram.py b/tests/test_wvariogram.py index 8e80171..3c4c1dc 100644 --- a/tests/test_wvariogram.py +++ b/tests/test_wvariogram.py @@ -1,18 +1,15 @@ import numpy as np -import pytest import scoringrules as sr from scoringrules.backend import backends -from .conftest import BACKENDS ENSEMBLE_SIZE = 51 N = 100 N_VARS = 3 -@pytest.mark.parametrize("backend", BACKENDS) def test_owvs_vs_vs(backend): obs = np.random.randn(N, N_VARS) fct = np.expand_dims(obs, axis=-2) + np.random.randn(N, ENSEMBLE_SIZE, N_VARS) @@ -27,7 +24,6 @@ def test_owvs_vs_vs(backend): np.testing.assert_allclose(res, resw, rtol=1e-3) -@pytest.mark.parametrize("backend", BACKENDS) def test_twvs_vs_vs(backend): obs = np.random.randn(N, N_VARS) fct = np.expand_dims(obs, axis=-2) + np.random.randn(N, ENSEMBLE_SIZE, N_VARS) @@ -37,7 +33,6 @@ def test_twvs_vs_vs(backend): np.testing.assert_allclose(res, resw, rtol=5e-4) -@pytest.mark.parametrize("backend", BACKENDS) def test_vrvs_vs_vs(backend): obs = np.random.randn(N, N_VARS) fct = np.expand_dims(obs, axis=-2) + np.random.randn(N, ENSEMBLE_SIZE, N_VARS) @@ -52,7 +47,6 @@ def test_vrvs_vs_vs(backend): np.testing.assert_allclose(res, resw, atol=1e-6) -@pytest.mark.parametrize("backend", BACKENDS) def test_owvariogram_score_correctness(backend): fct = np.array( [[0.79546742, 0.4777960, 0.2164079], [0.02461368, 0.7584595, 0.3181810]] @@ -74,7 +68,6 @@ def w_func(x): np.testing.assert_allclose(res, 0.04856366, rtol=1e-6) -@pytest.mark.parametrize("backend", BACKENDS) def test_twvariogram_score_correctness(backend): fct = np.array( [[0.79546742, 0.4777960, 0.2164079], [0.02461368, 0.7584595, 0.3181810]]