diff --git a/.Rbuildignore b/.Rbuildignore index 3e18459..c3b3d91 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -7,3 +7,5 @@ ^pkgdown$ ^cran-comments\.md$ ^LICENSE\.md$ +^__pycache__$ +^CRAN-SUBMISSION$ diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml index 2bfc291..c9db6a3 100644 --- a/.github/workflows/R-CMD-check.yaml +++ b/.github/workflows/R-CMD-check.yaml @@ -1,9 +1,3 @@ -# NOTE: This workflow is overkill for most R packages -# check-standard.yaml is likely a better choice -# usethis::use_github_action("check-standard") will install it. -# -# For help debugging build failures open an issue on the RStudio community with the 'github-actions' tag. -# https://community.rstudio.com/new-topic?category=Package%20development&tags=github-actions on: push: branches: @@ -13,96 +7,105 @@ on: branches: - main - master + schedule: + - cron: '1 23 * * Sun' name: R-CMD-check -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref }} - cancel-in-progress: true +defaults: + run: + shell: Rscript {0} jobs: R-CMD-check: - runs-on: ${{ matrix.config.os }} - - name: ${{ matrix.config.os }} (${{ matrix.config.r }}) - + name: ${{ matrix.os }}, tf-${{ matrix.tf }}, R-${{ matrix.r}} + timeout-minutes: 30 strategy: fail-fast: false matrix: - config: - - {os: macOS-latest, r: 'release'} - - {os: windows-latest, r: 'release'} - - {os: windows-latest, r: 'oldrel'} - - {os: ubuntu-18.04, r: 'devel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest", http-user-agent: "R/4.0.0 (ubuntu-18.04) R (4.0.0 x86_64-pc-linux-gnu x86_64 linux-gnu) on GitHub Actions" } - - {os: ubuntu-18.04, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} - - {os: ubuntu-18.04, r: 'oldrel-1', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} - - {os: ubuntu-18.04, r: 'oldrel-2', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} + include: + - {os: 'ubuntu-latest' , tf: 'default', r: 'release'} + - {os: 'windows-latest', tf: 'default', r: 'release'} + - {os: 'macOS-latest' , tf: 'default', r: 'release'} + runs-on: ${{ matrix.os }} + continue-on-error: ${{ matrix.tf == 'nightly' || contains(matrix.tf, 'rc') || matrix.r == 'devel' }} env: - RSPM: ${{ matrix.config.rspm }} + R_REMOTES_NO_ERRORS_FROM_WARNINGS: 'true' + R_COMPILE_AND_INSTALL_PACKAGES: 'never' GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - RETICULATE_AUTOCONFIGURE: 'FALSE' - TF_VERSION: '1.14.0' steps: - uses: actions/checkout@v2 - uses: r-lib/actions/setup-r@v2 - id: install-r + id: setup-r with: - r-version: ${{ matrix.config.r }} - http-user-agent: ${{ matrix.config.http-user-agent }} + r-version: ${{ matrix.r }} + Ncpus: '2L' + use-public-rspm: true - uses: r-lib/actions/setup-pandoc@v2 - - uses: r-lib/actions/setup-r-dependencies@v2 + - name: Get Date + id: get-date + shell: bash + run: | + echo "::set-output name=year-week::$(date -u "+%Y-%U")" + echo "::set-output name=date::$(date -u "+%F")" + + - name: Restore R package cache + uses: actions/cache@v2 + id: r-package-cache with: - cache-version: 2 - extra-packages: | - local::. 
- any::keras - any::rcmdcheck + path: ${{ env.R_LIBS_USER }} + key: ${{ matrix.os }}-${{ steps.setup-r.outputs.installed-r-version }}-${{ steps.get-date.outputs.year-week }}-1 - - name: Install Miniconda - run: | - reticulate::install_miniconda() - shell: Rscript {0} + - name: Install remotes + if: steps.r-package-cache.outputs.cache-hit != 'true' + run: install.packages("remotes") - - name: Set options for conda binary for macOS - if: runner.os == 'macOS' + - name: Install system dependencies + if: runner.os == 'Linux' + shell: bash run: | - echo "options(reticulate.conda_binary = reticulate:::miniconda_conda())" >> .Rprofile + . /etc/os-release + while read -r cmd + do + echo "$cmd" + sudo $cmd + done < <(Rscript -e "writeLines(remotes::system_requirements('$ID-$VERSION_ID'))") + + - name: Install package + deps + run: remotes::install_local(dependencies = TRUE, force = TRUE) - - name: Install TensorFlow + - name: Install greta deps run: | - cat("::group::Create Environment", sep = "\n") - reticulate::conda_create('r-reticulate', packages = c('python==3.7')) - cat("::endgroup::", sep = "\n") + library(greta) + greta::install_greta_deps(timeout = 50) - cat("::group::Install Tensorflow", sep = "\n") - keras::install_keras(tensorflow = Sys.getenv('TF_VERSION'), - extra_packages = c('IPython', 'requests', 'certifi', 'urllib3', 'tensorflow-probability==0.7.0', 'numpy==1.16.4')) - cat("::endgroup::", sep = "\n") - shell: Rscript {0} + - name: Situation Report on greta install + run: greta::greta_sitrep() + - name: Install rcmdcheck + run: remotes::install_cran("rcmdcheck") - - name: Python + TF details - run: | - tensorflow::tf_config() - tensorflow::tf_version() - reticulate::py_module_available("tensorflow_probability") - reticulate::py_config() - shell: Rscript {0} + - name: Check + run: rcmdcheck::rcmdcheck(args = '--no-manual', error_on = 'warning', check_dir = 'check') + + - name: Show testthat output + if: always() + shell: bash + run: find check -name 'testthat.Rout*' -exec cat '{}' \; || true + + - name: Don't use tar from old Rtools to store the cache + if: ${{ runner.os == 'Windows' && startsWith(steps.install-r.outputs.installed-r-version, '3') }} + shell: bash + run: echo "C:/Program Files/Git/usr/bin" >> $GITHUB_PATH - name: Session info run: | options(width = 100) pkgs <- installed.packages()[, "Package"] sessioninfo::session_info(pkgs, include_base = TRUE) - shell: Rscript {0} - - - uses: r-lib/actions/check-r-package@v2 - with: - args: 'c("--no-manual", "--as-cran", "--no-multiarch")' - diff --git a/.github/workflows/rhub.yaml b/.github/workflows/rhub.yaml new file mode 100644 index 0000000..74ec7b0 --- /dev/null +++ b/.github/workflows/rhub.yaml @@ -0,0 +1,95 @@ +# R-hub's generic GitHub Actions workflow file. It's canonical location is at +# https://github.com/r-hub/actions/blob/v1/workflows/rhub.yaml +# You can update this file to a newer version using the rhub2 package: +# +# rhub::rhub_setup() +# +# It is unlikely that you need to modify this file manually. + +name: R-hub +run-name: "${{ github.event.inputs.id }}: ${{ github.event.inputs.name || format('Manually run by {0}', github.triggering_actor) }}" + +on: + workflow_dispatch: + inputs: + config: + description: 'A comma separated list of R-hub platforms to use.' + type: string + default: 'linux,windows,macos' + name: + description: 'Run name. You can leave this empty now.' + type: string + id: + description: 'Unique ID. You can leave this empty now.' 
+ type: string + +jobs: + + setup: + runs-on: ubuntu-latest + outputs: + containers: ${{ steps.rhub-setup.outputs.containers }} + platforms: ${{ steps.rhub-setup.outputs.platforms }} + + steps: + # NO NEED TO CHECKOUT HERE + - uses: r-hub/actions/setup@v1 + with: + config: ${{ github.event.inputs.config }} + id: rhub-setup + + linux-containers: + needs: setup + if: ${{ needs.setup.outputs.containers != '[]' }} + runs-on: ubuntu-latest + name: ${{ matrix.config.label }} + strategy: + fail-fast: false + matrix: + config: ${{ fromJson(needs.setup.outputs.containers) }} + container: + image: ${{ matrix.config.container }} + + steps: + - uses: r-hub/actions/checkout@v1 + - uses: r-hub/actions/platform-info@v1 + with: + token: ${{ secrets.RHUB_TOKEN }} + job-config: ${{ matrix.config.job-config }} + - uses: r-hub/actions/setup-deps@v1 + with: + token: ${{ secrets.RHUB_TOKEN }} + job-config: ${{ matrix.config.job-config }} + - uses: r-hub/actions/run-check@v1 + with: + token: ${{ secrets.RHUB_TOKEN }} + job-config: ${{ matrix.config.job-config }} + + other-platforms: + needs: setup + if: ${{ needs.setup.outputs.platforms != '[]' }} + runs-on: ${{ matrix.config.os }} + name: ${{ matrix.config.label }} + strategy: + fail-fast: false + matrix: + config: ${{ fromJson(needs.setup.outputs.platforms) }} + + steps: + - uses: r-hub/actions/checkout@v1 + - uses: r-hub/actions/setup-r@v1 + with: + job-config: ${{ matrix.config.job-config }} + token: ${{ secrets.RHUB_TOKEN }} + - uses: r-hub/actions/platform-info@v1 + with: + token: ${{ secrets.RHUB_TOKEN }} + job-config: ${{ matrix.config.job-config }} + - uses: r-hub/actions/setup-deps@v1 + with: + job-config: ${{ matrix.config.job-config }} + token: ${{ secrets.RHUB_TOKEN }} + - uses: r-hub/actions/run-check@v1 + with: + job-config: ${{ matrix.config.job-config }} + token: ${{ secrets.RHUB_TOKEN }} diff --git a/.github/workflows/test-coverage.yaml b/.github/workflows/test-coverage.yaml index 9644845..558fef7 100644 --- a/.github/workflows/test-coverage.yaml +++ b/.github/workflows/test-coverage.yaml @@ -1,76 +1,89 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help on: push: - branches: - - main - - master + branches: [main, master] pull_request: - branches: - - main - - master + branches: [main, master] -name: test-coverage +name: test-coverage.yaml + +permissions: read-all jobs: test-coverage: - runs-on: macOS-latest + runs-on: ubuntu-latest env: GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - steps: - - uses: actions/checkout@v2 - - uses: r-lib/actions/setup-r@v1 - - - uses: r-lib/actions/setup-pandoc@v1 + steps: + - uses: actions/checkout@v4 - - name: Query dependencies - run: | - install.packages('remotes') - saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) - writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") - shell: Rscript {0} + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true - - name: Restore R package cache - uses: actions/cache@v2 + - uses: r-lib/actions/setup-r-dependencies@v2 with: - path: ${{ env.R_LIBS_USER }} - key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} - restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- + extra-packages: | + any::covr + any::xml2 + any::remotes + needs: coverage - - name: Install dependencies + - name: Install system dependencies + if: runner.os == 'Linux' + shell: bash run: | - install.packages(c("remotes")) - remotes::install_deps(dependencies = TRUE) - remotes::install_cran("covr") + . /etc/os-release + while read -r cmd + do + echo "$cmd" + sudo $cmd + done < <(Rscript -e "writeLines(remotes::system_requirements('$ID-$VERSION_ID'))") + + - name: Install package + deps + run: remotes::install_local(dependencies = TRUE, force = TRUE) shell: Rscript {0} - ### - - name: Install Miniconda + - name: Install greta deps run: | - install.packages(c("remotes", "keras")) - reticulate::install_miniconda() + library(greta) + greta::install_greta_deps(timeout = 50) shell: Rscript {0} - - name: Set options for conda binary for macOS - if: runner.os == 'macOS' - run: | - echo "options(reticulate.conda_binary = reticulate:::miniconda_conda())" >> .Rprofile + - name: Situation Report on greta install + run: greta::greta_sitrep() + shell: Rscript {0} -# Perhaps here is where we can install / change the environment that we are -# installing into? Can we call our own greta install functions here? 
- - name: Install TensorFlow + - name: Test coverage run: | - reticulate::conda_create(envname = "greta-env",python_version = "3.7") - reticulate::conda_install(envname = "greta-env", packages = c("numpy==1.16.4", "tensorflow-probability==0.7.0", "tensorflow==1.14.0")) + cov <- covr::package_coverage( + quiet = FALSE, + clean = FALSE, + install_path = file.path(normalizePath(Sys.getenv("RUNNER_TEMP"), winslash = "/"), "package") + ) + covr::to_cobertura(cov) shell: Rscript {0} - - name: Python + TF details + - uses: codecov/codecov-action@v4 + with: + fail_ci_if_error: ${{ github.event_name != 'pull_request' && true || false }} + file: ./cobertura.xml + plugin: noop + disable_search: true + token: ${{ secrets.CODECOV_TOKEN }} + + - name: Show testthat output + if: always() run: | - Rscript -e 'tensorflow::tf_config()' - Rscript -e 'tensorflow::tf_version()' - Rscript -e 'reticulate::py_module_available("tensorflow_probability")' - Rscript -e 'reticulate::py_config()' - ### + ## -------------------------------------------------------------------- + find '${{ runner.temp }}/package' -name 'testthat.Rout*' -exec cat '{}' \; || true + shell: bash - - name: Test coverage - run: covr::codecov() - shell: Rscript {0} + - name: Upload test results + if: failure() + uses: actions/upload-artifact@v4 + with: + name: coverage-test-failures + path: ${{ runner.temp }}/package diff --git a/DESCRIPTION b/DESCRIPTION index d774f05..4554bf9 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: greta.dynamics Type: Package Title: Modelling Structured Dynamical Systems in 'greta' -Version: 0.2.0.9000 +Version: 0.2.2.9000 Authors@R: c( person( given = "Nick", @@ -28,12 +28,13 @@ URL: https://github.com/greta-dev/greta.dynamics, https://greta-dev.github.io/greta.dynamics/ BugReports: https://github.com/greta-dev/greta.dynamics/issues Imports: - cli, + cli (>= 3.6.3), glue, + rlang (>= 1.1.4), tensorflow (>= 1.14.0) Depends: - greta (>= 0.4.2), - R (>= 3.1.0) + greta (>= 0.5.0), + R (>= 4.1.0) Suggests: covr, knitr, @@ -48,8 +49,8 @@ Config/testthat/edition: 3 Encoding: UTF-8 Language: en-GB LazyData: true -RoxygenNote: 7.2.0 -SystemRequirements: Python (>= 2.7.0) with header files and shared - library; TensorFlow (v1.14; https://www.tensorflow.org/); TensorFlow - Probability (v0.7.0; https://www.tensorflow.org/probability/) Roxygen: list(markdown = TRUE) +RoxygenNote: 7.3.2 +SystemRequirements: Python (>= 3.7.0) with header files and shared + library; TensorFlow (>= v2.0.0; https://www.tensorflow.org/); TensorFlow + Probability (v0.8.0; https://www.tensorflow.org/probability/) diff --git a/NAMESPACE b/NAMESPACE index 577510e..8799067 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,5 +1,6 @@ # Generated by roxygen2: do not edit by hand +export(iterate_dynamic_function) export(iterate_dynamic_matrix) export(iterate_matrix) export(ode_solve) diff --git a/NEWS.md b/NEWS.md index dacd51b..277241a 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,5 +1,30 @@ # greta.dynamics (development version) +# greta.dynamics 0.2.2 + +## New features + +- Added `iterate_dynamic_function()` and `iterate_dynamic_matrix()` + +## Breaking Changes + +- Updated internal ODE interface to match new tensorflow probability API. This + involves now only supporting two solvers, "dp", and "bdf". The Default is + "dp", which is similar to deSolve's "ode45". The "dp" solver is + Dormand-Prince explicit solver for non-stiff ODEs. The "bdf" solver is + Backward Differentiation Formula (BDF) solver for stiff ODEs. 
Currently no + arguments for "bdf" or "dp" are able to be specified. + +## Internal changes + +- import rlang, cli, use latest version of greta, 0.5.0 +- use internal checking functions +- use snapshot testing for checking error messages + +# greta.dynamics 0.2.1 + +- Use sentinel "_PACKAGE" + # greta.dynamics 0.2.0 -* Added a `NEWS.md` file to track changes to the package. +- Added a `NEWS.md` file to track changes to the package. diff --git a/R/checkers.R b/R/checkers.R new file mode 100644 index 0000000..4941fa3 --- /dev/null +++ b/R/checkers.R @@ -0,0 +1,21 @@ +check_state_is_2d_or_3d <- function(state_n_dim){ + state_is_2d_oe_3e <- state_n_dim %in% 2:3 + if (!state_is_2d_oe_3e) { + cli::cli_abort( + "State must be either two- or three-dimensional" + ) + } +} + +check_initial_state_col_vec_or_3d_dim_1 <- function(state){ + state_dim <- dim(state) + state_n_dim <- length(state_dim) + last_dim_of_state_is_not_1 <- state_dim[state_n_dim] != 1 + if (last_dim_of_state_is_not_1) { + cli::cli_abort( + "{.var initial_state} must be either a column vector, or a 3D array \\ + with final dimension 1" + ) + } + +} diff --git a/R/package.R b/R/greta.dynamics-package.R similarity index 87% rename from R/package.R rename to R/greta.dynamics-package.R index 4da81c4..9f674cb 100644 --- a/R/package.R +++ b/R/greta.dynamics-package.R @@ -9,8 +9,8 @@ #' @importFrom greta .internals abind #' @importFrom tensorflow tf shape #' -#' @docType package -NULL +"_PACKAGE" + # crate the node list object whenever the package is loaded .onLoad <- function(libname, pkgname) { @@ -24,3 +24,14 @@ NULL "max_num_steps exceeded" ) } + + +## usethis namespace: start +## usethis namespace: end +NULL + +globalVariables( + c( + "as_data" + ) +) diff --git a/R/internals.R b/R/internals.R index 30a7334..7cc7f55 100644 --- a/R/internals.R +++ b/R/internals.R @@ -33,3 +33,4 @@ fl <- .internals$utils$misc$fl op <- .internals$nodes$constructors$op to_shape <- .internals$utils$misc$to_shape expand_to_batch <- .internals$utils$misc$expand_to_batch +dummy_greta_array <- .internals$utils$dummy_arrays$dummy_greta_array diff --git a/R/iterate_dynamic_function.R b/R/iterate_dynamic_function.R new file mode 100644 index 0000000..bca1f5c --- /dev/null +++ b/R/iterate_dynamic_function.R @@ -0,0 +1,451 @@ +#' @name iterate_dynamic_function +#' +#' @title iterate dynamic transition functions +#' +#' @description Calculate the stable population size for a stage-structured +#' dynamical system, encoded by a transition function, the value of which +#' changes at each iteration, given by function of the previous state: +#' \code{state[t] = f(state[t-1])}. +#' +#' @details Like \code{iterate_dynamic_matrix} this converges to \emph{absolute} +#' population sizes. The convergence criterion is therefore based on growth +#' rates converging on 0. +#' +#' The greta array returned by \code{transition_function} must have the same +#' dimension as the state input and \code{initial_state} should be shaped +#' accordingly, as detailed in \code{iterate_matrix}. +#' +#' To ensure the matrix is iterated for a specific number of iterations, you +#' can set that number as \code{niter}, and set \code{tol} to 0 or a negative +#' number to ensure that the iterations are not stopped early. +#' +#' @param transition_function a function taking in the previous population state +#' and the current iteration (and possibly other greta arrays) and returning +#' the population state at the next iteration. 
The first two arguments must be +#' named 'state' and 'iter', the state vector and scalar iteration number +#' respectively. The remaining parameters must be named arguments representing +#' (temporally static) model parameters. Variables and distributions cannot be +#' defined inside the function. +#' @param initial_state either a column vector (with m elements) or a 3D array +#' (with dimensions n x m x 1) giving one or more initial states from which to +#' iterate the matrix +#' @param niter a positive integer giving the maximum number of times to iterate +#' the matrix +#' @param tol a scalar giving a numerical tolerance, below which the algorithm +#' is determined to have converged to a stable population size in all stages +#' @param ... optional named arguments to \code{transition_function}, giving greta +#' arrays for additional parameters +#' @param parameter_is_time_varying a character vector naming the parameters +#' (i.e. the named arguments of the function that are passed via `...`) that +#' should be considered to be time-varying. That is, at each iteration only +#' the corresponding slice from the first dimension of the object passed in +#' should be used at that iteration. +#' @param state_limits a numeric vector of length 2 giving minimum and maximum +#' values at which to clamp the values of state after each iteration to +#' prevent numerical under/overflow; i.e. elements with values below the +#' minimum (maximum) will be set to the minimum (maximum). +#' +#' @return a named list with four greta arrays: +#' \itemize{ +#' \item `stable_state` a vector or matrix (with the same dimensions as +#' `initial_state`) giving the state after the final iteration. +#' \item `all_states` an n x m x niter matrix of the state values at +#' each iteration. This will be 0 for all entries after `iterations`. +#' \item `converged` an integer scalar indicating whether \emph{all} +#' the matrix iterations converged to a tolerance less than `tol` (1 if +#' so, 0 if not) before the algorithm finished. +#' \item `iterations` a scalar of the maximum number of iterations +#' completed before the algorithm terminated. This should match `niter` +#' if `converged` is `FALSE` +#' } +#' +#' @note because greta vectorises across both MCMC chains and the calculation of +#' greta array values, the algorithm is run until all chains (or posterior +#' samples), sites and stages have converged to stable growth. So a single +#' value of both `converged` and `iterations` is returned, and the +#' value of this will always have the same value in an `mcmc.list` object.
So +#' inspecting the MCMC trace of these parameters will only tell you whether +#' the iteration converged in \emph{all} posterior samples, and the maximum +#' number of iterations required to do so across all these samples +#' +#' @export +#' +iterate_dynamic_function <- function( + transition_function, + initial_state, + niter, + tol, + ..., + parameter_is_time_varying = c(), + state_limits = c(-Inf, Inf) +) { + + # generalise checking of inputs from iterate_matrix into functions + niter <- as.integer(niter) + state <- as.greta_array(initial_state) + + # check input dimensions + state_dim <- dim(state) + state_n_dim <- length(state_dim) + + check_state_is_2d_or_3d(state_n_dim) + + last_dim_of_state_is_not_1 <- state_dim[state_n_dim] != 1 + if (last_dim_of_state_is_not_1) { + cli::cli_abort( + "{.var initial_state} must be either a column vector, or a 3D array \\ + with final dimension 1" + ) + } + + # if this is multisite + state_multisite <- state_n_dim == 3 + + # create a tensorflow function from the transition function + + # make dummy coopies of these now, so we can slice and reshape them without + # modifying the greta arrays and tensors in place + # dots <- lapply(dots, as.greta_array) + dots <- lapply(list(...), dummy_greta_array) + + args_not_named <- length(dots) > 1 && is.null(names(dots)) + if (args_not_named) { + cli::cli_abort( + "All arguments passed to transition function must be explicitly named" + ) + } + + # handle time-varying parameters, sending only a slice to the function when + # converting to TF + for (name in parameter_is_time_varying) { + dots[[name]] <- slice_first_dim(dots[[name]], 1) + } + + # get index to time-varying parameters in a list + parameter_is_time_varying_index <- match( + parameter_is_time_varying, + names(dots) + ) + + tf_transition_function <- as_tf_transition_function( + transition_function = transition_function, + state = state, + iter = as_data(1), + dots = dots + ) + + # op returning a fake greta array which is actually a list containing both + # values and states + results <- op("iterate_dynamic_function", + state, + ..., + operation_args = list( + tf_transition_function = tf_transition_function, + niter = niter, + tol = tol, + parameter_is_time_varying_index = parameter_is_time_varying_index, + state_limits = state_limits + ), + tf_operation = "tf_iterate_dynamic_function", + dim = c(1, 1)) + + # ops to extract the components + stable_population <- op("stable_population", + results, + tf_operation = "tf_extract_stable_population", + dim = state_dim) + + all_states_dim <- state_dim + if (state_multisite) { + all_states_dim[3] <- niter + } else { + all_states_dim[2] <- niter + } + + all_states <- op("all_states", + results, + tf_operation = "tf_extract_states", + dim = all_states_dim) + + converged <- op("converged", + results, + tf_operation = "tf_extract_converged", + dim = c(1, 1)) + + iterations <- op("iterations", + results, + tf_operation = "tf_extract_iterations", + dim = c(1, 1)) + + list(stable_population = stable_population, + all_states = all_states, + converged = converged, + iterations = iterations) +} + +# given a greta/R function derivative function, and greta arrays for the inputs, +# return a tensorflow function taking tensors for y and t and returning a tensor +# for dydt +as_tf_transition_function <- function (transition_function, state, iter, dots) { + + # create a function acting on the full set of inputs, as tensors + args <- list(r_fun = transition_function, state = state, iter = iter) + tf_fun <- 
do.call(as_tf_function, c(args, dots)) + + # for CRAN's benefit + tf_dots <- NULL + + # return a function acting only on tensors y and t, to feed to the ode solver + function (state, iter) { + + # it will be dimensionless when used in the ode solver, need to expand out t + # to have same dim as a scalar constant so that it can be used in the same + # way as the greta array in the R function + iter <- tf$reshape(iter, shape = shape(1, 1, 1)) + + # tf_dots will have been added to this environment by + # tf_iterate_dynamic_function + args <- list(state = state, iter = iter) + do.call(tf_fun, c(args, tf_dots)) + + } + +} + +# tensorflow code +# iterate matrix tensor `matrix` `niter` times, each time using and updating vector +# tensor `state`, and return lambda for the final iteration +tf_iterate_dynamic_function <- function (state, + ..., + tf_transition_function, + niter, + tol, + parameter_is_time_varying_index, + state_limits) { + + # define the tf versions of the dots (all other parameters) here + tf_dots_all <- list(...) + + # assign these to the transition function's environment so they can be used + assign("tf_dots", + list(...), + environment(tf_transition_function)) + + # note that for time-varying parameters these are too big (full timeseries), so + # inside the body (each iteration) we will re-slice and replace them each time + + # use a tensorflow while loop to do the recursion: + body <- function(old_state, + t_all_states, + growth_rates, + converged, + iter, + maxiter) { + + # get all the tf dots from the function environment + tf_dots <- environment(tf_transition_function)$tf_dots + + # for the time-varying ones, get the full timeseries from the + # tf_iterate_dynamic_function (via lexical scoping), slice out the elements + # for this loop, and put them back in the tf_dots in the function + # environment + for(index in parameter_is_time_varying_index) { + tf_dots[[index]] <- tf_slice_first_dim(tf_dots_all[[index]], iter) + } + assign("tf_dots", tf_dots, + environment(tf_transition_function)) + + # look up the batch size from old_state and put it in the greta stash, so + # greta can use it to appropriately create tensors for constants defined in + # the user-provided function. 
Note we need to do this inside body (not + # before), because a new control flow graph is created by tf_while_loop and + # it otherwise becomes an unknown and causes shape variance + batch_size <- tf$shape(old_state)[[0]] + + # note we need to access the greta stash directly here, rather than including + # it in internals.R, because otherwise it makes a copy of the environment + # instead and the contents can't be accessed by greta:::as_tf_function() + assign("batch_size", + batch_size, + envir = greta::.internals$greta_stash) + + # evaluate function to get the new state (dots have been inserted into its + # environment, since TF while loops are treacherous things) + new_state <- tf_transition_function(old_state, iter) + + # clamp to max and min + new_state <- tf$clip_by_value(new_state, + clip_value_min = tf_min, + clip_value_max = tf_max) + + # store new state object + t_all_states <- tf$tensor_scatter_nd_update( + tensor = t_all_states, + indices = iter, + updates = tf$transpose(new_state) + ) + + # get the growth rate, and whether it has converged + growth_rates <- new_state / old_state + converged <- state_converged(growth_rates, tf_tol) + + list( + new_state, + t_all_states, + growth_rates, + converged, + iter + 1L, + maxiter + ) + + } + + # create a matrix of zeros to store all the states, but use *the transpose* so + # it's easier to update + + # iter needs to have rank 2 for slice updating; make niter the same shape + iter <- tf$constant(0L, shape = shape(1, 1)) + tf_niter <- tf$constant(as.integer(niter), shape = shape(1, 1)) + + # add convergence tolerance and indicator + tf_tol <- tf$constant(tol, dtype = tf_float()) + converged <- tf$constant(FALSE, dtype = tf$bool) + + # coerce limits to max and min + tf_min <- tf$constant(state_limits[1], + dtype = tf_float()) + tf_max <- tf$constant(state_limits[2], + dtype = tf_float()) + + # create an empty tensor for (the transpose of) the array of all state values + + # first make a single slice (w.r.t. batch dimension) and then tile the final + # dimension along batch dimension + state_dim <- dim(state)[-1] + n_dim <- length(state_dim) + shp <- to_shape(c(niter, rev(state_dim[-n_dim]), 1)) + t_all_states_slice <- tf$zeros(shp, dtype = tf_float()) + batch_size <- tf$shape(state)[[0]] + ndim <- length(dim(t_all_states_slice)) + t_all_states <- tf$tile(t_all_states_slice, c(rep(1L, ndim - 1), batch_size)) + + # create an initial growth rate, and expand its batches too (easier because + # this isn't the transpose so we tile the first dimension along batches) + shp <- to_shape(c(1, state_dim)) + growth_rates_slice <- tf$zeros(shp, dtype = tf_float()) + growth_rates <- expand_to_batch(growth_rates_slice, state) + + # add tolerance next + values <- list( + state, + t_all_states, + growth_rates, + converged, + iter, + tf_niter + ) + + # add tolerance next + cond <- function( + new_state, + t_all_states, + growth_rates, + converged, + iter, + maxiter + ) { + tf$squeeze(tf$less(iter, maxiter)) & tf$logical_not(converged) + } + + # iterate + out <- tf$while_loop(cond, body, values) + + # return some elements: the transposed tensor of all the states + list( + state = out[[1]], + t_all_states = out[[2]], + growth_rates = out[[3]], + converged = out[[4]], + iterations = out[[5]] + ) + +} + +# assess convergence of the iterations to a stable growth rate. If it has +# converged, the rate of change will be the same for all stages. 
+state_converged <- function (growth_rates, tol) { + + # calculate the largest deviation across all stages, sites, and the batch + # dimension and determine whether it's acceptable + error <- tf$reduce_max(tf$abs(growth_rates - fl(1))) + error < tol + +} + +# pull out the final population sizes +tf_extract_stable_population <- function (results) { + + state <- results$state + + # reshape if needed + ndim <- length(dim(state)) + if (ndim > 3) { + state <- tf$squeeze(state, ndim - 1L) + } + + state + +} + +# given a greta array, tensor, or array, extract the 'element'th element on +# the first dimension, preserving all other dimensions +slice_first_dim <- function(x, element) { + + # if it's a vector, just return like this + if (is.vector(x)) { + return(x[element]) + } + # if this is a tensor with a batch dimension to skip, add a comma before the + # element, and drop one after + ndim <- length(dim(x)) + + pre_commas <- 0 + post_commas <- ndim - 1 + + pre <- paste0(rep(",", pre_commas), collapse = "") + # calculate the number of commas to go after the element + post <- paste0(rep(",", post_commas), collapse = "") + # create and evaluate the command + command <- paste0("x[", pre, "element", post, "]") + result <- eval(parse(text = command)) + + # if there are more than two dimensions, drop the first one + if (ndim > 2) { + dim(result) <- dim(x)[-1] + } else if (ndim == 2) { + # if there are exactly two dimensions, transpose it so that each slice is a + # column vector rather than a row vector + result <- t(result) + } + + result +} + + +tf_slice_first_dim <- function(x, element) { + + element <- tf$squeeze(element) + + # extract on the first dimension, skipping the batch dimension + x_out <- tf$gather(x, element, axis = 1L) + + # this drops a dimension, so reinstate it if the dimension is too small for a + # greta array - this sets it as a column vector too. + n_dim <- length(dim(x_out)) + if (n_dim == 2) { + x_out <- tf$expand_dims(x_out, axis = 2L) + } + + x_out + +} diff --git a/R/iterate_dynamic_matrix.R b/R/iterate_dynamic_matrix.R index 8f2f126..783b184 100644 --- a/R/iterate_dynamic_matrix.R +++ b/R/iterate_dynamic_matrix.R @@ -14,7 +14,7 @@ #' on the population sizes after the previous iteration, and the iteration #' number. Because this can encode density-dependence, the dynamics can #' converge to \emph{absolute} population sizes. The convergence criterion is -#' therefore based on growth rates conveerging on 0. +#' therefore based on growth rates converging on 0. #' #' As in \code{iterate_matrix}, the greta array returned by #' \code{matrix_function} can either be a square matrix, or a 3D array @@ -42,25 +42,29 @@ #' is determined to have converged to a stable population size in all stages #' @param ... optional named arguments to \code{matrix_function}, giving greta #' arrays for additional parameters +#' @param state_limits a numeric vector of length 2 giving minimum and maximum +#' values at which to clamp the values of state after each iteration to +#' prevent numerical under/overflow; i.e. elements with values below the +#' minimum (maximum) will be set to the minimum (maximum). #' #' @return a named list with four greta arrays: #' \itemize{ -#' \item{\code{stable_state}} {a vector or matrix (with the same dimensions as -#' \code{initial_state}) giving the state after the final iteration.} -#' \item{\code{all_states}} {an n x m x niter matrix of the state values at -#' each iteration.
This will be 0 for all entries after \code{iterations}.} -#' \item{\code{converged}} {an integer scalar indicating whether \emph{all} -#' the matrix iterations converged to a tolerance less than \code{tol} (1 if -#' so, 0 if not) before the algorithm finished.} -#' \item{\code{iterations}} {a scalar of the maximum number of iterations -#' completed before the algorithm terminated. This should match \code{niter} -#' if \code{converged} is \code{FALSE}.} +#' \item `stable_state` a vector or matrix (with the same dimensions as +#' `initial_state`) giving the state after the final iteration. +#' \item `all_states` an n x m x niter matrix of the state values at +#' each iteration. This will be 0 for all entries after `iterations`. +#' \item `converged` an integer scalar indicating whether \emph{all} +#' the matrix iterations converged to a tolerance less than `tol` (1 if +#' so, 0 if not) before the algorithm finished. +#' \item `iterations` a scalar of the maximum number of iterations +#' completed before the algorithm terminated. This should match `niter` +#' if `converged` is `FALSE` #' } #' #' @note because greta vectorises across both MCMC chains and the calculation of #' greta array values, the algorithm is run until all chains (or posterior #' samples), sites and stages have converged to stable growth. So a single -#' value of both \code{converged} and \code{iterations} is returned, and the +#' value of both `converged` and `iterations` is returned, and the #' value of this will always have the same value in an `mcmc.list` object. So #' inspecting the MCMC trace of these parameters will only tell you whether #' the iteration converged in \emph{all} posterior samples, and the maximum @@ -73,7 +77,8 @@ iterate_dynamic_matrix <- function( initial_state, niter, tol, - ... + ..., + state_limits = c(-Inf, Inf) ) { # generalise checking of inputs from iterate_matrix into functions @@ -84,17 +89,9 @@ iterate_dynamic_matrix <- function( state_dim <- dim(state) state_n_dim <- length(state_dim) - if (!state_n_dim %in% 2:3) { - stop ("state must be either two- or three-dimensional", - call. = FALSE) - } + check_state_is_2d_or_3d(state_n_dim) - # ensure the last dim of state is 1 - if (state_dim[state_n_dim] != 1) { - stop ("initial_state must be either a column vector, ", - "or a 3D array with final dimension 1", - call. = FALSE) - } + check_initial_state_col_vec_or_3d_dim_1(state) # if this is multisite state_multisite <- state_n_dim == 3 @@ -102,7 +99,12 @@ iterate_dynamic_matrix <- function( # create a tensorflow function from the matrix function dots <- list(...) 
dots <- lapply(dots, as.greta_array) - tf_matrix_function <- as_tf_matrix_function(matrix_function, state, iter = as_data(1), dots) + tf_matrix_function <- as_tf_matrix_function( + matrix_function, + state, + iter = as_data(1), + dots + ) # op returning a fake greta array which is actually a list containing both # values and states @@ -112,7 +114,8 @@ iterate_dynamic_matrix <- function( operation_args = list( tf_matrix_function = tf_matrix_function, niter = niter, - tol = tol + tol = tol, + state_limits = state_limits ), tf_operation = "tf_iterate_dynamic_matrix", dim = c(1, 1)) @@ -183,7 +186,8 @@ as_tf_matrix_function <- function (matrix_function, state, iter, dots) { # tensorflow code # iterate matrix tensor `matrix` `niter` times, each time using and updating vector # tensor `state`, and return lambda for the final iteration -tf_iterate_dynamic_matrix <- function (state, ..., tf_matrix_function, niter, tol) { +tf_iterate_dynamic_matrix <- function (state, ..., tf_matrix_function, niter, tol, + state_limits) { # assign the dots (as tensors) to the matrix function's environment assign("tf_dots", list(...), @@ -192,6 +196,20 @@ tf_iterate_dynamic_matrix <- function (state, ..., tf_matrix_function, niter, to # use a tensorflow while loop to do the recursion: body <- function(old_state, t_all_states, growth_rates, converged, iter, maxiter) { + # look up the batch size from old_state and put it in the greta stash, so + # greta can use it to appropriately create tensors for constants defined in + # the user-provided function. Note we need to do this inside body (not + # before), because a new control flow graph is created by tf_while_loop and + # it otherwise becomes an unknown and causes shape variance + batch_size <- tf$shape(old_state)[[0]] + + # note we need to access the greta stash directly here, rather than including + # it in internals.R, because otherwise it makes a copy of the environment + # instead and the contents can't be accessed by greta:::as_tf_function() + assign("batch_size", + batch_size, + envir = greta::.internals$greta_stash) + # create matrix (dots have been inserted into its environment, since TF # while loops are treacherous things) matrix <- tf_matrix_function(old_state, iter) @@ -199,6 +217,11 @@ tf_iterate_dynamic_matrix <- function (state, ..., tf_matrix_function, niter, to # do matrix multiplication new_state <- tf$matmul(matrix, old_state, transpose_a = FALSE) + # clamp to max and min + new_state <- tf$clip_by_value(new_state, + clip_value_min = tf_min, + clip_value_max = tf_max) + # store new state object t_all_states <- tf$tensor_scatter_nd_update( tensor = t_all_states, @@ -232,6 +255,12 @@ tf_iterate_dynamic_matrix <- function (state, ..., tf_matrix_function, niter, to tf_tol <- tf$constant(tol, dtype = tf_float()) converged <- tf$constant(FALSE, dtype = tf$bool) + # coerce limits to max and min + tf_min <- tf$constant(state_limits[1], + dtype = tf_float()) + tf_max <- tf$constant(state_limits[2], + dtype = tf_float()) + # make a single slice (w.r.t. 
batch dimension) and tile along batch dimension state_dim <- dim(state)[-1] n_dim <- length(state_dim) diff --git a/R/iterate_matrix.R b/R/iterate_matrix.R index 9057410..57ddf15 100644 --- a/R/iterate_matrix.R +++ b/R/iterate_matrix.R @@ -30,19 +30,19 @@ #' #' @return a named list with five greta arrays: #' \itemize{ -#' \item{`lambda`} {a scalar or vector giving the ratio of the first stage -#' values between the final two iterations.} -#' \item{`stable_state`} {a vector or matrix (with the same dimensions as +#' \item `lambda` a scalar or vector giving the ratio of the first stage +#' values between the final two iterations. +#' \item `stable_state` a vector or matrix (with the same dimensions as #' `initial_state`) giving the state after the final iteration, -#' normalised so that the values for all stages sum to one.} -#' \item{`all_states`} {an n x m x niter matrix of the state values at -#' each iteration. This will be 0 for all entries after `iterations`.} -#' \item{`converged`} {an integer scalar or vector indicating whether +#' normalised so that the values for all stages sum to one. +#' \item `all_states` an n x m x niter matrix of the state values at +#' each iteration. This will be 0 for all entries after `iterations`. +#' \item `converged` an integer scalar or vector indicating whether #' the iterations for each matrix have converged to a tolerance less than -#' `tol` (1 if so, 0 if not) before the algorithm finished.} -#' \item{`iterations`} {a scalar of the maximum number of iterations +#' `tol` (1 if so, 0 if not) before the algorithm finished. +#' \item `iterations` a scalar of the maximum number of iterations #' completed before the algorithm terminated. This should match `niter` -#' if `converged` is `FALSE`.} +#' if `converged` is `FALSE`. #' } #' #' @note because greta vectorises across both MCMC chains and the calculation of @@ -120,18 +120,12 @@ iterate_matrix <- function(matrix, state_n_dim <- length(state_dim) if (!matrix_n_dim %in% 2:3 | !state_n_dim %in% 2:3) { - stop("matrix and state must be either two- or three-dimensional", - call. = FALSE + cli::cli_abort( + "matrix and state must be either two- or three-dimensional" ) } - # ensure the last dim of state is 1 - if (state_dim[state_n_dim] != 1) { - stop("initial_state must be either a column vector, ", - "or a 3D array with final dimension 1", - call. = FALSE - ) - } + check_initial_state_col_vec_or_3d_dim_1(state) # if this is multisite matrix_multisite <- matrix_n_dim == 3 @@ -169,9 +163,9 @@ iterate_matrix <- function(matrix, if (matrix_multisite & state_multisite) { n <- matrix_dim[1] if (state_dim[1] != n) { - stop("if matrix is 3D and initial_state is a matrix", - "the batch dimension (n) must match", - call. = FALSE + cli::cli_abort( + "If matrix is 3D and initial_state is a matrix, the batch \\ + dimension (n) must match" ) } } @@ -190,14 +184,14 @@ iterate_matrix <- function(matrix, } if (!all(matrix_raw_dim == m)) { - stop("each matrix must be a two-dimensional square greta array ", - call. = FALSE + cli::cli_abort( + "Each matrix must be a two-dimensional square greta array " ) } if (state_raw_dim != m) { - stop("length of each initial_state must match the dimension of matrix", - call. = FALSE + cli::cli_abort( + "Length of each initial_state must match the dimension of matrix" ) } diff --git a/R/ode_solve.R b/R/ode_solve.R index 2b36ce6..a159605 100644 --- a/R/ode_solve.R +++ b/R/ode_solve.R @@ -13,9 +13,11 @@ #' @param times a column vector of times at which to evaluate y #' @param ... 
named arguments giving greta arrays for the additional (fixed) #' parameters -#' @param method which solver to use. `"ode45"` uses adaptive step -#' sizes, whilst `"rk4"` and `"midpoint"` use the fixed grid defined -#' by `times`; they may be faster but less accurate than `"ode45"`. +#' @param method which solver to use. The default is "dp", which is similar to +#' deSolve's "ode45". Currently implemented are "dp" and "bdf". The "dp" solver +#' is the Dormand-Prince explicit solver for non-stiff ODEs. The "bdf" solver is +#' the Backward Differentiation Formula (BDF) solver for stiff ODEs. Currently no +#' arguments for "bdf" or "dp" can be specified. #' #' @return greta array #' @@ -111,7 +113,7 @@ #' o #' } ode_solve <- function(derivative, y0, times, ..., - method = c("ode45", "rk4", "midpoint")) { + method = c("dp", "bdf")) { y0 <- as.greta_array(y0) times <- as.greta_array(times) method <- match.arg(method) @@ -119,9 +121,7 @@ ode_solve <- function(derivative, y0, times, ..., # check times is a column vector t_dim <- dim(times) if (length(t_dim != 2) && t_dim[2] != 1) { - stop("", - call. = FALSE - ) + cli::cli_abort("times must be a column vector") } dots <- list(...) @@ -133,7 +133,12 @@ ode_solve <- function(derivative, y0, times, ..., # check the arguments of derivative are valid and match dots # create a tensorflow version of the function - tf_derivative <- as_tf_derivative(derivative, y0, times[1], dots) + tf_derivative <- as_tf_derivative( + derivative = derivative, + y = y0, + t = times[1], + dots = dots + ) # the dimensions should be the dimensions of the y0, duplicated along times n_time <- dim(times)[1] @@ -176,19 +181,24 @@ tf_ode_solve <- function(y0, times, ..., tf_derivative, method) { # tf_derivative creates tensors correctly dag <- parent.frame()$dag - tf_int <- tf$contrib$integrate + tf_int <- tfp$math$ode integrator <- switch(method, - ode45 = tf_int$odeint, - rk4 = function(...) { - tf_int$odeint_fixed(..., method = "rk4") - }, - midpoint = function(...) { - tf_int$odeint_fixed(..., method = "midpoint") - } + dp = function(...) tf_int$DormandPrince()$solve(...), + bdf = function(...) tf_int$BDF()$solve(...)
) - dag$on_graph(integral <- integrator(tf_derivative, y0, times)) + integral <- integrator( + ode_fn = tf_derivative, + initial_time = times[0L], + initial_state = y0, + solution_times = times + ) + + # dag$on_graph(integral <- integrator(tf_derivative, y0, times)) + + # pull out only the states + integral <- integral$states # reshape to put batch dimension first permutation <- seq_along(dim(integral)) - 1L @@ -216,7 +226,7 @@ as_tf_derivative <- function(derivative, y, t, dots) { tf_dots <- NULL # return a function acting only on tensors y and t, to feed to the ode solver - function(y, t) { + function(t, y) { # t will be dimensionless when used in the ode solver, need to expand out t # to have same dim as a scalar constant so that it can be used in the same diff --git a/R/zzz.R b/R/zzz.R new file mode 100644 index 0000000..9546ade --- /dev/null +++ b/R/zzz.R @@ -0,0 +1 @@ +tfp <- reticulate::import("tensorflow_probability", delay_load = TRUE) diff --git a/README.md b/README.md index 1ec9e59..177f5a4 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [![R-CMD-check](https://github.com/greta-dev/greta.dynamics/workflows/R-CMD-check/badge.svg)](https://github.com/greta-dev/greta.dynamics/actions) [![CRAN status](https://www.r-pkg.org/badges/version/greta.dynamics)](https://CRAN.R-project.org/package=greta.dynamics) - [![codecov.io](https://codecov.io/github/greta-dev/greta.dynamics/coverage.svg?branch=master)](https://codecov.io/github/greta-dev/greta.dynamics?branch=master) + [![codecov.io](https://codecov.io/github/greta-dev/greta.dynamics/coverage.svg?branch=master)](https://app.codecov.io/github/greta-dev/greta.dynamics?branch=master) `greta.dynamics` provides functions for modelling dynamical systems in greta. It currently provides functions for analysing transition matrices by iteration, and solving ordinary differential equations. @@ -17,10 +17,10 @@ install.packages("greta.dynamics") ``` Or install the development version of `greta.dynamics` from -[r-universe](https://greta-dev.r-universe.dev/ui#builds): +[r-universe](https://greta-dev.r-universe.dev/): ``` r -install.packages("greta.dynamics", repos = "https://greta-dev.r-universe.dev") +install.packages("greta.dynamics", repos = c("https://greta-dev.r-universe.dev", "https://cran.r-project.org")) ``` You can also install the development version of `greta.dynamics` via GitHub: diff --git a/cran-comments.md b/cran-comments.md index 858617d..c8e955e 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,5 +1,12 @@ +## Test environments + +* local R installation, R 4.4.2 +* win-builder (devel) + ## R CMD check results -0 errors | 0 warnings | 1 note +0 errors | 0 warnings | 0 notes + +## Submission notes -* This is a new release. +* Submitted this release to address an rlang import issue, and to fix breaking changes in the underlying TensorFlow 2.0 library.
diff --git a/inst/WORDLIST b/inst/WORDLIST new file mode 100644 index 0000000..9819a39 --- /dev/null +++ b/inst/WORDLIST @@ -0,0 +1,18 @@ +BDF +CMD +Dormand +ODEs +ORCID +bdf +cli +codecov +deSolve's +doi +dp +greta +ie +io +iter +niter +rlang +tensorflow diff --git a/man/greta.dynamics.Rd b/man/greta.dynamics.Rd index 1993f5e..a142702 100644 --- a/man/greta.dynamics.Rd +++ b/man/greta.dynamics.Rd @@ -1,7 +1,8 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/package.R +% Please edit documentation in R/greta.dynamics-package.R \docType{package} \name{greta.dynamics} +\alias{greta.dynamics-package} \alias{greta.dynamics} \title{greta.dynamics: a greta extension for modelling dynamical systems} \description{ @@ -10,3 +11,21 @@ with functions for simulating dynamical systems, defined by of ordinary differential equations (see \code{\link[=ode_solve]{ode_solve()}}) or transition matrices (\code{\link[=iterate_matrix]{iterate_matrix()}}). } +\seealso{ +Useful links: +\itemize{ + \item \url{https://github.com/greta-dev/greta.dynamics} + \item \url{https://greta-dev.github.io/greta.dynamics/} + \item Report bugs at \url{https://github.com/greta-dev/greta.dynamics/issues} +} + +} +\author{ +\strong{Maintainer}: Nicholas Tierney \email{nicholas.tierney@gmail.com} (\href{https://orcid.org/0000-0003-1460-8722}{ORCID}) + +Authors: +\itemize{ + \item Nick Golding \email{nick.golding.research@gmail.com} (\href{https://orcid.org/0000-0001-8916-5570}{ORCID}) [copyright holder] +} + +} diff --git a/man/iterate_dynamic_function.Rd b/man/iterate_dynamic_function.Rd new file mode 100644 index 0000000..65fc62d --- /dev/null +++ b/man/iterate_dynamic_function.Rd @@ -0,0 +1,93 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/iterate_dynamic_function.R +\name{iterate_dynamic_function} +\alias{iterate_dynamic_function} +\title{iterate dynamic transition functions} +\usage{ +iterate_dynamic_function( + transition_function, + initial_state, + niter, + tol, + ..., + parameter_is_time_varying = c(), + state_limits = c(-Inf, Inf) +) +} +\arguments{ +\item{transition_function}{a function taking in the previous population state +and the current iteration (and possibly other greta arrays) and returning +the population state at the next iteration. The first two arguments must be +named 'state' and 'iter', the state vector and scalar iteration number +respectively. The remaining parameters must be named arguments representing +(temporally static) model parameters. Variables and distributions cannot be +defined inside the function.} + +\item{initial_state}{either a column vector (with m elements) or a 3D array +(with dimensions n x m x 1) giving one or more initial states from which to +iterate the matrix} + +\item{niter}{a positive integer giving the maximum number of times to iterate +the matrix} + +\item{tol}{a scalar giving a numerical tolerance, below which the algorithm +is determined to have converged to a stable population size in all stages} + +\item{...}{optional named arguments to \code{transition_function}, giving greta +arrays for additional parameters} + +\item{parameter_is_time_varying}{a character vector naming the parameters +(i.e. the named arguments of the function that are passed via \code{...}) that +should be considered to be time-varying.
That is, at each iteration only +the corresponding slice from the first dimension of the object passed in +should be used at that iteration.} + +\item{state_limits}{a numeric vector of length 2 giving minimum and maximum +values at which to clamp the values of state after each iteration to +prevent numerical under/overflow; i.e. elements with values below the +minimum (maximum) will be set to the minimum (maximum).} +} +\value{ +a named list with four greta arrays: +\itemize{ +\item \code{stable_state} a vector or matrix (with the same dimensions as +\code{initial_state}) giving the state after the final iteration. +\item \code{all_states} an n x m x niter matrix of the state values at +each iteration. This will be 0 for all entries after \code{iterations}. +\item \code{converged} an integer scalar indicating whether \emph{all} +the matrix iterations converged to a tolerance less than \code{tol} (1 if +so, 0 if not) before the algorithm finished. +\item \code{iterations} a scalar of the maximum number of iterations +completed before the algorithm terminated. This should match \code{niter} +if \code{converged} is \code{FALSE} +} +} +\description{ +Calculate the stable population size for a stage-structured +dynamical system, encoded by a transition function, the value of which +changes at each iteration, given by function of the previous state: +\code{state[t] = f(state[t-1])}. +} +\details{ +Like \code{iterate_dynamic_matrix} this converges to \emph{absolute} +population sizes. The convergence criterion is therefore based on growth +rates converging on 0. + +The greta array returned by \code{transition_function} must have the same +dimension as the state input and \code{initial_state} should be shaped +accordingly, as detailed in \code{iterate_matrix}. + +To ensure the matrix is iterated for a specific number of iterations, you +can set that number as \code{niter}, and set \code{tol} to 0 or a negative +number to ensure that the iterations are not stopped early. +} +\note{ +because greta vectorises across both MCMC chains and the calculation of +greta array values, the algorithm is run until all chains (or posterior +samples), sites and stages have converged to stable growth. So a single +value of both \code{converged} and \code{iterations} is returned, and the +value of this will always have the same value in an \code{mcmc.list} object. So +inspecting the MCMC trace of these parameters will only tell you whether +the iteration converged in \emph{all} posterior samples, and the maximum +number of iterations required to do so across all these samples +} diff --git a/man/iterate_dynamic_matrix.Rd b/man/iterate_dynamic_matrix.Rd index d26a9b6..18a07b1 100644 --- a/man/iterate_dynamic_matrix.Rd +++ b/man/iterate_dynamic_matrix.Rd @@ -4,7 +4,14 @@ \alias{iterate_dynamic_matrix} \title{iterate dynamic transition matrices} \usage{ -iterate_dynamic_matrix(matrix_function, initial_state, niter, tol, ...) +iterate_dynamic_matrix( + matrix_function, + initial_state, + niter, + tol, + ..., + state_limits = c(-Inf, Inf) +) } \arguments{ \item{matrix_function}{a function taking in the previous population state and @@ -27,55 +34,60 @@ is determined to have converged to a stable population size in all stages} \item{...}{optional named arguments to \code{matrix_function}, giving greta arrays for additional parameters} + +\item{state_limits}{a numeric vector of length 2 giving minimum and maximum +values at which to clamp the values of state after each iteration to +prevent numerical under/overflow; i.e. 
elements with values below the +minimum (maximum) will be set to the minimum (maximum).} } \value{ a named list with four greta arrays: \itemize{ - \item{\code{stable_state}} {a vector or matrix (with the same dimensions as - \code{initial_state}) giving the state after the final iteration.} - \item{\code{all_states}} {an n x m x niter matrix of the state values at - each iteration. This will be 0 for all entries after \code{iterations}.} - \item{\code{converged}} {an integer scalar indicating whether \emph{all} - the matrix iterations converged to a tolerance less than \code{tol} (1 if - so, 0 if not) before the algorithm finished.} - \item{\code{iterations}} {a scalar of the maximum number of iterations - completed before the algorithm terminated. This should match \code{niter} - if \code{converged} is \code{FALSE}.} +\item \code{stable_state} a vector or matrix (with the same dimensions as +\code{initial_state}) giving the state after the final iteration. +\item \code{all_states} an n x m x niter matrix of the state values at +each iteration. This will be 0 for all entries after \code{iterations}. +\item \code{converged} an integer scalar indicating whether \emph{all} +the matrix iterations converged to a tolerance less than \code{tol} (1 if +so, 0 if not) before the algorithm finished. +\item \code{iterations} a scalar of the maximum number of iterations +completed before the algorithm terminated. This should match \code{niter} +if \code{converged} is \code{FALSE} } } \description{ Calculate the stable population size for a stage-structured - dynamical system, encoded by a transition matrix, the value of which - changes at each iteration, given by function of the previous state: - \code{state[t] = f(state[t-1]) \%*\% state[t-1]}. +dynamical system, encoded by a transition matrix, the value of which +changes at each iteration, given by function of the previous state: +\code{state[t] = f(state[t-1]) \%*\% state[t-1]}. } \details{ Because \code{iterate_matrix} iterates with a static transition - matrix, it converges to a stable \emph{growth rate} and \emph{relative} - population sizes for a dynamical system. \code{iterate_dynamic_matrix} - instead uses a matrix which changes at each iteration, and can be dependent - on the population sizes after the previous iteration, and the iteration - number. Because this can encode density-dependence, the dynamics can - converge to \emph{absolute} population sizes. The convergence criterion is - therefore based on growth rates conveerging on 0. +matrix, it converges to a stable \emph{growth rate} and \emph{relative} +population sizes for a dynamical system. \code{iterate_dynamic_matrix} +instead uses a matrix which changes at each iteration, and can be dependent +on the population sizes after the previous iteration, and the iteration +number. Because this can encode density-dependence, the dynamics can +converge to \emph{absolute} population sizes. The convergence criterion is +therefore based on growth rates converging on 0. - As in \code{iterate_matrix}, the greta array returned by - \code{matrix_function} can either be a square matrix, or a 3D array - representing (on the first dimension) \emph{n} different matrices. - \code{initial_state} should be shaped accordingly, as detailed in - \code{iterate_matrix}. +As in \code{iterate_matrix}, the greta array returned by +\code{matrix_function} can either be a square matrix, or a 3D array +representing (on the first dimension) \emph{n} different matrices. 
+\code{initial_state} should be shaped accordingly, as detailed in +\code{iterate_matrix}. - To ensure the matrix is iterated for a specific number of iterations, you - can set that number as \code{niter}, and set \code{tol} to 0 or a negative - number to ensure that the iterations are not stopped early. +To ensure the matrix is iterated for a specific number of iterations, you +can set that number as \code{niter}, and set \code{tol} to 0 or a negative +number to ensure that the iterations are not stopped early. } \note{ because greta vectorises across both MCMC chains and the calculation of - greta array values, the algorithm is run until all chains (or posterior - samples), sites and stages have converged to stable growth. So a single - value of both \code{converged} and \code{iterations} is returned, and the - value of this will always have the same value in an `mcmc.list` object. So - inspecting the MCMC trace of these parameters will only tell you whether - the iteration converged in \emph{all} posterior samples, and the maximum - number of iterations required to do so across all these samples +greta array values, the algorithm is run until all chains (or posterior +samples), sites and stages have converged to stable growth. So a single +value of both \code{converged} and \code{iterations} is returned, and the +value of this will always have the same value in an \code{mcmc.list} object. So +inspecting the MCMC trace of these parameters will only tell you whether +the iteration converged in \emph{all} posterior samples, and the maximum +number of iterations required to do so across all these samples } diff --git a/man/iterate_matrix.Rd b/man/iterate_matrix.Rd index eddaa2d..f2db98b 100644 --- a/man/iterate_matrix.Rd +++ b/man/iterate_matrix.Rd @@ -29,25 +29,25 @@ is determined to have converged to the same growth rate in all stages} \value{ a named list with five greta arrays: \itemize{ -\item{\code{lambda}} {a scalar or vector giving the ratio of the first stage -values between the final two iterations.} -\item{\code{stable_state}} {a vector or matrix (with the same dimensions as +\item \code{lambda} a scalar or vector giving the ratio of the first stage +values between the final two iterations. +\item \code{stable_state} a vector or matrix (with the same dimensions as \code{initial_state}) giving the state after the final iteration, -normalised so that the values for all stages sum to one.} -\item{\code{all_states}} {an n x m x niter matrix of the state values at -each iteration. This will be 0 for all entries after \code{iterations}.} -\item{\code{converged}} {an integer scalar or vector indicating whether +normalised so that the values for all stages sum to one. +\item \code{all_states} an n x m x niter matrix of the state values at +each iteration. This will be 0 for all entries after \code{iterations}. +\item \code{converged} an integer scalar or vector indicating whether the iterations for each matrix have converged to a tolerance less than -\code{tol} (1 if so, 0 if not) before the algorithm finished.} -\item{\code{iterations}} {a scalar of the maximum number of iterations +\code{tol} (1 if so, 0 if not) before the algorithm finished. +\item \code{iterations} a scalar of the maximum number of iterations completed before the algorithm terminated. This should match \code{niter} -if \code{converged} is \code{FALSE}.} +if \code{converged} is \code{FALSE}. 
 }
 }
 \description{
 Calculate the intrinsic growth rate(s) and stable stage
-  distribution(s) for a stage-structured dynamical system, encoded by a
-  transition matrix, where: \code{state[t] = matrix \%*\% state[t-1]}.
+distribution(s) for a stage-structured dynamical system, encoded
+as \verb{state_t = matrix \\\%*\\\% state_tm1}.
 }
 \details{
 \code{iterate_matrix} can either act on a single transition matrix
diff --git a/man/ode_solve.Rd b/man/ode_solve.Rd
index a740699..d8e39ab 100644
--- a/man/ode_solve.Rd
+++ b/man/ode_solve.Rd
@@ -4,7 +4,7 @@
 \alias{ode_solve}
 \title{solve ODEs}
 \usage{
-ode_solve(derivative, y0, times, ..., method = c("ode45", "rk4", "midpoint"))
+ode_solve(derivative, y0, times, ..., method = c("dp", "bdf"))
 }
 \arguments{
 \item{derivative}{a derivative function. The first two arguments must be 'y'
@@ -20,9 +20,11 @@ the function.}
 \item{...}{named arguments giving greta arrays for the additional (fixed)
 parameters}
 
-\item{method}{which solver to use. \code{"ode45"} uses adaptive step
-sizes, whilst \code{"rk4"} and \code{"midpoint"} use the fixed grid defined
-by \code{times}; they may be faster but less accurate than \code{"ode45"}.}
+\item{method}{which solver to use. The default is "dp", which is similar to
+deSolve's "ode45". The currently implemented methods are "dp" and "bdf". The
+"dp" solver is a Dormand-Prince explicit solver for non-stiff ODEs. The "bdf"
+solver is a Backward Differentiation Formula (BDF) solver for stiff ODEs.
+Currently, no solver-specific arguments can be passed to either method.}
 }
 \value{
 greta array
diff --git a/tests/spelling.R b/tests/spelling.R
new file mode 100644
index 0000000..6713838
--- /dev/null
+++ b/tests/spelling.R
@@ -0,0 +1,3 @@
+if(requireNamespace('spelling', quietly = TRUE))
+  spelling::spell_check_test(vignettes = TRUE, error = FALSE,
+                             skip_on_cran = TRUE)
diff --git a/tests/testthat/_snaps/iterate_matrix.md b/tests/testthat/_snaps/iterate_matrix.md
new file mode 100644
index 0000000..b9c244d
--- /dev/null
+++ b/tests/testthat/_snaps/iterate_matrix.md
@@ -0,0 +1,72 @@
+# dynamics module errors informatively
+
+    Code
+      iterate_matrix(matrix = bad_mat, initial_state = good_state)
+    Condition
+      Error in `iterate_matrix()`:
+      ! Each matrix must be a two-dimensional square greta array
+
+---
+
+    Code
+      iterate_matrix(matrix = bad_matrices1, initial_state = good_state)
+    Condition
+      Error in `iterate_matrix()`:
+      ! matrix and state must be either two- or three-dimensional
+
+---
+
+    Code
+      iterate_matrix(matrix = bad_matrices1, initial_state = good_states)
+    Condition
+      Error in `iterate_matrix()`:
+      ! matrix and state must be either two- or three-dimensional
+
+---
+
+    Code
+      iterate_matrix(matrix = bad_matrices2, initial_state = good_state)
+    Condition
+      Error in `iterate_matrix()`:
+      ! Each matrix must be a two-dimensional square greta array
+
+---
+
+    Code
+      iterate_matrix(matrix = bad_matrices2, initial_state = good_states)
+    Condition
+      Error in `iterate_matrix()`:
+      ! Each matrix must be a two-dimensional square greta array
+
+---
+
+    Code
+      iterate_matrix(matrix = good_mat, initial_state = bad_state)
+    Condition
+      Error in `check_initial_state_col_vec_or_3d_dim_1()`:
+      ! `initial_state` must be either a column vector, or a 3D array with final dimension 1
+
+---
+
+    Code
+      iterate_matrix(matrix = good_matrices, initial_state = bad_state)
+    Condition
+      Error in `check_initial_state_col_vec_or_3d_dim_1()`:
+      ! 
`initial_state` must be either a column vector, or a 3D array with final dimension 1 + +--- + + Code + iterate_matrix(matrix = good_mat, initial_state = mismatched_state) + Condition + Error in `iterate_matrix()`: + ! Length of each initial_state must match the dimension of matrix + +--- + + Code + iterate_matrix(matrix = good_matrices, initial_state = mismatched_state) + Condition + Error in `iterate_matrix()`: + ! Length of each initial_state must match the dimension of matrix + diff --git a/tests/testthat/helpers.R b/tests/testthat/helpers.R index faf29c0..0031ac2 100644 --- a/tests/testthat/helpers.R +++ b/tests/testthat/helpers.R @@ -53,7 +53,10 @@ r_iterate_matrix <- function (matrix, } # R versions of dynamics module methods -r_iterate_dynamic_matrix <- function (matrix_function, initial_state, niter = 100, tol = 1e-6, ...) { +r_iterate_dynamic_matrix <- function (matrix_function, + initial_state, + niter = 100, + tol = 1e-6, ...) { states <- list(initial_state) @@ -73,7 +76,43 @@ r_iterate_dynamic_matrix <- function (matrix_function, initial_state, niter = 10 states_keep <- states[-1] all_states[, seq_along(states_keep)] <- t(do.call(rbind, states_keep)) - list(stable_state = states[[i]], + list(stable_state = states[[i + 1]], + all_states = all_states, + converged = as.integer(diff < tol), + max_iter = i) +} + +r_iterate_dynamic_function <- function(transition_function, + initial_state, + niter = 100, + tol = 1e-6, + ..., + parameter_is_time_varying = c()) { + + states <- list(initial_state) + + i <- 0L + diff <- Inf + dots <- list(...) + + while(i < niter & diff > tol) { + i <- i + 1L + # slice up time-varying ones + these_dots <- dots + for (name in parameter_is_time_varying) { + these_dots[[name]] <- slice_first_dim(these_dots[[name]], i) + } + states[[i + 1]] <- do.call(transition_function, c(list(states[[i]], i), these_dots)) + growth <- states[[i + 1]] / states[[i]] + diffs <- growth - 1 + diff <- max(abs(diffs)) + } + + all_states <- matrix(0, length(states[[1]]), niter) + states_keep <- states[-1] + all_states[, seq_along(states_keep)] <- t(do.call(rbind, states_keep)) + + list(stable_state = states[[i + 1]], all_states = all_states, converged = as.integer(diff < tol), max_iter = i) diff --git a/tests/testthat/test_iterate_dynamic_function.R b/tests/testthat/test_iterate_dynamic_function.R new file mode 100644 index 0000000..f7acecd --- /dev/null +++ b/tests/testthat/test_iterate_dynamic_function.R @@ -0,0 +1,118 @@ +test_that("single iteration works", { + skip_if_not(check_tf_version()) + set.seed(2017 - 05 - 01) + + n <- 4 + init <- rep(1, n) + niter <- 100 + tol <- 1e-8 + test_tol <- tol * 100 + + fun <- function(state, iter) { + + # make fecundity a Ricker-like function of the total population, by + # pro-rating down the fecundity + Nt <- sum(state) + K <- 100 + ratio <- exp(1 - Nt / K) + multiplier <- 1 + (ratio - 1) + state * multiplier + + } + + # r version + r_iterates <- r_iterate_dynamic_function( + transition_function = fun, + initial_state = init, + niter = niter, + tol = tol + ) + + target_stable <- r_iterates$stable_state + target_states <- r_iterates$all_states + + # greta version + iterates <- iterate_dynamic_function( + transition_function = fun, + initial_state = init, + niter = niter, + tol = tol + ) + + stable <- iterates$stable_population + states <- iterates$all_states + converged <- iterates$converged + iterations <- iterates$iterations + + greta_stable <- calculate(stable)[[1]] + difference <- abs(greta_stable - target_stable) + 
expect_true(all(difference < test_tol)) + + greta_states <- calculate(states)[[1]] + difference <- abs(greta_states - target_states) + expect_true(all(difference < test_tol)) + + greta_converged <- calculate(converged)[[1]] + expect_true(greta_converged == 1) + + greta_iterations <- calculate(iterations)[[1]] + expect_lt(greta_iterations, niter) + +}) + + + +test_that("iteration works with time-varying parameters", { + skip_if_not(check_tf_version()) + set.seed(2017 - 05 - 01) + + n <- 4 + init <- runif(n) + niter <- 100 + tol <- 0 + test_tol <- 1e-06 + + # time-varying covariate + x <- matrix(rnorm(niter * n), niter, n) + + fun <- function(state, iter, x) { + + # make fecundity a Ricker-like function of the total population, with random + # fluctuations on each state + Nt <- sum(state) + K <- 100 + ratio <- exp(1 - Nt / K) + multiplier <- 1 + (ratio - 1) + state * multiplier * exp(x) + + } + + # r version + r_iterates <- r_iterate_dynamic_function( + transition_function = fun, + initial_state = init, + niter = niter, + tol = tol, + x = x, + parameter_is_time_varying = "x" + ) + + target_states <- r_iterates$all_states + + # greta version + iterates <- iterate_dynamic_function( + transition_function = fun, + initial_state = init, + niter = niter, + tol = tol, + x = x, + parameter_is_time_varying = "x" + ) + + states <- iterates$all_states + + greta_states <- calculate(states)[[1]] + difference <- abs(greta_states - target_states) + expect_true(all(difference < test_tol)) + +}) diff --git a/tests/testthat/test_iterate_dynamic_matrix.R b/tests/testthat/test_iterate_dynamic_matrix.R index 0674f22..1f40757 100644 --- a/tests/testthat/test_iterate_dynamic_matrix.R +++ b/tests/testthat/test_iterate_dynamic_matrix.R @@ -1,9 +1,6 @@ -context("dynamic matrix iteration") - test_that("single iteration works", { - - skip_if_not(greta:::check_tf_version()) - source ("helpers.R") + skip_if_not(check_tf_version()) + set.seed(2017 - 05 - 01) n <- 4 base <- base_matrix(n) diff --git a/tests/testthat/test_iterate_matrix.R b/tests/testthat/test_iterate_matrix.R index 23d529e..edae824 100644 --- a/tests/testthat/test_iterate_matrix.R +++ b/tests/testthat/test_iterate_matrix.R @@ -1,8 +1,6 @@ -context("static matrix iteration") - test_that("single iteration works", { - skip_if_not(greta:::check_tf_version()) - source("helpers.R") + skip_if_not(check_tf_version()) + set.seed(2017 - 05 - 01) n <- 10 mat <- randu(n, n) @@ -55,8 +53,8 @@ test_that("single iteration works", { }) test_that("vectorised matrix iteration works", { - skip_if_not(greta:::check_tf_version()) - source("helpers.R") + skip_if_not(check_tf_version()) + set.seed(2017 - 05 - 01) n <- 10 n_mat <- 20 @@ -93,8 +91,8 @@ test_that("vectorised matrix iteration works", { }) test_that("vectorised initial_state iteration works", { - skip_if_not(greta:::check_tf_version()) - source("helpers.R") + skip_if_not(check_tf_version()) + set.seed(2017 - 05 - 01) n <- 10 n_mat <- 20 @@ -138,7 +136,7 @@ test_that("vectorised initial_state iteration works", { }) test_that("dynamics module errors informatively", { - source("helpers.R") + set.seed(2017 - 05 - 01) n <- 10 m <- 3 @@ -156,84 +154,84 @@ test_that("dynamics module errors informatively", { mismatched_state <- randu(m + 1, 1) # wrongly shaped matrix - expect_error( + expect_snapshot( + error = TRUE, iterate_matrix( matrix = bad_mat, initial_state = good_state - ), - "matrix must be a two-dimensional square greta array" + ) ) - expect_error( + expect_snapshot( + error = TRUE, iterate_matrix( matrix = 
bad_matrices1, initial_state = good_state - ), - "^matrix and state must be either two- or three-dimensional" + ) ) - expect_error( + expect_snapshot( + error = TRUE, iterate_matrix( matrix = bad_matrices1, initial_state = good_states - ), - "^matrix and state must be either two- or three-dimensional" + ) ) - expect_error( + expect_snapshot( + error = TRUE, iterate_matrix( matrix = bad_matrices2, initial_state = good_state - ), - "^each matrix must be a two-dimensional square greta array" + ) ) - expect_error( + expect_snapshot( + error = TRUE, iterate_matrix( matrix = bad_matrices2, initial_state = good_states - ), - "^each matrix must be a two-dimensional square greta array" + ) ) # wrongly shaped state - expect_error( + expect_snapshot( + error = TRUE, iterate_matrix( matrix = good_mat, initial_state = bad_state - ), - "initial_state must be either a column vector, or a 3D array" + ) ) - expect_error( + expect_snapshot( + error = TRUE, iterate_matrix( matrix = good_matrices, initial_state = bad_state ), - "initial_state must be either a column vector, or a 3D array" ) # mismatched matrix and state - expect_error( + expect_snapshot( + error = TRUE, iterate_matrix( matrix = good_mat, initial_state = mismatched_state - ), - "length of each initial_state must match the dimension" + ) ) - expect_error( + expect_snapshot( + error = TRUE, iterate_matrix( matrix = good_matrices, initial_state = mismatched_state - ), - "length of each initial_state must match the dimension" + ) ) }) test_that("convergence tolerance works", { - skip_if_not(greta:::check_tf_version()) - source("helpers.R") + set.seed(2017 - 05 - 01) + skip_if_not(check_tf_version()) n <- 10 niter <- 100 @@ -250,8 +248,8 @@ test_that("convergence tolerance works", { }) test_that("iteration works in mcmc", { - skip_if_not(greta:::check_tf_version()) - source("helpers.R") + set.seed(2017 - 05 - 01) + skip_if_not(check_tf_version()) n <- 10 n_site <- 30 @@ -274,7 +272,7 @@ test_that("iteration works in mcmc", { }) test_that("iteration doesn't underflow", { - skip_if_not(greta:::check_tf_version()) + skip_if_not(check_tf_version()) mat <- matrix(0, 2, 2) mat[1, 2] <- 0.9 @@ -286,7 +284,7 @@ test_that("iteration doesn't underflow", { }) test_that("iteration doesn't overflow", { - skip_if_not(greta:::check_tf_version()) + skip_if_not(check_tf_version()) mat <- matrix(1e100, 2, 2) mat[1, 2] <- 1e200 diff --git a/tests/testthat/test_ode_solve.R b/tests/testthat/test_ode_solve.R index db157ee..7e4c335 100644 --- a/tests/testthat/test_ode_solve.R +++ b/tests/testthat/test_ode_solve.R @@ -1,8 +1,6 @@ -context("ODE solver") - test_that("ode_solve works like deSolve::ode", { - skip_if_not(greta:::check_tf_version()) - source("helpers.R") + set.seed(2017 - 05 - 01) + skip_if_not(check_tf_version()) # deSolve version of the Lotka Volterra model LVmod <- function(Time, State, Pars) { @@ -42,43 +40,53 @@ test_that("ode_solve works like deSolve::ode", { ) # mmol/m3, carrying capacity yini <- c(Prey = 1, Predator = 2) - times <- seq(0, 200, by = 1) + times <- seq(0, 50, by = 1) # loop through the solvers (ode45 should be similar to the dopri5 method in TF) - methods <- c("ode45", "rk4", "midpoint") - - for (method in methods) { - deSolve_method <- method - - if (deSolve_method == "midpoint") { - deSolve_method <- rk_midpoint - } + methods <- c("bdf", "dp") - r_out <- deSolve::ode(yini, times, LVmod, pars, - method = deSolve_method - ) + desolve_ode_fun <- function(method){ + out <- deSolve::ode(yini, times, LVmod, pars, method = method) + out + } + 
greta_ode_fun <- function(method){ y <- ode_solve(lotka_volterra, - y0 = t(yini), - times, - rIng = pars["rIng"], - rGrow = pars["rGrow"], - rMort = pars["rMort"], - assEff = pars["assEff"], - K = pars["K"], - method = method + y0 = t(yini), + times, + rIng = pars["rIng"], + rGrow = pars["rGrow"], + rMort = pars["rMort"], + assEff = pars["assEff"], + K = pars["K"], + method = method ) g_out <- cbind(times, y) greta_out <- calculate(g_out)[[1]] - difference <- abs(greta_out - r_out) - expect_true(all(difference < 1e-4)) + + greta_out } + + desolve_bdf <- desolve_ode_fun("bdf") + greta_bdf <- greta_ode_fun("bdf") + # desolve equivalent to dp is ode45 + desolve_dp <- desolve_ode_fun("ode45") + greta_dp <- greta_ode_fun("dp") + + difference_bdf <- abs(greta_bdf - desolve_bdf) + difference_dp <- abs(greta_dp - desolve_dp) + + # these aren't a great match (during regions of rapid change), apparently due + # to hard-coded differences in implementation between deSolve and TFP + expect_true(all(difference_bdf < 1e-2)) + expect_true(all(difference_dp < 1e-2)) + }) test_that("inference works with ode_solve", { - skip_if_not(greta:::check_tf_version()) - source("helpers.R") + skip_if_not(check_tf_version()) + set.seed(2017 - 05 - 01) lotka_volterra <- function(y, t, rIng, rGrow, rMort, assEff, K) { Prey <- y[1, 1] @@ -101,7 +109,7 @@ test_that("inference works with ode_solve", { K <- uniform(0, 30) # mmol/m3, carrying capacity yini <- c(Prey = 1, Predator = 2) - times <- seq(0, 200, by = 1) + times <- seq(0, 50, by = 1) y <- ode_solve(lotka_volterra, y0 = t(yini), @@ -111,7 +119,7 @@ test_that("inference works with ode_solve", { rMort = rMort, assEff = assEff, K = K, - method = "rk4" + method = "dp" ) # simulate some data and fit to it diff --git a/vignettes/iterate-matrix-example.Rmd b/vignettes/iterate-matrix-example.Rmd index 76c05cd..948cf26 100644 --- a/vignettes/iterate-matrix-example.Rmd +++ b/vignettes/iterate-matrix-example.Rmd @@ -8,9 +8,8 @@ vignette: > --- ```{r include = FALSE} -knitr::opts_chunk$set(comment = NA, - eval = greta:::check_tf_version("message"), - cache = TRUE) +knitr::opts_chunk$set(comment = "#>", + eval = greta:::check_tf_version("message")) ``` ```{r setup} diff --git a/vignettes/ode-solve-example.Rmd b/vignettes/ode-solve-example.Rmd index d8185fa..ad6bb08 100644 --- a/vignettes/ode-solve-example.Rmd +++ b/vignettes/ode-solve-example.Rmd @@ -1,16 +1,16 @@ --- -title: "ode-solve-example" +title: "ODE solve example" output: rmarkdown::html_vignette vignette: > - %\VignetteIndexEntry{ode-solve-example} + %\VignetteIndexEntry{ODE solve example} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} -knitr::opts_chunk$set(comment = NA, - eval = greta:::check_tf_version("message"), - cache = TRUE) +knitr::opts_chunk$set(comment = "#>", + collapse = TRUE, + eval = greta:::check_tf_version("message")) ``` ```{r setup} @@ -47,7 +47,10 @@ pars <- c( yini <- c(Prey = 1, Predator = 2) times <- seq(0, 30, by = 1) -out <- ode(yini, times, LVmod, pars) +out <- ode(y = yini, + times = times, + func = LVmod, + parms = pars) # simulate observations jitter <- rnorm(2 * length(times), 0, 0.1) @@ -86,7 +89,13 @@ y0 <- uniform(0, 5, dim = c(1, 2)) obs_sd <- uniform(0, 1) # solution to the ODE -y <- ode_solve(lotka_volterra, y0, times, rIng, rGrow, rMort, assEff, K) +y <- ode_solve( + derivative = lotka_volterra, + y0 = y0, + times = times, + rIng, rGrow, rMort, assEff, K, + method = "dp" + ) # sampling statement/observation model distribution(y_obs) <- 
normal(y, obs_sd) @@ -102,7 +111,7 @@ values <- c( as.list(pars) ) vals <- calculate(y, values = values)[[1]] -plot(vals[, 1] ~ times, type = "l", ylim = range(vals)) +plot(vals[, 1] ~ times, type = "l", ylim = range(vals, na.rm = TRUE)) lines(vals[, 2] ~ times, lty = 2) points(y_obs[, 1] ~ times) points(y_obs[, 2] ~ times, pch = 2) @@ -111,13 +120,29 @@ points(y_obs[, 2] ~ times, pch = 2) ```{r} #| label: greta-parameter-inference -# or we can do inference on the parameters: +# We can also do inference on the parameters +# priors for the parameters +rIng <- uniform(0, 2) # /day, rate of ingestion +rGrow <- uniform(0, 3) # /day, growth rate of prey +rMort <- uniform(0, 1) # /day, mortality rate of predator +assEff <- uniform(0, 1) # -, assimilation efficiency +K <- uniform(0, 30) # mmol/m3, carrying capacity +obs_sd <- uniform(0, 1) -# build the model (takes a few seconds to define the tensorflow graph) +# build the model m <- model(rIng, rGrow, rMort, assEff, K, obs_sd) # compute MAP estimate -o <- opt(m) +o <- opt( + model = m, + initial_values = initials( + rIng = 0.2, + rGrow = 1.0, + rMort = 0.2, + assEff = 0.5, + K = 10.0 + ) + ) o ```
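
The new `iterate_dynamic_function()` interface documented and tested above can be exercised end-to-end with a short sketch like the one below. It is a minimal illustration, not part of the diff: the Ricker-style transition and the returned element names (`stable_population`, `all_states`, `converged`, `iterations`) follow `tests/testthat/test_iterate_dynamic_function.R`, and the package is assumed to load as `greta.dynamics`.

```r
library(greta)
library(greta.dynamics)  # assumed package name providing iterate_dynamic_function()

# Ricker-like transition: growth is pro-rated down as the total population
# approaches the carrying capacity K (same functional form as in the tests)
fun <- function(state, iter) {
  Nt <- sum(state)
  K <- 100
  ratio <- exp(1 - Nt / K)
  state * ratio
}

init <- rep(1, 4)  # four stages, all starting at 1

# iterate until all growth rates are within tol of 0, or niter is reached
iterates <- iterate_dynamic_function(
  transition_function = fun,
  initial_state = init,
  niter = 100,
  tol = 1e-8
)

# evaluate the converged state and the convergence diagnostics
calculate(
  iterates$stable_population,
  iterates$converged,
  iterates$iterations
)
```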
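Similarly, the reworked `method` argument to `ode_solve()` (now "dp" or "bdf", backed by TensorFlow Probability rather than the old fixed-grid solvers) can be tried in isolation. This is a sketch under stated assumptions: the exponential-decay derivative and parameter values are illustrative only, `greta.dynamics` is again assumed to be the package name, and the call pattern mirrors the updated vignette and `tests/testthat/test_ode_solve.R`.

```r
library(greta)
library(greta.dynamics)  # assumed package name providing ode_solve()

# derivative function: the first two arguments must be y and t,
# remaining named arguments are additional (fixed) parameters
decay <- function(y, t, r) {
  -r * y
}

y0 <- t(c(N = 1))             # initial state as a row vector, as in the vignette
times <- seq(0, 10, by = 0.1)

# "dp" (Dormand-Prince, the default) suits non-stiff problems;
# "bdf" targets stiff systems
y <- ode_solve(
  derivative = decay,
  y0 = y0,
  times = times,
  r = 0.5,
  method = "dp"
)

# evaluate the greta array solution: one row per time point
solution <- calculate(y)[[1]]
head(solution)
```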