diff --git a/.Rbuildignore b/.Rbuildignore index 45c08ddb2..f0a0d53e9 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -14,6 +14,7 @@ LICENSE ^cran-comments\.md$ ^revdep$ +^.keras$ ^logos$ ^paper$ @@ -54,3 +55,4 @@ LICENSE # flint files ^flint$ +^\.claude$ diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml index 34a6a9dac..685d3b90b 100644 --- a/.github/workflows/R-CMD-check.yaml +++ b/.github/workflows/R-CMD-check.yaml @@ -52,8 +52,8 @@ jobs: id: get-date shell: bash run: | - echo "::set-output name=year-week::$(date -u "+%Y-%U")" - echo "::set-output name=date::$(date -u "+%F")" + echo "year-week=$(date -u "+%Y-%U")" >> $GITHUB_OUTPUT + echo "date=$(date -u "+%F")" >> $GITHUB_OUTPUT - name: Restore R package cache uses: actions/cache@v4 diff --git a/.gitignore b/.gitignore index e092f463d..c5ece7723 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ revdep/checks.noindex revdep/library.noindex testlog*.md +.claude diff --git a/DESCRIPTION b/DESCRIPTION index 781046ba4..81429a724 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Type: Package Package: greta -Title: Simple and Scalable Statistical Modelling in R -Version: 0.5.0.9000 +Title: Simple and 'Scalable' Statistical Modelling in R +Version: 0.5.1 Authors@R: c( person("Nick", "Golding", , "nick.golding.research@gmail.com", role = "aut", comment = c(ORCID = "0000-0001-8916-5570")), @@ -21,11 +21,11 @@ Authors@R: c( person("Jian", "Yen", role = "ctb") ) Description: Write statistical models in R and fit them by MCMC and - optimisation on CPUs and GPUs, using Google 'TensorFlow'. greta lets + optimisation on 'CPUs' and 'GPUs', using Google 'TensorFlow'. 'greta' lets you write your own model like in BUGS, JAGS and Stan, except that you write models right in R, it scales well to massive datasets, and it’s easy to extend and build on. See the website for more information, - including tutorials, examples, package documentation, and the greta + including tutorials, examples, package documentation, and the 'greta' forum. License: Apache License 2.0 URL: https://greta-stats.org, https://github.com/greta-dev/greta @@ -44,7 +44,7 @@ Imports: parallelly (>= 1.29.0), progress (>= 1.2.0), R6, - reticulate (>= 1.19.0), + reticulate (>= 1.43.0), rlang, tensorflow (>= 2.16.0), tools, @@ -84,7 +84,7 @@ Config/testthat/edition: 3 Encoding: UTF-8 Language: en-US Roxygen: list(markdown = TRUE) -RoxygenNote: 7.3.2 +RoxygenNote: 7.3.3 SystemRequirements: Python (>= 3.7.0) with header files and shared library; TensorFlow (>= v2.0.0; https://www.tensorflow.org/); TensorFlow Probability (v0.8.0; https://www.tensorflow.org/probability/) diff --git a/NAMESPACE b/NAMESPACE index ede86f97e..d85b84ea6 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -200,6 +200,7 @@ export(greta_create_conda_env) export(greta_deps_receipt) export(greta_deps_spec) export(greta_install_miniconda) +export(greta_list_py_modules) export(greta_notes_tf_num_error) export(greta_set_install_logfile) export(greta_sitrep) @@ -285,7 +286,6 @@ importFrom(coda,as.mcmc.list) importFrom(coda,mcmc) importFrom(coda,mcmc.list) importFrom(coda,thin) -importFrom(future,availableCores) importFrom(future,future) importFrom(future,nbrOfWorkers) importFrom(future,plan) diff --git a/NEWS.md b/NEWS.md index 98e0595fc..2b77ccdbc 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,4 +1,4 @@ -# greta (development version) +# greta 0.5.1 ## Changes @@ -6,6 +6,11 @@ - Add warmup information to MCMC print method (#652, resolved by #755). 
- Add more options to level of detail in `greta_sitrep()` with "verbosity" argument. There are three levels, "minimal" (default), "detailed", and "quiet". (#612, resolved by #679). - Use `.batch_size` instead of `batch_size` internally, to avoid rare name clash errors (#634). +- Resolve issues with the Tensorflow version requirement in DESCRIPTION (it can no longer be specified as == 2.16.0, and must now be >= 2.16.0). +- When the number of cores requested exceeds the number of cores detected, the number of cores detected will be used. +- Ensure that the `n_cores` arg defaults to 2 cores. Similarly, `chains` now defaults to 2 chains. +- Minor internal changes for handling versions of python modules. +- Added `greta_list_py_modules()` to show the Python packages installed in the greta TF2 conda environment. # greta 0.5.0 diff --git a/R/checkers.R b/R/checkers.R index 5185f9e2d..46d5c84a6 100644 --- a/R/checkers.R +++ b/R/checkers.R @@ -755,16 +755,14 @@ check_cum_op <- function(x, call = rlang::caller_env()) { } } - -#' @importFrom future availableCores check_n_cores <- function(n_cores, samplers, plan_is) { - # if the plan is remote, and the user hasn't specificed the number of cores, - # leave it as all of them + # if the plan is remote, and the user hasn't specified the number of cores, + # still only use 2 if (is.null(n_cores) & !plan_is$local) { - return(NULL) + return(2L) } - n_cores_detected <- future::availableCores() + n_cores_detected <- parallelly::availableCores() allowed_n_cores <- seq_len(n_cores_detected) cores_exceed_available <- !is.null(n_cores) && !n_cores %in% allowed_n_cores @@ -773,19 +771,20 @@ check_n_cores <- function(n_cores, samplers, plan_is) { cli::cli_warn( message = "{n_cores} cores were requested, but only {n_cores_detected} \\ - are available." + are available. Setting number of cores to {n_cores_detected}." ) - n_cores <- NULL + return(as.integer(n_cores_detected)) } - # if n_cores isn't user-specified, set it so there's no clash between samplers - n_cores <- n_cores %||% floor(n_cores_detected / samplers) - - # make sure there's at least 1 - n_cores <- max(n_cores, 1) + # # if n_cores isn't user-specified, set it so there's no clash between samplers + # n_cores <- n_cores %||% floor(n_cores_detected / samplers) + # # make sure there's at least 1 + # n_cores <- max(n_cores, 1) - as.integer(n_cores) + # if n_cores isn't specified, make sure it is set to 2 by default + # Resolves #796 + n_cores %||% 2L } check_positive_integer <- function(x, name = "", call = rlang::caller_env()) { diff --git a/R/greta-sitrep.R b/R/greta-sitrep.R index 46f63abdc..827ae3db6 100644 --- a/R/greta-sitrep.R +++ b/R/greta-sitrep.R @@ -6,12 +6,12 @@ #' @param verbosity character. How verbose the output of the situation report. #' Possible options: "minimal" (default), "detailed", and "quiet". "Minimal" #' provides just information in python version, tensorflow version, -#' tensorflow proability, and whether greta conda environment is available. +#' tensorflow probability, and whether greta conda environment is available. #' "Quiet" presents no information, but prepares greta to be used. "Detailed" #' gives information on the version and path for R, greta, python, #' tensorflow, tensorflow probability, the greta conda environment, and a #' statement on greta usability. -#' @return Message on greta situation report. See "verbsoity" parameter details +#' @return Message on greta situation report. See "verbosity" parameter details #' above for more information.
#' @export #' @@ -63,7 +63,8 @@ detailed_sitrep <- function() { check_if_greta_conda_env_available() conda_env_path <- greta_conda_env_path() cli::cli_ul("path: {.path {conda_env_path}}") - conda_modules <- conda_list_env_modules() + + conda_modules <- greta_list_py_modules() tf_in_conda <- nzchar(grep( "^(tensorflow)(\\s|$)", @@ -97,12 +98,50 @@ ) } +#' List Python modules installed in greta env +#' +#' @returns matrix/data frame of Python modules that are installed in the greta environment - showing the name, version, build, and install channel. +#' +#' @export +greta_list_py_modules <- function() { + conda_modules <- tryCatch( + expr = { + # This will find conda whether it's on PATH or installed by reticulate + conda_bin <- reticulate::conda_binary() + + system2( + conda_bin, + args = c("list", "-n", "greta-env-tf2"), + stdout = TRUE, + stderr = TRUE + ) + }, + error = function(e) { + cli::cli_ul( + c( + "Encountered an error in running:", + "{.code conda list -n greta-env-tf2}", + "x" = "{.code {e$message}}", + "!" = "conda may not be installed. Try {.code reticulate::install_miniconda()}" + ) + ) + return(NULL) + } + ) + + conda_modules +} + quiet_sitrep <- function() { suppressMessages(check_greta_ready_to_use()) } conda_list_env_modules <- function() { - system(paste("conda list -n", "greta-env-tf2"), intern = TRUE) + system( + paste("conda list -n", "greta-env-tf2"), + intern = TRUE, + ignore.stderr = TRUE + ) } @@ -286,9 +325,7 @@ have_tfp <- function() { is_tfp_available <- py_module_available("tensorflow_probability") if (is_tfp_available) { - pkg <- reticulate::import("pkg_resources") - tfp_version <- pkg$get_distribution("tensorflow_probability")$version - is_tfp_available <- utils::compareVersion("0.15.0", tfp_version) <= 0 + is_tfp_available <- utils::compareVersion("0.15.0", tfp$`__version__`) <= 0 } return(is_tfp_available) diff --git a/R/greta_model_class.R b/R/greta_model_class.R index 5b089be8e..762b52325 100644 --- a/R/greta_model_class.R +++ b/R/greta_model_class.R @@ -141,7 +141,7 @@ print.greta_model <- function(x, ...) { #' @details The plot method produces a visual representation of the defined #' model. It uses the `DiagrammeR` package, which must be installed #' first. Here's a key to the plots: -#' \if{html}{\figure{plotlegend.png}{options: width="100\%"}} +#' \if{html}{\figure{plotlegend.png}{options: width=100}} #' \if{latex}{\figure{plotlegend.pdf}{options: width=7cm}} #' #' @return `plot` - a [DiagrammeR::grViz()] diff --git a/R/inference.R b/R/inference.R index ba174c76a..ec78ac2e6 100644 --- a/R/inference.R +++ b/R/inference.R @@ -29,9 +29,9 @@ greta_stash$numerical_messages <- c( #' rest are discarded #' @param warmup number of samples to spend warming up the mcmc sampler (moving #' chains toward the highest density area and tuning sampler hyperparameters). -#' @param chains number of MCMC chains to run +#' @param chains number of MCMC chains to run. Default is 2.
We recommend using more chains, as this helps improve convergence; however, more chains increase the CPU load, so the default is kept low. #' @param n_cores the maximum number of CPU cores used by each sampler (see -#' details). +#' details). If NULL (default), 2 cores are used. #' @param verbose whether to print progress information to the console #' @param pb_update how regularly to update the progress bar (in iterations). #' If `pb_update` is less than or equal to `thin`, it will be set @@ -94,9 +94,9 @@ greta_stash$numerical_messages <- c( #' schedulers. #' #' If `n_cores = NULL` and mcmc samplers are being run sequentially, each -#' sampler will be allowed to use all CPU cores (possibly to compute multiple -#' chains sequentially). If samplers are being run in parallel with the -#' `future` package, `n_cores` will be set so that `n_cores * +#' sampler will be allowed to use only 2 CPU cores (possibly to compute +#' multiple chains sequentially). If samplers are being run in parallel with +#' the `future` package, `n_cores` will be set so that `n_cores * #' [future::nbrOfWorkers]` is less than the number #' of CPU cores. #' @@ -109,7 +109,7 @@ #' memory usage. #' #' @note to set a seed with MCMC you can use [set.seed()], or -#' [tensorflow::set_random_seed()]. They both given identical results. See +#' [tensorflow::set_random_seed()]. They both give identical results. See #' examples below. #' #' @return `mcmc`, `stashed_samples` & `extra_samples` - a @@ -212,7 +212,7 @@ mcmc <- function( n_samples = 1000, thin = 1, warmup = 1000, - chains = 4, + chains = 2, n_cores = NULL, verbose = TRUE, pb_update = 50, diff --git a/R/zzz.R b/R/zzz.R index 40920a77c..f2ff59870 100644 --- a/R/zzz.R +++ b/R/zzz.R @@ -4,6 +4,14 @@ tf <- tfp <- NULL .onLoad <- function(libname, pkgname) { # nolint + # resolve issue with .keras directory + Sys.setenv( + "KERAS_HOME" = normalizePath( + tools::R_user_dir("greta", "cache"), + mustWork = FALSE + ) + ) + # silence TF's CPU instructions message Sys.setenv(TF_CPP_MIN_LOG_LEVEL = 2) @@ -42,5 +50,4 @@ tf <- tfp <- NULL # warn if TF version is bad # check_tf_version("startup") - } diff --git a/codemeta.json b/codemeta.json index 798479d78..3c1a21636 100644 --- a/codemeta.json +++ b/codemeta.json @@ -2,25 +2,19 @@ "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "@type": "SoftwareSourceCode", "identifier": "greta", - "description": "Write statistical models in R and fit them by MCMC and optimisation on CPUs and GPUs, using Google 'TensorFlow'. greta lets you write your own model like in BUGS, JAGS and Stan, except that you write models right in R, it scales well to massive datasets, and it’s easy to extend and build on. See the website for more information, including tutorials, examples, package documentation, and the greta forum.", - "name": "greta: Simple and Scalable Statistical Modelling in R", - "relatedLink": ["https://greta-stats.org", "https://CRAN.R-project.org/package=greta"], + "description": "Write statistical models in R and fit them by MCMC and optimisation on 'CPUs' and 'GPUs', using Google 'TensorFlow'. 'greta' lets you write your own model like in BUGS, JAGS and Stan, except that you write models right in R, it scales well to massive datasets, and it’s easy to extend and build on.
See the website for more information, including tutorials, examples, package documentation, and the 'greta' forum.", + "name": "greta: Simple and 'Scalable' Statistical Modelling in R", + "relatedLink": "https://greta-stats.org", "codeRepository": "https://github.com/greta-dev/greta", "issueTracker": "https://github.com/greta-dev/greta/issues", "license": "https://spdx.org/licenses/Apache-2.0", - "version": "0.5.0", + "version": "0.5.1", "programmingLanguage": { "@type": "ComputerLanguage", "name": "R", "url": "https://r-project.org" }, - "runtimePlatform": "R version 4.4.2 (2024-10-31)", - "provider": { - "@id": "https://cran.r-project.org", - "@type": "Organization", - "name": "Comprehensive R Archive Network (CRAN)", - "url": "https://cran.r-project.org" - }, + "runtimePlatform": "R version 4.5.2 (2025-10-31)", "author": [ { "@type": "Person", @@ -506,11 +500,23 @@ "sameAs": "https://CRAN.R-project.org/package=glue" }, "8": { + "@type": "SoftwareApplication", + "identifier": "lifecycle", + "name": "lifecycle", + "provider": { + "@id": "https://cran.r-project.org", + "@type": "Organization", + "name": "Comprehensive R Archive Network (CRAN)", + "url": "https://cran.r-project.org" + }, + "sameAs": "https://CRAN.R-project.org/package=lifecycle" + }, + "9": { "@type": "SoftwareApplication", "identifier": "methods", "name": "methods" }, - "9": { + "10": { "@type": "SoftwareApplication", "identifier": "parallelly", "name": "parallelly", @@ -523,7 +529,7 @@ }, "sameAs": "https://CRAN.R-project.org/package=parallelly" }, - "10": { + "11": { "@type": "SoftwareApplication", "identifier": "progress", "name": "progress", @@ -536,7 +542,7 @@ }, "sameAs": "https://CRAN.R-project.org/package=progress" }, - "11": { + "12": { "@type": "SoftwareApplication", "identifier": "R6", "name": "R6", @@ -548,11 +554,11 @@ }, "sameAs": "https://CRAN.R-project.org/package=R6" }, - "12": { + "13": { "@type": "SoftwareApplication", "identifier": "reticulate", "name": "reticulate", - "version": ">= 1.19.0", + "version": ">= 1.43.0", "provider": { "@id": "https://cran.r-project.org", "@type": "Organization", @@ -561,7 +567,7 @@ }, "sameAs": "https://CRAN.R-project.org/package=reticulate" }, - "13": { + "14": { "@type": "SoftwareApplication", "identifier": "rlang", "name": "rlang", @@ -573,11 +579,11 @@ }, "sameAs": "https://CRAN.R-project.org/package=rlang" }, - "14": { + "15": { "@type": "SoftwareApplication", "identifier": "tensorflow", "name": "tensorflow", - "version": "== 2.16.0", + "version": ">= 2.16.0", "provider": { "@id": "https://cran.r-project.org", "@type": "Organization", @@ -586,17 +592,17 @@ }, "sameAs": "https://CRAN.R-project.org/package=tensorflow" }, - "15": { + "16": { "@type": "SoftwareApplication", "identifier": "tools", "name": "tools" }, - "16": { + "17": { "@type": "SoftwareApplication", "identifier": "utils", "name": "utils" }, - "17": { + "18": { "@type": "SoftwareApplication", "identifier": "whisker", "name": "whisker", @@ -608,7 +614,7 @@ }, "sameAs": "https://CRAN.R-project.org/package=whisker" }, - "18": { + "19": { "@type": "SoftwareApplication", "identifier": "yesno", "name": "yesno", @@ -622,7 +628,7 @@ }, "SystemRequirements": "Python (>= 3.7.0) with header files and shared\n library; TensorFlow (>= v2.0.0; https://www.tensorflow.org/); TensorFlow\n Probability (v0.8.0; https://www.tensorflow.org/probability/)" }, - "fileSize": "1625.784KB", + "fileSize": "1534.22KB", "citation": [ { "@type": "ScholarlyArticle", @@ -636,7 +642,6 @@ ], "name": "{greta}: simple and scalable statistical 
modelling in R", "identifier": "10.21105/joss.01601", - "url": "http://dx.doi.org/10.21105/joss.01601", "pagination": "1601", "@id": "https://doi.org/10.21105/joss.01601", "sameAs": "https://doi.org/10.21105/joss.01601", @@ -652,7 +657,7 @@ } } ], - "releaseNotes": "https://github.com/greta-dev/greta/blob/master/NEWS.md", - "readme": "https://github.com/greta-dev/greta/blob/master/README.md", + "releaseNotes": "https://github.com/greta-dev/greta/blob/main/NEWS.md", + "readme": "https://github.com/greta-dev/greta/blob/main/README.md", "contIntegration": ["https://github.com/greta-dev/greta/actions", "https://app.codecov.io/gh/greta-dev/greta?branch=master"] } diff --git a/cran-comments.md b/cran-comments.md index 452a0cfa8..6447732d8 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,29 +1,20 @@ ## Test environments -* local R installation, R 4.4.2 +* local R installation, R 4.5.2 * win-builder (devel) ## R CMD check results 0 errors | 0 warnings | 1 notes -> Found the following (possibly) invalid URLs: - URL: http://www.phidot.org/software/mark/docs/book/ - From: inst/doc/example_models.html - Status: 403 - Message: Forbidden - -We could not find an issue with this link, or an alternative link. - -* Days since last update: 244 days - ## Submission notes -This release is a substantial overhaul of the internals of greta to migrate the internals from tensorflow 1 to tensorflow 2. +"greta" was archived in late 2025. This release is a patch update to fix issues +which led to the archival. They also fix some fundamental a tensorflow updates, +and dependency changes in the DESCRIPTION, such as not relying on `==` for +versions. -## revdepcheck results +We now also set the number of cores to 2 by default. -We checked 1 reverse dependencies, comparing R CMD check results across CRAN and dev versions of this package. - - * We saw 0 new problems - * We failed to check 0 packages +## revdepcheck results +As the package was archived, there are no revdep issues. diff --git a/inst/WORDLIST b/inst/WORDLIST index 731a78645..75f6a59d3 100644 --- a/inst/WORDLIST +++ b/inst/WORDLIST @@ -30,7 +30,6 @@ Nesterov NumPy ORCID OpenBUGS -Optimizers PSAT ProximalAdagradOptimizer ProximalGradientDescentOptimizer @@ -41,12 +40,11 @@ Scalable TF TFP Tensorflow -Vectorised WinBUGS XLA -acyclic al analysed +arg args automagically bayesplot @@ -106,7 +104,6 @@ optimisation optimisations optimiser optimisers -optimizers ouputs parallelisation parallelise @@ -114,19 +111,11 @@ parallelising pkgdown poisson polygamma -posteriori -priori pythonic realisation realisations -recoded regularisation reinstalls -repo -reproducibility -rescale -rescaled -rescaling reticulate scalable schoolers @@ -139,12 +128,10 @@ tf tfp tril tuh -unadjusted uncentered uncoached vectorising visualise -warmup winbuilder wishart zhang diff --git a/man/greta_list_py_modules.Rd b/man/greta_list_py_modules.Rd new file mode 100644 index 000000000..c348aeb22 --- /dev/null +++ b/man/greta_list_py_modules.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/greta-sitrep.R +\name{greta_list_py_modules} +\alias{greta_list_py_modules} +\title{List Python modules installed in greta env} +\usage{ +greta_list_py_modules() +} +\value{ +matrix/data frame of Python modules that are installed in the greta environment - showing the name, version, build, and install channel. 
+} +\description{ +List Python modules installed in greta env +} diff --git a/man/greta_sitrep.Rd b/man/greta_sitrep.Rd index 8fd90b953..79716fad4 100644 --- a/man/greta_sitrep.Rd +++ b/man/greta_sitrep.Rd @@ -10,14 +10,14 @@ greta_sitrep(verbosity = c("minimal", "detailed", "quiet")) \item{verbosity}{character. How verbose the output of the situation report. Possible options: "minimal" (default), "detailed", and "quiet". "Minimal" provides just information in python version, tensorflow version, -tensorflow proability, and whether greta conda environment is available. +tensorflow probability, and whether greta conda environment is available. "Quiet" presents no information, but prepares greta to be used. "Detailed" gives information on the version and path for R, greta, python, tensorflow, tensorflow probability, the greta conda environment, and a statement on greta usability.} } \value{ -Message on greta situation report. See "verbsoity" parameter details +Message on greta situation report. See "verbosity" parameter details above for more information. } \description{ diff --git a/man/inference.Rd b/man/inference.Rd index 41a11069d..200aede29 100644 --- a/man/inference.Rd +++ b/man/inference.Rd @@ -15,7 +15,7 @@ mcmc( n_samples = 1000, thin = 1, warmup = 1000, - chains = 4, + chains = 2, n_cores = NULL, verbose = TRUE, pb_update = 50, @@ -67,10 +67,10 @@ rest are discarded} \item{warmup}{number of samples to spend warming up the mcmc sampler (moving chains toward the highest density area and tuning sampler hyperparameters).} -\item{chains}{number of MCMC chains to run} +\item{chains}{number of MCMC chains to run. Default is 2. We recommend using more chains as this helps improve convergence. However the number of chains specified can increase the CPU load, so we have to set a lower default value.} \item{n_cores}{the maximum number of CPU cores used by each sampler (see -details).} +details). If NULL (default), it sets them to 2 cores.} \item{verbose}{whether to print progress information to the console} @@ -186,9 +186,9 @@ to run chains on a cluster of machines on a local or remote network. See schedulers. If \code{n_cores = NULL} and mcmc samplers are being run sequentially, each -sampler will be allowed to use all CPU cores (possibly to compute multiple -chains sequentially). If samplers are being run in parallel with the -\code{future} package, \code{n_cores} will be set so that \verb{n_cores * [future::nbrOfWorkers]} is less than the number +sampler will be allowed to use only 2 CPU cores (possibly to compute +multiple chains sequentially). If samplers are being run in parallel with +the \code{future} package, \code{n_cores} will be set so that \verb{n_cores * [future::nbrOfWorkers]} is less than the number of CPU cores. After carrying out mcmc on all the model parameters, \code{mcmc()} @@ -222,7 +222,7 @@ The parameter vector can then be passed to model. See example. } \note{ to set a seed with MCMC you can use \code{\link[=set.seed]{set.seed()}}, or -\code{\link[tensorflow:set_random_seed]{tensorflow::set_random_seed()}}. They both given identical results. See +\code{\link[tensorflow:set_random_seed]{tensorflow::set_random_seed()}}. They both give identical results. See examples below. } \examples{ diff --git a/man/model.Rd b/man/model.Rd index 6047d4366..6349c7c5d 100644 --- a/man/model.Rd +++ b/man/model.Rd @@ -61,7 +61,7 @@ performance. The plot method produces a visual representation of the defined model. It uses the \code{DiagrammeR} package, which must be installed first. 
Here's a key to the plots: -\if{html}{\figure{plotlegend.png}{options: width="100\%"}} +\if{html}{\figure{plotlegend.png}{options: width=100}} \if{latex}{\figure{plotlegend.pdf}{options: width=7cm}} } \examples{ diff --git a/revdep/checks.noindex/greta.dynamics/greta.dynamics_0.2.0.tar.gz b/revdep/checks.noindex/greta.dynamics/greta.dynamics_0.2.0.tar.gz deleted file mode 100644 index bae5eb673..000000000 Binary files a/revdep/checks.noindex/greta.dynamics/greta.dynamics_0.2.0.tar.gz and /dev/null differ diff --git a/revdep/checks.noindex/greta.dynamics/new/greta.dynamics.Rcheck/00check.log b/revdep/checks.noindex/greta.dynamics/new/greta.dynamics.Rcheck/00check.log deleted file mode 100644 index dd562624a..000000000 --- a/revdep/checks.noindex/greta.dynamics/new/greta.dynamics.Rcheck/00check.log +++ /dev/null @@ -1,66 +0,0 @@ -* using log directory ‘/Users/nick/github/greta-dev/greta/revdep/checks.noindex/greta.dynamics/new/greta.dynamics.Rcheck’ -* using R version 4.3.2 (2023-10-31) -* using platform: aarch64-apple-darwin20 (64-bit) -* R was compiled by - Apple clang version 14.0.0 (clang-1400.0.29.202) - GNU Fortran (GCC) 12.2.0 -* running under: macOS Sonoma 14.0 -* using session charset: UTF-8 -* using options ‘--no-manual --no-build-vignettes’ -* checking for file ‘greta.dynamics/DESCRIPTION’ ... OK -* checking extension type ... Package -* this is package ‘greta.dynamics’ version ‘0.2.0’ -* package encoding: UTF-8 -* checking package namespace information ... OK -* checking package dependencies ... OK -* checking if this is a source package ... OK -* checking if there is a namespace ... OK -* checking for executable files ... OK -* checking for hidden files and directories ... OK -* checking for portable file names ... OK -* checking for sufficient/correct file permissions ... OK -* checking whether package ‘greta.dynamics’ can be installed ... OK -* checking installed package size ... OK -* checking package directory ... OK -* checking ‘build’ directory ... OK -* checking DESCRIPTION meta-information ... OK -* checking top-level files ... OK -* checking for left-over files ... OK -* checking index information ... OK -* checking package subdirectories ... OK -* checking R files for non-ASCII characters ... OK -* checking R files for syntax errors ... OK -* checking whether the package can be loaded ... OK -* checking whether the package can be loaded with stated dependencies ... OK -* checking whether the package can be unloaded cleanly ... OK -* checking whether the namespace can be loaded with stated dependencies ... OK -* checking whether the namespace can be unloaded cleanly ... OK -* checking loading without being on the library search path ... OK -* checking startup messages can be suppressed ... OK -* checking dependencies in R code ... OK -* checking S3 generic/method consistency ... OK -* checking replacement functions ... OK -* checking foreign function calls ... OK -* checking R code for possible problems ... OK -* checking Rd files ... OK -* checking Rd metadata ... OK -* checking Rd cross-references ... OK -* checking for missing documentation entries ... OK -* checking for code/documentation mismatches ... OK -* checking Rd \usage sections ... OK -* checking Rd contents ... OK -* checking for unstated dependencies in examples ... OK -* checking installed files from ‘inst/doc’ ... OK -* checking files in ‘vignettes’ ... OK -* checking examples ... OK -* checking for unstated dependencies in ‘tests’ ... OK -* checking tests ... 
OK - Running ‘testthat.R’ -* checking for unstated dependencies in vignettes ... OK -* checking package vignettes in ‘inst/doc’ ... OK -* checking running R code from vignettes ... NONE - ‘iterate-matrix-example.Rmd’ using ‘UTF-8’... OK - ‘ode-solve-example.Rmd’ using ‘UTF-8’... OK -* checking re-building of vignette outputs ... SKIPPED -* DONE -Status: OK diff --git a/revdep/checks.noindex/greta.dynamics/new/greta.dynamics.Rcheck/00install.out b/revdep/checks.noindex/greta.dynamics/new/greta.dynamics.Rcheck/00install.out deleted file mode 100644 index e8a273399..000000000 --- a/revdep/checks.noindex/greta.dynamics/new/greta.dynamics.Rcheck/00install.out +++ /dev/null @@ -1,14 +0,0 @@ -* installing *source* package ‘greta.dynamics’ ... -** package ‘greta.dynamics’ successfully unpacked and MD5 sums checked -** using staged installation -** R -** inst -** byte-compile and prepare package for lazy loading -** help -*** installing help indices -** building package indices -** installing vignettes -** testing if installed package can be loaded from temporary location -** testing if installed package can be loaded from final location -** testing if installed package keeps a record of temporary installation path -* DONE (greta.dynamics) diff --git a/revdep/checks.noindex/greta.dynamics/new/greta.dynamics.Rcheck/greta.dynamics-Ex.R b/revdep/checks.noindex/greta.dynamics/new/greta.dynamics.Rcheck/greta.dynamics-Ex.R deleted file mode 100644 index f2e416bc7..000000000 --- a/revdep/checks.noindex/greta.dynamics/new/greta.dynamics.Rcheck/greta.dynamics-Ex.R +++ /dev/null @@ -1,185 +0,0 @@ -pkgname <- "greta.dynamics" -source(file.path(R.home("share"), "R", "examples-header.R")) -options(warn = 1) -library('greta.dynamics') - -base::assign(".oldSearch", base::search(), pos = 'CheckExEnv') -base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv') -cleanEx() -nameEx("iterate_matrix") -### * iterate_matrix - -flush(stderr()); flush(stdout()) - -### Name: iterate_matrix -### Title: iterate transition matrices -### Aliases: iterate_matrix - -### ** Examples - -## Not run: -##D # simulate from a probabilistic 4-stage transition matrix model -##D k <- 4 -##D -##D # component variables -##D # survival probability for all stages -##D survival <- uniform(0, 1, dim = k) -##D # conditional (on survival) probability of staying in a stage -##D stasis <- c(uniform(0, 1, dim = k - 1), 1) -##D # marginal probability of staying/progressing -##D stay <- survival * stasis -##D progress <- (survival * (1 - stay))[1:(k - 1)] -##D # recruitment rate for the largest two stages -##D recruit <- exponential(c(3, 5)) -##D -##D # combine into a matrix: -##D tmat <- zeros(k, k) -##D diag(tmat) <- stay -##D progress_idx <- row(tmat) - col(tmat) == 1 -##D tmat[progress_idx] <- progress -##D tmat[1, k - (1:0)] <- recruit -##D -##D # analyse this to get the intrinsic growth rate and stable state -##D iterations <- iterate_matrix(tmat) -##D iterations$lambda -##D iterations$stable_distribution -##D iterations$all_states -##D -##D # Can also do this simultaneously for a collection of transition matrices -##D k <- 2 -##D n <- 10 -##D survival <- uniform(0, 1, dim = c(n, k)) -##D stasis <- cbind(uniform(0, 1, dim = n), rep(1, n)) -##D stay <- survival * stasis -##D progress <- (survival * (1 - stasis))[, 1] -##D recruit_rate <- 1 / seq(0.1, 5, length.out = n) -##D recruit <- exponential(recruit_rate, dim = n) -##D tmats <- zeros(10, 2, 2) -##D tmats[, 1, 1] <- stasis[, 1] -##D tmats[, 2, 2] <- stasis[, 2] -##D tmats[, 2, 1] <- progress 
-##D tmats[, 1, 2] <- recruit -##D -##D iterations <- iterate_matrix(tmats) -##D iterations$lambda -##D iterations$stable_distribution -##D iterations$all_states -## End(Not run) - - - -cleanEx() -nameEx("ode_solve") -### * ode_solve - -flush(stderr()); flush(stdout()) - -### Name: ode_solve -### Title: solve ODEs -### Aliases: ode_solve - -### ** Examples - -## Not run: -##D # replicate the Lotka-Volterra example from deSolve -##D library(deSolve) -##D LVmod <- function(Time, State, Pars) { -##D with(as.list(c(State, Pars)), { -##D Ingestion <- rIng * Prey * Predator -##D GrowthPrey <- rGrow * Prey * (1 - Prey / K) -##D MortPredator <- rMort * Predator -##D -##D dPrey <- GrowthPrey - Ingestion -##D dPredator <- Ingestion * assEff - MortPredator -##D -##D return(list(c(dPrey, dPredator))) -##D }) -##D } -##D -##D pars <- c( -##D rIng = 0.2, # /day, rate of ingestion -##D rGrow = 1.0, # /day, growth rate of prey -##D rMort = 0.2, # /day, mortality rate of predator -##D assEff = 0.5, # -, assimilation efficiency -##D K = 10 -##D ) # mmol/m3, carrying capacity -##D -##D yini <- c(Prey = 1, Predator = 2) -##D times <- seq(0, 30, by = 1) -##D out <- ode(yini, times, LVmod, pars) -##D -##D # simulate observations -##D jitter <- rnorm(2 * length(times), 0, 0.1) -##D y_obs <- out[, -1] + matrix(jitter, ncol = 2) -##D -##D # ~~~~~~~~~ -##D # fit a greta model to infer the parameters from this simulated data -##D -##D # greta version of the function -##D lotka_volterra <- function(y, t, rIng, rGrow, rMort, assEff, K) { -##D Prey <- y[1, 1] -##D Predator <- y[1, 2] -##D -##D Ingestion <- rIng * Prey * Predator -##D GrowthPrey <- rGrow * Prey * (1 - Prey / K) -##D MortPredator <- rMort * Predator -##D -##D dPrey <- GrowthPrey - Ingestion -##D dPredator <- Ingestion * assEff - MortPredator -##D -##D cbind(dPrey, dPredator) -##D } -##D -##D # priors for the parameters -##D rIng <- uniform(0, 2) # /day, rate of ingestion -##D rGrow <- uniform(0, 3) # /day, growth rate of prey -##D rMort <- uniform(0, 1) # /day, mortality rate of predator -##D assEff <- uniform(0, 1) # -, assimilation efficiency -##D K <- uniform(0, 30) # mmol/m3, carrying capacity -##D -##D # initial values and observation error -##D y0 <- uniform(0, 5, dim = c(1, 2)) -##D obs_sd <- uniform(0, 1) -##D -##D # solution to the ODE -##D y <- ode_solve(lotka_volterra, y0, times, rIng, rGrow, rMort, assEff, K) -##D -##D # sampling statement/observation model -##D distribution(y_obs) <- normal(y, obs_sd) -##D -##D # we can use greta to solve directly, for a fixed set of parameters (the true -##D # ones in this case) -##D values <- c( -##D list(y0 = t(1:2)), -##D as.list(pars) -##D ) -##D vals <- calculate(y, values = values)[[1]] -##D plot(vals[, 1] ~ times, type = "l", ylim = range(vals)) -##D lines(vals[, 2] ~ times, lty = 2) -##D points(y_obs[, 1] ~ times) -##D points(y_obs[, 2] ~ times, pch = 2) -##D -##D # or we can do inference on the parameters: -##D -##D # build the model (takes a few seconds to define the tensorflow graph) -##D m <- model(rIng, rGrow, rMort, assEff, K, obs_sd) -##D -##D # compute MAP estimate -##D o <- opt(m) -##D o -## End(Not run) - - - -### *