From f31700eac7869c3899b250d30c36e7345a93e551 Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 8 Jul 2025 11:19:15 +0200 Subject: [PATCH 01/15] BREAKING CHANGE: replace adagio::pureCMAES() with cmaes::cma_es() in OptimizerBatchCmaes. --- DESCRIPTION | 2 +- NEWS.md | 2 + R/OptimInstanceBatch.R | 1 - R/OptimizerBatchCmaes.R | 85 ++++++++++++++++++---------- man/mlr_optimizers_cmaes.Rd | 20 ++++--- tests/testthat/test_OptimizerCmaes.R | 8 +-- 6 files changed, 72 insertions(+), 46 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index dd58f8f86..f98180add 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -34,7 +34,7 @@ Imports: mlr3misc (>= 0.15.1), R6 Suggests: - adagio, + cmaes, emoa, GenSA, irace (>= 4.0.0), diff --git a/NEWS.md b/NEWS.md index c4c20494d..fc5285480 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,5 +1,7 @@ # bbotk (development version) +* BREAKING CHANGE: Replace `adagio::pureCMAES()` with `cmaes::cma_es()` in `OptimizerBatchCmaes`. + # bbotk 1.6.0 # bbotk 1.5.0 diff --git a/R/OptimInstanceBatch.R b/R/OptimInstanceBatch.R index 603f0213d..d0321f4e3 100644 --- a/R/OptimInstanceBatch.R +++ b/R/OptimInstanceBatch.R @@ -180,7 +180,6 @@ OptimInstanceBatch = R6Class("OptimInstanceBatch", objective_function = function(x, inst, direction) { xs = set_names(as.list(x), inst$search_space$ids()) - inst$search_space$assert(xs) xdt = as.data.table(xs) res = inst$eval_batch(xdt) y = as.numeric(res[, inst$objective$codomain$target_ids, with = FALSE]) diff --git a/R/OptimizerBatchCmaes.R b/R/OptimizerBatchCmaes.R index 7a83129e6..20428ce2a 100644 --- a/R/OptimizerBatchCmaes.R +++ b/R/OptimizerBatchCmaes.R @@ -4,28 +4,30 @@ #' @name mlr_optimizers_cmaes #' #' @description -#' `OptimizerBatchCmaes` class that implements CMA-ES. Calls [adagio::pureCMAES()] -#' from package \CRANpkg{adagio}. The algorithm is typically applied to search -#' space dimensions between three and fifty. Lower search space dimensions might -#' crash. 
+#' `OptimizerBatchCmaes` class that implements CMA-ES. +#' Calls `cma_es()` from package \CRANpkg{cmaes}. +#' The algorithm is typically applied to search space dimensions between three and fifty. +#' Lower search space dimensions might crash. #' #' @templateVar id cmaes #' @template section_dictionary_optimizers #' #' @section Parameters: #' \describe{ -#' \item{`sigma`}{`numeric(1)`} #' \item{`start_values`}{`character(1)`\cr -#' Create `"random"` start values or based on `"center"` of search space? -#' In the latter case, it is the center of the parameters before a trafo is applied. -#' If set to `"custom"`, the start values can be passed via the `start` parameter.} +#' Create `"random"` start values or based on `"center"` of search space? +#' In the latter case, it is the center of the parameters before a trafo is applied. +#' If set to `"custom"`, the start values can be passed via the `start` parameter.} #' \item{`start`}{`numeric()`\cr -#' Custom start values. Only applicable if `start_values` parameter is set to `"custom"`.} +#' Custom start values. +#' Only applicable if `start_values` parameter is set to `"custom"`.} #' } #' -#' For the meaning of the control parameters, see [adagio::pureCMAES()]. Note -#' that we have removed all control parameters which refer to the termination of -#' the algorithm and where our terminators allow to obtain the same behavior. +#' For the meaning of the control parameters, see `cma_es()`. +#' The parameters `maxit`, `stopfitness` and `stop.tolx` can be used additionally to our terminators. +#' The default values of `maxit` is `100 * D^2` where `D` is the number of dimensions of the search space. +#' The `stop.tolx` parameter stops when the step size is smaller than `1e-12 * sigma`. +#' The `vectorized` parameter is always set to `TRUE`. #' #' @template section_progress_bars #' @@ -72,9 +74,26 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", #' Creates a new instance of this [R6][R6::R6Class] class. 
initialize = function() { param_set = ps( - sigma = p_dbl(default = 0.5), - start_values = p_fct(default = "random", levels = c("random", "center", "custom")), - start = p_uty(default = NULL, depends = start_values == "custom") + fnscale = p_dbl(default = 1), + maxit = p_int(lower = 1L), + stopfitness = p_dbl(default = -Inf), + keep.best = p_lgl(default = TRUE), + sigma = p_uty(default = 0.5), + mu = p_int(lower = 1L), + lambda = p_int(lower = 1L), + weights = p_uty(), + damps = p_dbl(), + cs = p_dbl(), + ccum = p_dbl(), + ccov.1 = p_dbl(lower = 0), + ccov.mu = p_dbl(lower = 0), + diag.sigma = p_lgl(default = FALSE), + diag.eigen = p_lgl(default = FALSE), + diag.pop = p_lgl(default = FALSE), + diag.value = p_lgl(default = FALSE), + stop.tolx = p_dbl(), # undocumented stop criterion + start_values = p_fct(default = "random", levels = c("random", "center", "custom")), + start = p_uty(default = NULL, depends = start_values == "custom") ) param_set$values$start_values = "random" param_set$values$start = NULL @@ -84,7 +103,7 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", param_set = param_set, param_classes = "ParamDbl", properties = "single-crit", - packages = "adagio", + packages = "cmaes", label = "Covariance Matrix Adaptation Evolution Strategy", man = "bbotk::mlr_optimizers_cmaes" ) @@ -94,29 +113,33 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", private = list( .optimize = function(inst) { pv = self$param_set$values + start_values = pv$start_values + start = pv$start - if (pv$start_values == "custom") { - pv$par = pv$start - pv$start_values = NULL - pv$start = NULL - } else { - pv$par = search_start(inst$search_space, type = pv$start_values) - pv$start_values = NULL - pv$start = NULL - } - pv$stopeval = .Machine$integer.max # make sure pureCMAES does not stop - pv$stopfitness = -Inf + par = if (pv$start_values == "custom") set_names(start, inst$search_space$ids()) else search_start(inst$search_space, type = start_values) - if (length(pv$par) < 
2L) { + if (length(par) < 2L) { warning("CMA-ES is typically applied to search space dimensions between three and fifty. A lower search space dimension might crash.") } - invoke(adagio::pureCMAES, - fun = inst$objective_function, + control = pv[names(pv) %nin% c("start_values", "start")] + control$vectorized = TRUE + + wrapper = function(xmat, inst) { + xdt = as.data.table(t(xmat)) + res = inst$eval_batch(xdt) + y = res[, inst$objective$codomain$target_ids, with = FALSE][[1]] + y * inst$objective_multiplicator + } + + invoke(cmaes::cma_es, + par = par, + fn = wrapper, lower = inst$search_space$lower, upper = inst$search_space$upper, - .args = pv) + control = control, + inst = inst) } ) ) diff --git a/man/mlr_optimizers_cmaes.Rd b/man/mlr_optimizers_cmaes.Rd index b78f599d3..124d4b071 100644 --- a/man/mlr_optimizers_cmaes.Rd +++ b/man/mlr_optimizers_cmaes.Rd @@ -5,10 +5,10 @@ \alias{OptimizerBatchCmaes} \title{Optimization via Covariance Matrix Adaptation Evolution Strategy} \description{ -\code{OptimizerBatchCmaes} class that implements CMA-ES. Calls \code{\link[adagio:cmaes]{adagio::pureCMAES()}} -from package \CRANpkg{adagio}. The algorithm is typically applied to search -space dimensions between three and fifty. Lower search space dimensions might -crash. +\code{OptimizerBatchCmaes} class that implements CMA-ES. +Calls \code{cma_es()} from package \CRANpkg{cmaes}. +The algorithm is typically applied to search space dimensions between three and fifty. +Lower search space dimensions might crash. } \section{Dictionary}{ @@ -23,18 +23,20 @@ opt("cmaes") \section{Parameters}{ \describe{ -\item{\code{sigma}}{\code{numeric(1)}} \item{\code{start_values}}{\code{character(1)}\cr Create \code{"random"} start values or based on \code{"center"} of search space? In the latter case, it is the center of the parameters before a trafo is applied. 
If set to \code{"custom"}, the start values can be passed via the \code{start} parameter.} \item{\code{start}}{\code{numeric()}\cr -Custom start values. Only applicable if \code{start_values} parameter is set to \code{"custom"}.} +Custom start values. +Only applicable if \code{start_values} parameter is set to \code{"custom"}.} } -For the meaning of the control parameters, see \code{\link[adagio:cmaes]{adagio::pureCMAES()}}. Note -that we have removed all control parameters which refer to the termination of -the algorithm and where our terminators allow to obtain the same behavior. +For the meaning of the control parameters, see \code{cma_es()}. +The parameters \code{maxit}, \code{stopfitness} and \code{stop.tolx} can be used additionally to our terminators. +The default values of \code{maxit} is \code{100 * D^2} where \code{D} is the number of dimensions of the search space. +The \code{stop.tolx} parameter stops when the step size is smaller than \code{1e-12 * sigma}. +The \code{vectorized} parameter is always set to \code{TRUE}. 
} \section{Progress Bars}{ diff --git a/tests/testthat/test_OptimizerCmaes.R b/tests/testthat/test_OptimizerCmaes.R index 5bb912616..96d276387 100644 --- a/tests/testthat/test_OptimizerCmaes.R +++ b/tests/testthat/test_OptimizerCmaes.R @@ -1,5 +1,5 @@ test_that("OptimizerBatchCmaes", { - skip_if_not_installed("adagio") + skip_if_not_installed("cmaes") search_space = domain = ps( x1 = p_dbl(-10, 10), @@ -22,15 +22,15 @@ test_that("OptimizerBatchCmaes", { search_space = search_space, terminator = trm("evals", n_evals = 10L)) - z = test_optimizer(instance, "cmaes", real_evals = 10L) + z = test_optimizer(instance, "cmaes", mu = 5, lambda = 5, real_evals = 10L) expect_class(z$optimizer, "OptimizerBatchCmaes") expect_snapshot(z$optimizer) - expect_error(test_optimizer_2d("cmaes", term_evals = 10L), "multi-crit objectives") + expect_error(test_optimizer_2d("cmaes", mu = 5, lambda = 5, term_evals = 10L), "multi-crit objectives") instance$archive$clear() - optimizer = opt("cmaes", start_values = "custom", start = c(-9.1, 1.3)) + optimizer = opt("cmaes", mu = 5, lambda = 5, start_values = "custom", start = c(-9.1, 1.3)) optimizer$optimize(instance) # start values are used for the initial mean vector so a deterministic test is not applicable }) From 3182e0fab428479916e29db35cd8ac2aed7533e1 Mon Sep 17 00:00:00 2001 From: be-marc Date: Wed, 9 Jul 2025 12:59:39 +0200 Subject: [PATCH 02/15] ... 
--- tests/testthat/_snaps/OptimizerCmaes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/testthat/_snaps/OptimizerCmaes.md b/tests/testthat/_snaps/OptimizerCmaes.md index 2102466c6..216259fb5 100644 --- a/tests/testthat/_snaps/OptimizerCmaes.md +++ b/tests/testthat/_snaps/OptimizerCmaes.md @@ -5,8 +5,8 @@ Output -- - Covariance Matrix Adaptation Evolution Strategy ----- - * Parameters: start_values=random + * Parameters: mu=5, lambda=5, start_values=random * Parameter classes: * Properties: single-crit - * Packages: bbotk and adagio + * Packages: bbotk and cmaes From 1be9f0b1fb563d1720f2997a541bd37850011984 Mon Sep 17 00:00:00 2001 From: be-marc Date: Wed, 9 Jul 2025 13:00:58 +0200 Subject: [PATCH 03/15] ... --- R/OptimizerBatchCmaes.R | 1 - 1 file changed, 1 deletion(-) diff --git a/R/OptimizerBatchCmaes.R b/R/OptimizerBatchCmaes.R index 20428ce2a..e9313b304 100644 --- a/R/OptimizerBatchCmaes.R +++ b/R/OptimizerBatchCmaes.R @@ -116,7 +116,6 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", start_values = pv$start_values start = pv$start - par = if (pv$start_values == "custom") set_names(start, inst$search_space$ids()) else search_start(inst$search_space, type = start_values) if (length(par) < 2L) { From d44bb6dafd3b67f98b13fc0319c82c4881530425 Mon Sep 17 00:00:00 2001 From: be-marc Date: Wed, 9 Jul 2025 14:50:52 +0200 Subject: [PATCH 04/15] ... 
--- R/OptimizerBatchCmaes.R | 2 +- man/mlr_optimizers_cmaes.Rd | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/R/OptimizerBatchCmaes.R b/R/OptimizerBatchCmaes.R index e9313b304..e6d86b1c9 100644 --- a/R/OptimizerBatchCmaes.R +++ b/R/OptimizerBatchCmaes.R @@ -33,7 +33,7 @@ #' #' @export #' @examples -#' if (requireNamespace("adagio")) { +#' if (requireNamespace("cmaes")) { #' search_space = domain = ps( #' x1 = p_dbl(-10, 10), #' x2 = p_dbl(-5, 5) diff --git a/man/mlr_optimizers_cmaes.Rd b/man/mlr_optimizers_cmaes.Rd index 124d4b071..d3621c9d0 100644 --- a/man/mlr_optimizers_cmaes.Rd +++ b/man/mlr_optimizers_cmaes.Rd @@ -48,7 +48,7 @@ combined with a \link{Terminator}. Simply wrap the function in } \examples{ -if (requireNamespace("adagio")) { +if (requireNamespace("cmaes")) { search_space = domain = ps( x1 = p_dbl(-10, 10), x2 = p_dbl(-5, 5) From c2c0c7162b8bb80926875a7a4fb70ca25c0e48d1 Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 16 Sep 2025 13:30:17 +0200 Subject: [PATCH 05/15] ... --- R/OptimizerBatchCmaes.R | 63 +++++++++++----------------- tests/testthat/test_OptimizerCmaes.R | 4 +- 2 files changed, 26 insertions(+), 41 deletions(-) diff --git a/R/OptimizerBatchCmaes.R b/R/OptimizerBatchCmaes.R index e6d86b1c9..e6d58f29a 100644 --- a/R/OptimizerBatchCmaes.R +++ b/R/OptimizerBatchCmaes.R @@ -5,7 +5,7 @@ #' #' @description #' `OptimizerBatchCmaes` class that implements CMA-ES. -#' Calls `cma_es()` from package \CRANpkg{cmaes}. +#' Calls `cmaes()` from package \CRANpkg{libcmaesr}. #' The algorithm is typically applied to search space dimensions between three and fifty. #' Lower search space dimensions might crash. #' @@ -23,7 +23,7 @@ #' Only applicable if `start_values` parameter is set to `"custom"`.} #' } #' -#' For the meaning of the control parameters, see `cma_es()`. +#' For the meaning of the control parameters, see `cmaes()`. #' The parameters `maxit`, `stopfitness` and `stop.tolx` can be used additionally to our terminators. 
#' The default values of `maxit` is `100 * D^2` where `D` is the number of dimensions of the search space. #' The `stop.tolx` parameter stops when the step size is smaller than `1e-12 * sigma`. @@ -33,7 +33,7 @@ #' #' @export #' @examples -#' if (requireNamespace("cmaes")) { +#' if (requireNamespace("libcmaesr")) { #' search_space = domain = ps( #' x1 = p_dbl(-10, 10), #' x2 = p_dbl(-5, 5) @@ -74,24 +74,7 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", #' Creates a new instance of this [R6][R6::R6Class] class. initialize = function() { param_set = ps( - fnscale = p_dbl(default = 1), - maxit = p_int(lower = 1L), - stopfitness = p_dbl(default = -Inf), - keep.best = p_lgl(default = TRUE), - sigma = p_uty(default = 0.5), - mu = p_int(lower = 1L), - lambda = p_int(lower = 1L), - weights = p_uty(), - damps = p_dbl(), - cs = p_dbl(), - ccum = p_dbl(), - ccov.1 = p_dbl(lower = 0), - ccov.mu = p_dbl(lower = 0), - diag.sigma = p_lgl(default = FALSE), - diag.eigen = p_lgl(default = FALSE), - diag.pop = p_lgl(default = FALSE), - diag.value = p_lgl(default = FALSE), - stop.tolx = p_dbl(), # undocumented stop criterion + max_fevals = p_int(lower = 1L, init = 1000L), start_values = p_fct(default = "random", levels = c("random", "center", "custom")), start = p_uty(default = NULL, depends = start_values == "custom") ) @@ -115,30 +98,32 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", pv = self$param_set$values start_values = pv$start_values start = pv$start + direction = inst$objective$codomain$direction - par = if (pv$start_values == "custom") set_names(start, inst$search_space$ids()) else search_start(inst$search_space, type = start_values) + lower = inst$search_space$lower + upper = inst$search_space$upper + x0 = if (pv$start_values == "custom") set_names(start, inst$search_space$ids()) else search_start(inst$search_space, type = start_values) - if (length(par) < 2L) { - warning("CMA-ES is typically applied to search space dimensions between three and fifty. 
A lower search space dimension might crash.") - } - - control = pv[names(pv) %nin% c("start_values", "start")] - control$vectorized = TRUE - - wrapper = function(xmat, inst) { - xdt = as.data.table(t(xmat)) + wrapper = function(xmat) { + xdt = set_names(as.data.table(xmat), inst$objective$domain$ids()) res = inst$eval_batch(xdt) y = res[, inst$objective$codomain$target_ids, with = FALSE][[1]] - y * inst$objective_multiplicator + y * direction } - invoke(cmaes::cma_es, - par = par, - fn = wrapper, - lower = inst$search_space$lower, - upper = inst$search_space$upper, - control = control, - inst = inst) + control = libcmaesr::cmaes_control( + maximize = direction == -1L, + algo = "abipop", + max_fevals = pv$max_fevals + ) + + libcmaesr::cmaes( + objective = wrapper, + x0 = x0, + lower = lower, + upper = upper, + batch = TRUE, + control = control) } ) ) diff --git a/tests/testthat/test_OptimizerCmaes.R b/tests/testthat/test_OptimizerCmaes.R index 96d276387..5abe18a6f 100644 --- a/tests/testthat/test_OptimizerCmaes.R +++ b/tests/testthat/test_OptimizerCmaes.R @@ -20,9 +20,9 @@ test_that("OptimizerBatchCmaes", { instance = OptimInstanceBatchSingleCrit$new( objective = objective, search_space = search_space, - terminator = trm("evals", n_evals = 10L)) + terminator = trm("evals", n_evals = 100L)) - z = test_optimizer(instance, "cmaes", mu = 5, lambda = 5, real_evals = 10L) + z = test_optimizer(instance, "cmaes", max_fevals = 100L) expect_class(z$optimizer, "OptimizerBatchCmaes") expect_snapshot(z$optimizer) From 047e49ec8a7c587b675c339ce5acce2c1ba4d54c Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 16 Sep 2025 13:38:10 +0200 Subject: [PATCH 06/15] ... 
--- R/OptimizerBatchCmaes.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/OptimizerBatchCmaes.R b/R/OptimizerBatchCmaes.R index e6d58f29a..fd2828d3c 100644 --- a/R/OptimizerBatchCmaes.R +++ b/R/OptimizerBatchCmaes.R @@ -86,7 +86,7 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", param_set = param_set, param_classes = "ParamDbl", properties = "single-crit", - packages = "cmaes", + packages = "libcmaesr", label = "Covariance Matrix Adaptation Evolution Strategy", man = "bbotk::mlr_optimizers_cmaes" ) From b453afc466b89cd689d611fb4a57f1bffd3ac5b6 Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 7 Oct 2025 08:23:04 +0200 Subject: [PATCH 07/15] ... --- R/OptimizerBatchCmaes.R | 5 +++- tests/testthat/test_OptimizerCmaes.R | 37 ++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/R/OptimizerBatchCmaes.R b/R/OptimizerBatchCmaes.R index fd2828d3c..b0929aa15 100644 --- a/R/OptimizerBatchCmaes.R +++ b/R/OptimizerBatchCmaes.R @@ -75,11 +75,13 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", initialize = function() { param_set = ps( max_fevals = p_int(lower = 1L, init = 1000L), + max_restarts = p_int(lower = 1L, special_vals = list(NA), default = NA), start_values = p_fct(default = "random", levels = c("random", "center", "custom")), start = p_uty(default = NULL, depends = start_values == "custom") ) param_set$values$start_values = "random" param_set$values$start = NULL + param_set$values$max_restarts = NA super$initialize( id = "cmaes", @@ -114,7 +116,8 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", control = libcmaesr::cmaes_control( maximize = direction == -1L, algo = "abipop", - max_fevals = pv$max_fevals + max_fevals = pv$max_fevals, + max_restarts = pv$max_restarts ) libcmaesr::cmaes( diff --git a/tests/testthat/test_OptimizerCmaes.R b/tests/testthat/test_OptimizerCmaes.R index 5abe18a6f..7695ee861 100644 --- a/tests/testthat/test_OptimizerCmaes.R +++ b/tests/testthat/test_OptimizerCmaes.R 
@@ -34,3 +34,40 @@ test_that("OptimizerBatchCmaes", { optimizer$optimize(instance) # start values are used for the initial mean vector so a deterministic test is not applicable }) + +test_that("OptimizerBatchCmaes", { + skip_if_not_installed("cmaes") + + search_space = domain = ps( + x1 = p_dbl(-10, 10), + x2 = p_dbl(-5, 5) + ) + + codomain = ps(y = p_dbl(tags = "maximize")) + + objective_function = function(xs) { + c(y = -(xs[[1]] - 2)^2 - (xs[[2]] + 3)^2 + 10) + } + + objective = ObjectiveRFun$new( + fun = objective_function, + domain = domain, + codomain = codomain) + + instance = OptimInstanceBatchSingleCrit$new( + objective = objective, + search_space = search_space, + terminator = trm("evals", n_evals = 100L)) + + z = test_optimizer(instance, "cmaes", max_fevals = 100L, max_restarts = 2L) + + expect_class(z$optimizer, "OptimizerBatchCmaes") + expect_snapshot(z$optimizer) + + expect_error(test_optimizer_2d("cmaes", mu = 5, lambda = 5, term_evals = 10L), "multi-crit objectives") + + instance$archive$clear() + optimizer = opt("cmaes", mu = 5, lambda = 5, start_values = "custom", start = c(-9.1, 1.3)) + optimizer$optimize(instance) + # start values are used for the initial mean vector so a deterministic test is not applicable +}) From 693d3126eab08536adadd41c1e314a25c91c6fde Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 7 Oct 2025 09:23:25 +0200 Subject: [PATCH 08/15] ... --- R/OptimizerBatchCmaes.R | 34 +++++++++++++++++++--------------- man/mlr_optimizers_cmaes.Rd | 13 ++++--------- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/R/OptimizerBatchCmaes.R b/R/OptimizerBatchCmaes.R index b0929aa15..5b00a419f 100644 --- a/R/OptimizerBatchCmaes.R +++ b/R/OptimizerBatchCmaes.R @@ -6,8 +6,6 @@ #' @description #' `OptimizerBatchCmaes` class that implements CMA-ES. #' Calls `cmaes()` from package \CRANpkg{libcmaesr}. -#' The algorithm is typically applied to search space dimensions between three and fifty. 
-#' Lower search space dimensions might crash. #' #' @templateVar id cmaes #' @template section_dictionary_optimizers @@ -23,11 +21,8 @@ #' Only applicable if `start_values` parameter is set to `"custom"`.} #' } #' -#' For the meaning of the control parameters, see `cmaes()`. -#' The parameters `maxit`, `stopfitness` and `stop.tolx` can be used additionally to our terminators. -#' The default values of `maxit` is `100 * D^2` where `D` is the number of dimensions of the search space. -#' The `stop.tolx` parameter stops when the step size is smaller than `1e-12 * sigma`. -#' The `vectorized` parameter is always set to `TRUE`. +#' For the meaning of the control parameters, see `libcmaesr::cmaes_control()`. +#' The parameters `maxfevals`, `ftarget`, `f_tolerance` and `x_tolerance` can be used additionally to our terminators. #' #' @template section_progress_bars #' @@ -50,7 +45,7 @@ #' domain = domain, #' codomain = codomain) #' -#' instance = OptimInstanceBatchSingleCrit$new( +#' instance = oi( #' objective = objective, #' search_space = search_space, #' terminator = trm("evals", n_evals = 10)) @@ -74,8 +69,21 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", #' Creates a new instance of this [R6][R6::R6Class] class. 
initialize = function() { param_set = ps( - max_fevals = p_int(lower = 1L, init = 1000L), + algo = p_fct(default = "acmaes", levels = c("cmaes", "ipop", "bipop", "acmaes", "aipop", "abipop", "sepcmaes", "sepipop", "sepbipop", "sepacmaes", "sepaipop", "sepabipop", "vdcma", "vdipopcma", "vdbipopcma")), + lambda = p_int(lower = 1L, default = NA_integer_, special_vals = list(NA_integer_)), + sigma = p_dbl(default = NA_real_, special_vals = list(NA_real_)), max_restarts = p_int(lower = 1L, special_vals = list(NA), default = NA), + tpa = p_int(default = NA_integer_, special_vals = list(NA_integer_)), + tpa_dsigma = p_dbl(default = NA_real_, special_vals = list(NA_real_)), + seed = p_int(default = NA_integer_, special_vals = list(NA_integer_)), + quiet = p_lgl(default = FALSE), + # internal termination criteria + max_fevals = p_int(lower = 1L, default = 100L, special_vals = list(NA_integer_)), + max_iter = p_int(lower = 1L, default = NA_integer_, special_vals = list(NA_integer_)), + ftarget = p_dbl(default = NA_real_, special_vals = list(NA_real_)), + f_tolerance = p_dbl(default = NA_real_, special_vals = list(NA_real_)), + x_tolerance = p_dbl(default = NA_real_, special_vals = list(NA_real_)), + # bbotk parameters start_values = p_fct(default = "random", levels = c("random", "center", "custom")), start = p_uty(default = NULL, depends = start_values == "custom") ) @@ -113,12 +121,8 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", y * direction } - control = libcmaesr::cmaes_control( - maximize = direction == -1L, - algo = "abipop", - max_fevals = pv$max_fevals, - max_restarts = pv$max_restarts - ) + control = invoke(libcmaesr::cmaes_control, maximize = direction == -1L, + .args = pv[which(names(pv) %nin% formalArgs(libcmaesr::cmaes_control))]) libcmaesr::cmaes( objective = wrapper, diff --git a/man/mlr_optimizers_cmaes.Rd b/man/mlr_optimizers_cmaes.Rd index d3621c9d0..70ae78f37 100644 --- a/man/mlr_optimizers_cmaes.Rd +++ b/man/mlr_optimizers_cmaes.Rd @@ -6,9 +6,7 
@@ \title{Optimization via Covariance Matrix Adaptation Evolution Strategy} \description{ \code{OptimizerBatchCmaes} class that implements CMA-ES. -Calls \code{cma_es()} from package \CRANpkg{cmaes}. -The algorithm is typically applied to search space dimensions between three and fifty. -Lower search space dimensions might crash. +Calls \code{cmaes()} from package \CRANpkg{libcmaesr}. } \section{Dictionary}{ @@ -32,11 +30,8 @@ Custom start values. Only applicable if \code{start_values} parameter is set to \code{"custom"}.} } -For the meaning of the control parameters, see \code{cma_es()}. -The parameters \code{maxit}, \code{stopfitness} and \code{stop.tolx} can be used additionally to our terminators. -The default values of \code{maxit} is \code{100 * D^2} where \code{D} is the number of dimensions of the search space. -The \code{stop.tolx} parameter stops when the step size is smaller than \code{1e-12 * sigma}. -The \code{vectorized} parameter is always set to \code{TRUE}. +For the meaning of the control parameters, see \code{libcmaesr::cmaes_control()}. +The parameters \code{maxfevals}, \code{ftarget}, \code{f_tolerance} and \code{x_tolerance} can be used additionally to our terminators. } \section{Progress Bars}{ @@ -65,7 +60,7 @@ if (requireNamespace("cmaes")) { domain = domain, codomain = codomain) - instance = OptimInstanceBatchSingleCrit$new( + instance = oi( objective = objective, search_space = search_space, terminator = trm("evals", n_evals = 10)) From e9f3320b3dc1de087b78a0a836b428b606b4402c Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 7 Oct 2025 09:39:33 +0200 Subject: [PATCH 09/15] ... 
--- DESCRIPTION | 3 +- R/OptimizerBatchCmaes.R | 15 +++--- man/mlr_optimizers_cmaes.Rd | 2 +- tests/testthat/test_OptimizerCmaes.R | 69 +--------------------------- 4 files changed, 12 insertions(+), 77 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index f98180add..a39261e81 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -34,7 +34,6 @@ Imports: mlr3misc (>= 0.15.1), R6 Suggests: - cmaes, emoa, GenSA, irace (>= 4.0.0), @@ -53,7 +52,7 @@ Encoding: UTF-8 Language: en-US NeedsCompilation: no Roxygen: list(markdown = TRUE) -RoxygenNote: 7.3.2 +RoxygenNote: 7.3.3 Collate: 'Archive.R' 'ArchiveAsync.R' diff --git a/R/OptimizerBatchCmaes.R b/R/OptimizerBatchCmaes.R index 5b00a419f..9b0f9b3ce 100644 --- a/R/OptimizerBatchCmaes.R +++ b/R/OptimizerBatchCmaes.R @@ -84,12 +84,9 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", f_tolerance = p_dbl(default = NA_real_, special_vals = list(NA_real_)), x_tolerance = p_dbl(default = NA_real_, special_vals = list(NA_real_)), # bbotk parameters - start_values = p_fct(default = "random", levels = c("random", "center", "custom")), + start_values = p_fct(init = "random", levels = c("random", "center", "custom")), start = p_uty(default = NULL, depends = start_values == "custom") ) - param_set$values$start_values = "random" - param_set$values$start = NULL - param_set$values$max_restarts = NA super$initialize( id = "cmaes", @@ -112,17 +109,21 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", lower = inst$search_space$lower upper = inst$search_space$upper - x0 = if (pv$start_values == "custom") set_names(start, inst$search_space$ids()) else search_start(inst$search_space, type = start_values) + x0 = if (pv$start_values == "custom") { + set_names(start, inst$search_space$ids()) + } else { + search_start(inst$search_space, type = start_values) + } wrapper = function(xmat) { - xdt = set_names(as.data.table(xmat), inst$objective$domain$ids()) + xdt = set_names(as.data.table(xmat), inst$search_space$ids()) res = 
inst$eval_batch(xdt) y = res[, inst$objective$codomain$target_ids, with = FALSE][[1]] y * direction } control = invoke(libcmaesr::cmaes_control, maximize = direction == -1L, - .args = pv[which(names(pv) %nin% formalArgs(libcmaesr::cmaes_control))]) + .args = pv[which(names(pv) %in% formalArgs(libcmaesr::cmaes_control))]) libcmaesr::cmaes( objective = wrapper, diff --git a/man/mlr_optimizers_cmaes.Rd b/man/mlr_optimizers_cmaes.Rd index 70ae78f37..b39800e9d 100644 --- a/man/mlr_optimizers_cmaes.Rd +++ b/man/mlr_optimizers_cmaes.Rd @@ -43,7 +43,7 @@ combined with a \link{Terminator}. Simply wrap the function in } \examples{ -if (requireNamespace("cmaes")) { +if (requireNamespace("libcmaesr")) { search_space = domain = ps( x1 = p_dbl(-10, 10), x2 = p_dbl(-5, 5) diff --git a/tests/testthat/test_OptimizerCmaes.R b/tests/testthat/test_OptimizerCmaes.R index 7695ee861..583f7455f 100644 --- a/tests/testthat/test_OptimizerCmaes.R +++ b/tests/testthat/test_OptimizerCmaes.R @@ -1,73 +1,8 @@ test_that("OptimizerBatchCmaes", { - skip_if_not_installed("cmaes") - - search_space = domain = ps( - x1 = p_dbl(-10, 10), - x2 = p_dbl(-5, 5) - ) - - codomain = ps(y = p_dbl(tags = "maximize")) - - objective_function = function(xs) { - c(y = -(xs[[1]] - 2)^2 - (xs[[2]] + 3)^2 + 10) - } - - objective = ObjectiveRFun$new( - fun = objective_function, - domain = domain, - codomain = codomain) - - instance = OptimInstanceBatchSingleCrit$new( - objective = objective, - search_space = search_space, - terminator = trm("evals", n_evals = 100L)) - - z = test_optimizer(instance, "cmaes", max_fevals = 100L) + skip_if_not_installed("libcmaesr") + z = test_optimizer_1d("cmaes", term_evals = 100L) expect_class(z$optimizer, "OptimizerBatchCmaes") expect_snapshot(z$optimizer) - - expect_error(test_optimizer_2d("cmaes", mu = 5, lambda = 5, term_evals = 10L), "multi-crit objectives") - - instance$archive$clear() - optimizer = opt("cmaes", mu = 5, lambda = 5, start_values = "custom", start = c(-9.1, 1.3)) - 
optimizer$optimize(instance) - # start values are used for the initial mean vector so a deterministic test is not applicable }) -test_that("OptimizerBatchCmaes", { - skip_if_not_installed("cmaes") - - search_space = domain = ps( - x1 = p_dbl(-10, 10), - x2 = p_dbl(-5, 5) - ) - - codomain = ps(y = p_dbl(tags = "maximize")) - - objective_function = function(xs) { - c(y = -(xs[[1]] - 2)^2 - (xs[[2]] + 3)^2 + 10) - } - - objective = ObjectiveRFun$new( - fun = objective_function, - domain = domain, - codomain = codomain) - - instance = OptimInstanceBatchSingleCrit$new( - objective = objective, - search_space = search_space, - terminator = trm("evals", n_evals = 100L)) - - z = test_optimizer(instance, "cmaes", max_fevals = 100L, max_restarts = 2L) - - expect_class(z$optimizer, "OptimizerBatchCmaes") - expect_snapshot(z$optimizer) - - expect_error(test_optimizer_2d("cmaes", mu = 5, lambda = 5, term_evals = 10L), "multi-crit objectives") - - instance$archive$clear() - optimizer = opt("cmaes", mu = 5, lambda = 5, start_values = "custom", start = c(-9.1, 1.3)) - optimizer$optimize(instance) - # start values are used for the initial mean vector so a deterministic test is not applicable -}) From 272bc427caa656cd9a43870862db4d89da3dd439 Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 7 Oct 2025 10:37:25 +0200 Subject: [PATCH 10/15] ... 
--- DESCRIPTION | 1 + tests/testthat/_snaps/OptimizerCmaes.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index a39261e81..b890b9f97 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -38,6 +38,7 @@ Suggests: GenSA, irace (>= 4.0.0), knitr, + libcmaesr, mirai, nloptr, processx, diff --git a/tests/testthat/_snaps/OptimizerCmaes.md b/tests/testthat/_snaps/OptimizerCmaes.md index 216259fb5..ba59f4f55 100644 --- a/tests/testthat/_snaps/OptimizerCmaes.md +++ b/tests/testthat/_snaps/OptimizerCmaes.md @@ -5,8 +5,8 @@ Output -- - Covariance Matrix Adaptation Evolution Strategy ----- - * Parameters: mu=5, lambda=5, start_values=random + * Parameters: start_values=random * Parameter classes: * Properties: single-crit - * Packages: bbotk and cmaes + * Packages: bbotk and libcmaesr From 8dd76fa8aa1b75edaf0681c2ee0ed2f73d625e51 Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 7 Oct 2025 14:10:49 +0200 Subject: [PATCH 11/15] refactor: internal termination and start values --- R/OptimizerBatchFocusSearch.R | 12 +- R/OptimizerBatchGenSA.R | 87 +++++++++--- R/OptimizerBatchIrace.R | 83 ++++++----- R/OptimizerBatchNLoptr.R | 155 +++++++++++++-------- R/OptimizerBatchRandomSearch.R | 5 +- man/mlr_optimizers_focus_search.Rd | 8 +- man/mlr_optimizers_gensa.Rd | 57 ++++++-- man/mlr_optimizers_irace.Rd | 74 +++++++--- man/mlr_optimizers_nloptr.Rd | 67 ++++++--- tests/testthat/test_OptimizerBatchGenSA.R | 29 +++- tests/testthat/test_OptimizerBatchNLoptr.R | 7 +- 11 files changed, 401 insertions(+), 183 deletions(-) diff --git a/R/OptimizerBatchFocusSearch.R b/R/OptimizerBatchFocusSearch.R index 170e9419d..2e50bde70 100644 --- a/R/OptimizerBatchFocusSearch.R +++ b/R/OptimizerBatchFocusSearch.R @@ -7,15 +7,11 @@ #' `OptimizerBatchFocusSearch` class that implements a Focus Search. #' #' Focus Search starts with evaluating `n_points` drawn uniformly at random. 
-#' For 1 to `maxit` batches, `n_points` are then drawn uniformly at random and -#' if the best value of a batch outperforms the previous best value over all -#' batches evaluated so far, the search space is shrinked around this new best -#' point prior to the next batch being sampled and evaluated. +#' For 1 to `maxit` batches, `n_points` are then drawn uniformly at random and if the best value of a batch outperforms the previous best value over all batches evaluated so far, the search space is shrinked around this new best point prior to the next batch being sampled and evaluated. #' #' For details on the shrinking, see [shrink_ps]. #' -#' Depending on the [Terminator] this procedure simply restarts after `maxit` is -#' reached. +#' Depending on the [Terminator] this procedure simply restarts after `maxit` is reached. #' #' @templateVar id focus_search #' @template section_dictionary_optimizers @@ -23,9 +19,9 @@ #' @section Parameters: #' \describe{ #' \item{`n_points`}{`integer(1)`\cr -#' Number of points to evaluate in each random search batch.} +#' Number of points to evaluate in each random search batch.} #' \item{`maxit`}{`integer(1)`\cr -#' Number of random search batches to run.} +#' Number of random search batches to run.} #' } #' #' @template section_progress_bars diff --git a/R/OptimizerBatchGenSA.R b/R/OptimizerBatchGenSA.R index 732ed58fa..aabffa9f8 100644 --- a/R/OptimizerBatchGenSA.R +++ b/R/OptimizerBatchGenSA.R @@ -1,32 +1,57 @@ -#' @title Optimization via Generalized Simulated Annealing +#' @title Generalized Simulated Annealing #' #' @include Optimizer.R #' @name mlr_optimizers_gensa #' #' @description -#' `OptimizerBatchGenSA` class that implements generalized simulated annealing. Calls -#' [GenSA::GenSA()] from package \CRANpkg{GenSA}. +#' `OptimizerBatchGenSA` class that implements generalized simulated annealing. +#' Calls [GenSA::GenSA()] from package \CRANpkg{GenSA}. 
#'
#' @templateVar id gensa
#' @template section_dictionary_optimizers
#'
#' @section Parameters:
#' \describe{
-#' \item{`smooth`}{`logical(1)`}
-#' \item{`temperature`}{`numeric(1)`}
-#' \item{`acceptance.param`}{`numeric(1)`}
-#' \item{`verbose`}{`logical(1)`}
-#' \item{`trace.mat`}{`logical(1)`}
+#' \item{`par`}{`numeric()`\cr
+#' Initial parameter values.
+#' Default is `NULL`, in which case, default values will be generated automatically.}
+#' \item{`start_values`}{`character(1)`\cr
+#' Create `"random"` start values or based on `"center"` of search space?
+#' In the latter case, it is the center of the parameters before a trafo is applied.
+#' By default, `GenSA` will generate start values automatically.
+#' Custom start values can be passed via the `par` parameter.}
#' }
#'
-#' For the meaning of the control parameters, see [GenSA::GenSA()]. Note that we
-#' have removed all control parameters which refer to the termination of the
-#' algorithm and where our terminators allow to obtain the same behavior.
-#'
-#' In contrast to the [GenSA::GenSA()] defaults, we set `trace.mat = FALSE`.
+#' For the meaning of the control parameters, see [GenSA::GenSA()].
#' Note that [GenSA::GenSA()] uses `smooth = TRUE` as a default.
-#' In the case of using this optimizer for Hyperparameter Optimization you may
-#' want to set `smooth = FALSE`.
+#' In the case of using this optimizer for Hyperparameter Optimization you may want to set `smooth = FALSE`.
+#'
+#' @section Internal Termination Parameters:
+#' The algorithm can be terminated with all [Terminator]s.
+#' Additionally, the following internal termination parameters can be used:
+#'
+#' \describe{
+#' \item{`maxit`}{`integer(1)`\cr
+#' Maximum number of iterations.
+#' Original default is `5000`.
+#' Overwritten with `.Machine$integer.max`.}
+#' \item{`threshold.stop`}{`numeric(1)`\cr
+#' Threshold stop.
+#' Deactivated with `NULL`.
+#' Default is `NULL`.} +#' \item{`nb.stop.improvement`}{`integer(1)`\cr +#' Number of stop improvement. +#' Deactivated with `-1L`. +#' Default is `-1L`.} +#' \item{`max.call`}{`integer(1)`\cr +#' Maximum number of calls. +#' Original default is `1e7`. +#' Overwritten with `.Machine$integer.max`.} +#' \item{`max.time`}{`integer(1)`\cr +#' Maximum time. +#' Deactivate with `NULL`. +#' Default is `NULL`.} +#' } #' #' @template section_progress_bars #' @@ -73,13 +98,22 @@ OptimizerBatchGenSA = R6Class("OptimizerBatchGenSA", inherit = OptimizerBatch, #' Creates a new instance of this [R6][R6::R6Class] class. initialize = function() { param_set = ps( + par = p_uty(default = NULL), smooth = p_lgl(default = TRUE), temperature = p_dbl(default = 5230), visiting.param = p_dbl(default = 2.62, lower = 2.01, upper = 2.99), # see https://journal.r-project.org/archive/2013-1/xiang-gubian-suomela-etal.pdf acceptance.param = p_dbl(default = -5, upper = -0.01), # see https://journal.r-project.org/archive/2013-1/xiang-gubian-suomela-etal.pdf simple.function = p_lgl(default = FALSE), verbose = p_lgl(default = FALSE), - trace.mat = p_lgl(default = TRUE) + trace.mat = p_lgl(default = TRUE), + # bbotk parameters + start_values = p_fct(levels = c("random", "center")), + # internal termination criteria + maxit = p_int(lower = 1L, init = .Machine$integer.max), + threshold.stop = p_dbl(lower = 0), + nb.stop.improvement = p_int(lower = 1L, init = -1L, special_vals = list(-1L)), + max.call = p_int(lower = 1L, init = .Machine$integer.max), + max.time = p_int(lower = 0L) ) super$initialize( id = "gensa", @@ -95,12 +129,21 @@ OptimizerBatchGenSA = R6Class("OptimizerBatchGenSA", inherit = OptimizerBatch, private = list( .optimize = function(inst) { - v = self$param_set$values - v$maxit = .Machine$integer.max # make sure GenSA does not stop - v$nb.stop.improvement = .Machine$integer.max # make sure GenSA does not stop - GenSA::GenSA(par = NULL, fn = inst$objective_function, - lower = 
inst$search_space$lower, upper = inst$search_space$upper, - control = v) + pv = self$param_set$values + + if (!is.null(pv$start_values) && is.null(pv$par)) { + pv$par = search_start(inst$search_space, type = pv$start_values) + } + pv$start_values = NULL + par = pv$par + pv$par = NULL + + GenSA::GenSA( + par = par, + fn = inst$objective_function, + lower = inst$search_space$lower, + upper = inst$search_space$upper, + control = pv) } ) ) diff --git a/R/OptimizerBatchIrace.R b/R/OptimizerBatchIrace.R index dcfaf7771..bb4383a52 100644 --- a/R/OptimizerBatchIrace.R +++ b/R/OptimizerBatchIrace.R @@ -1,37 +1,62 @@ -#' @title Optimization via Iterated Racing +#' @title Iterated Racing #' #' @include Optimizer.R #' @name mlr_optimizers_irace #' #' @description -#' `OptimizerBatchIrace` class that implements iterated racing. Calls -#' [irace::irace()] from package \CRANpkg{irace}. +#' `OptimizerBatchIrace` class that implements iterated racing. +#' Calls [irace::irace()] from package \CRANpkg{irace}. #' #' @section Parameters: #' \describe{ #' \item{`instances`}{`list()`\cr -#' A list of instances where the configurations executed on.} +#' A list of instances where the configurations executed on.} #' \item{`targetRunnerParallel`}{`function()`\cr -#' A function that executes the objective function with a specific parameter -#' configuration and instance. A default function is provided, see section -#' "Target Runner and Instances".} +#' A function that executes the objective function with a specific parameter configuration and instance. +#' A default function is provided, see section "Target Runner and Instances".} #' } #' -#' For the meaning of all other parameters, see [irace::defaultScenario()]. Note -#' that we have removed all control parameters which refer to the termination of -#' the algorithm. Use [TerminatorEvals] instead. Other terminators do not work -#' with `OptimizerBatchIrace`. +#' For the meaning of all other parameters, see [irace::defaultScenario()]. 
#' -#' In contrast to [irace::defaultScenario()], we set `digits = 15`. -#' This represents double parameters with a higher precision and avoids rounding errors. +#' @section Internal Termination Parameters: +#' The algorithm can terminated with [TerminatorEvals]. +#' Other [Terminator]s do not work with `OptimizerBatchIrace`. +#' Additionally, the following internal termination parameters can be used: +#' +#' \describe{ +#' \item{`maxExperiments`}{`integer(1)`\cr +#' Maximum number of runs (invocations of targetRunner) that will be performed. +#' It determines the maximum budget of experiments for the tuning. +#' Default is 0.} +#' \item{`minExperiments`}{`integer(1)`\cr +#' Minimum number of runs (invocations of targetRunner) that will be performed. +#' It determines the minimum budget of experiments for the tuning. +#' The actual budget depends on the number of parameters and minSurvival. +#' Default is NA.} +#' \item{`maxTime`}{`integer(1)`\cr +#' Maximum total execution time for the executions of targetRunner. +#' targetRunner must return two values: cost and time. +#' This value and the one returned by targetRunner must use the same units (seconds, minutes, iterations, evaluations, ...). +#' Default is 0.} +#' \item{`budgetEstimation`}{`numeric(1)`\cr +#' Fraction (smaller than 1) of the budget used to estimate the mean computation time of a configuration. +#' Only used when maxTime > 0 +#' Default is 0.05.} +#' \item{`minMeasurableTime`}{`numeric(1)`\cr +#' Minimum time unit that is still (significantly) measureable. +#' Default is 0.01.} +#' } +#' +#' @section Initial parameter values: +#' - `digits`: +#' - Adjusted default: 15. +#' - This represents double parameters with a higher precision and avoids rounding errors. #' #' @section Target Runner and Instances: -#' The irace package uses a `targetRunner` script or R function to evaluate a -#' configuration on a particular instance. 
Usually it is not necessary to -#' specify a `targetRunner` function when using `OptimizerBatchIrace`. A default -#' function is used that forwards several configurations and instances to the -#' user defined objective function. As usually, the user defined function has -#' a `xs`, `xss` or `xdt` parameter depending on the used [Objective] class. +#' The irace package uses a `targetRunner` script or R function to evaluate a configuration on a particular instance. +#' Usually it is not necessary to specify a `targetRunner` function when using `OptimizerBatchIrace`. +#' A default function is used that forwards several configurations and instances to the user defined objective function. +#' As usually, the user defined function has a `xs`, `xss` or `xdt` parameter depending on the used [Objective] class. #' For irace, the function needs an additional `instances` parameter. #' #' ``` @@ -52,9 +77,8 @@ #' Identifies configurations across races and steps. #' #' @section Result: -#' The optimization result (`instance$result`) is the best performing elite of -#' the final race. The reported performance is the average performance estimated -#' on all used instances. +#' The optimization result (`instance$result`) is the best performing elite of the final race. +#' The reported performance is the average performance estimated on all used instances. 
#' #' @templateVar id irace #' @template section_dictionary_optimizers @@ -122,9 +146,9 @@ OptimizerBatchIrace = R6Class("OptimizerBatchIrace", initialize = function() { param_set = ps( instances = p_uty(tags = "required"), - targetRunnerParallel = p_uty(tags = "required"), + targetRunnerParallel = p_uty(init = target_runner_default, tags = "required"), debugLevel = p_int(default = 0, lower = 0), - logFile = p_uty(), + logFile = p_uty(init = tempfile(fileext = ".Rdata")), seed = p_int(), postselection = p_dbl(default = 0, lower = 0, upper = 1), elitist = p_int(default = 1, lower = 0, upper = 1), @@ -136,7 +160,7 @@ OptimizerBatchIrace = R6Class("OptimizerBatchIrace", mu = p_int(default = 5, lower = 1), softRestart = p_int(default = 1, lower = 0, upper = 1), softRestartThreshold = p_dbl(), - digits = p_int(lower = 1, upper = 15, tags = "required"), + digits = p_int(lower = 1, upper = 15, init = 15, tags = "required"), testType = p_fct(default = "F-test", levels = c("F-test", "t-test", "t-test-bonferroni", "t-test-holm")), firstTest = p_int(default = 5, lower = 0), eachTest = p_int(default = 1, lower = 1), @@ -150,10 +174,6 @@ OptimizerBatchIrace = R6Class("OptimizerBatchIrace", boundAsTimeout = p_dbl(default = 1), deterministic = p_lgl(default = FALSE) ) - param_set$values$debugLevel = 0 - param_set$values$logFile = tempfile(fileext = ".Rdata") - param_set$values$targetRunnerParallel = target_runner_default - param_set$values$digits = 15 super$initialize( id = "irace", @@ -186,10 +206,11 @@ OptimizerBatchIrace = R6Class("OptimizerBatchIrace", digits = pv$digits pv$digits = NULL - scenario = c(list( + scenario = list( parameters = paradox_to_irace(inst$search_space, pv$digits), maxExperiments = terminator$param_set$values$n_evals, - targetRunnerData = list(inst = inst)), pv) + targetRunnerData = list(inst = inst)) + scenario = insert_named(scenario, pv) # run irace res = invoke(irace::irace, scenario = scenario, .opts = allow_partial_matching) diff --git 
a/R/OptimizerBatchNLoptr.R b/R/OptimizerBatchNLoptr.R index 5fa90b38e..debea4b37 100644 --- a/R/OptimizerBatchNLoptr.R +++ b/R/OptimizerBatchNLoptr.R @@ -1,4 +1,4 @@ -#' @title Optimization via Non-linear Optimization +#' @title Non-linear Optimization #' #' @include Optimizer.R #' @name mlr_optimizers_nloptr @@ -9,34 +9,59 @@ #' #' @section Parameters: #' \describe{ -#' \item{`algorithm`}{`character(1)`} -#' \item{`eval_g_ineq`}{`function()`} -#' \item{`xtol_rel`}{`numeric(1)`} -#' \item{`xtol_abs`}{`numeric(1)`} -#' \item{`ftol_rel`}{`numeric(1)`} -#' \item{`ftol_abs`}{`numeric(1)`} +#' \item{`algorithm`}{`character(1)`\cr +#' Algorithm to use. +#' See [nloptr::nloptr.print.options()] for available algorithms.} +#' \item{`x0`}{`numeric()`\cr +#' Initial parameter values. +#' Use `start_values` parameter to create `"random"` or `"center"` start values.} #' \item{`start_values`}{`character(1)`\cr -#' Create `"random"` start values or based on `"center"` of search space? -#' In the latter case, it is the center of the parameters before a trafo is applied. -#' If set to `"custom"`, the start values can be passed via the `start` parameter.} -#' \item{`start`}{`numeric()`\cr -#' Custom start values. Only applicable if `start_values` parameter is set to `"custom"`.} +#' Create `"random"` start values or based on `"center"` of search space? +#' In the latter case, it is the center of the parameters before a trafo is applied. +#' Custom start values can be passed via the `x0` parameter.} #' \item{`approximate_eval_grad_f`}{`logical(1)`\cr -#' Should gradients be numerically approximated via finite differences ([nloptr::nl.grad]). -#' Only required for certain algorithms. -#' Note that function evaluations required for the numerical gradient approximation will be logged as usual -#' and are not treated differently than regular function evaluations by, e.g., [Terminator]s.} +#' Should gradients be numerically approximated via finite differences ([nloptr::nl.grad]). 
+#' Only required for certain algorithms.
+#' Note that function evaluations required for the numerical gradient approximation will be logged as usual and are not treated differently than regular function evaluations by, e.g., [Terminator]s.}
#' }
#'
-#' For the meaning of the control parameters, see [nloptr::nloptr()] and
-#' [nloptr::nloptr.print.options()].
+#' For the meaning of other control parameters, see [nloptr::nloptr()] and [nloptr::nloptr.print.options()].
#'
-#' The termination conditions `stopval`, `maxtime` and `maxeval` of
-#' [nloptr::nloptr()] are deactivated and replaced by the [Terminator]
-#' subclasses. The x and function value tolerance termination conditions
-#' (`xtol_rel = 10^-4`, `xtol_abs = rep(0.0, length(x0))`, `ftol_rel = 0.0` and
-#' `ftol_abs = 0.0`) are still available and implemented with their package
-#' defaults. To deactivate these conditions, set them to `-1`.
+#' @section Internal Termination Parameters:
+#' The algorithm can be terminated with all [Terminator]s.
+#' Additionally, the following internal termination parameters can be used:
+#'
+#' \describe{
+#' \item{`stopval`}{`numeric(1)`\cr
+#' Stop value.
+#' Deactivate with `-Inf`.
+#' Default is `-Inf`.}
+#' \item{`maxtime`}{`integer(1)`\cr
+#' Maximum time.
+#' Deactivate with `-1L`.
+#' Default is `-1L`.}
+#' \item{`maxeval`}{`integer(1)`\cr
+#' Maximum number of evaluations.
+#' Deactivate with `-1L`.
+#' Default is `-1L`.}
+#' \item{`xtol_rel`}{`numeric(1)`\cr
+#' Relative tolerance.
+#' Original default is 10^-4.
+#' Deactivate with `-1`.
+#' Overwritten with `-1`.}
+#' \item{`xtol_abs`}{`numeric(1)`\cr
+#' Absolute tolerance.
+#' Deactivate with `-1`.
+#' Default is `-1`.}
+#' \item{`ftol_rel`}{`numeric(1)`\cr
+#' Relative tolerance.
+#' Deactivate with `-1`.
+#' Default is `-1`.}
+#' \item{`ftol_abs`}{`numeric(1)`\cr
+#' Absolute tolerance.
+#' Deactivate with `-1`.
+#' Default is `-1`.} +#' } #' #' @template section_progress_bars #' @@ -90,29 +115,59 @@ OptimizerBatchNLoptr = R6Class("OptimizerBatchNLoptr", inherit = OptimizerBatch, #' Creates a new instance of this [R6][R6::R6Class] class. initialize = function() { param_set = ps( + x0 = p_uty(default = NULL), + eval_g_ineq = p_uty(default = NULL), algorithm = p_fct( levels = c( - "NLOPT_GN_DIRECT_L", "NLOPT_GN_DIRECT_L_RAND", "NLOPT_GN_DIRECT_NOSCAL", "NLOPT_GN_DIRECT_L_NOSCAL", - "NLOPT_GN_DIRECT_L_RAND_NOSCAL", "NLOPT_GN_ORIG_DIRECT", "NLOPT_GN_ORIG_DIRECT_L", "NLOPT_GD_STOGO", - "NLOPT_GD_STOGO_RAND", "NLOPT_LD_SLSQP", "NLOPT_LD_LBFGS_NOCEDAL", "NLOPT_LD_LBFGS", "NLOPT_LN_PRAXIS", - "NLOPT_LD_VAR1", "NLOPT_LD_VAR2", "NLOPT_LD_TNEWTON", "NLOPT_LD_TNEWTON_RESTART", "NLOPT_LD_TNEWTON_PRECOND", - "NLOPT_LD_TNEWTON_PRECOND_RESTART", "NLOPT_GN_CRS2_LM", "NLOPT_GN_MLSL", "NLOPT_GD_MLSL", "NLOPT_GN_MLSL_LDS", - "NLOPT_GD_MLSL_LDS", "NLOPT_LD_MMA", "NLOPT_LD_CCSAQ", "NLOPT_LN_COBYLA", "NLOPT_LN_NEWUOA", - "NLOPT_LN_NEWUOA_BOUND", "NLOPT_LN_NELDERMEAD", "NLOPT_LN_SBPLX", "NLOPT_LN_AUGLAG", "NLOPT_LD_AUGLAG", - "NLOPT_LN_AUGLAG_EQ", "NLOPT_LD_AUGLAG_EQ", "NLOPT_LN_BOBYQA", "NLOPT_GN_ISRES"), - tags = "required"), - eval_g_ineq = p_uty(default = NULL), - xtol_rel = p_dbl(default = 10^-4, lower = 0, upper = Inf, special_vals = list(-1)), - xtol_abs = p_dbl(default = 0, lower = 0, upper = Inf, special_vals = list(-1)), - ftol_rel = p_dbl(default = 0, lower = 0, upper = Inf, special_vals = list(-1)), - ftol_abs = p_dbl(default = 0, lower = 0, upper = Inf, special_vals = list(-1)), - start_values = p_fct(default = "random", levels = c("random", "center", "custom")), - start = p_uty(default = NULL, depends = start_values == "custom"), - approximate_eval_grad_f = p_lgl(default = FALSE) + "NLOPT_GN_DIRECT_L", + "NLOPT_GN_DIRECT_L_RAND", + "NLOPT_GN_DIRECT_NOSCAL", + "NLOPT_GN_DIRECT_L_NOSCAL", + "NLOPT_GN_DIRECT_L_RAND_NOSCAL", + "NLOPT_GN_ORIG_DIRECT", + "NLOPT_GN_ORIG_DIRECT_L", + 
"NLOPT_GD_STOGO", + "NLOPT_GD_STOGO_RAND", + "NLOPT_LD_SLSQP", + "NLOPT_LD_LBFGS_NOCEDAL", + "NLOPT_LD_LBFGS", + "NLOPT_LN_PRAXIS", + "NLOPT_LD_VAR1", + "NLOPT_LD_VAR2", + "NLOPT_LD_TNEWTON", + "NLOPT_LD_TNEWTON_RESTART", + "NLOPT_LD_TNEWTON_PRECOND", + "NLOPT_LD_TNEWTON_PRECOND_RESTART", + "NLOPT_GN_CRS2_LM", + "NLOPT_GN_MLSL", + "NLOPT_GD_MLSL", + "NLOPT_GN_MLSL_LDS", + "NLOPT_GD_MLSL_LDS", + "NLOPT_LD_MMA", + "NLOPT_LD_CCSAQ", + "NLOPT_LN_COBYLA", + "NLOPT_LN_NEWUOA", + "NLOPT_LN_NEWUOA_BOUND", + "NLOPT_LN_NELDERMEAD", + "NLOPT_LN_SBPLX", + "NLOPT_LN_AUGLAG", + "NLOPT_LD_AUGLAG", + "NLOPT_LN_AUGLAG_EQ", + "NLOPT_LD_AUGLAG_EQ", + "NLOPT_LN_BOBYQA", + "NLOPT_GN_ISRES"), tags = "required"), + # bbotk parameters + start_values = p_fct(init = "random", levels = c("random", "center"), tags = "required"), + approximate_eval_grad_f = p_lgl(init = FALSE), + # internal termination criteria + maxeval = p_int(lower = 0, init = -1L, special_vals = list(-1L)), + maxtime = p_int(lower = 0, init = -1L, special_vals = list(-1L)), + stopval = p_dbl(init = -Inf), + xtol_rel = p_dbl(lower = 0, upper = Inf, init = -1, special_vals = list(-1)), + xtol_abs = p_dbl(lower = 0, upper = Inf, init = -1, special_vals = list(-1)), + ftol_rel = p_dbl(lower = 0, upper = Inf, init = -1, special_vals = list(-1)), + ftol_abs = p_dbl(lower = 0, upper = Inf, init = -1, special_vals = list(-1)) ) - param_set$values$start_values = "random" - param_set$values$start = NULL - param_set$values$approximate_eval_grad_f = FALSE super$initialize( id = "nloptr", @@ -130,15 +185,7 @@ OptimizerBatchNLoptr = R6Class("OptimizerBatchNLoptr", inherit = OptimizerBatch, .optimize = function(inst) { pv = self$param_set$values - if (pv$start_values == "custom") { - pv$x0 = pv$start - pv$start_values = NULL - pv$start = NULL - } else { - pv$x0 = search_start(inst$search_space, type = pv$start_values) - pv$start_values = NULL - pv$start = NULL - } + if (is.null(pv$x0)) pv$x0 = search_start(inst$search_space, type = 
pv$start_values) if (pv$approximate_eval_grad_f) { eval_grad_f = function(x) { @@ -148,10 +195,8 @@ OptimizerBatchNLoptr = R6Class("OptimizerBatchNLoptr", inherit = OptimizerBatch, } else { eval_grad_f = NULL saveguard_epsilon = 0 - } pv$eval_grad_f = eval_grad_f - pv$approximate_eval_grad_f = NULL opts = pv[which(names(pv) %nin% formalArgs(nloptr::nloptr))] # deactivate termination criterions which are replaced by Terminators diff --git a/R/OptimizerBatchRandomSearch.R b/R/OptimizerBatchRandomSearch.R index 23cd1d968..6fc6916bf 100644 --- a/R/OptimizerBatchRandomSearch.R +++ b/R/OptimizerBatchRandomSearch.R @@ -17,7 +17,7 @@ #' @section Parameters: #' \describe{ #' \item{`batch_size`}{`integer(1)`\cr -#' Maximum number of points to try in a batch.} +#' Maximum number of points to try in a batch.} #' } #' #' @template section_progress_bars @@ -35,9 +35,8 @@ OptimizerBatchRandomSearch = R6Class("OptimizerBatchRandomSearch", #' Creates a new instance of this [R6][R6::R6Class] class. initialize = function() { param_set = ps( - batch_size = p_int(tags = "required") + batch_size = p_int(init = 1L, tags = "required") ) - param_set$values = list(batch_size = 1L) super$initialize( id = "random_search", diff --git a/man/mlr_optimizers_focus_search.Rd b/man/mlr_optimizers_focus_search.Rd index 94fcb29a5..7a8086767 100644 --- a/man/mlr_optimizers_focus_search.Rd +++ b/man/mlr_optimizers_focus_search.Rd @@ -8,15 +8,11 @@ \code{OptimizerBatchFocusSearch} class that implements a Focus Search. Focus Search starts with evaluating \code{n_points} drawn uniformly at random. -For 1 to \code{maxit} batches, \code{n_points} are then drawn uniformly at random and -if the best value of a batch outperforms the previous best value over all -batches evaluated so far, the search space is shrinked around this new best -point prior to the next batch being sampled and evaluated. 
+For 1 to \code{maxit} batches, \code{n_points} are then drawn uniformly at random and if the best value of a batch outperforms the previous best value over all batches evaluated so far, the search space is shrinked around this new best point prior to the next batch being sampled and evaluated. For details on the shrinking, see \link{shrink_ps}. -Depending on the \link{Terminator} this procedure simply restarts after \code{maxit} is -reached. +Depending on the \link{Terminator} this procedure simply restarts after \code{maxit} is reached. } \section{Dictionary}{ diff --git a/man/mlr_optimizers_gensa.Rd b/man/mlr_optimizers_gensa.Rd index 29d99892f..f346525ca 100644 --- a/man/mlr_optimizers_gensa.Rd +++ b/man/mlr_optimizers_gensa.Rd @@ -3,7 +3,7 @@ \name{mlr_optimizers_gensa} \alias{mlr_optimizers_gensa} \alias{OptimizerBatchGenSA} -\title{Optimization via Generalized Simulated Annealing} +\title{Generalized Simulated Annealing} \source{ Tsallis C, Stariolo DA (1996). \dQuote{Generalized simulated annealing.} @@ -16,8 +16,8 @@ Xiang Y, Gubian S, Suomela B, Hoeng J (2013). \doi{10.32614/rj-2013-002}. } \description{ -\code{OptimizerBatchGenSA} class that implements generalized simulated annealing. Calls -\code{\link[GenSA:GenSA]{GenSA::GenSA()}} from package \CRANpkg{GenSA}. +\code{OptimizerBatchGenSA} class that implements generalized simulated annealing. +Calls \code{\link[GenSA:GenSA]{GenSA::GenSA()}} from package \CRANpkg{GenSA}. } \section{Dictionary}{ @@ -32,21 +32,48 @@ opt("gensa") \section{Parameters}{ \describe{ -\item{\code{smooth}}{\code{logical(1)}} -\item{\code{temperature}}{\code{numeric(1)}} -\item{\code{acceptance.param}}{\code{numeric(1)}} -\item{\code{verbose}}{\code{logical(1)}} -\item{\code{trace.mat}}{\code{logical(1)}} +\item{\code{par}}{\code{numeric()}\cr +Initial parameter values. 
+Default is \code{NULL}, in which case, default values will be generated automatically.}
+\item{\code{start_values}}{\code{character(1)}\cr
+Create \code{"random"} start values or based on \code{"center"} of search space?
+In the latter case, it is the center of the parameters before a trafo is applied.
+By default, \code{GenSA} will generate start values automatically.
+Custom start values can be passed via the \code{par} parameter.}
}

-For the meaning of the control parameters, see \code{\link[GenSA:GenSA]{GenSA::GenSA()}}. Note that we
-have removed all control parameters which refer to the termination of the
-algorithm and where our terminators allow to obtain the same behavior.
-
-In contrast to the \code{\link[GenSA:GenSA]{GenSA::GenSA()}} defaults, we set \code{trace.mat = FALSE}.
+For the meaning of the control parameters, see \code{\link[GenSA:GenSA]{GenSA::GenSA()}}.
Note that \code{\link[GenSA:GenSA]{GenSA::GenSA()}} uses \code{smooth = TRUE} as a default.
-In the case of using this optimizer for Hyperparameter Optimization you may
-want to set \code{smooth = FALSE}.
+In the case of using this optimizer for Hyperparameter Optimization you may want to set \code{smooth = FALSE}.
+}
+
+\section{Internal Termination Parameters}{
+
+The algorithm can be terminated with all \link{Terminator}s.
+Additionally, the following internal termination parameters can be used:
+
+\describe{
+\item{\code{maxit}}{\code{integer(1)}\cr
+Maximum number of iterations.
+Original default is \code{5000}.
+Overwritten with \code{.Machine$integer.max}.}
+\item{\code{threshold.stop}}{\code{numeric(1)}\cr
+Threshold stop.
+Deactivated with \code{NULL}.
+Default is \code{NULL}.}
+\item{\code{nb.stop.improvement}}{\code{integer(1)}\cr
+Number of stop improvement.
+Deactivated with \code{-1L}.
+Default is \code{-1L}.}
+\item{\code{max.call}}{\code{integer(1)}\cr
+Maximum number of calls.
+Original default is \code{1e7}.
+Overwritten with \code{.Machine$integer.max}.} +\item{\code{max.time}}{\code{integer(1)}\cr +Maximum time. +Deactivate with \code{NULL}. +Default is \code{NULL}.} +} } \section{Progress Bars}{ diff --git a/man/mlr_optimizers_irace.Rd b/man/mlr_optimizers_irace.Rd index 98edffe28..67c441c30 100644 --- a/man/mlr_optimizers_irace.Rd +++ b/man/mlr_optimizers_irace.Rd @@ -3,7 +3,7 @@ \name{mlr_optimizers_irace} \alias{mlr_optimizers_irace} \alias{OptimizerBatchIrace} -\title{Optimization via Iterated Racing} +\title{Iterated Racing} \source{ Lopez-Ibanez M, Dubois-Lacoste J, Caceres LP, Birattari M, Stuetzle T (2016). \dQuote{The irace package: Iterated racing for automatic algorithm configuration.} @@ -11,8 +11,8 @@ Lopez-Ibanez M, Dubois-Lacoste J, Caceres LP, Birattari M, Stuetzle T (2016). \doi{https://doi.org/10.1016/j.orp.2016.09.002}. } \description{ -\code{OptimizerBatchIrace} class that implements iterated racing. Calls -\code{\link[irace:irace]{irace::irace()}} from package \CRANpkg{irace}. +\code{OptimizerBatchIrace} class that implements iterated racing. +Calls \code{\link[irace:irace]{irace::irace()}} from package \CRANpkg{irace}. } \section{Parameters}{ @@ -20,28 +20,61 @@ Lopez-Ibanez M, Dubois-Lacoste J, Caceres LP, Birattari M, Stuetzle T (2016). \item{\code{instances}}{\code{list()}\cr A list of instances where the configurations executed on.} \item{\code{targetRunnerParallel}}{\verb{function()}\cr -A function that executes the objective function with a specific parameter -configuration and instance. A default function is provided, see section -"Target Runner and Instances".} +A function that executes the objective function with a specific parameter configuration and instance. +A default function is provided, see section "Target Runner and Instances".} } -For the meaning of all other parameters, see \code{\link[irace:defaultScenario]{irace::defaultScenario()}}. 
Note -that we have removed all control parameters which refer to the termination of -the algorithm. Use \link{TerminatorEvals} instead. Other terminators do not work -with \code{OptimizerBatchIrace}. +For the meaning of all other parameters, see \code{\link[irace:defaultScenario]{irace::defaultScenario()}}. +} + +\section{Internal Termination Parameters}{ + +The algorithm can terminated with \link{TerminatorEvals}. +Other \link{Terminator}s do not work with \code{OptimizerBatchIrace}. +Additionally, the following internal termination parameters can be used: + +\describe{ +\item{\code{maxExperiments}}{\code{integer(1)}\cr +Maximum number of runs (invocations of targetRunner) that will be performed. +It determines the maximum budget of experiments for the tuning. +Default is 0.} +\item{\code{minExperiments}}{\code{integer(1)}\cr +Minimum number of runs (invocations of targetRunner) that will be performed. +It determines the minimum budget of experiments for the tuning. +The actual budget depends on the number of parameters and minSurvival. +Default is NA.} +\item{\code{maxTime}}{\code{integer(1)}\cr +Maximum total execution time for the executions of targetRunner. +targetRunner must return two values: cost and time. +This value and the one returned by targetRunner must use the same units (seconds, minutes, iterations, evaluations, ...). +Default is 0.} +\item{\code{budgetEstimation}}{\code{numeric(1)}\cr +Fraction (smaller than 1) of the budget used to estimate the mean computation time of a configuration. +Only used when maxTime > 0 +Default is 0.05.} +\item{\code{minMeasurableTime}}{\code{numeric(1)}\cr +Minimum time unit that is still (significantly) measureable. +Default is 0.01.} +} +} + +\section{Initial parameter values}{ -In contrast to \code{\link[irace:defaultScenario]{irace::defaultScenario()}}, we set \code{digits = 15}. -This represents double parameters with a higher precision and avoids rounding errors. 
+\itemize{ +\item \code{digits}: +\itemize{ +\item Adjusted default: 15. +\item This represents double parameters with a higher precision and avoids rounding errors. +} +} } \section{Target Runner and Instances}{ -The irace package uses a \code{targetRunner} script or R function to evaluate a -configuration on a particular instance. Usually it is not necessary to -specify a \code{targetRunner} function when using \code{OptimizerBatchIrace}. A default -function is used that forwards several configurations and instances to the -user defined objective function. As usually, the user defined function has -a \code{xs}, \code{xss} or \code{xdt} parameter depending on the used \link{Objective} class. +The irace package uses a \code{targetRunner} script or R function to evaluate a configuration on a particular instance. +Usually it is not necessary to specify a \code{targetRunner} function when using \code{OptimizerBatchIrace}. +A default function is used that forwards several configurations and instances to the user defined objective function. +As usually, the user defined function has a \code{xs}, \code{xss} or \code{xdt} parameter depending on the used \link{Objective} class. For irace, the function needs an additional \code{instances} parameter. \if{html}{\out{
}}\preformatted{fun = function(xs, instances) \{ @@ -67,9 +100,8 @@ Identifies configurations across races and steps. \section{Result}{ -The optimization result (\code{instance$result}) is the best performing elite of -the final race. The reported performance is the average performance estimated -on all used instances. +The optimization result (\code{instance$result}) is the best performing elite of the final race. +The reported performance is the average performance estimated on all used instances. } \section{Dictionary}{ diff --git a/man/mlr_optimizers_nloptr.Rd b/man/mlr_optimizers_nloptr.Rd index 565c417fe..89280b0b8 100644 --- a/man/mlr_optimizers_nloptr.Rd +++ b/man/mlr_optimizers_nloptr.Rd @@ -3,7 +3,7 @@ \name{mlr_optimizers_nloptr} \alias{mlr_optimizers_nloptr} \alias{OptimizerBatchNLoptr} -\title{Optimization via Non-linear Optimization} +\title{Non-linear Optimization} \source{ Johnson, G S (2020). \dQuote{The NLopt nonlinear-optimization package.} @@ -16,34 +16,61 @@ Calls \code{\link[nloptr:nloptr]{nloptr::nloptr()}} from package \CRANpkg{nloptr \section{Parameters}{ \describe{ -\item{\code{algorithm}}{\code{character(1)}} -\item{\code{eval_g_ineq}}{\verb{function()}} -\item{\code{xtol_rel}}{\code{numeric(1)}} -\item{\code{xtol_abs}}{\code{numeric(1)}} -\item{\code{ftol_rel}}{\code{numeric(1)}} -\item{\code{ftol_abs}}{\code{numeric(1)}} +\item{\code{algorithm}}{\code{character(1)}\cr +Algorithm to use. +See \code{\link[nloptr:nloptr.print.options]{nloptr::nloptr.print.options()}} for available algorithms.} +\item{\code{x0}}{\code{numeric()}\cr +Initial parameter values. +Use \code{start_values} parameter to create \code{"random"} or \code{"center"} start values.} \item{\code{start_values}}{\code{character(1)}\cr Create \code{"random"} start values or based on \code{"center"} of search space? In the latter case, it is the center of the parameters before a trafo is applied. 
-If set to \code{"custom"}, the start values can be passed via the \code{start} parameter.} -\item{\code{start}}{\code{numeric()}\cr -Custom start values. Only applicable if \code{start_values} parameter is set to \code{"custom"}.} +Custom start values can be passed via the \code{x0} parameter.} \item{\code{approximate_eval_grad_f}}{\code{logical(1)}\cr Should gradients be numerically approximated via finite differences (\link[nloptr:nl.grad]{nloptr::nl.grad}). Only required for certain algorithms. -Note that function evaluations required for the numerical gradient approximation will be logged as usual -and are not treated differently than regular function evaluations by, e.g., \link{Terminator}s.} +Note that function evaluations required for the numerical gradient approximation will be logged as usual and are not treated differently than regular function evaluations by, e.g., \link{Terminator}s.} } -For the meaning of the control parameters, see \code{\link[nloptr:nloptr]{nloptr::nloptr()}} and -\code{\link[nloptr:nloptr.print.options]{nloptr::nloptr.print.options()}}. +For the meaning of other control parameters, see \code{\link[nloptr:nloptr]{nloptr::nloptr()}} and \code{\link[nloptr:nloptr.print.options]{nloptr::nloptr.print.options()}}. +} + +\section{Internal Termination Parameters}{ + +The algorithm can terminated with all \link{Terminator}s. +Additionally, the following internal termination parameters can be used: -The termination conditions \code{stopval}, \code{maxtime} and \code{maxeval} of -\code{\link[nloptr:nloptr]{nloptr::nloptr()}} are deactivated and replaced by the \link{Terminator} -subclasses. The x and function value tolerance termination conditions -(\code{xtol_rel = 10^-4}, \code{xtol_abs = rep(0.0, length(x0))}, \code{ftol_rel = 0.0} and -\code{ftol_abs = 0.0}) are still available and implemented with their package -defaults. To deactivate these conditions, set them to \code{-1}. 
+\describe{ +\item{\code{stopval}}{\code{numeric(1)}\cr +Stop value. +Deactivate with \code{-Inf}. +Default is \code{-Inf}.} +\item{\code{maxtime}}{\code{integer(1)}\cr +Maximum time. +Deactivate with \code{-1L}. +Default is \code{-1L}.} +\item{\code{maxeval}}{\code{integer(1)}\cr +Maximum number of evaluations. +Deactivate with \code{-1L}. +Default is \code{-1L}.} +\item{\code{xtol_rel}}{\code{numeric(1)}\cr +Relative tolerance. +Original default is 10^-4. +Deactivate with \code{-1}. +Overwritten with \code{-1}.} +\item{\code{xtol_abs}}{\code{numeric(1)}\cr +Absolute tolerance. +Deactivate with \code{-1}. +Default is \code{-1}.} +\item{\code{ftol_rel}}{\code{numeric(1)}\cr +Relative tolerance. +Deactivate with \code{-1}. +Default is \code{-1}.} +\item{\code{ftol_abs}}{\code{numeric(1)}\cr +Absolute tolerance. +Deactivate with \code{-1}. +Default is \code{-1}.} +} } \section{Progress Bars}{ diff --git a/tests/testthat/test_OptimizerBatchGenSA.R b/tests/testthat/test_OptimizerBatchGenSA.R index 7804e634a..8466baace 100644 --- a/tests/testthat/test_OptimizerBatchGenSA.R +++ b/tests/testthat/test_OptimizerBatchGenSA.R @@ -4,6 +4,33 @@ test_that("OptimizerBatchGenSA", { z = test_optimizer_1d("gensa", term_evals = 10L) expect_class(z$optimizer, "OptimizerBatchGenSA") expect_snapshot(z$optimizer) +}) + +test_that("OptimizerBatchGenSA custom start values work", { + skip_if_not_installed("GenSA") + + search_space = domain = ps( + x1 = p_dbl(-10, 10), + x2 = p_dbl(-5, 5) + ) + + codomain = ps(y = p_dbl(tags = "maximize")) + + objective_function = function(xs) { + c(y = -(xs[[1]] - 2)^2 - (xs[[2]] + 3)^2 + 10) + } + + objective = ObjectiveRFun$new( + fun = objective_function, + domain = domain, + codomain = codomain) + + instance = OptimInstanceBatchSingleCrit$new( + objective = objective, + search_space = search_space, + terminator = trm("evals", n_evals = 10L)) - expect_error(test_optimizer_2d("gensa", term_evals = 10L), "multi-crit objectives") + optimizer = opt("gensa", 
par = c(-9.1, 1.3)) + optimizer$optimize(instance) + expect_equal(unlist(instance$archive$data[1L, c("x1", "x2")]), c(x1 = -9.1, x2 = 1.3)) }) diff --git a/tests/testthat/test_OptimizerBatchNLoptr.R b/tests/testthat/test_OptimizerBatchNLoptr.R index ea327d71c..78b7b3fc6 100644 --- a/tests/testthat/test_OptimizerBatchNLoptr.R +++ b/tests/testthat/test_OptimizerBatchNLoptr.R @@ -12,6 +12,11 @@ test_that("OptimizerBatchNLoptr", { xtol_rel = -1, xtol_abs = -1, ftol_rel = -1, ftol_abs = -1, term_evals = 5L) expect_class(z$optimizer, "OptimizerBatchNLoptr") expect_snapshot(z$optimizer) +}) + +test_that("OptimizerBatchNLoptr custom start values work", { + skip_on_os("windows") + skip_if_not_installed("nloptr") search_space = domain = ps( x1 = p_dbl(-10, 10), @@ -34,7 +39,7 @@ test_that("OptimizerBatchNLoptr", { search_space = search_space, terminator = trm("evals", n_evals = 10L)) - optimizer = opt("nloptr", algorithm = "NLOPT_LN_BOBYQA", start_values = "custom", start = c(-9.1, 1.3)) + optimizer = opt("nloptr", algorithm = "NLOPT_LN_BOBYQA", x0 = c(-9.1, 1.3)) optimizer$optimize(instance) expect_equal(unlist(instance$archive$data[1L, c("x1", "x2")]), c(x1 = -9.1, x2 = 1.3)) }) From 7a1bc43d8134a561fec64d130e8fce2afd43d33b Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 7 Oct 2025 14:23:28 +0200 Subject: [PATCH 12/15] ... --- R/OptimizerBatchCmaes.R | 74 ++++++++++++++++++++++++++++--------- man/mlr_optimizers_cmaes.Rd | 39 ++++++++++++++++--- 2 files changed, 91 insertions(+), 22 deletions(-) diff --git a/R/OptimizerBatchCmaes.R b/R/OptimizerBatchCmaes.R index 9b0f9b3ce..2b633d992 100644 --- a/R/OptimizerBatchCmaes.R +++ b/R/OptimizerBatchCmaes.R @@ -12,17 +12,44 @@ #' #' @section Parameters: #' \describe{ +#' \item{`x0`}{`numeric()`\cr +#' Initial parameter values. +#' Use `start_values` parameter to create `"random"` or `"center"` initial values.} #' \item{`start_values`}{`character(1)`\cr #' Create `"random"` start values or based on `"center"` of search space? 
#' In the latter case, it is the center of the parameters before a trafo is applied. -#' If set to `"custom"`, the start values can be passed via the `start` parameter.} -#' \item{`start`}{`numeric()`\cr -#' Custom start values. -#' Only applicable if `start_values` parameter is set to `"custom"`.} +#' Custom start values can be passed via the `x0` parameter.} #' } #' #' For the meaning of the control parameters, see `libcmaesr::cmaes_control()`. -#' The parameters `maxfevals`, `ftarget`, `f_tolerance` and `x_tolerance` can be used additionally to our terminators. +#' +#' @section Internal Termination Parameters: +#' The algorithm can terminated with all [Terminator]s. +#' Additionally, the following internal termination parameters can be used: +#' +#' \describe{ +#' \item{`max_fevals`}{`integer(1)`\cr +#' Maximum number of function evaluations. +#' Original default is `100`. +#' Deactivate with `NA`. +#' Overwritten with `NA`.} +#' \item{`max_iter`}{`integer(1)`\cr +#' Maximum number of iterations. +#' Deactivate with `NA`. +#' Default is `NA`.} +#' \item{`ftarget`}{`numeric(1)`\cr +#' Target function value. +#' Deactivate with `NA`. +#' Default is `NA`.} +#' \item{`f_tolerance`}{`numeric(1)`\cr +#' Function tolerance. +#' Deactivate with `NA`. +#' Default is `NA`.} +#' \item{`x_tolerance`}{`numeric(1)`\cr +#' Parameter tolerance. +#' Deactivate with `NA`. +#' Default is `NA`.} +#' } #' #' @template section_progress_bars #' @@ -69,7 +96,23 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", #' Creates a new instance of this [R6][R6::R6Class] class. 
initialize = function() { param_set = ps( - algo = p_fct(default = "acmaes", levels = c("cmaes", "ipop", "bipop", "acmaes", "aipop", "abipop", "sepcmaes", "sepipop", "sepbipop", "sepacmaes", "sepaipop", "sepabipop", "vdcma", "vdipopcma", "vdbipopcma")), + x0 = p_uty(default = NULL), + algo = p_fct(default = "acmaes", levels = c( + "cmaes", + "ipop", + "bipop", + "acmaes", + "aipop", + "abipop", + "sepcmaes", + "sepipop", + "sepbipop", + "sepacmaes", + "sepaipop", + "sepabipop", + "vdcma", + "vdipopcma", + "vdbipopcma")), lambda = p_int(lower = 1L, default = NA_integer_, special_vals = list(NA_integer_)), sigma = p_dbl(default = NA_real_, special_vals = list(NA_real_)), max_restarts = p_int(lower = 1L, special_vals = list(NA), default = NA), @@ -77,15 +120,14 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", tpa_dsigma = p_dbl(default = NA_real_, special_vals = list(NA_real_)), seed = p_int(default = NA_integer_, special_vals = list(NA_integer_)), quiet = p_lgl(default = FALSE), + # bbotk parameters + start_values = p_fct(init = "random", levels = c("random", "center")), # internal termination criteria - max_fevals = p_int(lower = 1L, default = 100L, special_vals = list(NA_integer_)), + max_fevals = p_int(lower = 1L, init = NA_integer_, special_vals = list(NA_integer_)), max_iter = p_int(lower = 1L, default = NA_integer_, special_vals = list(NA_integer_)), ftarget = p_dbl(default = NA_real_, special_vals = list(NA_real_)), f_tolerance = p_dbl(default = NA_real_, special_vals = list(NA_real_)), - x_tolerance = p_dbl(default = NA_real_, special_vals = list(NA_real_)), - # bbotk parameters - start_values = p_fct(init = "random", levels = c("random", "center", "custom")), - start = p_uty(default = NULL, depends = start_values == "custom") + x_tolerance = p_dbl(default = NA_real_, special_vals = list(NA_real_)) ) super$initialize( @@ -103,16 +145,14 @@ OptimizerBatchCmaes = R6Class("OptimizerBatchCmaes", private = list( .optimize = function(inst) { pv = 
self$param_set$values - start_values = pv$start_values - start = pv$start direction = inst$objective$codomain$direction - lower = inst$search_space$lower upper = inst$search_space$upper - x0 = if (pv$start_values == "custom") { - set_names(start, inst$search_space$ids()) + + x0 = if (!is.null(pv$x0)) { + set_names(pv$x0, inst$search_space$ids()) } else { - search_start(inst$search_space, type = start_values) + search_start(inst$search_space, type = pv$start_values) } wrapper = function(xmat) { diff --git a/man/mlr_optimizers_cmaes.Rd b/man/mlr_optimizers_cmaes.Rd index b39800e9d..30c01c67b 100644 --- a/man/mlr_optimizers_cmaes.Rd +++ b/man/mlr_optimizers_cmaes.Rd @@ -21,17 +21,46 @@ opt("cmaes") \section{Parameters}{ \describe{ +\item{\code{x0}}{\code{numeric()}\cr +Initial parameter values. +Use \code{start_values} parameter to create \code{"random"} or \code{"center"} initial values.} \item{\code{start_values}}{\code{character(1)}\cr Create \code{"random"} start values or based on \code{"center"} of search space? In the latter case, it is the center of the parameters before a trafo is applied. -If set to \code{"custom"}, the start values can be passed via the \code{start} parameter.} -\item{\code{start}}{\code{numeric()}\cr -Custom start values. -Only applicable if \code{start_values} parameter is set to \code{"custom"}.} +Custom start values can be passed via the \code{x0} parameter.} } For the meaning of the control parameters, see \code{libcmaesr::cmaes_control()}. -The parameters \code{maxfevals}, \code{ftarget}, \code{f_tolerance} and \code{x_tolerance} can be used additionally to our terminators. +} + +\section{Internal Termination Parameters}{ + +The algorithm can terminated with all \link{Terminator}s. +Additionally, the following internal termination parameters can be used: + +\describe{ +\item{\code{max_fevals}}{\code{integer(1)}\cr +Maximum number of function evaluations. +Original default is \code{100}. +Deactivate with \code{NA}. 
+Overwritten with \code{NA}.} +\item{\code{max_iter}}{\code{integer(1)}\cr +Maximum number of iterations. +Deactivate with \code{NA}. +Default is \code{NA}.} +\item{\code{ftarget}}{\code{numeric(1)}\cr +Target function value. +Deactivate with \code{NA}. +Default is \code{NA}.} +\item{\code{f_tolerance}}{\code{numeric(1)}\cr +Function tolerance. +Deactivate with \code{NA}. +Default is \code{NA}.} +\item{\code{x_tolerance}}{\code{numeric(1)}\cr +Parameter tolerance. +Deactivate with \code{NA}. +Default is \code{NA}.} +} } \section{Progress Bars}{ From 9005199659ecb0a764734f2edf0fdbc8bb76e1df Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 7 Oct 2025 14:26:44 +0200 Subject: [PATCH 13/15] ... --- tests/testthat/test_OptimizerBatchChain.R | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/testthat/test_OptimizerBatchChain.R b/tests/testthat/test_OptimizerBatchChain.R index 7fdb1f362..4fb3982c5 100644 --- a/tests/testthat/test_OptimizerBatchChain.R +++ b/tests/testthat/test_OptimizerBatchChain.R @@ -53,11 +53,14 @@ test_that("OptimizerBatchChain", { expect_set_equal(optimizer$packages, c("bbotk", "GenSA")) expect_identical(optimizer$properties, "single-crit") expect_identical(optimizer$param_classes, "ParamDbl") + + expected_ids = c( + paste0("OptimizerBatchRandomSearch_1.", opt("random_search")$param_set$ids()), + paste0("OptimizerBatchGenSA_1.", opt("gensa")$param_set$ids())) + expect_set_equal( optimizer$param_set$ids(), - c("OptimizerBatchRandomSearch_1.batch_size", "OptimizerBatchGenSA_1.smooth", "OptimizerBatchGenSA_1.temperature", - "OptimizerBatchGenSA_1.visiting.param", "OptimizerBatchGenSA_1.acceptance.param", "OptimizerBatchGenSA_1.simple.function", - "OptimizerBatchGenSA_1.verbose", "OptimizerBatchGenSA_1.trace.mat") + expected_ids ) }) From e3f47efb5173bf112391477ce80a9fee11f3558d Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 7 Oct 2025 14:32:18 +0200 Subject: [PATCH 14/15] ... 
--- .../{OptimizerCmaes.md => OptimizerBatchCmaes.md} | 0 ...erDesignPoints.md => OptimizerBatchDesignPoints.md} | 0 ...izerFocusSearch.md => OptimizerBatchFocusSearch.md} | 0 .../{OptimizerGenSA.md => OptimizerBatchGenSA.md} | 2 +- ...imizerGridSearch.md => OptimizerBatchGridSearch.md} | 0 ...izerLocalSearch.md => OptimizerBatchLocalSearch.md} | 0 .../{OptimizerNLoptr.md => OptimizerBatchNLoptr.md} | 10 ++++++---- ...erRandomSearch.md => OptimizerBatchRandomSearch.md} | 0 8 files changed, 7 insertions(+), 5 deletions(-) rename tests/testthat/_snaps/{OptimizerCmaes.md => OptimizerBatchCmaes.md} (100%) rename tests/testthat/_snaps/{OptimizerDesignPoints.md => OptimizerBatchDesignPoints.md} (100%) rename tests/testthat/_snaps/{OptimizerFocusSearch.md => OptimizerBatchFocusSearch.md} (100%) rename tests/testthat/_snaps/{OptimizerGenSA.md => OptimizerBatchGenSA.md} (76%) rename tests/testthat/_snaps/{OptimizerGridSearch.md => OptimizerBatchGridSearch.md} (100%) rename tests/testthat/_snaps/{OptimizerLocalSearch.md => OptimizerBatchLocalSearch.md} (100%) rename tests/testthat/_snaps/{OptimizerNLoptr.md => OptimizerBatchNLoptr.md} (55%) rename tests/testthat/_snaps/{OptimizerRandomSearch.md => OptimizerBatchRandomSearch.md} (100%) diff --git a/tests/testthat/_snaps/OptimizerCmaes.md b/tests/testthat/_snaps/OptimizerBatchCmaes.md similarity index 100% rename from tests/testthat/_snaps/OptimizerCmaes.md rename to tests/testthat/_snaps/OptimizerBatchCmaes.md diff --git a/tests/testthat/_snaps/OptimizerDesignPoints.md b/tests/testthat/_snaps/OptimizerBatchDesignPoints.md similarity index 100% rename from tests/testthat/_snaps/OptimizerDesignPoints.md rename to tests/testthat/_snaps/OptimizerBatchDesignPoints.md diff --git a/tests/testthat/_snaps/OptimizerFocusSearch.md b/tests/testthat/_snaps/OptimizerBatchFocusSearch.md similarity index 100% rename from tests/testthat/_snaps/OptimizerFocusSearch.md rename to tests/testthat/_snaps/OptimizerBatchFocusSearch.md diff --git 
a/tests/testthat/_snaps/OptimizerGenSA.md b/tests/testthat/_snaps/OptimizerBatchGenSA.md similarity index 76% rename from tests/testthat/_snaps/OptimizerGenSA.md rename to tests/testthat/_snaps/OptimizerBatchGenSA.md index 23d4da565..f08da39d3 100644 --- a/tests/testthat/_snaps/OptimizerGenSA.md +++ b/tests/testthat/_snaps/OptimizerBatchGenSA.md @@ -5,7 +5,7 @@ Output -- - Generalized Simulated Annealing --------------------- - * Parameters: list() + * Parameters: maxit=2147483647, nb.stop.improvement=-1, max.call=2147483647 * Parameter classes: * Properties: single-crit * Packages: bbotk and GenSA diff --git a/tests/testthat/_snaps/OptimizerGridSearch.md b/tests/testthat/_snaps/OptimizerBatchGridSearch.md similarity index 100% rename from tests/testthat/_snaps/OptimizerGridSearch.md rename to tests/testthat/_snaps/OptimizerBatchGridSearch.md diff --git a/tests/testthat/_snaps/OptimizerLocalSearch.md b/tests/testthat/_snaps/OptimizerBatchLocalSearch.md similarity index 100% rename from tests/testthat/_snaps/OptimizerLocalSearch.md rename to tests/testthat/_snaps/OptimizerBatchLocalSearch.md diff --git a/tests/testthat/_snaps/OptimizerNLoptr.md b/tests/testthat/_snaps/OptimizerBatchNLoptr.md similarity index 55% rename from tests/testthat/_snaps/OptimizerNLoptr.md rename to tests/testthat/_snaps/OptimizerBatchNLoptr.md index f7c5e073e..34b9cb369 100644 --- a/tests/testthat/_snaps/OptimizerNLoptr.md +++ b/tests/testthat/_snaps/OptimizerBatchNLoptr.md @@ -5,8 +5,9 @@ Output -- - Non-linear Optimization ---------------------------- - * Parameters: algorithm=NLOPT_LN_BOBYQA, xtol_rel=-1, xtol_abs=-1, ftol_rel=-1, - ftol_abs=-1, start_values=random, approximate_eval_grad_f=FALSE + * Parameters: algorithm=NLOPT_LN_BOBYQA, start_values=random, + approximate_eval_grad_f=FALSE, maxeval=-1, maxtime=-1, stopval=-Inf, + xtol_rel=-1, xtol_abs=-1, ftol_rel=-1, ftol_abs=-1 * Parameter classes: * Properties: single-crit * Packages: bbotk and nloptr @@ -18,8 +19,9 @@ Output -- - 
Non-linear Optimization ---------------------------- - * Parameters: algorithm=NLOPT_LD_LBFGS, xtol_rel=-1, xtol_abs=-1, ftol_rel=-1, - ftol_abs=-1, start_values=random, approximate_eval_grad_f=TRUE + * Parameters: algorithm=NLOPT_LD_LBFGS, start_values=random, + approximate_eval_grad_f=TRUE, maxeval=-1, maxtime=-1, stopval=-Inf, + xtol_rel=-1, xtol_abs=-1, ftol_rel=-1, ftol_abs=-1 * Parameter classes: * Properties: single-crit * Packages: bbotk and nloptr diff --git a/tests/testthat/_snaps/OptimizerRandomSearch.md b/tests/testthat/_snaps/OptimizerBatchRandomSearch.md similarity index 100% rename from tests/testthat/_snaps/OptimizerRandomSearch.md rename to tests/testthat/_snaps/OptimizerBatchRandomSearch.md From 8fb681e8cfff0c0903d4ddc63c018380059a0688 Mon Sep 17 00:00:00 2001 From: be-marc Date: Tue, 7 Oct 2025 14:39:16 +0200 Subject: [PATCH 15/15] ... --- R/OptimizerBatchChain.R | 90 ++++++++++++++++++------------------ R/OptimizerBatchGenSA.R | 44 +++++++++--------- R/OptimizerBatchIrace.R | 1 + R/OptimizerBatchNLoptr.R | 4 +- man/mlr_optimizers_chain.Rd | 32 ++++++------- man/mlr_optimizers_gensa.Rd | 45 +++++++++--------- man/mlr_optimizers_irace.Rd | 2 + man/mlr_optimizers_nloptr.Rd | 5 +- 8 files changed, 108 insertions(+), 115 deletions(-) diff --git a/R/OptimizerBatchChain.R b/R/OptimizerBatchChain.R index 5e1ab02a2..4575b643e 100644 --- a/R/OptimizerBatchChain.R +++ b/R/OptimizerBatchChain.R @@ -31,53 +31,51 @@ #' @template section_progress_bars #' #' @export -#' @examplesIf requireNamespace("GenSA") +#' @examplesIf requireNamespace("GenSA", quietly = TRUE) #' @examples -#'library(paradox) -#' -#'domain = ps(x = p_dbl(lower = -1, upper = 1)) -#' -#'search_space = ps(x = p_dbl(lower = -1, upper = 1)) -#' -#'codomain = ps(y = p_dbl(tags = "minimize")) -#' -#'objective_function = function(xs) { -#' list(y = as.numeric(xs)^2) -#'} -#' -#'objective = ObjectiveRFun$new( -#' fun = objective_function, -#' domain = domain, -#' codomain = codomain -#') -#' 
-#'terminator = trm("evals", n_evals = 10) -#' -#'# run optimizers sequentially -#'instance = OptimInstanceBatchSingleCrit$new( -#' objective = objective, -#' search_space = search_space, -#' terminator = terminator -#') -#' -#'optimizer = opt("chain", -#' optimizers = list(opt("random_search"), opt("grid_search")), -#' terminators = list(trm("evals", n_evals = 5), trm("evals", n_evals = 5)) -#') -#' -#'optimizer$optimize(instance) -#' -#'# random restarts -#'instance = OptimInstanceBatchSingleCrit$new( -#' objective = objective, -#' search_space = search_space, -#' terminator = trm("none") -#') -#'optimizer = opt("chain", -#' optimizers = list(opt("gensa"), opt("gensa")), -#' terminators = list(trm("evals", n_evals = 10), trm("evals", n_evals = 10)) -#') -#'optimizer$optimize(instance) +#' domain = ps(x = p_dbl(lower = -1, upper = 1)) +#' +#' search_space = ps(x = p_dbl(lower = -1, upper = 1)) +#' +#' codomain = ps(y = p_dbl(tags = "minimize")) +#' +#' objective_function = function(xs) { +#' list(y = as.numeric(xs)^2) +#' } +#' +#' objective = ObjectiveRFun$new( +#' fun = objective_function, +#' domain = domain, +#' codomain = codomain +#' ) +#' +#' terminator = trm("evals", n_evals = 10) +#' +#' # run optimizers sequentially +#' instance = OptimInstanceBatchSingleCrit$new( +#' objective = objective, +#' search_space = search_space, +#' terminator = terminator +#' ) +#' +#' optimizer = opt("chain", +#' optimizers = list(opt("random_search"), opt("grid_search")), +#' terminators = list(trm("evals", n_evals = 5), trm("evals", n_evals = 5)) +#' ) +#' +#' optimizer$optimize(instance) +#' +#' # random restarts +#' instance = OptimInstanceBatchSingleCrit$new( +#' objective = objective, +#' search_space = search_space, +#' terminator = trm("none") +#' ) +#' optimizer = opt("chain", +#' optimizers = list(opt("gensa"), opt("gensa")), +#' terminators = list(trm("evals", n_evals = 10), trm("evals", n_evals = 10)) +#' ) +#' optimizer$optimize(instance) OptimizerBatchChain = 
R6Class("OptimizerBatchChain", inherit = OptimizerBatch, public = list( diff --git a/R/OptimizerBatchGenSA.R b/R/OptimizerBatchGenSA.R index aabffa9f8..c6eb6b703 100644 --- a/R/OptimizerBatchGenSA.R +++ b/R/OptimizerBatchGenSA.R @@ -59,38 +59,36 @@ #' `r format_bib("tsallis_1996", "xiang_2013")` #' #' @export +#' @examplesIf requireNamespace("GenSA", quietly = TRUE) #' @examples -#' if (requireNamespace("GenSA")) { +#' search_space = domain = ps(x = p_dbl(lower = -1, upper = 1)) #' -#' search_space = domain = ps(x = p_dbl(lower = -1, upper = 1)) +#' codomain = ps(y = p_dbl(tags = "minimize")) #' -#' codomain = ps(y = p_dbl(tags = "minimize")) -#' -#' objective_function = function(xs) { -#' list(y = as.numeric(xs)^2) -#' } +#' objective_function = function(xs) { +#' list(y = as.numeric(xs)^2) +#' } #' -#' objective = ObjectiveRFun$new( -#' fun = objective_function, -#' domain = domain, -#' codomain = codomain) +#' objective = ObjectiveRFun$new( +#' fun = objective_function, +#' domain = domain, +#' codomain = codomain) #' -#' instance = OptimInstanceBatchSingleCrit$new( -#' objective = objective, -#' search_space = search_space, -#' terminator = trm("evals", n_evals = 10)) +#' instance = OptimInstanceBatchSingleCrit$new( +#' objective = objective, +#' search_space = search_space, +#' terminator = trm("evals", n_evals = 10)) #' -#' optimizer = opt("gensa") +#' optimizer = opt("gensa") #' -#' # Modifies the instance by reference -#' optimizer$optimize(instance) +#' # Modifies the instance by reference +#' optimizer$optimize(instance) #' -#' # Returns best scoring evaluation -#' instance$result +#' # Returns best scoring evaluation +#' instance$result #' -#' # Allows access of data.table of full path of all evaluations -#' as.data.table(instance$archive$data) -#' } +#' # Allows access of data.table of full path of all evaluations +#' as.data.table(instance$archive$data) OptimizerBatchGenSA = R6Class("OptimizerBatchGenSA", inherit = OptimizerBatch, public = list( diff 
--git a/R/OptimizerBatchIrace.R b/R/OptimizerBatchIrace.R index bb4383a52..d58c94537 100644 --- a/R/OptimizerBatchIrace.R +++ b/R/OptimizerBatchIrace.R @@ -89,6 +89,7 @@ #' `r format_bib("lopez_2016")` #' #' @export +#' @examplesIf requireNamespace("irace", quietly = TRUE) #' @examples #' # runtime of the example is too long #' \donttest{ diff --git a/R/OptimizerBatchNLoptr.R b/R/OptimizerBatchNLoptr.R index debea4b37..e42f9aa53 100644 --- a/R/OptimizerBatchNLoptr.R +++ b/R/OptimizerBatchNLoptr.R @@ -69,10 +69,9 @@ #' `r format_bib("johnson_2014")` #' #' @export +#' @examplesIf requireNamespace("nloptr", quietly = TRUE) #' @examples #' \donttest{ -#' if (requireNamespace("nloptr")) { -#' #' search_space = domain = ps(x = p_dbl(lower = -1, upper = 1)) #' #' codomain = ps(y = p_dbl(tags = "minimize")) @@ -106,7 +105,6 @@ #' # Allows access of data.table of full path of all evaluations #' as.data.table(instance$archive) #' } -#' } #' OptimizerBatchNLoptr = R6Class("OptimizerBatchNLoptr", inherit = OptimizerBatch, public = list( diff --git a/man/mlr_optimizers_chain.Rd b/man/mlr_optimizers_chain.Rd index 5a5de7009..5e4f220b9 100644 --- a/man/mlr_optimizers_chain.Rd +++ b/man/mlr_optimizers_chain.Rd @@ -47,10 +47,8 @@ combined with a \link{Terminator}. 
Simply wrap the function in } \examples{ -\dontshow{if (requireNamespace("GenSA")) withAutoprint(\{ # examplesIf} +\dontshow{if (requireNamespace("GenSA", quietly = TRUE)) withAutoprint(\{ # examplesIf} \dontshow{\}) # examplesIf} -library(paradox) - domain = ps(x = p_dbl(lower = -1, upper = 1)) search_space = ps(x = p_dbl(lower = -1, upper = 1)) @@ -58,40 +56,40 @@ search_space = ps(x = p_dbl(lower = -1, upper = 1)) codomain = ps(y = p_dbl(tags = "minimize")) objective_function = function(xs) { - list(y = as.numeric(xs)^2) + list(y = as.numeric(xs)^2) } objective = ObjectiveRFun$new( - fun = objective_function, - domain = domain, - codomain = codomain + fun = objective_function, + domain = domain, + codomain = codomain ) terminator = trm("evals", n_evals = 10) # run optimizers sequentially instance = OptimInstanceBatchSingleCrit$new( - objective = objective, - search_space = search_space, - terminator = terminator + objective = objective, + search_space = search_space, + terminator = terminator ) optimizer = opt("chain", - optimizers = list(opt("random_search"), opt("grid_search")), - terminators = list(trm("evals", n_evals = 5), trm("evals", n_evals = 5)) + optimizers = list(opt("random_search"), opt("grid_search")), + terminators = list(trm("evals", n_evals = 5), trm("evals", n_evals = 5)) ) optimizer$optimize(instance) # random restarts instance = OptimInstanceBatchSingleCrit$new( - objective = objective, - search_space = search_space, - terminator = trm("none") + objective = objective, + search_space = search_space, + terminator = trm("none") ) optimizer = opt("chain", - optimizers = list(opt("gensa"), opt("gensa")), - terminators = list(trm("evals", n_evals = 10), trm("evals", n_evals = 10)) + optimizers = list(opt("gensa"), opt("gensa")), + terminators = list(trm("evals", n_evals = 10), trm("evals", n_evals = 10)) ) optimizer$optimize(instance) } diff --git a/man/mlr_optimizers_gensa.Rd b/man/mlr_optimizers_gensa.Rd index f346525ca..1a3820409 100644 --- 
a/man/mlr_optimizers_gensa.Rd +++ b/man/mlr_optimizers_gensa.Rd @@ -85,37 +85,36 @@ combined with a \link{Terminator}. Simply wrap the function in } \examples{ -if (requireNamespace("GenSA")) { +\dontshow{if (requireNamespace("GenSA", quietly = TRUE)) withAutoprint(\{ # examplesIf} +\dontshow{\}) # examplesIf} +search_space = domain = ps(x = p_dbl(lower = -1, upper = 1)) - search_space = domain = ps(x = p_dbl(lower = -1, upper = 1)) +codomain = ps(y = p_dbl(tags = "minimize")) - codomain = ps(y = p_dbl(tags = "minimize")) - - objective_function = function(xs) { - list(y = as.numeric(xs)^2) - } +objective_function = function(xs) { + list(y = as.numeric(xs)^2) +} - objective = ObjectiveRFun$new( - fun = objective_function, - domain = domain, - codomain = codomain) +objective = ObjectiveRFun$new( + fun = objective_function, + domain = domain, + codomain = codomain) - instance = OptimInstanceBatchSingleCrit$new( - objective = objective, - search_space = search_space, - terminator = trm("evals", n_evals = 10)) +instance = OptimInstanceBatchSingleCrit$new( + objective = objective, + search_space = search_space, + terminator = trm("evals", n_evals = 10)) - optimizer = opt("gensa") +optimizer = opt("gensa") - # Modifies the instance by reference - optimizer$optimize(instance) +# Modifies the instance by reference +optimizer$optimize(instance) - # Returns best scoring evaluation - instance$result +# Returns best scoring evaluation +instance$result - # Allows access of data.table of full path of all evaluations - as.data.table(instance$archive$data) -} +# Allows access of data.table of full path of all evaluations +as.data.table(instance$archive$data) } \section{Super classes}{ \code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}} -> \code{OptimizerBatchGenSA} diff --git a/man/mlr_optimizers_irace.Rd b/man/mlr_optimizers_irace.Rd index 67c441c30..a079420ca 100644 --- a/man/mlr_optimizers_irace.Rd +++ 
b/man/mlr_optimizers_irace.Rd @@ -123,6 +123,8 @@ combined with a \link{Terminator}. Simply wrap the function in } \examples{ +\dontshow{if (requireNamespace("irace", quietly = TRUE)) withAutoprint(\{ # examplesIf} +\dontshow{\}) # examplesIf} # runtime of the example is too long \donttest{ diff --git a/man/mlr_optimizers_nloptr.Rd b/man/mlr_optimizers_nloptr.Rd index 89280b0b8..10edd4c91 100644 --- a/man/mlr_optimizers_nloptr.Rd +++ b/man/mlr_optimizers_nloptr.Rd @@ -82,9 +82,9 @@ combined with a \link{Terminator}. Simply wrap the function in } \examples{ +\dontshow{if (requireNamespace("nloptr", quietly = TRUE)) withAutoprint(\{ # examplesIf} +\dontshow{\}) # examplesIf} \donttest{ -if (requireNamespace("nloptr")) { - search_space = domain = ps(x = p_dbl(lower = -1, upper = 1)) codomain = ps(y = p_dbl(tags = "minimize")) @@ -118,7 +118,6 @@ if (requireNamespace("nloptr")) { # Allows access of data.table of full path of all evaluations as.data.table(instance$archive) } -} } \section{Super classes}{