diff --git a/NAMESPACE b/NAMESPACE
index 8584428b1..b703be59e 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -132,7 +132,7 @@ S3method(tunable,logistic_reg)
S3method(tunable,mars)
S3method(tunable,mlp)
S3method(tunable,model_spec)
-S3method(tunable,multinomial_reg)
+S3method(tunable,multinom_reg)
S3method(tunable,rand_forest)
S3method(tunable,survival_reg)
S3method(tunable,svm_poly)
diff --git a/R/mlp_brulee_two_layer.R b/R/mlp_brulee_two_layer.R
new file mode 100644
index 000000000..317e72b15
--- /dev/null
+++ b/R/mlp_brulee_two_layer.R
@@ -0,0 +1,11 @@
+#' Multilayer perceptron via brulee with two hidden layers
+#'
+#' [brulee::brulee_mlp_two_layer()] fits a neural network with two hidden layers (requires version 0.3.0.9000 or higher of brulee).
+#'
+#' @includeRmd man/rmd/mlp_brulee_two_layer.md details
+#'
+#' @name details_mlp_brulee_two_layer
+#' @keywords internal
+NULL
+
+# See inst/README-DOCS.md for a description of how these files are processed
diff --git a/R/mlp_data.R b/R/mlp_data.R
index 8c1ff2080..4d3e5039d 100644
--- a/R/mlp_data.R
+++ b/R/mlp_data.R
@@ -368,7 +368,8 @@ set_pred(
set_model_engine("mlp", "classification", "brulee")
set_model_engine("mlp", "regression", "brulee")
-set_dependency("mlp", "brulee", "brulee")
+set_dependency("mlp", "brulee", "brulee", mode = "classification")
+set_dependency("mlp", "brulee", "brulee", mode = "regression")
set_model_arg(
model = "mlp",
@@ -527,3 +528,166 @@ set_pred(
)
)
+
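+# The brulee_two_layer engine mirrors the brulee engine registered above; the
+# second-layer settings (hidden_units_2, activation_2) are not registered as
+# main arguments below, so they are passed through set_engine() instead.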
+set_model_engine("mlp", "classification", "brulee_two_layer")
+set_model_engine("mlp", "regression", "brulee_two_layer")
+set_dependency("mlp", "brulee_two_layer", "brulee", mode = "classification")
+set_dependency("mlp", "brulee_two_layer", "brulee", mode = "regression")
+
+set_model_arg(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ parsnip = "hidden_units",
+ original = "hidden_units",
+ func = list(pkg = "dials", fun = "hidden_units"),
+ has_submodel = FALSE
+)
+
+set_model_arg(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ parsnip = "penalty",
+ original = "penalty",
+ func = list(pkg = "dials", fun = "penalty"),
+ has_submodel = FALSE
+)
+
+set_model_arg(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ parsnip = "epochs",
+ original = "epochs",
+ func = list(pkg = "dials", fun = "epochs"),
+ has_submodel = FALSE
+)
+
+set_model_arg(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ parsnip = "dropout",
+ original = "dropout",
+ func = list(pkg = "dials", fun = "dropout"),
+ has_submodel = FALSE
+)
+
+set_model_arg(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ parsnip = "learn_rate",
+ original = "learn_rate",
+ func = list(pkg = "dials", fun = "learn_rate", range = c(-2.5, -0.5)),
+ has_submodel = FALSE
+)
+
+set_model_arg(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ parsnip = "activation",
+ original = "activation",
+ func = list(pkg = "dials", fun = "activation", values = c('relu', 'elu', 'tanh')),
+ has_submodel = FALSE
+)
+
+
+set_fit(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ mode = "regression",
+ value = list(
+ interface = "data.frame",
+ protect = c("x", "y"),
+ func = c(pkg = "brulee", fun = "brulee_mlp_two_layer"),
+ defaults = list()
+ )
+)
+
+set_encoding(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ mode = "regression",
+ options = list(
+ predictor_indicators = "none",
+ compute_intercept = FALSE,
+ remove_intercept = FALSE,
+ allow_sparse_x = FALSE
+ )
+)
+
+set_fit(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ mode = "classification",
+ value = list(
+ interface = "data.frame",
+ protect = c("x", "y"),
+ func = c(pkg = "brulee", fun = "brulee_mlp_two_layer"),
+ defaults = list()
+ )
+)
+
+set_encoding(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ mode = "classification",
+ options = list(
+ predictor_indicators = "none",
+ compute_intercept = FALSE,
+ remove_intercept = FALSE,
+ allow_sparse_x = FALSE
+ )
+)
+
+set_pred(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ mode = "regression",
+ type = "numeric",
+ value = list(
+ pre = NULL,
+ post = reformat_torch_num,
+ func = c(fun = "predict"),
+ args =
+ list(
+ object = quote(object$fit),
+ new_data = quote(new_data),
+ type = "numeric"
+ )
+ )
+)
+
+set_pred(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ mode = "classification",
+ type = "class",
+ value = list(
+ pre = NULL,
+ post = NULL,
+ func = c(fun = "predict"),
+ args =
+ list(
+ object = quote(object$fit),
+ new_data = quote(new_data),
+ type = "class"
+ )
+ )
+)
+
+set_pred(
+ model = "mlp",
+ eng = "brulee_two_layer",
+ mode = "classification",
+ type = "prob",
+ value = list(
+ pre = NULL,
+ post = NULL,
+ func = c(fun = "predict"),
+ args =
+ list(
+ object = quote(object$fit),
+ new_data = quote(new_data),
+ type = "prob"
+ )
+ )
+)
+
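+# A usage sketch for the new engine (not run; assumes brulee >= 0.3.0.9000 is
+# installed). The second-layer settings are supplied through set_engine():
+#
+#   mlp(hidden_units = 10, epochs = 50) %>%
+#     set_engine("brulee_two_layer",
+#                hidden_units_2 = 5,
+#                activation_2 = "relu") %>%
+#     set_mode("regression") %>%
+#     fit(mpg ~ ., data = mtcars)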
diff --git a/R/tunable.R b/R/tunable.R
index 8d6fde55b..15d628949 100644
--- a/R/tunable.R
+++ b/R/tunable.R
@@ -194,37 +194,6 @@ earth_engine_args <-
component_id = "engine"
)
-brulee_mlp_engine_args <-
- tibble::tribble(
- ~name, ~call_info,
- "momentum", list(pkg = "dials", fun = "momentum", range = c(0.5, 0.95)),
- "batch_size", list(pkg = "dials", fun = "batch_size", range = c(3, 10)),
- "stop_iter", list(pkg = "dials", fun = "stop_iter"),
- "class_weights", list(pkg = "dials", fun = "class_weights"),
- "decay", list(pkg = "dials", fun = "rate_decay"),
- "initial", list(pkg = "dials", fun = "rate_initial"),
- "largest", list(pkg = "dials", fun = "rate_largest"),
- "rate_schedule", list(pkg = "dials", fun = "rate_schedule"),
- "step_size", list(pkg = "dials", fun = "rate_step_size"),
- "mixture", list(pkg = "dials", fun = "mixture")
- ) %>%
- dplyr::mutate(source = "model_spec",
- component = "mlp",
- component_id = "engine"
- )
-
-brulee_linear_engine_args <-
- brulee_mlp_engine_args %>%
- dplyr::filter(name %in% c("momentum", "batch_size", "stop_iter"))
-
-brulee_logistic_engine_args <-
- brulee_mlp_engine_args %>%
- dplyr::filter(name %in% c("momentum", "batch_size", "stop_iter", "class_weights"))
-
-brulee_multinomial_engine_args <-
- brulee_mlp_engine_args %>%
- dplyr::filter(name %in% c("momentum", "batch_size", "stop_iter", "class_weights"))
-
flexsurvspline_engine_args <-
tibble::tibble(
name = c("k"),
@@ -236,6 +205,42 @@ flexsurvspline_engine_args <-
component_id = "engine"
)
+# ------------------------------------------------------------------------------
+# used for brulee engines:
+
+tune_activations <- c("relu", "tanh", "elu", "log_sigmoid", "tanhshrink")
+tune_sched <- c("none", "decay_time", "decay_expo", "cyclic", "step")
+
+brulee_mlp_args <-
+ tibble::tibble(
+ name = c('epochs', 'hidden_units', 'hidden_units_2', 'activation', 'activation_2',
+ 'penalty', 'mixture', 'dropout', 'learn_rate', 'momentum', 'batch_size',
+ 'class_weights', 'stop_iter', 'rate_schedule'),
+ call_info = list(
+ list(pkg = "dials", fun = "epochs", range = c(5L, 500L)),
+ list(pkg = "dials", fun = "hidden_units", range = c(2L, 50L)),
+ list(pkg = "dials", fun = "hidden_units_2", range = c(2L, 50L)),
+ list(pkg = "dials", fun = "activation", values = tune_activations),
+ list(pkg = "dials", fun = "activation_2", values = tune_activations),
+ list(pkg = "dials", fun = "penalty"),
+ list(pkg = "dials", fun = "mixture"),
+ list(pkg = "dials", fun = "dropout"),
+ list(pkg = "dials", fun = "learn_rate", range = c(-3, -1/5)),
+ list(pkg = "dials", fun = "momentum", range = c(0.50, 0.95)),
+ list(pkg = "dials", fun = "batch_size"),
+    list(pkg = "dials", fun = "class_weights"),
+    list(pkg = "dials", fun = "stop_iter"),
+ list(pkg = "dials", fun = "rate_schedule", values = tune_sched)
+ )
+ ) %>%
+ dplyr::mutate(source = "model_spec")
+
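+# tuning arguments above that only make sense for mlp() models; the brulee
+# linear/logistic/multinomial methods below drop these via an anti-join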
+brulee_mlp_only_args <-
+ tibble::tibble(
+ name =
+ c('hidden_units', 'hidden_units_2', 'activation', 'activation_2', 'dropout')
+ )
+
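+# As a sketch (not run), the selection used below for the brulee linear_reg
+# method keeps everything except the mlp-only and class-weight rows:
+#
+#   brulee_mlp_args %>%
+#     dplyr::anti_join(brulee_mlp_only_args, by = "name") %>%
+#     dplyr::filter(name != "class_weights") %>%
+#     dplyr::pull(name)
+#   #> [1] "epochs"        "penalty"       "mixture"       "learn_rate"
+#   #> [5] "momentum"      "batch_size"    "stop_iter"     "rate_schedule"
+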
# ------------------------------------------------------------------------------
#' @export
@@ -245,11 +250,21 @@ tunable.linear_reg <- function(x, ...) {
res$call_info[res$name == "mixture"] <-
list(list(pkg = "dials", fun = "mixture", range = c(0.05, 1.00)))
} else if (x$engine == "brulee") {
- res <- add_engine_parameters(res, brulee_linear_engine_args)
+ res <-
+ brulee_mlp_args %>%
+ dplyr::anti_join(brulee_mlp_only_args, by = "name") %>%
+ dplyr::filter(name != "class_weights") %>%
+ dplyr::mutate(
+ component = "linear_reg",
+ component_id = ifelse(name %in% names(formals("linear_reg")), "main", "engine")
+ ) %>%
+ dplyr::select(name, call_info, source, component, component_id)
}
res
}
+
#' @export
tunable.logistic_reg <- function(x, ...) {
res <- NextMethod()
@@ -257,19 +272,33 @@ tunable.logistic_reg <- function(x, ...) {
res$call_info[res$name == "mixture"] <-
list(list(pkg = "dials", fun = "mixture", range = c(0.05, 1.00)))
} else if (x$engine == "brulee") {
- res <- add_engine_parameters(res, brulee_logistic_engine_args)
+ res <-
+ brulee_mlp_args %>%
+ dplyr::anti_join(brulee_mlp_only_args, by = "name") %>%
+ dplyr::mutate(
+ component = "logistic_reg",
+ component_id = ifelse(name %in% names(formals("logistic_reg")), "main", "engine")
+ ) %>%
+ dplyr::select(name, call_info, source, component, component_id)
}
res
}
#' @export
-tunable.multinomial_reg <- function(x, ...) {
+tunable.multinom_reg <- function(x, ...) {
res <- NextMethod()
if (x$engine == "glmnet") {
res$call_info[res$name == "mixture"] <-
list(list(pkg = "dials", fun = "mixture", range = c(0.05, 1.00)))
} else if (x$engine == "brulee") {
- res <- add_engine_parameters(res, brulee_multinomial_engine_args)
+ res <-
+ brulee_mlp_args %>%
+ dplyr::anti_join(brulee_mlp_only_args, by = "name") %>%
+ dplyr::mutate(
+ component = "multinom_reg",
+ component_id = ifelse(name %in% names(formals("multinom_reg")), "main", "engine")
+ ) %>%
+ dplyr::select(name, call_info, source, component, component_id)
}
res
}
@@ -345,28 +374,23 @@ tunable.svm_poly <- function(x, ...) {
res
}
-
#' @export
tunable.mlp <- function(x, ...) {
res <- NextMethod()
- if (x$engine == "brulee") {
- res <- add_engine_parameters(res, brulee_mlp_engine_args)
- res$call_info[res$name == "learn_rate"] <-
- list(list(pkg = "dials", fun = "learn_rate", range = c(-3, -1/2)))
- res$call_info[res$name == "epochs"] <-
- list(list(pkg = "dials", fun = "epochs", range = c(5L, 500L)))
- activation_values <- rlang::eval_tidy(
- rlang::call2("brulee_activations", .ns = "brulee")
- )
- res$call_info[res$name == "activation"] <-
- list(list(pkg = "dials", fun = "activation", values = activation_values))
- } else if (x$engine == "keras") {
- activation_values <- parsnip::keras_activations()
- res$call_info[res$name == "activation"] <-
- list(list(pkg = "dials", fun = "activation", values = activation_values))
+ if (grepl("brulee", x$engine)) {
+ res <-
+ brulee_mlp_args %>%
+ dplyr::mutate(
+ component = "mlp",
+ component_id = ifelse(name %in% names(formals("mlp")), "main", "engine")
+ ) %>%
+ dplyr::select(name, call_info, source, component, component_id)
+ if (x$engine == "brulee") {
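+      # the plain brulee engine has a single hidden layer; drop the
+      # second-layer ("_2") parameters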
+ res <- res[!grepl("_2", res$name),]
+ }
}
res
- }
+}
#' @export
tunable.survival_reg <- function(x, ...) {
diff --git a/inst/models.tsv b/inst/models.tsv
index 1ef2a8505..2c3e2d9ab 100644
--- a/inst/models.tsv
+++ b/inst/models.tsv
@@ -1,4 +1,5 @@
"model" "mode" "engine" "pkg"
+"C5_rules" "classification" "C5.0" "rules"
"auto_ml" "classification" "h2o" "agua"
"auto_ml" "regression" "h2o" "agua"
"bag_mars" "classification" "earth" "baguette"
@@ -23,7 +24,6 @@
"boost_tree" "regression" "lightgbm" "bonsai"
"boost_tree" "regression" "spark" NA
"boost_tree" "regression" "xgboost" NA
-"C5_rules" "classification" "C5.0" "rules"
"cubist_rules" "regression" "Cubist" "rules"
"decision_tree" "censored regression" "partykit" "censored"
"decision_tree" "censored regression" "rpart" "censored"
@@ -59,6 +59,7 @@
"linear_reg" "regression" "spark" NA
"linear_reg" "regression" "stan" NA
"linear_reg" "regression" "stan_glmer" "multilevelmod"
+"logistic_reg" "classification" "LiblineaR" NA
"logistic_reg" "classification" "brulee" NA
"logistic_reg" "classification" "gee" "multilevelmod"
"logistic_reg" "classification" "glm" NA
@@ -66,17 +67,18 @@
"logistic_reg" "classification" "glmnet" NA
"logistic_reg" "classification" "h2o" "agua"
"logistic_reg" "classification" "keras" NA
-"logistic_reg" "classification" "LiblineaR" NA
"logistic_reg" "classification" "spark" NA
"logistic_reg" "classification" "stan" NA
"logistic_reg" "classification" "stan_glmer" "multilevelmod"
"mars" "classification" "earth" NA
"mars" "regression" "earth" NA
"mlp" "classification" "brulee" NA
+"mlp" "classification" "brulee_two_layer" NA
"mlp" "classification" "h2o" "agua"
"mlp" "classification" "keras" NA
"mlp" "classification" "nnet" NA
"mlp" "regression" "brulee" NA
+"mlp" "regression" "brulee_two_layer" NA
"mlp" "regression" "h2o" "agua"
"mlp" "regression" "keras" NA
"mlp" "regression" "nnet" NA
@@ -129,10 +131,10 @@
"survival_reg" "censored regression" "flexsurv" "censored"
"survival_reg" "censored regression" "flexsurvspline" "censored"
"survival_reg" "censored regression" "survival" "censored"
-"svm_linear" "classification" "kernlab" NA
"svm_linear" "classification" "LiblineaR" NA
-"svm_linear" "regression" "kernlab" NA
+"svm_linear" "classification" "kernlab" NA
"svm_linear" "regression" "LiblineaR" NA
+"svm_linear" "regression" "kernlab" NA
"svm_poly" "classification" "kernlab" NA
"svm_poly" "regression" "kernlab" NA
"svm_rbf" "classification" "kernlab" NA
diff --git a/man/details_linear_reg_glmer.Rd b/man/details_linear_reg_glmer.Rd
index 67d8745bb..d4da1c9a4 100644
--- a/man/details_linear_reg_glmer.Rd
+++ b/man/details_linear_reg_glmer.Rd
@@ -52,7 +52,7 @@ linear predictor (\verb{\eta}) for a random intercept:
\if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}
-where $i$ denotes the \code{i}th independent experimental unit
+where \code{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subjects results.
diff --git a/man/details_linear_reg_lmer.Rd b/man/details_linear_reg_lmer.Rd
index 0441f464a..4e4b5a34d 100644
--- a/man/details_linear_reg_lmer.Rd
+++ b/man/details_linear_reg_lmer.Rd
@@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept:
\if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}
-where $i$ denotes the \code{i}th independent experimental unit
+where \code{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subjects results.
diff --git a/man/details_linear_reg_stan_glmer.Rd b/man/details_linear_reg_stan_glmer.Rd
index 3bcb67ddf..78132f4bb 100644
--- a/man/details_linear_reg_stan_glmer.Rd
+++ b/man/details_linear_reg_stan_glmer.Rd
@@ -64,7 +64,7 @@ linear predictor (\verb{\eta}) for a random intercept:
\if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}
-where $i$ denotes the \code{i}th independent experimental unit
+where \code{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subjects results.
diff --git a/man/details_logistic_reg_glmer.Rd b/man/details_logistic_reg_glmer.Rd
index b848df19c..a87a9e9c6 100644
--- a/man/details_logistic_reg_glmer.Rd
+++ b/man/details_logistic_reg_glmer.Rd
@@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept:
\if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}
-where $i$ denotes the \code{i}th independent experimental unit
+where \code{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subjects results.
diff --git a/man/details_logistic_reg_stan_glmer.Rd b/man/details_logistic_reg_stan_glmer.Rd
index ce1281501..628702e45 100644
--- a/man/details_logistic_reg_stan_glmer.Rd
+++ b/man/details_logistic_reg_stan_glmer.Rd
@@ -63,7 +63,7 @@ linear predictor (\verb{\eta}) for a random intercept:
\if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}
-where $i$ denotes the \code{i}th independent experimental unit
+where \code{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subjects results.
diff --git a/man/details_mlp_brulee.Rd b/man/details_mlp_brulee.Rd
index 050be24ac..388a16605 100644
--- a/man/details_mlp_brulee.Rd
+++ b/man/details_mlp_brulee.Rd
@@ -10,14 +10,15 @@
For this engine, there are multiple modes: classification and regression
\subsection{Tuning Parameters}{
-This model has 6 tuning parameters:
+This model has 7 tuning parameters:
\itemize{
+\item \code{epochs}: # Epochs (type: integer, default: 100L)
\item \code{hidden_units}: # Hidden Units (type: integer, default: 3L)
+\item \code{activation}: Activation Function (type: character, default: ‘relu’)
\item \code{penalty}: Amount of Regularization (type: double, default: 0.001)
-\item \code{epochs}: # Epochs (type: integer, default: 100L)
+\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 0.0)
\item \code{dropout}: Dropout Rate (type: double, default: 0.0)
\item \code{learn_rate}: Learning Rate (type: double, default: 0.01)
-\item \code{activation}: Activation Function (type: character, default: ‘relu’)
}
The use of the L1 penalty (a.k.a. the lasso penalty) does \emph{not} force
@@ -29,17 +30,18 @@ Both \code{penalty} and \code{dropout} should be not be used in the same model.
Other engine arguments of interest:
\itemize{
-\item \code{momentum()}: A number used to use historical gradient infomration
+\item \code{momentum}: A number used to incorporate historical gradient information
during optimization.
-\item \code{batch_size()}: An integer for the number of training set points in
-each batch.
-\item \code{class_weights()}: Numeric class weights. See
+\item \code{batch_size}: An integer for the number of training set points in each
+batch.
+\item \code{class_weights}: Numeric class weights. See
\code{\link[brulee:brulee_mlp]{brulee::brulee_mlp()}}.
-\item \code{stop_iter()}: A non-negative integer for how many iterations with no
+\item \code{stop_iter}: A non-negative integer for the number of iterations with no
improvement before stopping. (default: 5L).
+\item \code{rate_schedule}: A function to change the learning rate over epochs.
+See \code{\link[brulee:schedule_decay_time]{brulee::schedule_decay_time()}}
+for details.
}
-
-Parsnip changes the default range for \code{learn_rate} to \code{c(-2.5, -0.5)}.
}
\subsection{Translation from parsnip to the original package (regression)}{
diff --git a/man/details_mlp_brulee_two_layer.Rd b/man/details_mlp_brulee_two_layer.Rd
new file mode 100644
index 000000000..0df09869e
--- /dev/null
+++ b/man/details_mlp_brulee_two_layer.Rd
@@ -0,0 +1,157 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/mlp_brulee_two_layer.R
+\name{details_mlp_brulee_two_layer}
+\alias{details_mlp_brulee_two_layer}
+\title{Multilayer perceptron via brulee with two hidden layers}
+\description{
+\code{\link[brulee:brulee_mlp]{brulee::brulee_mlp_two_layer()}} fits a neural network with two hidden layers (requires version 0.3.0.9000 or higher of brulee).
+}
+\details{
+For this engine, there are multiple modes: classification and regression
+\subsection{Tuning Parameters}{
+
+This model has 7 tuning parameters:
+\itemize{
+\item \code{epochs}: # Epochs (type: integer, default: 100L)
+\item \code{hidden_units}: # Hidden Units (type: integer, default: 3L)
+\item \code{activation}: Activation Function (type: character, default: ‘relu’)
+\item \code{penalty}: Amount of Regularization (type: double, default: 0.001)
+\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 0.0)
+\item \code{dropout}: Dropout Rate (type: double, default: 0.0)
+\item \code{learn_rate}: Learning Rate (type: double, default: 0.01)
+}
+
+The use of the L1 penalty (a.k.a. the lasso penalty) does \emph{not} force
+parameters to be strictly zero (as it does in packages such as glmnet).
+The zeroing out of parameters is a specific feature of the optimization
+method used in those packages.
+
+Both \code{penalty} and \code{dropout} should not be used in the same model.
+
+Other engine arguments of interest:
+\itemize{
+\item \code{hidden_units_2} and \code{activation_2} control the size and
+activation function of the second layer.
+\item \code{momentum}: A number used to incorporate historical gradient
+information during optimization.
+\item \code{batch_size}: An integer for the number of training set points in each
+batch.
+\item \code{class_weights}: Numeric class weights. See
+\code{\link[brulee:brulee_mlp]{brulee::brulee_mlp()}}.
+\item \code{stop_iter}: A non-negative integer for the number of iterations
+with no improvement before stopping (default: 5L).
+\item \code{rate_schedule}: A function to change the learning rate over epochs.
+See \code{\link[brulee:schedule_decay_time]{brulee::schedule_decay_time()}}
+for details.
+}
+}
+
+\subsection{Translation from parsnip to the original package (regression)}{
+
+\if{html}{\out{<div class="sourceCode r">}}\preformatted{mlp(
+ hidden_units = integer(1),
+ penalty = double(1),
+ dropout = double(1),
+ epochs = integer(1),
+ learn_rate = double(1),
+ activation = character(1)
+) \%>\%
+ set_engine("brulee_two_layer",
+ hidden_units_2 = integer(1),
+ activation_2 = character(1)) \%>\%
+ set_mode("regression") \%>\%
+ translate()
+}\if{html}{\out{</div>}}
+
+\if{html}{\out{<div class="sourceCode">}}\preformatted{## Single Layer Neural Network Model Specification (regression)
+##
+## Main Arguments:
+## hidden_units = integer(1)
+## penalty = double(1)
+## dropout = double(1)
+## epochs = integer(1)
+## activation = character(1)
+## learn_rate = double(1)
+##
+## Engine-Specific Arguments:
+## hidden_units_2 = integer(1)
+## activation_2 = character(1)
+##
+## Computational engine: brulee_two_layer
+##
+## Model fit template:
+## brulee::brulee_mlp_two_layer(x = missing_arg(), y = missing_arg(),
+## hidden_units = integer(1), penalty = double(1), dropout = double(1),
+## epochs = integer(1), activation = character(1), learn_rate = double(1),
+## hidden_units_2 = integer(1), activation_2 = character(1))
+}\if{html}{\out{</div>}}
+
+Note that parsnip automatically sets the linear activation in the last
+layer.
+}
+
+\subsection{Translation from parsnip to the original package (classification)}{
+
+\if{html}{\out{<div class="sourceCode r">}}\preformatted{mlp(
+ hidden_units = integer(1),
+ penalty = double(1),
+ dropout = double(1),
+ epochs = integer(1),
+ learn_rate = double(1),
+ activation = character(1)
+) \%>\%
+ set_engine("brulee_two_layer",
+ hidden_units_2 = integer(1),
+ activation_2 = character(1)) \%>\%
+ set_mode("classification") \%>\%
+ translate()
+}\if{html}{\out{</div>}}
+
+\if{html}{\out{<div class="sourceCode">}}\preformatted{## Single Layer Neural Network Model Specification (classification)
+##
+## Main Arguments:
+## hidden_units = integer(1)
+## penalty = double(1)
+## dropout = double(1)
+## epochs = integer(1)
+## activation = character(1)
+## learn_rate = double(1)
+##
+## Engine-Specific Arguments:
+## hidden_units_2 = integer(1)
+## activation_2 = character(1)
+##
+## Computational engine: brulee_two_layer
+##
+## Model fit template:
+## brulee::brulee_mlp_two_layer(x = missing_arg(), y = missing_arg(),
+## hidden_units = integer(1), penalty = double(1), dropout = double(1),
+## epochs = integer(1), activation = character(1), learn_rate = double(1),
+## hidden_units_2 = integer(1), activation_2 = character(1))
+}\if{html}{\out{</div>}}
+}
+
+\subsection{Preprocessing requirements}{
+
+Factor/categorical predictors need to be converted to numeric values
+(e.g., dummy or indicator variables) for this engine. When using the
+formula method via \code{\link[=fit.model_spec]{fit()}}, parsnip will
+convert factor columns to indicators.
+
+Predictors should have the same scale. One way to achieve this is to
+center and scale each so that each predictor has mean zero and a
+variance of one.
+}
+
+\subsection{Case weights}{
+
+The underlying model implementation does not allow for case weights.
+}
+
+\subsection{References}{
+\itemize{
+\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
+}
+}
+}
+\keyword{internal}
diff --git a/man/details_poisson_reg_glmer.Rd b/man/details_poisson_reg_glmer.Rd
index 5a32c17bd..520798925 100644
--- a/man/details_poisson_reg_glmer.Rd
+++ b/man/details_poisson_reg_glmer.Rd
@@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept:
\if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}
-where $i$ denotes the \code{i}th independent experimental unit
+where \code{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subjects results.
diff --git a/man/details_poisson_reg_stan_glmer.Rd b/man/details_poisson_reg_stan_glmer.Rd
index ef1065ada..e34078846 100644
--- a/man/details_poisson_reg_stan_glmer.Rd
+++ b/man/details_poisson_reg_stan_glmer.Rd
@@ -63,7 +63,7 @@ linear predictor (\verb{\eta}) for a random intercept:
\if{html}{\out{<div class="sourceCode">}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\}
}\if{html}{\out{</div>}}
-where $i$ denotes the \code{i}th independent experimental unit
+where \code{i} denotes the \code{i}th independent experimental unit
(e.g. subject). When the model has seen subject \code{i}, it can use that
subject’s data to adjust the \emph{population} intercept to be more specific
to that subjects results.
diff --git a/man/parsnip-package.Rd b/man/parsnip-package.Rd
index 84b9a622d..2e074dc3b 100644
--- a/man/parsnip-package.Rd
+++ b/man/parsnip-package.Rd
@@ -30,7 +30,7 @@ Authors:
Other contributors:
\itemize{
\item Emil Hvitfeldt \email{emil.hvitfeldt@posit.co} [contributor]
- \item Posit Software, PBC [copyright holder, funder]
+ \item Posit Software, PBC (03wc8by49) [copyright holder, funder]
}
}
diff --git a/man/rmd/aaa.Rmd b/man/rmd/aaa.Rmd
index c17bd649d..23dffad82 100644
--- a/man/rmd/aaa.Rmd
+++ b/man/rmd/aaa.Rmd
@@ -99,6 +99,8 @@ get_arg <- function(ns, f, arg) {
get_dials <- function(x) {
if (any(names(x) == "range")) {
cl <- rlang::call2(x$fun, .ns = x$pkg, range = x$range)
+ } else if (any(names(x) == "values")) {
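+    # qualitative parameters (e.g., activation choices) carry a `values`
+    # vector rather than a numeric `range`; pass it through to dials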
+ cl <- rlang::call2(x$fun, .ns = x$pkg, values = x$values)
} else {
cl <- rlang::call2(x$fun, .ns = x$pkg)
}
diff --git a/man/rmd/mlp_brulee.Rmd b/man/rmd/mlp_brulee.Rmd
index f6f9bdb34..c8e984015 100644
--- a/man/rmd/mlp_brulee.Rmd
+++ b/man/rmd/mlp_brulee.Rmd
@@ -10,10 +10,18 @@ defaults <-
tibble::tibble(parsnip = c("hidden_units", "penalty", "dropout", "epochs", "learn_rate", "activation", "mixture"),
default = c("3L", "0.001", "0.0", "100L", "0.01", "'relu'", "0.0"))
-param <-
- mlp() %>%
- set_engine("brulee") %>%
- make_parameter_list(defaults)
+spec <-
+ mlp(
+ hidden_units = tune(),
+ penalty = tune(),
+ dropout = tune(),
+ epochs = tune(),
+ learn_rate = tune(),
+ activation = tune()
+ ) %>%
+ set_engine("brulee")
+
+param <- spec %>% make_parameter_list(defaults)
```
This model has `r nrow(param)` tuning parameters:
@@ -28,13 +36,12 @@ Both `penalty` and `dropout` should be not be used in the same model.
Other engine arguments of interest:
- - `momentum()`: A number used to use historical gradient infomration during optimization.
- - `batch_size()`: An integer for the number of training set points in each batch.
- - `class_weights()`: Numeric class weights. See [brulee::brulee_mlp()].
- - `stop_iter()`: A non-negative integer for how many iterations with no improvement before stopping. (default: 5L).
-
-Parsnip changes the default range for `learn_rate` to `c(-2.5, -0.5)`.
-
+ - `momentum`: A number used to incorporate historical gradient information during optimization.
+ - `batch_size`: An integer for the number of training set points in each batch.
+ - `class_weights`: Numeric class weights. See [brulee::brulee_mlp()].
+ - `stop_iter`: A non-negative integer for the number of iterations with no improvement before stopping (default: 5L).
+ - `rate_schedule`: A function to change the learning rate over epochs. See [brulee::schedule_decay_time()] for details.
+
## Translation from parsnip to the original package (regression)
```{r brulee-reg}
diff --git a/man/rmd/mlp_brulee.md b/man/rmd/mlp_brulee.md
index f8580b094..1d0684fb6 100644
--- a/man/rmd/mlp_brulee.md
+++ b/man/rmd/mlp_brulee.md
@@ -7,33 +7,34 @@ For this engine, there are multiple modes: classification and regression
-This model has 6 tuning parameters:
+This model has 7 tuning parameters:
+
+- `epochs`: # Epochs (type: integer, default: 100L)
- `hidden_units`: # Hidden Units (type: integer, default: 3L)
+- `activation`: Activation Function (type: character, default: 'relu')
+
- `penalty`: Amount of Regularization (type: double, default: 0.001)
-- `epochs`: # Epochs (type: integer, default: 100L)
+- `mixture`: Proportion of Lasso Penalty (type: double, default: 0.0)
- `dropout`: Dropout Rate (type: double, default: 0.0)
- `learn_rate`: Learning Rate (type: double, default: 0.01)
-- `activation`: Activation Function (type: character, default: 'relu')
-
The use of the L1 penalty (a.k.a. the lasso penalty) does _not_ force parameters to be strictly zero (as it does in packages such as glmnet). The zeroing out of parameters is a specific feature the optimization method used in those packages.
Both `penalty` and `dropout` should be not be used in the same model.
Other engine arguments of interest:
- - `momentum()`: A number used to use historical gradient infomration during optimization.
- - `batch_size()`: An integer for the number of training set points in each batch.
- - `class_weights()`: Numeric class weights. See [brulee::brulee_mlp()].
- - `stop_iter()`: A non-negative integer for how many iterations with no improvement before stopping. (default: 5L).
-
-Parsnip changes the default range for `learn_rate` to `c(-2.5, -0.5)`.
-
+ - `momentum`: A number used to incorporate historical gradient information during optimization.
+ - `batch_size`: An integer for the number of training set points in each batch.
+ - `class_weights`: Numeric class weights. See [brulee::brulee_mlp()].
+ - `stop_iter`: A non-negative integer for the number of iterations with no improvement before stopping (default: 5L).
+ - `rate_schedule`: A function to change the learning rate over epochs. See [brulee::schedule_decay_time()] for details.
+
## Translation from parsnip to the original package (regression)
diff --git a/man/rmd/mlp_brulee_two_layer.Rmd b/man/rmd/mlp_brulee_two_layer.Rmd
new file mode 100644
index 000000000..4e76f9228
--- /dev/null
+++ b/man/rmd/mlp_brulee_two_layer.Rmd
@@ -0,0 +1,104 @@
+```{r, child = "aaa.Rmd", include = FALSE}
+```
+
+`r descr_models("mlp", "brulee_two_layer")`
+
+## Tuning Parameters
+
+```{r brulee-param-info, echo = FALSE}
+defaults <-
+ tibble::tibble(parsnip = c("hidden_units", "penalty", "dropout", "epochs", "learn_rate", "activation", "mixture"),
+ default = c("3L", "0.001", "0.0", "100L", "0.01", "'relu'", "0.0"))
+
+spec <-
+ mlp(
+ hidden_units = tune(),
+ penalty = tune(),
+ dropout = tune(),
+ epochs = tune(),
+ learn_rate = tune(),
+ activation = tune()
+ ) %>%
+ set_engine("brulee_two_layer")
+
+param <- spec %>% make_parameter_list(defaults)
+```
+
+This model has `r nrow(param)` tuning parameters:
+
+```{r brulee-param-list, echo = FALSE, results = "asis"}
+param$item
+```
+
+The use of the L1 penalty (a.k.a. the lasso penalty) does _not_ force parameters to be strictly zero (as it does in packages such as glmnet). The zeroing out of parameters is a specific feature of the optimization method used in those packages.
+
+Both `penalty` and `dropout` should not be used in the same model.
+
+Other engine arguments of interest:
+
+ - `hidden_units_2` and `activation_2` control the size and activation function of the second layer.
+ - `momentum`: A number used to incorporate historical gradient information during optimization.
+ - `batch_size`: An integer for the number of training set points in each batch.
+ - `class_weights`: Numeric class weights. See [brulee::brulee_mlp()].
+ - `stop_iter`: A non-negative integer for the number of iterations with no improvement before stopping (default: 5L).
+ - `rate_schedule`: A function to change the learning rate over epochs. See [brulee::schedule_decay_time()] for details.
+
+
+## Translation from parsnip to the original package (regression)
+
+```{r brulee-reg}
+mlp(
+ hidden_units = integer(1),
+ penalty = double(1),
+ dropout = double(1),
+ epochs = integer(1),
+ learn_rate = double(1),
+ activation = character(1)
+) %>%
+ set_engine("brulee_two_layer",
+ hidden_units_2 = integer(1),
+ activation_2 = character(1)) %>%
+ set_mode("regression") %>%
+ translate()
+```
+
+Note that parsnip automatically sets the linear activation in the last layer.
+
+## Translation from parsnip to the original package (classification)
+
+```{r brulee-cls}
+mlp(
+ hidden_units = integer(1),
+ penalty = double(1),
+ dropout = double(1),
+ epochs = integer(1),
+ learn_rate = double(1),
+ activation = character(1)
+) %>%
+ set_engine("brulee_two_layer",
+ hidden_units_2 = integer(1),
+ activation_2 = character(1)) %>%
+ set_mode("classification") %>%
+ translate()
+```
+
+
+## Preprocessing requirements
+
+```{r child = "template-makes-dummies.Rmd"}
+```
+
+```{r child = "template-same-scale.Rmd"}
+```
+
+## Case weights
+
+```{r child = "template-no-case-weights.Rmd"}
+```
+
+## References
+
+ - Kuhn, M, and K Johnson. 2013. _Applied Predictive Modeling_. Springer.
+
+
+
diff --git a/man/rmd/mlp_brulee_two_layer.md b/man/rmd/mlp_brulee_two_layer.md
new file mode 100644
index 000000000..93a7eabb1
--- /dev/null
+++ b/man/rmd/mlp_brulee_two_layer.md
@@ -0,0 +1,148 @@
+
+
+
+For this engine, there are multiple modes: classification and regression
+
+## Tuning Parameters
+
+
+
+This model has 7 tuning parameters:
+
+- `epochs`: # Epochs (type: integer, default: 100L)
+
+- `hidden_units`: # Hidden Units (type: integer, default: 3L)
+
+- `activation`: Activation Function (type: character, default: 'relu')
+
+- `penalty`: Amount of Regularization (type: double, default: 0.001)
+
+- `mixture`: Proportion of Lasso Penalty (type: double, default: 0.0)
+
+- `dropout`: Dropout Rate (type: double, default: 0.0)
+
+- `learn_rate`: Learning Rate (type: double, default: 0.01)
+
+The use of the L1 penalty (a.k.a. the lasso penalty) does _not_ force parameters to be strictly zero (as it does in packages such as glmnet). The zeroing out of parameters is a specific feature of the optimization method used in those packages.
+
+Both `penalty` and `dropout` should not be used in the same model.
+
+Other engine arguments of interest:
+
+ - `hidden_units_2` and `activation_2` control the size and activation function of the second layer.
+ - `momentum`: A number used to incorporate historical gradient information during optimization.
+ - `batch_size`: An integer for the number of training set points in each batch.
+ - `class_weights`: Numeric class weights. See [brulee::brulee_mlp()].
+ - `stop_iter`: A non-negative integer for the number of iterations with no improvement before stopping (default: 5L).
+ - `rate_schedule`: A function to change the learning rate over epochs. See [brulee::schedule_decay_time()] for details.
+
+
+## Translation from parsnip to the original package (regression)
+
+
+``` r
+mlp(
+ hidden_units = integer(1),
+ penalty = double(1),
+ dropout = double(1),
+ epochs = integer(1),
+ learn_rate = double(1),
+ activation = character(1)
+) %>%
+ set_engine("brulee_two_layer",
+ hidden_units_2 = integer(1),
+ activation_2 = character(1)) %>%
+ set_mode("regression") %>%
+ translate()
+```
+
+```
+## Single Layer Neural Network Model Specification (regression)
+##
+## Main Arguments:
+## hidden_units = integer(1)
+## penalty = double(1)
+## dropout = double(1)
+## epochs = integer(1)
+## activation = character(1)
+## learn_rate = double(1)
+##
+## Engine-Specific Arguments:
+## hidden_units_2 = integer(1)
+## activation_2 = character(1)
+##
+## Computational engine: brulee_two_layer
+##
+## Model fit template:
+## brulee::brulee_mlp_two_layer(x = missing_arg(), y = missing_arg(),
+## hidden_units = integer(1), penalty = double(1), dropout = double(1),
+## epochs = integer(1), activation = character(1), learn_rate = double(1),
+## hidden_units_2 = integer(1), activation_2 = character(1))
+```
+
+Note that parsnip automatically sets the linear activation in the last layer.
+
+## Translation from parsnip to the original package (classification)
+
+
+``` r
+mlp(
+ hidden_units = integer(1),
+ penalty = double(1),
+ dropout = double(1),
+ epochs = integer(1),
+ learn_rate = double(1),
+ activation = character(1)
+) %>%
+ set_engine("brulee_two_layer",
+ hidden_units_2 = integer(1),
+ activation_2 = character(1)) %>%
+ set_mode("classification") %>%
+ translate()
+```
+
+```
+## Single Layer Neural Network Model Specification (classification)
+##
+## Main Arguments:
+## hidden_units = integer(1)
+## penalty = double(1)
+## dropout = double(1)
+## epochs = integer(1)
+## activation = character(1)
+## learn_rate = double(1)
+##
+## Engine-Specific Arguments:
+## hidden_units_2 = integer(1)
+## activation_2 = character(1)
+##
+## Computational engine: brulee_two_layer
+##
+## Model fit template:
+## brulee::brulee_mlp_two_layer(x = missing_arg(), y = missing_arg(),
+## hidden_units = integer(1), penalty = double(1), dropout = double(1),
+## epochs = integer(1), activation = character(1), learn_rate = double(1),
+## hidden_units_2 = integer(1), activation_2 = character(1))
+```
+
+
+## Preprocessing requirements
+
+
+Factor/categorical predictors need to be converted to numeric values (e.g., dummy or indicator variables) for this engine. When using the formula method via \code{\link[=fit.model_spec]{fit()}}, parsnip will convert factor columns to indicators.
+
+
+Predictors should have the same scale. One way to achieve this is to center and
+scale each so that each predictor has mean zero and a variance of one.
+
+## Case weights
+
+
+The underlying model implementation does not allow for case weights.
+
+## References
+
+ - Kuhn, M, and K Johnson. 2013. _Applied Predictive Modeling_. Springer.
+
+
+
diff --git a/tests/testthat/_snaps/linear_reg.md b/tests/testthat/_snaps/linear_reg.md
index 229828f30..6aefc8f4f 100644
--- a/tests/testthat/_snaps/linear_reg.md
+++ b/tests/testthat/_snaps/linear_reg.md
@@ -175,3 +175,59 @@
Error in `fit()`:
! Please install the glmnet package to use this engine.
+# tunables
+
+ Code
+ linear_reg() %>% tunable()
+ Output
+      # A tibble: 0 × 5
+      # ℹ 5 variables: name <chr>, call_info <list>, source <chr>, component <chr>,
+      #   component_id <chr>
+
+---
+
+ Code
+ linear_reg() %>% set_engine("brulee") %>% tunable()
+ Output
+      # A tibble: 8 × 5
+        name          call_info        source     component  component_id
+        <chr>         <list>           <chr>      <chr>      <chr>
+      1 epochs        <named list [3]> model_spec linear_reg engine
+      2 penalty       <named list [2]> model_spec linear_reg main
+      3 mixture       <named list [2]> model_spec linear_reg main
+      4 learn_rate    <named list [3]> model_spec linear_reg engine
+      5 momentum      <named list [3]> model_spec linear_reg engine
+      6 batch_size    <named list [2]> model_spec linear_reg engine
+      7 stop_iter     <named list [2]> model_spec linear_reg engine
+      8 rate_schedule <named list [3]> model_spec linear_reg engine
+
+---
+
+ Code
+ linear_reg() %>% set_engine("glmnet") %>% tunable()
+ Output
+      # A tibble: 2 × 5
+        name    call_info        source     component  component_id
+        <chr>   <list>           <chr>      <chr>      <chr>
+      1 penalty <named list [2]> model_spec linear_reg main
+      2 mixture <named list [3]> model_spec linear_reg main
+
+---
+
+ Code
+ linear_reg() %>% set_engine("quantreg") %>% tunable()
+ Output
+      # A tibble: 0 × 5
+      # ℹ 5 variables: name <chr>, call_info <list>, source <chr>, component <chr>,
+      #   component_id <chr>
+
+---
+
+ Code
+ linear_reg() %>% set_engine("keras") %>% tunable()
+ Output
+      # A tibble: 1 × 5
+        name    call_info        source     component  component_id
+        <chr>   <list>           <chr>      <chr>      <chr>
+      1 penalty <named list [2]> model_spec linear_reg main
+
diff --git a/tests/testthat/_snaps/logistic_reg.md b/tests/testthat/_snaps/logistic_reg.md
index b87d8711f..e95923dc1 100644
--- a/tests/testthat/_snaps/logistic_reg.md
+++ b/tests/testthat/_snaps/logistic_reg.md
@@ -139,3 +139,51 @@
Error in `fit()`:
! For the LiblineaR engine, `penalty` must be `> 0`, not 0.
+# tunables
+
+ Code
+ logistic_reg() %>% tunable()
+ Output
+      # A tibble: 0 × 5
+      # ℹ 5 variables: name <chr>, call_info <list>, source <chr>, component <chr>,
+      #   component_id <chr>
+
+---
+
+ Code
+ logistic_reg() %>% set_engine("brulee") %>% tunable()
+ Output
+      # A tibble: 9 × 5
+        name          call_info        source     component    component_id
+        <chr>         <list>           <chr>      <chr>        <chr>
+      1 epochs        <named list [3]> model_spec logistic_reg engine
+      2 penalty       <named list [2]> model_spec logistic_reg main
+      3 mixture       <named list [2]> model_spec logistic_reg main
+      4 learn_rate    <named list [3]> model_spec logistic_reg engine
+      5 momentum      <named list [3]> model_spec logistic_reg engine
+      6 batch_size    <named list [2]> model_spec logistic_reg engine
+      7 class_weights <named list [2]> model_spec logistic_reg engine
+      8 stop_iter     <named list [2]> model_spec logistic_reg engine
+      9 rate_schedule <named list [3]> model_spec logistic_reg engine
+
+---
+
+ Code
+ logistic_reg() %>% set_engine("glmnet") %>% tunable()
+ Output
+      # A tibble: 2 × 5
+        name    call_info        source     component    component_id
+        <chr>   <list>           <chr>      <chr>        <chr>
+      1 penalty <named list [2]> model_spec logistic_reg main
+      2 mixture <named list [3]> model_spec logistic_reg main
+
+---
+
+ Code
+ logistic_reg() %>% set_engine("keras") %>% tunable()
+ Output
+      # A tibble: 1 × 5
+        name    call_info        source     component    component_id
+        <chr>   <list>           <chr>      <chr>        <chr>
+      1 penalty <named list [2]> model_spec logistic_reg main
+
diff --git a/tests/testthat/_snaps/mlp.md b/tests/testthat/_snaps/mlp.md
index 2ab34ced0..a4b820eee 100644
--- a/tests/testthat/_snaps/mlp.md
+++ b/tests/testthat/_snaps/mlp.md
@@ -60,3 +60,73 @@
Error in `fit()`:
! Both weight decay and dropout should not be specified.
+# tunables
+
+ Code
+ mlp() %>% set_engine("brulee") %>% tunable()
+ Output
+      # A tibble: 12 × 5
+         name          call_info        source     component component_id
+         <chr>         <list>           <chr>      <chr>     <chr>
+       1 epochs        <named list [3]> model_spec mlp       main
+       2 hidden_units  <named list [3]> model_spec mlp       main
+       3 activation    <named list [3]> model_spec mlp       main
+       4 penalty       <named list [2]> model_spec mlp       main
+       5 mixture       <named list [2]> model_spec mlp       engine
+       6 dropout       <named list [2]> model_spec mlp       main
+       7 learn_rate    <named list [3]> model_spec mlp       main
+       8 momentum      <named list [3]> model_spec mlp       engine
+       9 batch_size    <named list [2]> model_spec mlp       engine
+      10 class_weights <named list [2]> model_spec mlp       engine
+      11 stop_iter     <named list [2]> model_spec mlp       engine
+      12 rate_schedule <named list [3]> model_spec mlp       engine
+
+---
+
+ Code
+ mlp() %>% set_engine("brulee_two_layer") %>% tunable()
+ Output
+      # A tibble: 14 × 5
+         name           call_info        source     component component_id
+         <chr>          <list>           <chr>      <chr>     <chr>
+       1 epochs         <named list [3]> model_spec mlp       main
+       2 hidden_units   <named list [3]> model_spec mlp       main
+       3 hidden_units_2 <named list [3]> model_spec mlp       engine
+       4 activation     <named list [3]> model_spec mlp       main
+       5 activation_2   <named list [3]> model_spec mlp       engine
+       6 penalty        <named list [2]> model_spec mlp       main
+       7 mixture        <named list [2]> model_spec mlp       engine
+       8 dropout        <named list [2]> model_spec mlp       main
+       9 learn_rate     <named list [3]> model_spec mlp       main
+      10 momentum       <named list [3]> model_spec mlp       engine
+      11 batch_size     <named list [2]> model_spec mlp       engine
+      12 class_weights  <named list [2]> model_spec mlp       engine
+      13 stop_iter      <named list [2]> model_spec mlp       engine
+      14 rate_schedule  <named list [3]> model_spec mlp       engine
+
+---
+
+ Code
+ mlp() %>% set_engine("nnet") %>% tunable()
+ Output
+      # A tibble: 3 × 5
+        name         call_info        source     component component_id
+        <chr>        <list>           <chr>      <chr>     <chr>
+      1 hidden_units <named list [2]> model_spec mlp       main
+      2 penalty      <named list [2]> model_spec mlp       main
+      3 epochs       <named list [2]> model_spec mlp       main
+
+---
+
+ Code
+ mlp() %>% set_engine("keras") %>% tunable()
+ Output
+      # A tibble: 5 × 5
+        name         call_info        source     component component_id
+        <chr>        <list>           <chr>      <chr>     <chr>
+      1 hidden_units <named list [2]> model_spec mlp       main
+      2 penalty      <named list [2]> model_spec mlp       main
+      3 dropout      <named list [2]> model_spec mlp       main
+      4 epochs       <named list [2]> model_spec mlp       main
+      5 activation   <named list [2]> model_spec mlp       main
+
diff --git a/tests/testthat/_snaps/multinom_reg.md b/tests/testthat/_snaps/multinom_reg.md
index 78d2ef3ad..1107794b2 100644
--- a/tests/testthat/_snaps/multinom_reg.md
+++ b/tests/testthat/_snaps/multinom_reg.md
@@ -60,3 +60,62 @@
Error in `fit()`:
! `penalty` must be a number larger than or equal to 0 or `NULL`, not the number -1.
+# tunables
+
+ Code
+ multinom_reg() %>% tunable()
+ Output
+      # A tibble: 1 × 5
+        name    call_info        source     component    component_id
+        <chr>   <list>           <chr>      <chr>        <chr>
+      1 penalty <named list [2]> model_spec multinom_reg main
+
+---
+
+ Code
+ multinom_reg() %>% set_engine("brulee") %>% tunable()
+ Output
+      # A tibble: 9 × 5
+        name          call_info        source     component    component_id
+        <chr>         <list>           <chr>      <chr>        <chr>
+      1 epochs        <named list [3]> model_spec multinom_reg engine
+      2 penalty       <named list [2]> model_spec multinom_reg main
+      3 mixture       <named list [2]> model_spec multinom_reg main
+      4 learn_rate    <named list [3]> model_spec multinom_reg engine
+      5 momentum      <named list [3]> model_spec multinom_reg engine
+      6 batch_size    <named list [2]> model_spec multinom_reg engine
+      7 class_weights <named list [2]> model_spec multinom_reg engine
+      8 stop_iter     <named list [2]> model_spec multinom_reg engine
+      9 rate_schedule <named list [3]> model_spec multinom_reg engine
+
+---
+
+ Code
+ multinom_reg() %>% set_engine("nnet") %>% tunable()
+ Output
+      # A tibble: 1 × 5
+        name    call_info        source     component    component_id
+        <chr>   <list>           <chr>      <chr>        <chr>
+      1 penalty <named list [2]> model_spec multinom_reg main
+
+---
+
+ Code
+ multinom_reg() %>% set_engine("glmnet") %>% tunable()
+ Output
+      # A tibble: 2 × 5
+        name    call_info        source     component    component_id
+        <chr>   <list>           <chr>      <chr>        <chr>
+      1 penalty <named list [2]> model_spec multinom_reg main
+      2 mixture <named list [3]> model_spec multinom_reg main
+
+---
+
+ Code
+ multinom_reg() %>% set_engine("keras") %>% tunable()
+ Output
+      # A tibble: 1 × 5
+        name    call_info        source     component    component_id
+        <chr>   <list>           <chr>      <chr>        <chr>
+      1 penalty <named list [2]> model_spec multinom_reg main
+
diff --git a/tests/testthat/_snaps/registration.md b/tests/testthat/_snaps/registration.md
index 4774183ac..575f18df6 100644
--- a/tests/testthat/_snaps/registration.md
+++ b/tests/testthat/_snaps/registration.md
@@ -413,22 +413,29 @@
modes: unknown, classification, regression
engines:
- classification: brulee, keras, nnet
- regression: brulee, keras, nnet
+ classification: brulee, brulee_two_layer, keras, nnet
+ regression: brulee, brulee_two_layer, keras, nnet
arguments:
- keras:
+ keras:
hidden_units --> hidden_units
penalty --> penalty
dropout --> dropout
epochs --> epochs
activation --> activation
- nnet:
+ nnet:
hidden_units --> size
penalty --> decay
epochs --> maxit
- brulee:
+ brulee:
+ hidden_units --> hidden_units
+ penalty --> penalty
+ epochs --> epochs
+ dropout --> dropout
+ learn_rate --> learn_rate
+ activation --> activation
+ brulee_two_layer:
hidden_units --> hidden_units
penalty --> penalty
epochs --> epochs
@@ -437,21 +444,25 @@
activation --> activation
fit modules:
- engine mode
- keras regression
- keras classification
- nnet regression
- nnet classification
- brulee regression
- brulee classification
+ engine mode
+ keras regression
+ keras classification
+ nnet regression
+ nnet classification
+ brulee regression
+ brulee classification
+ brulee_two_layer regression
+ brulee_two_layer classification
prediction modules:
- mode engine methods
- classification brulee class, prob
- classification keras class, prob, raw
- classification nnet class, prob, raw
- regression brulee numeric
- regression keras numeric, raw
- regression nnet numeric, raw
+ mode engine methods
+ classification brulee class, prob
+ classification brulee_two_layer class, prob
+ classification keras class, prob, raw
+ classification nnet class, prob, raw
+ regression brulee numeric
+ regression brulee_two_layer numeric
+ regression keras numeric, raw
+ regression nnet numeric, raw
diff --git a/tests/testthat/test-linear_reg.R b/tests/testthat/test-linear_reg.R
index ef0022feb..a74dceebd 100644
--- a/tests/testthat/test-linear_reg.R
+++ b/tests/testthat/test-linear_reg.R
@@ -387,3 +387,39 @@ test_that("prevent using a Poisson family", {
error = TRUE
)
})
+
+
+# ------------------------------------------------------------------------------
+
+test_that("tunables", {
+
+ expect_snapshot(
+ linear_reg() %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ linear_reg() %>%
+ set_engine("brulee") %>%
+ tunable()
+ )
+ expect_snapshot(
+ linear_reg() %>%
+ set_engine("glmnet") %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ linear_reg() %>%
+ set_engine("quantreg") %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ linear_reg() %>%
+ set_engine("keras") %>%
+ tunable()
+ )
+
+})
+
diff --git a/tests/testthat/test-logistic_reg.R b/tests/testthat/test-logistic_reg.R
index c205fbe3b..da3f0dfce 100644
--- a/tests/testthat/test-logistic_reg.R
+++ b/tests/testthat/test-logistic_reg.R
@@ -286,3 +286,31 @@ test_that("check_args() works", {
}
)
})
+
+# ------------------------------------------------------------------------------
+
+test_that("tunables", {
+
+ expect_snapshot(
+ logistic_reg() %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ logistic_reg() %>%
+ set_engine("brulee") %>%
+ tunable()
+ )
+ expect_snapshot(
+ logistic_reg() %>%
+ set_engine("glmnet") %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ logistic_reg() %>%
+ set_engine("keras") %>%
+ tunable()
+ )
+
+})
diff --git a/tests/testthat/test-mlp.R b/tests/testthat/test-mlp.R
index 6ce032d74..f56270dc6 100644
--- a/tests/testthat/test-mlp.R
+++ b/tests/testthat/test-mlp.R
@@ -81,3 +81,32 @@ test_that("check_args() works", {
}
)
})
+
+# ------------------------------------------------------------------------------
+
+test_that("tunables", {
+
+ expect_snapshot(
+ mlp() %>%
+ set_engine("brulee") %>%
+ tunable()
+ )
+ expect_snapshot(
+ mlp() %>%
+ set_engine("brulee_two_layer") %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ mlp() %>%
+ set_engine("nnet") %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ mlp() %>%
+ set_engine("keras") %>%
+ tunable()
+ )
+
+})
diff --git a/tests/testthat/test-multinom_reg.R b/tests/testthat/test-multinom_reg.R
index 748367f81..af9885f89 100644
--- a/tests/testthat/test-multinom_reg.R
+++ b/tests/testthat/test-multinom_reg.R
@@ -43,3 +43,38 @@ test_that('check_args() works', {
}
)
})
+
+# ------------------------------------------------------------------------------
+
+test_that("tunables", {
+
+ expect_snapshot(
+ multinom_reg() %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ multinom_reg() %>%
+ set_engine("brulee") %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ multinom_reg() %>%
+ set_engine("nnet") %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ multinom_reg() %>%
+ set_engine("glmnet") %>%
+ tunable()
+ )
+
+ expect_snapshot(
+ multinom_reg() %>%
+ set_engine("keras") %>%
+ tunable()
+ )
+
+})