Skip to content

Instantly share code, notes, and snippets.

View RaphaelS1's full-sized avatar
🐢

Raphael Sonabend RaphaelS1

🐢
View GitHub Profile
@RaphaelS1
RaphaelS1 / riscitation.R
Last active November 25, 2023 10:43
Create a .ris file from an R citation
#' riscitation - Create .ris file from R citation
#'
#' Suggests:
#' Package `berryFunctions` if `opendelete = TRUE`.
#'
#' Arguments
#' `pkg:character(1)` - Name of package to cite
#' `path:character(1)` - Path to write file to, should include file name but not extension
#' `opendelete:logical(1)` - If `TRUE` (default), opens the .ris file in the default application then deletes
#' the file.
## Register package repositories. Order matters: install.packages() searches
## repositories in this order, so the two r-universe repos (development
## builds of the mlr-org and RaphaelS1 packages) take precedence over CRAN.
options(
  repos = c(
    mlrorg = "https://mlr-org.r-universe.dev",
    raphaels1 = "https://raphaels1.r-universe.dev",
    CRAN = "https://cloud.r-project.org"
  )
)
## Install the mlr3 ecosystem packages plus survivalmodels from the
## repositories configured above.
## NOTE(review): this reinstalls on every run; consider guarding with
## requireNamespace() checks before installing.
install.packages(c("ggplot2", "mlr3benchmark", "mlr3pipelines", "mlr3proba", "mlr3tuning",
"survivalmodels", "mlr3extralearners"))
library(survivalmodels)
## Set up the Python backends required by the neural survival models:
## pycox (with torch) and keras (with tensorflow) are installed via pip
## into the Python environment used by reticulate.
install_pycox(pip = TRUE, install_torch = TRUE)
install_keras(pip = TRUE, install_tensorflow = TRUE)
## survivalmodels helper -- presumably seeds both the R and Python RNGs
## for reproducibility; confirm against the survivalmodels documentation.
set_seed(1234)
library(mlr3)
library(mlr3proba)
## get the `whas` task from mlr3proba's built-in task registry
## (Worcester Heart Attack Study survival data)
whas <- tsk("whas")
## create our own task from the rats dataset
## (survival::rats ships with the recommended `survival` package)
rats_data <- survival::rats
## convert characters to factors
## NOTE(review): the conversion code this comment refers to is not visible
## here -- the gist appears truncated at this point.
library(paradox)
## Hyperparameter search space shared by the neural survival learners:
## continuous ranges for dropout/regularisation/learning rate, integer
## range for hidden-layer nodes.
## NOTE(review): this ps() call is missing its closing parenthesis --
## lines appear to have been dropped from the original gist; do not run
## this fragment as-is.
search_space <- ps(
 ## p_dbl for numeric valued parameters
 dropout = p_dbl(lower = 0, upper = 1),
 weight_decay = p_dbl(lower = 0, upper = 0.5),
 learning_rate = p_dbl(lower = 0, upper = 1),
 ## p_int for integer valued parameters
 nodes = p_int(lower = 1, upper = 32),
library(mlr3tuning)
## Wrap a learner in an AutoTuner: random search over the `search_space`
## defined above, holdout resampling for inner evaluation, Harrell's
## C-index as the tuning measure, terminating after 2 evaluations
## (kept tiny for demonstration purposes).
## NOTE(review): the closing `)` of AutoTuner$new() and the function's
## closing `}` are missing -- the gist appears truncated here; do not run
## this fragment as-is.
create_autotuner <- function(learner) {
 AutoTuner$new(
  learner = learner,
  search_space = search_space,
  resampling = rsmp("holdout"),
  measure = msr("surv.cindex"),
  terminator = trm("evals", n_evals = 2),
  tuner = tnr("random_search")
## The neural survival learners live in mlr3extralearners
library(mlr3extralearners)
## Construct the five neural-network learners in one call, sharing the
## same training settings: 30% validation fraction, early stopping,
## 10 epochs, Adam optimizer.
learners <- lrns(
  sprintf("surv.%s", c("coxtime", "deephit", "deepsurv", "loghaz", "pchazard")),
  frac = 0.3, early_stopping = TRUE, epochs = 10, optimizer = "adam"
)
# apply our function
library(mlr3pipelines)
## Wrap a learner in a preprocessing pipeline: one-hot encode factor
## features, then scale numeric features, then fit the learner itself.
create_pipeops <- function(learner) {
 preprocess <- po("encode") %>>% po("scale")
 preprocess %>>% po("learner", learner)
}
## apply our function
learners <- lapply(learners, create_pipeops)
## 3-fold cross-validation as the (outer) resampling strategy
## NOTE(review): the original comment said "holdout", but the code uses
## rsmp("cv", folds = 3) -- the code is taken as authoritative here.
resampling <- rsmp("cv", folds = 3)
## add KM and CPH as untuned baseline learners
learners <- c(learners, lrns(c("surv.kaplan", "surv.coxph")))
## NOTE(review): `tasks` is not defined in the visible code -- presumably
## a list such as list(whas, <rats task>) built earlier in the gist; verify
## before running.
design <- benchmark_grid(tasks, learners, resampling)
bm <- benchmark(design)
## Aggregate with Harrell's C and Integrated Graf Score
msrs <- msrs(c("surv.cindex", "surv.graf"))