Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
Most Starred R Packages
# load packages & custom functions ---------------------------------------------
# analysis window: five years of CRAN download history ending 2020-05-31
today_date <- Sys.Date()
from_date <- as.Date("2015-06-01")
to_date <- as.Date("2020-05-31")
library(tidyverse)
library(httr)       # GitHub API requests (GET, oauth, content)
library(cranlogs)   # cran_downloads() for CRAN download counts
library(ggrepel)    # geom_text_repel for non-overlapping plot labels
library(scales)     # comma axis label formatting
library(lubridate)  # as_datetime(), today(), years()
library(knitr)
library(stringr)    # str_to_title used when prettifying column names
date_without_zeros <- function(x){
# Format a Date as m/d/yy with zero-padding stripped,
# e.g. as.Date("2020-06-05") -> "6/5/20". Used for plot captions.
padded <- format(x, "%m/%d/%y")
# drop any "0" that immediately precedes another digit (the pad zeros)
gsub("0(\\d)", "\\1", padded)
}
gh_from_url <- function(x){
# Extract a canonical lowercase "https://github.com/<owner>/<repo>" URL from
# a CRAN URL/BugReports field, which may contain several comma- or
# space-separated URLs, prose, angle-bracket markup, or "/issues" suffixes.
# Returns the cleaned URL (NA with a warning if no GitHub URL is found in a
# multi-URL field, though callers only invoke this after a GitHub match).
x <- gsub("http://", "https://", tolower(x))
x <- gsub("www\\.github\\.com", "github.com", x)
x <- gsub("^github.com", "https://github.com", x)
x <- gsub("/issues", "", x)
x <- gsub("\\.git", "", x)
x <- gsub("For source code, development versions and issue tracker see", "", x, ignore.case=TRUE)
x <- trimws(x)
x <- gsub("development versions and issue tracker see ", "", x, ignore.case=TRUE)
x <- trimws(x)
# strip an angle-bracket wrapper: "<https://...>" -> "https://..."
x <- gsub("^<(.*)>$", "\\1", x)
# when several URLs are listed, keep the first one that points at GitHub
if(grepl(',', x)){
x <- strsplit(x, ",")[[1]]
x <- trimws(x[min(which(grepl('http://github.com|https://github.com|http://www.github.com', x)))])
}
if(grepl(' ', x)){
x <- strsplit(x, " ")[[1]]
x <- trimws(x[min(which(grepl('http://github.com|https://github.com|http://www.github.com', x)))])
}
# drop trailing "(word)" annotations after the repo name
# FIX: original used [a-zA-z]; the A-z range also matches the punctuation
# characters between 'Z' and 'a' ("[", "\", "]", "^", "_", "`")
x <- gsub("^(.*)/(.*)#\\([a-zA-Z]+\\)\\b", "\\1/\\2", x)
x <- gsub("^(.*)/(.*)[[:space:]]+\\([a-zA-Z]+\\)\\b", "\\1/\\2", x)
# "some text https://..." -> keep only the URL
x <- gsub("^(.*) http(.*)$", "http\\2", x)
x <- trimws(x)
x <- gsub("/$", "", x)
x <- trimws(x)
return(x)
}
aut_maintainer_from_details <- function(x){
# Pull a single author name out of a CRAN "Author" field. When role
# annotations are present (e.g. "Jane Doe [aut, cre], Bob Roe [ctb]"),
# prefer the first person flagged as creator; otherwise take the first
# comma-separated name. Trailing "(...)" and "<email>" suffixes are removed.
x <- gsub("'|\"", "", x)
if(grepl(',', x)){
parts <- strsplit(x, "\\],")[[1]]
is_creator <- grepl(pattern='\\[aut, cre|\\[cre, aut|\\[cre', parts, ignore.case=TRUE)
if(any(is_creator)){
parts <- parts[min(which(is_creator))]
parts <- gsub("\\[aut, cre|\\[cre, aut|\\[cre", "", parts)
}
first_author <- strsplit(parts, ",")[[1]][1]
first_author <- trimws(gsub("\\]", "", first_author))
x <- trimws(gsub(" \\[aut", "", first_author))
}
x <- trimws(gsub(" \\(.*\\)$", "", x))
x <- trimws(gsub(" <.*>$", "", x))
return(x)
}
gh_star_count <- function(url){
# Look up a GitHub repository's stargazer count via api.github.com.
# Relies on the globally-defined `gtoken` auth config; returns NA_integer_
# on any failure (bad URL, missing repo, rate limit).
Sys.sleep(0.5)  # crude throttle between API calls
api_url <- gsub("https://github.com/", "https://api.github.com/repos/", url)
tryCatch({
resp <- GET(api_url, gtoken)
stop_for_status(resp)
content(resp)$stargazers_count
}, error = function(e) NA_integer_)
}
gh_last_commit_date <- function(url){
# Look up the timestamp of the most recent commit on a GitHub repository's
# default branch (first item of /commits with per_page=1). Relies on the
# global `gtoken` auth config; returns NA_character_ on any failure.
api_url <- gsub("https://github.com/", "https://api.github.com/repos/", url)
tryCatch({
resp <- GET(paste0(api_url, "/commits?page=1&per_page=1"), gtoken)
stop_for_status(resp)
content(resp)[[1]]$commit$committer$date
}, error = function(e) NA_character_)
}
# authenticate to github -------------------------------------------------------
# use Hadley's key and secret
# NOTE(review): these OAuth credentials are hard-coded in source. They appear
# to be the publicly documented httr demo app, but any private key/secret
# should be read from environment variables, never committed to source control.
myapp <- oauth_app("github",
key = "56b637a5baffac62cad9",
secret = "8e107541ae1791259e9987d544ca568633da2ebf")
# interactive browser-based OAuth dance; gtoken is used by the gh_* helpers
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
gtoken <- config(token = github_token)
# pull list of packages --------------------------------------------------------
# get list of currently available packages on CRAN
pkgs <- tools::CRAN_package_db()
# remove duplicate MD5sum column since tibbles can't handle duplicate column names
pkgs <- pkgs[,unique(names(pkgs))]
# keep one row per package name
pkgs <- pkgs %>%
rename(Name = Package) %>%
distinct(Name, .keep_all = TRUE)
# get details for each package -------------------------------------------------
# WARNING: This takes awhile to complete (one cranlogs query per package,
# plus two GitHub API round-trips per package that links to GitHub)
n_pkgs <- nrow(pkgs)
# accumulate one tibble row per package in a pre-allocated list and bind
# once at the end; rbind-ing inside the loop copies the whole accumulator
# every iteration (O(n^2))
pkg_rows <- vector("list", n_pkgs)
for(i in seq_len(n_pkgs)){
if(i %% 5 == 0){
Sys.sleep(1)  # light throttling so we don't hammer the APIs
}
if(i %% 100 == 0){
message(sprintf("Processing package #%s out of %s", i, n_pkgs))
}
this_url <- pkgs[i,]$URL
on_github <- FALSE
this_github_url <- NA_character_
gh_stars <- NA_integer_
gh_last_commit <- NA_character_
# NOTE(review): a missing URL is NA rather than NULL here, so this guard
# rarely fires; grepl() on NA returns FALSE, which keeps the logic safe
if(!is.null(this_url)){
on_github <- grepl('http://github.com|https://github.com|http://www.github.com', this_url)
if(on_github){
this_github_url <- gh_from_url(this_url)
gh_stars <- gh_star_count(this_github_url)
gh_last_commit <- gh_last_commit_date(this_github_url)
} else {
# check the BugReports URL as a backup (e.g. shiny package references GitHub this way)
issues_on_github <- grepl('http://github.com|https://github.com|http://www.github.com', pkgs[i,]$BugReports)
if(length(issues_on_github) == 0 || !issues_on_github){
this_github_url <- NA_character_
} else {
this_github_url <- gh_from_url(pkgs[i,]$BugReports)
gh_stars <- gh_star_count(this_github_url)
gh_last_commit <- gh_last_commit_date(this_github_url)
on_github <- TRUE
}
}
} else {
this_url <- NA_character_
}
# total downloads over the fixed analysis window
downloads <- cran_downloads(pkgs[i,]$Name, from=from_date, to=to_date)
pkg_rows[[i]] <- tibble(name = pkgs[i,]$Name,
description = pkgs[i,]$Description,
published = pkgs[i,]$Published,
author = aut_maintainer_from_details(pkgs[i,]$Author),
url = this_url,
github_ind = on_github,
github_url = this_github_url,
downloads = sum(downloads$count),
stars = gh_stars,
last_commit = gh_last_commit)
}
all_pkg_details <- bind_rows(pkg_rows)
# save dataset for Twitter bot -------------------------------------------------
# remove observations where the GitHub URL refers to a repository that
# is not specific to R and therefore might have an inflated star count
all_pkg_details_clean <- all_pkg_details %>%
filter(!(name %in% c('xgboost', 'h2o', 'feather', 'prophet', 'mlflow', 'xtensor', 'arrow', 'interpret', 'mlr'))) %>%
filter(as_datetime(last_commit) >= today() - years(1)) %>% # MUST BE RECENTLY BEING WORKED ON IN LAST YEAR!!!
# downloads-per-star ratio; guard against Inf/NaN from zero or NA stars
mutate(downloads_per_star = downloads / stars,
downloads_per_star = ifelse(!is.finite(downloads_per_star), NA_real_, downloads_per_star))
write_csv(all_pkg_details_clean, "r-package-star-download-data.csv")
# basic summary stats ----------------------------------------------------------
# count and proportion of all packages listing a GitHub URL
sum(all_pkg_details$github_ind)
mean(all_pkg_details$github_ind)
# proportion of packages with stars
mean(!is.na(all_pkg_details$stars))
# typical number of stars per package (cleaned dataset only)
mean(all_pkg_details_clean$stars, na.rm=TRUE)
median(all_pkg_details_clean$stars, na.rm=TRUE)
max(all_pkg_details_clean$stars, na.rm=TRUE)
# typical number of downloads per package
mean(all_pkg_details_clean$downloads, na.rm=TRUE)
median(all_pkg_details_clean$downloads, na.rm=TRUE)
# percent of packages over 10 stars
mean(all_pkg_details_clean$stars > 10, na.rm=TRUE)
# typical downloads-per-star ratio
mean(all_pkg_details_clean$downloads_per_star, na.rm=TRUE)
median(all_pkg_details_clean$downloads_per_star, na.rm=TRUE)
# stars histogram --------------------------------------------------------------
# log1p x-axis so the heavy right skew of star counts is readable
# NOTE(review): ..count.. is the legacy stat-variable syntax; newer ggplot2
# prefers after_stat(count) -- left as-is to avoid assuming the ggplot2 version
ggplot(data=all_pkg_details_clean, mapping=aes(stars)) +
geom_histogram(aes(fill=..count..), bins=60) +
scale_x_continuous(trans = "log1p", breaks=c(0,1,2,3,10,100,1000,3000)) +
labs(x = "Stars",
y = "Count",
fill = "Count",
caption = sprintf("Sources: api.github.com as of %s",
date_without_zeros(today_date))) +
ggtitle("Distribution of GitHub Stars on R Packages") +
theme_bw() +
theme(panel.grid.minor = element_blank(),
plot.caption=element_text(hjust = 0))
# stars to downloads scatterplot -----------------------------------------------
plot_dat <- all_pkg_details_clean
# label (and color red) only the outliers: >10M downloads or >1000 stars
idx_label <- which(with(plot_dat, downloads > 10000000 | stars > 1000))
# blank out the name column except for labeled points so geom_text_repel
# only draws labels for the outliers
plot_dat$name2 <- plot_dat$name
plot_dat$name <- ""
plot_dat$name[idx_label] <- plot_dat$name2[idx_label]
ggplot(data=plot_dat, aes(stars, downloads, label = name)) +
geom_point(color = ifelse(plot_dat$name == "", "grey50", "red")) +
geom_text_repel(box.padding = .5) +
scale_y_continuous(labels = comma) +
scale_x_continuous(labels = comma) +
labs(x = "GitHub Stars",
y = "CRAN Downloads",
caption = sprintf("Sources:\napi.github.com as of %s\ncranlogs as of %s - %s",
date_without_zeros(today_date),
date_without_zeros(from_date),
date_without_zeros(to_date))) +
ggtitle("Relationship Between CRAN Downloads and GitHub Stars") +
theme_bw() +
theme(plot.caption=element_text(hjust = 0))
# author stats -----------------------------------------------------------------
# summary by author: total downloads and stars across all of an author's
# packages, plus the combined downloads-per-star ratio
authors_detail <- all_pkg_details_clean %>%
group_by(author) %>%
summarize(downloads = sum(downloads, na.rm=TRUE),
stars = sum(stars, na.rm=TRUE)) %>%
# guard against Inf/NaN when an author has zero stars
mutate(downloads_per_star = downloads / stars,
downloads_per_star = ifelse(!is.finite(downloads_per_star), NA_real_, downloads_per_star)) %>%
arrange(desc(downloads))
# popular authors
# hand-curated list of well-known package authors with a few of their
# notable packages; names must match the cleaned `author` field exactly
pop_authors <- tibble(author = c('Hadley Wickham',
'Dirk Eddelbuettel',
'Yihui Xie',
'Winston Chang',
'Jennifer Bryan',
'JJ Allaire',
'Jeroen Ooms',
'Scott Chamberlain',
'Jim Hester',
'Kirill Müller'),
notable_packages = c('ggplot2, dplyr, httr',
'Rcpp, BH',
'knitr, rmarkdown, bookdown',
'R6, shiny',
'readxl, gapminder, googlesheets',
'rstudioapi, reticulate, tensorflow',
'jsonlite, curl, openssl',
'geojsonio, taxize',
'devtools, memoise, readr',
'tibble, DBI')
)
# join the curated author list to their aggregate stats; inner join drops
# curated authors whose name doesn't appear in the cleaned data
author_stats <- pop_authors %>%
inner_join(., authors_detail, by='author') %>%
select(author, notable_packages, downloads, stars, downloads_per_star) %>%
mutate(downloads_per_star = round(downloads_per_star, 1)) %>%
# "downloads_per_star" -> "Downloads Per Star" etc. for display
rename_all(. %>% gsub("_", " ", .) %>% str_to_title)
# single author
#all_pkg_details_clean %>% filter(author == 'Dirk Eddelbuettel') %>% arrange(desc(downloads))
# top 10 lists -----------------------------------------------------------------
# Top 10 Most Starred Packages
top_starred <- all_pkg_details_clean %>%
select(name, author, downloads, stars, downloads_per_star) %>%
arrange(desc(stars)) %>%
slice(1:10) %>%
mutate(downloads_per_star = round(downloads_per_star, 1)) %>%
rename_all(. %>% gsub("_", " ", .) %>% str_to_title)
# Top 10 Most Downloaded Packages with stars
top_downloaded <- all_pkg_details_clean %>%
filter(!is.na(stars)) %>%
select(name, author, downloads, stars, downloads_per_star) %>%
arrange(desc(downloads)) %>%
slice(1:10) %>%
mutate(downloads_per_star = round(downloads_per_star, 1)) %>%
rename_all(. %>% gsub("_", " ", .) %>% str_to_title)
# Bottom 10 Packages by Downloads per Star (frequently starred)
# downloads > 100 filters out brand-new packages with tiny denominators
frequently_starred <- all_pkg_details_clean %>%
filter(downloads > 100) %>%
select(name, author, downloads, stars, downloads_per_star) %>%
arrange(downloads_per_star) %>%
slice(1:10) %>%
mutate(downloads_per_star = round(downloads_per_star, 2)) %>%
rename_all(. %>% gsub("_", " ", .) %>% str_to_title)
# Top 10 Packages by Downloads per Star (infrequently starred)
# NOTE(review): unlike the other three tables this one does not round
# downloads_per_star -- possibly intentional, but worth confirming
infrequently_starred <- all_pkg_details_clean %>%
select(name, author, downloads, stars, downloads_per_star) %>%
arrange(desc(downloads_per_star)) %>%
slice(1:10) %>%
rename_all(. %>% gsub("_", " ", .) %>% str_to_title)
We can't make this file beautiful and searchable because it's too large.
name,description,published,author,url,github_ind,github_url,downloads,stars,last_commit,downloads_per_star
abbyyR,"Get text from images of text using Abbyy Cloud Optical Character
Recognition (OCR) API. Easily OCR images, barcodes, forms, documents with
machine readable zones, e.g. passports. Get the results in a variety of formats
including plain text and XML. To learn more about the Abbyy OCR API, see
<http://ocrsdk.com/>.",2019-06-25,Gaurav Sood,http://github.com/soodoku/abbyyR,TRUE,https://github.com/soodoku/abbyyr,38425,38,2019-06-30T01:53:13Z,1011.1842105263158
ABCoptim,"An implementation of Karaboga (2005) Artificial Bee Colony
Optimization algorithm <http://mf.erciyes.edu.tr/abc/pub/tr06_2005.pdf>.
This (working) version is a Work-in-progress, which is
why it has been implemented using pure R code. This was developed upon the basic
version programmed in C and distributed at the algorithm's official website.",2017-11-06,George Vega Yon,"http://github.com/gvegayon/ABCoptim, http://mf.erciyes.edu.tr/abc/",TRUE,https://github.com/gvegayon/abcoptim,32889,22,2020-05-31T18:09:24Z,1494.9545454545455
abdiv,"A collection of measures for measuring ecological diversity.
Ecological diversity comes in two flavors: alpha diversity measures the
diversity within a single site or sample, and beta diversity measures the
diversity across two sites or samples. This package overlaps considerably
with other R packages such as 'vegan', 'gUniFrac', 'betapart', and 'fossil'.
We also include a wide range of functions that are implemented in software
outside the R ecosystem, such as 'scipy', 'Mothur', and 'scikit-bio'. The
implementations here are designed to be basic and clear to the reader.",2020-01-20,Kyle Bittinger,https://github.com/kylebittinger/abdiv,TRUE,https://github.com/kylebittinger/abdiv,2831,0,2020-01-26T20:25:01Z,NA
abjutils,"The Brazilian Jurimetrics Association (ABJ in
Portuguese, see <http://www.abjur.org.br/en/> for more information) is
a non-profit organization which aims to investigate and promote the
use of statistics and probability in the study of Law and its
institutions. This package implements general purpose tools used by
ABJ, such as functions for sampling and basic manipulation of
Brazilian lawsuits identification number. It also implements functions
for text cleaning, such as accentuation removal.",2019-02-07,Caio Lente,https://github.com/abjur/abjutils,TRUE,https://github.com/abjur/abjutils,40051,19,2019-09-28T20:34:27Z,2107.9473684210525
ace2fastq,"The ACE file format is used in genomics to store contigs from sequencing machines.
This tools converts it into FASTQ format. Both formats contain the
sequence characters and their
corresponding quality information. Unlike the FASTQ file, the ace file stores the
quality values numerically.
The conversion algorithm uses the standard Sanger formula. The package facilitates insertion
into pipelines, and content inspection.",2019-06-20,Reinhard Simon,https://github.com/c5sire/ace2fastq,TRUE,https://github.com/c5sire/ace2fastq,5850,0,2020-02-24T10:48:08Z,NA
ActFrag,"Recent studies haven shown that, on top of total daily active/sedentary volumes, the time
accumulation strategies provide more sensitive information. This package provides functions to extract
commonly used fragmentation metrics to quantify such time accumulation strategies based on minute level
actigraphy-measured activity counts data. ",2020-02-11,Junrui Di,https://github.com/junruidi/ActFrag,TRUE,https://github.com/junruidi/actfrag,5735,0,2020-02-28T02:22:27Z,NA
activityCounts,"ActiLife software generates activity counts from data collected by Actigraph accelerometers <https://s3.amazonaws.com/actigraphcorp.com/wp-content/uploads/2017/11/26205758/ActiGraph-White-Paper_What-is-a-Count_.pdf>.
Actigraph is one of the most common research-grade accelerometers. There is considerable research
validating and developing algorithms for human activity using ActiLife counts. Unfortunately,
ActiLife counts are proprietary and difficult to implement if researchers use different accelerometer brands.
The code creates ActiLife counts from raw acceleration data for different accelerometer brands and it is developed
based on the study done by Brond and others (2017) <doi:10.1249/MSS.0000000000001344>.",2019-07-31,SeyedJavad KhataeiPour,"https://github.com/walkabillylab/activityCounts,
https://github.com/jbrond/ActigraphCounts",TRUE,https://github.com/walkabillylab/activitycounts,5069,2,2019-11-20T17:12:22Z,2534.5
adapr,"Tracks reading and writing within R scripts that are organized into
a directed acyclic graph. Contains an interactive shiny application adaprApp().
Uses git2r package, Git and file hashes to track version histories of input
and output. See package vignette for how to get started. V1.02 adds parallel
execution of project scripts and function map in vignette. Makes project
specification argument last in order. V2.0 adds project specific libraries, packrat option, and adaprSheet().",2017-11-30,Jon Gelfond,NA,TRUE,https://github.com/gelfondjal/adapr,19374,13,2020-01-28T22:56:18Z,1490.3076923076924
AdaptGauss,"Multimodal distributions can be modelled as a mixture of components. The model is derived using the Pareto Density Estimation (PDE) for an estimation of the pdf. PDE has been designed in particular to identify groups/classes in a dataset. Precise limits for the classes can be calculated using the theorem of Bayes. Verification of the model is possible by QQ plot, Chi-squared test and Kolmogorov-Smirnov test. The package is based on the publication of Ultsch, A., Thrun, M.C., Hansen-Goos, O., Lotsch, J. (2015) <DOI:10.3390/ijms161025897>.",2020-02-03,Michael Thrun,https://www.uni-marburg.de/fb12/datenbionik/software-en,TRUE,https://github.com/mthrun/adaptgauss,29418,0,2020-02-03T17:16:37Z,NA
adaptMT,"Implementation of adaptive p-value thresholding (AdaPT), including both a framework that allows the user to specify any
algorithm to learn local false discovery rate and a pool of convenient functions that implement specific
algorithms. See Lei, Lihua and Fithian, William (2016) <arXiv:1609.06035>.",2018-07-31,Lihua Lei,"https://arxiv.org/abs/1609.06035,
https://github.com/lihualei71/adaptMT",TRUE,https://github.com/lihualei71/adaptmt,9397,6,2020-02-29T23:25:30Z,1566.1666666666667
add2ggplot,Create 'ggplot2' themes and color palettes.,2020-02-07,Jiaxiang Li,https://github.com/JiaxiangBU/add2ggplot,TRUE,https://github.com/jiaxiangbu/add2ggplot,2492,2,2020-02-08T12:07:44Z,1246
addinslist,"Browse through a continuously updated list of existing RStudio
addins and install/uninstall their corresponding packages.",2019-08-30,Dean Attali,https://github.com/daattali/addinslist,TRUE,https://github.com/daattali/addinslist,42488,567,2020-05-11T17:07:38Z,74.93474426807761
addinsOutline,"'RStudio' allows to show and navigate for the outline of a
R Markdown file, but not for R Markdown projects with multiple
files. For this reason, I have developed several 'RStudio' addins capable
of show project outline. Each addin is specialized in showing projects
of different types: R Markdown project, 'bookdown' package project
and 'LaTeX' project. There is a configuration file that allows you
to customize additional searches.",2019-12-02,Pedro L. Luque-Calvo,https://github.com/calote/addinsOutline,TRUE,https://github.com/calote/addinsoutline,3302,0,2019-11-29T09:25:38Z,NA
ade4,"Tools for multivariate data analysis. Several methods are provided for the analysis (i.e., ordination) of one-table (e.g., principal component analysis, correspondence analysis), two-table (e.g., coinertia analysis, redundancy analysis), three-table (e.g., RLQ analysis) and K-table (e.g., STATIS, multiple coinertia analysis). The philosophy of the package is described in Dray and Dufour (2007) <doi:10.18637/jss.v022.i04>.",2020-02-13,Stéphane Dray,http://pbil.univ-lyon1.fr/ADE-4,TRUE,https://github.com/sdray/ade4,1469321,13,2020-04-23T15:23:48Z,113024.69230769231
ade4TkGUI,A Tcl/Tk GUI for some basic functions in the 'ade4' package.,2019-09-17,Jean Thioulouse,"http://pbil.univ-lyon1.fr/ade4TkGUI, Mailing list:
http://listes.univ-lyon1.fr/wws/info/adelist",TRUE,https://github.com/aursiber/ade4tkgui,78145,0,2019-09-13T09:21:07Z,NA
adegenet,"Toolset for the exploration of genetic and genomic
data. Adegenet provides formal (S4) classes for storing and handling
various genetic data, including genetic markers with varying ploidy
and hierarchical population structure ('genind' class), alleles counts
by populations ('genpop'), and genome-wide SNP data ('genlight'). It
also implements original multivariate methods (DAPC, sPCA), graphics,
statistical tests, simulation tools, distance and similarity measures,
and several spatial methods. A range of both empirical and simulated
datasets is also provided to illustrate various methods.",2020-05-10,Thibaut Jombart,https://github.com/thibautjombart/adegenet,TRUE,https://github.com/thibautjombart/adegenet,300883,101,2020-05-20T00:21:46Z,2979.039603960396
adegraphics,Graphical functionalities for the representation of multivariate data. It is a complete re-implementation of the functions available in the 'ade4' package.,2018-12-18,Stéphane Dray,"http://pbil.univ-lyon1.fr/ADE-4, Mailing list:
http://listes.univ-lyon1.fr/wws/info/adelist",TRUE,https://github.com/sdray/adegraphics,132944,6,2020-06-03T14:04:53Z,22157.333333333332
adept,"Designed for optimal use in performing fast,
accurate walking strides segmentation from high-density
data collected from a wearable accelerometer worn
during continuous walking activity.",2019-06-18,Marta Karas,https://github.com/martakarass/adept,TRUE,https://github.com/martakarass/adept,5402,3,2019-06-18T06:17:42Z,1800.6666666666667
AdhereR,"Computation of adherence to medications from Electronic Health care
Data and visualization of individual medication histories and adherence
patterns. The package implements a set of S3 classes and
functions consistent with current adherence guidelines and definitions.
It allows the computation of different measures of
adherence (as defined in the literature, but also several original ones),
their publication-quality plotting,
the estimation of event duration and time to initiation,
the interactive exploration of patient medication history and
the real-time estimation of adherence given various parameter settings.
It scales from very small datasets stored in flat CSV files to very large
databases and from single-thread processing on mid-range consumer
laptops to parallel processing on large heterogeneous computing clusters.
It exposes a standardized interface allowing it to be used from other
programming languages and platforms, such as Python.",2020-05-12,Dan Dediu,https://github.com/ddediu/AdhereR,TRUE,https://github.com/ddediu/adherer,19940,14,2019-06-14T13:16:52Z,1424.2857142857142
AdhereRViz,"Interactive graphical user interface (GUI) for the package
'AdhereR', allowing the user to access different data sources, to explore
the patterns of medication use therein, and the computation of various
measures of adherence. It is implemented using Shiny and HTML/CSS/JavaScript. ",2020-05-16,Dan Dediu,https://github.com/ddediu/AdhereR,TRUE,https://github.com/ddediu/adherer,389,14,2019-06-14T13:16:52Z,27.785714285714285
adjclust,"Implements a constrained version of hierarchical agglomerative
clustering, in which each observation is associated to a position, and
only adjacent clusters can be merged. Typical application fields in
bioinformatics include Genome-Wide Association Studies or Hi-C data
analysis, where the similarity between items is a decreasing function of
their genomic distance. Taking advantage of this feature, the implemented
algorithm is time and memory efficient. This algorithm is described in
Chapter 4 of Alia Dehman (2015)
<https://hal.archives-ouvertes.fr/tel-01288568v1>.",2019-12-10,Pierre Neuvial,https://github.com/pneuvial/adjclust,TRUE,https://github.com/pneuvial/adjclust,13790,13,2020-06-08T14:59:19Z,1060.7692307692307
adjustedcranlogs,Adjusts output of 'cranlogs' package to account for 'CRAN'-wide daily automated downloads and re-downloads caused by package updates.,2017-11-23,Tyler Morgan-Wall,https://github.com/tylermorganwall/adjustedcranlogs,TRUE,https://github.com/tylermorganwall/adjustedcranlogs,11927,24,2020-02-24T00:56:11Z,496.9583333333333
AdMit,"Provides functions to perform the fitting of an adaptive mixture
of Student-t distributions to a target density through its kernel function as described in
Ardia et al. (2009) <doi:10.18637/jss.v029.i03>. The
mixture approximation can then be used as the importance density in importance
sampling or as the candidate density in the Metropolis-Hastings algorithm to
obtain quantities of interest for the target density itself. ",2020-04-20,David Ardia,https://github.com/ArdiaD/AdMit,TRUE,https://github.com/ardiad/admit,41881,2,2020-04-19T20:50:45Z,20940.5
adoptr,"Optimize one or two-arm, two-stage designs for clinical trials with
respect to several pre-implemented objective criteria or implement custom
objectives.
Optimization under uncertainty and conditional (given stage-one outcome)
constraints are supported.
See Pilz M, Kunzmann K, Herrmann C, Rauch G, Kieser M. A variational
approach to optimal two-stage designs. Statistics in Medicine. 2019;38(21):4159–4171.
<doi:10.1002/sim.8291> for details.",2020-01-09,Kevin Kunzmann,https://github.com/kkmann/adoptr,TRUE,https://github.com/kkmann/adoptr,8701,3,2020-02-10T15:02:50Z,2900.3333333333335
adpss,"Provides the functions for planning and conducting a
clinical trial with adaptive sample size determination. Maximal statistical
efficiency will be exploited even when dramatic or multiple adaptations
are made. Such a trial consists of adaptive determination of sample size
at an interim analysis and implementation of frequentist statistical test at the
interim and final analysis with a prefixed significance level. The required
assumptions for the stage-wise test statistics are independent and stationary
increments and normality. Predetermination of adaptation rule is not required.",2018-09-20,Kosuke Kashiwabara,https://github.com/ca4wa/R-adpss,TRUE,https://github.com/ca4wa/r-adpss,8842,0,2020-01-07T02:52:34Z,NA
afex,"Convenience functions for analyzing factorial experiments using ANOVA or
mixed models. aov_ez(), aov_car(), and aov_4() allow specification of
between, within (i.e., repeated-measures), or mixed (i.e., split-plot)
ANOVAs for data in long format (i.e., one observation per row),
automatically aggregating multiple observations per individual and cell
of the design. mixed() fits mixed models using lme4::lmer() and computes
p-values for all fixed effects using either Kenward-Roger or Satterthwaite
approximation for degrees of freedom (LMM only), parametric bootstrap
(LMMs and GLMMs), or likelihood ratio tests (LMMs and GLMMs).
afex_plot() provides a high-level interface for interaction or one-way
plots using ggplot2, combining raw data and model estimates. afex uses
type 3 sums of squares as default (imitating commercial statistical software).",2020-03-28,Henrik Singmann,"http://afex.singmann.science/, https://github.com/singmann/afex",TRUE,https://github.com/singmann/afex,224665,80,2020-06-09T20:32:59Z,2808.3125
afpt,"Allows estimation and modelling of flight costs in animal (vertebrate) flight,
implementing the aerodynamic power model described in Klein Heerenbrink et al.
(2015) <doi:10.1098/rspa.2014.0952>. Taking inspiration from the program
'Flight', developed by Colin Pennycuick (Pennycuick (2008) ""Modelling the flying
bird"". Amsterdam: Elsevier. ISBN 0-19-857721-4), flight performance is estimated
based on basic morphological measurements such as body mass, wingspan and wing
area. 'afpt' can be used to make predictions on how animals should adjust their
flight behaviour and wingbeat kinematics to varying flight conditions.",2020-03-19,Marco KleinHeerenbrink,https://github.com/MarcoKlH/afpt-r/,TRUE,https://github.com/marcoklh/afpt-r,12870,1,2020-03-18T15:32:16Z,12870
aftgee,"A collection of methods for both the rank-based estimates and least-square estimates
to the Accelerated Failure Time (AFT) model.
For rank-based estimation, it provides approaches that include the computationally
efficient Gehan's weight and the general's weight such as the logrank weight.
Details of the rank-based estimation can be found in
Chiou et al. (2014) <doi:10.1007/s11222-013-9388-2> and
Chiou et al. (2015) <doi:10.1002/sim.6415>.
For the least-square estimation, the estimating equation is solved with
generalized estimating equations (GEE).
Moreover, in multivariate cases, the dependence working correlation structure
can be specified in GEE's setting.
Details on the least-squares estimation can be found in
Chiou et al. (2014) <doi:10.1007/s10985-014-9292-x>.",2018-07-24,Sy Han Chiou,http://github.com/stc04003/aftgee,TRUE,https://github.com/stc04003/aftgee,32629,0,2019-12-19T16:33:21Z,NA
AGD,"Tools for the analysis of growth data: to extract an
LMS table from a gamlss object, to calculate the standard
deviation scores and its inverse, and to superpose two wormplots
from different models. The package contains a some varieties of
reference tables, especially for The Netherlands.",2018-05-29,Stef van Buuren,https://github.com/stefvanbuuren/AGD,TRUE,https://github.com/stefvanbuuren/agd,104241,1,2020-05-05T19:48:54Z,104241
AGHmatrix,"Computation of A (pedigree), G (genomic-base), and H (A corrected
by G) relationship matrices for diploid and autopolyploid species. Several methods
are implemented considering additive and non-additive models.",2019-07-30,Rodrigo Amadeu,http://github.com/prmunoz/AGHmatrix,TRUE,https://github.com/prmunoz/aghmatrix,11668,5,2020-01-14T14:20:10Z,2333.6
agop,"Tools supporting multi-criteria and group decision making,
including variable number of criteria, by means of
aggregation operators, spread measures,
fuzzy logic connectives, fusion functions,
and preordered sets. Possible applications include,
but are not limited to, quality management, scientometrics,
software engineering, etc.",2020-01-08,Marek Gagolewski,http://www.gagolewski.com/software/,TRUE,https://github.com/gagolews/agop,25078,3,2020-01-10T05:51:35Z,8359.333333333334
AGread,"Standardize the process of bringing various modes of output files
into R. Additionally, processes are provided to read and minimally pre-
process raw data from primary accelerometer and inertial measurement unit files,
as well as binary .gt3x files. ActiGraph monitors are used to estimate physical
activity outcomes via body-worn sensors that measure (e.g.) acceleration or
rotational velocity.",2020-02-26,Paul R. Hibbing,https://github.com/paulhibbing/AGread,TRUE,https://github.com/paulhibbing/agread,13402,7,2020-06-07T02:37:18Z,1914.5714285714287
agridat,"Datasets from books, papers, and websites related to agriculture.
Example graphics and analyses are included. Data come from small-plot trials,
multi-environment trials, uniformity trials, yield monitors, and more.",2018-07-06,Kevin Wright,https://github.com/kwstat/agridat,TRUE,https://github.com/kwstat/agridat,54968,64,2020-01-20T15:28:35Z,858.875
agriwater,"Spatial modeling of energy balance and actual
evapotranspiration using satellite images and meteorological data.
Options of satellite are: Landsat-8 (with and without thermal bands),
Sentinel-2 and MODIS. Respectively spatial resolutions are 30, 100,
10 and 250 meters. User can use data from a single meteorological
station or a grid of meteorological stations (using any spatial
interpolation method). Teixeira (2010) <doi:10.3390/rs0251287>.
Teixeira et al. (2015) <doi:10.3390/rs71114597>.
Silva, Manzione, and Albuquerque Filho (2018) <doi:10.3390/horticulturae4040044>.",2019-01-30,Cesar de Oliveira Ferreira Silva,NA,TRUE,https://github.com/cesarofs/agriwater,7271,3,2020-03-03T20:44:34Z,2423.6666666666665
AHMbook,"Provides functions and data sets to accompany the two volume publication ""Applied Hierarchical Modeling in Ecology: Analysis of distribution, abundance and species richness in R and BUGS"" by Marc Kéry and Andy Royle: volume 1 (2016, ISBN: 978-0-12-801378-6) and volume 2 (2020, ISBN: 978-0-12-809585-0), <https://www.mbr-pwrc.usgs.gov/pubanalysis/keryroylebook>.",2020-06-09,Mike Meredith,"https://www.mbr-pwrc.usgs.gov/pubanalysis/keryroylebook/,
https://sites.google.com/site/appliedhierarchicalmodeling/home",TRUE,https://github.com/mikemeredith/ahmbook,18100,9,2020-06-08T13:14:36Z,2011.111111111111
aimsir17,"Named after the Irish name for weather, this package contains
tidied data from the Irish Meteorological Service's hourly observations for 2017.
In all, the data sets include observations from 25 weather stations, and also
latitude and longitude coordinates for each weather station.",2019-12-02,Jim Duggan,"https://github.com/JimDuggan/aimsir17, https://www.met.ie",TRUE,https://github.com/jimduggan/aimsir17,3065,0,2019-12-04T14:14:22Z,NA
aire.zmvm,"Tools for downloading hourly averages, daily maximums and minimums from each of the
pollution, wind, and temperature measuring stations or geographic zones in the Mexico City
metro area. The package also includes the locations of each of the stations and zones. See
<http://aire.cdmx.gob.mx/> for more information.",2019-03-30,Diego Valle-Jones,"https://hoyodesmog.diegovalle.net/aire.zmvm/,
https://github.com/diegovalle/aire.zmvm",TRUE,https://github.com/diegovalle/aire.zmvm,16966,9,2020-05-05T02:50:57Z,1885.111111111111
aiRly,Get information about air quality using 'Airly' <https://airly.eu/> API through R.,2020-03-19,Piotr Janus,https://github.com/piotrekjanus/aiRly,TRUE,https://github.com/piotrekjanus/airly,1544,0,2020-03-19T22:31:01Z,NA
airportr,"Retrieves open source airport data and provides tools to look up information, translate names into codes and vice-verse, as well as some basic calculation functions for measuring distances. Data is licensed under the Open Database License. ",2019-10-09,Dmitry Shkolnik,https://github.com/dshkol/airportr,TRUE,https://github.com/dshkol/airportr,9904,4,2020-05-24T06:22:26Z,2476
airqualityES,"These dataset contains daily quality air measurements in
Spain over a period of 18 years (from 2001 to 2018). The measurements refer to
several pollutants. These data are openly published by the Government of Spain.
The datasets were originally spread over a number of files and formats. Here,
the same information is contained in simple dataframe for convenience of
researches, journalists or general public. See the Spanish Government website
<http://www.miteco.gob.es/> for more information.",2020-02-29,Jose V. Die,https://github.com/jdieramon/airqualityES,TRUE,https://github.com/jdieramon/airqualityes,2098,0,2020-03-03T18:02:08Z,NA
airr,"Schema definitions and read, write and validation tools for data
formatted in accordance with the AIRR Data Representation schemas defined
by the AIRR Community <http://docs.airr-community.org>.",2020-05-27,Jason Vander Heiden,http://docs.airr-community.org,TRUE,https://github.com/airr-community/airr-standards,11243,18,2020-06-01T21:21:28Z,624.6111111111111
akc,"A tidy framework for automatic knowledge classification and visualization. Currently, the core functionality of the framework is mainly supported by modularity-based clustering (community detection) in keyword co-occurrence network, and focuses on co-word analysis of bibliometric research. However, the designed functions in 'akc' are general, and could be extended to solve other tasks in text mining as well. ",2020-01-30,Tian-Yuan Huang,https://github.com/hope-data-science/akc,TRUE,https://github.com/hope-data-science/akc,3382,8,2020-02-17T01:01:31Z,422.75
ALA4R,"The Atlas of Living Australia (ALA) provides tools to enable users
of biodiversity information to find, access, combine and visualise data on
Australian plants and animals; these have been made available from
<https://ala.org.au/>. ALA4R provides a subset of the tools to be
directly used within R. It enables the R community to directly access data
and resources hosted by the ALA.",2020-04-04,Peggy Newman,https://github.com/AtlasOfLivingAustralia/ALA4R,TRUE,https://github.com/atlasoflivingaustralia/ala4r,21946,32,2020-04-03T05:17:58Z,685.8125
albopictus,Implements discrete time deterministic and stochastic age-structured population dynamics models described in Erguler and others (2016) <doi:10.1371/journal.pone.0149282> and Erguler and others (2017) <doi:10.1371/journal.pone.0174293>.,2018-11-29,Kamil Erguler,https://github.com/kerguler/albopictusR,TRUE,https://github.com/kerguler/albopictusr,15595,0,2020-03-20T15:34:46Z,NA
alfr,"Allows you to connect to an 'Alfresco' content management repository and interact
with its contents using simple and intuitive functions. You will be able to establish a connection session to the 'Alfresco' repository,
read and upload content and manage folder hierarchies. For more details on the 'Alfresco' content management repository
see <https://www.alfresco.com/ecm-software/document-management>.",2019-07-19,Roy Wetherall,"https://github.com/rwetherall/alfr,
https://rwetherall.github.io/alfr/",TRUE,https://github.com/rwetherall/alfr,5086,0,2019-07-19T02:21:15Z,NA
AlgDesign,"Algorithmic experimental designs. Calculates exact and
approximate theory experimental designs for D,A, and I
criteria. Very large designs may be created. Experimental
designs may be blocked or blocked designs created from a
candidate list, using several criteria. The blocking can be
done when whole and within plot factors interact.",2019-11-29,Bob Wheeler,https://github.com/jvbraun/AlgDesign,TRUE,https://github.com/jvbraun/algdesign,815651,6,2019-11-29T02:10:41Z,135941.83333333334
algorithmia,"The company, Algorithmia, houses the largest marketplace of online
algorithms. This package essentially holds a bunch of REST wrappers that
make it very easy to call algorithms in the Algorithmia platform and access
files and directories in the Algorithmia data API. To learn more about the
services they offer and the algorithms in the platform visit
<http://algorithmia.com>. More information for developers can be found at
<http://developers.algorithmia.com>.",2019-08-01,James Sutton,NA,TRUE,https://github.com/algorithmiaio/algorithmia-r,17860,10,2019-08-02T19:03:08Z,1786
aliases2entrez,"Queries multiple resources authors HGNC (2019) <https://www.genenames.org>, authors limma (2015) <doi:10.1093/nar/gkv007>
to find the correspondence between evolving nomenclature of human gene symbols, aliases, previous symbols or synonyms with
stable, curated gene entrezID from NCBI database. This allows fast, accurate and up-to-date correspondence
between human gene expression datasets from various date and platform (e.g: gene symbol: BRCA1 - ID: 672).",2020-05-19,Raphael Bonnet,NA,TRUE,https://github.com/peyronlab/aliases2entrez,3859,1,2019-10-08T08:14:36Z,3859
almanac,"Provides tools for defining recurrence rules and
recurrence bundles. Recurrence rules are a programmatic way to define
a recurring event, like the first Monday of December. Multiple
recurrence rules can be combined into larger recurrence bundles.
Together, these provide a system for adjusting and generating
sequences of dates while simultaneously skipping over dates in a
recurrence bundle's event set.",2020-05-28,Davis Vaughan,https://github.com/DavisVaughan/almanac,TRUE,https://github.com/davisvaughan/almanac,138,52,2020-05-28T17:39:14Z,2.6538461538461537
alookr,"A collection of tools that support data splitting, predictive modeling, and model evaluation.
A typical function is to split a dataset into a training dataset and a test dataset.
Then compare the data distribution of the two datasets.
Another feature is to support the development of predictive models and to compare the performance of several predictive models,
helping to select the best model. ",2020-06-07,Choonghyun Ryu,NA,TRUE,https://github.com/choonghyunryu/alookr,1660,6,2020-06-07T15:02:02Z,276.6666666666667
alpaca,"Provides a routine to concentrate out factors with many levels during the
optimization of the log-likelihood function of the corresponding generalized linear model (glm).
The package is based on the algorithm proposed by Stammann (2018) <arXiv:1707.01815> and is
restricted to glm's that are based on maximum likelihood estimation and non-linear. It also offers
an efficient algorithm to recover estimates of the fixed effects in a post-estimation routine and
includes robust and multi-way clustered standard errors. Further the package provides analytical
bias corrections for binary choice models (logit and probit) derived by Fernandez-Val
and Weidner (2016) <doi:10.1016/j.jeconom.2015.12.014> and Hinz, Stammann, and Wanner (2019).",2020-01-12,Amrei Stammann,https://github.com/amrei-stammann/alpaca,TRUE,https://github.com/amrei-stammann/alpaca,27931,23,2020-01-19T12:39:26Z,1214.391304347826
alphavantager,"
Alpha Vantage has free historical financial information.
All you need to do is get a free API key at <https://www.alphavantage.co>.
Then you can use the R interface to retrieve free equity information.
Refer to the Alpha Vantage website for more information.",2020-03-01,Matt Dancho,https://github.com/business-science/alphavantager,TRUE,https://github.com/business-science/alphavantager,84585,44,2020-03-01T14:14:43Z,1922.3863636363637
altair,"Interface to 'Altair' <https://altair-viz.github.io>, which itself
is a 'Python' interface to 'Vega-Lite' <https://vega.github.io/vega-lite>.
This package uses the 'Reticulate' framework
<https://rstudio.github.io/reticulate> to manage the interface between R
and 'Python'.",2020-01-23,Ian Lyttle,https://github.com/vegawidget/altair,TRUE,https://github.com/vegawidget/altair,5680,68,2020-01-23T20:38:11Z,83.52941176470588
alterryx,"A tool to access each of the 'Alteryx' Gallery 'API' endpoints.
Users can queue jobs, poll job status, and retrieve application output as
a data frame. You will need an 'Alteryx' Server license and have 'Alteryx'
Gallery running to utilize this package. The 'API' is accessed through the
'URL' that you setup for the server running 'Alteryx' Gallery and more
information on the endpoints can be found at
<https://gallery.alteryx.com/api-docs/>.",2019-06-06,Michael Treadwell,"https://github.com/mtreadwell/alterryx,
https://gallery.alteryx.com/api-docs/",TRUE,https://github.com/mtreadwell/alterryx,17370,3,2019-09-03T17:15:38Z,5790
altR2,"Provides alternatives to the normal adjusted R-squared estimator for the estimation of the multiple squared correlation in regression models,
as fitted by the lm() function. The alternative estimators are described in Karch (2016) <DOI:10.31234/osf.io/v8dz5>.",2019-09-23,Julian Karch,https://github.com/karchjd/altR2,TRUE,https://github.com/karchjd/altr2,4061,0,2019-09-26T10:24:21Z,NA
ambient,"Generation of natural looking noise has many application within
simulation, procedural generation, and art, to name a few. The 'ambient'
package provides an interface to the 'FastNoise' C++ library and allows for
efficient generation of perlin, simplex, worley, cubic, value, and white
noise with optional pertubation in either 2, 3, or 4 (in case of simplex and
white noise) dimensions.",2020-03-21,Thomas Lin Pedersen,"https://ambient.data-imaginist.com,
https://github.com/thomasp85/ambient",TRUE,https://github.com/thomasp85/ambient,10102,59,2020-03-19T20:43:12Z,171.22033898305085
ameco,Annual macro-economic database provided by the European Commission.,2018-05-04,Eric Persson,http://github.com/expersso/ameco,TRUE,https://github.com/expersso/ameco,28191,6,2019-09-10T08:50:00Z,4698.5
amerika,"A color palette generator inspired by American politics, with colors ranging from blue on the
left to gray in the middle and red on the right. A variety of palettes allow for a range of applications
from brief discrete scales (e.g., three colors for Democrats, Independents, and Republicans) to
continuous interpolated arrays including dozens of shades graded from blue (left) to red (right). This
package greatly benefitted from building on the source code (with permission) from Ram and Wickham (2015).",2019-05-03,Philip Waggoner,NA,TRUE,https://github.com/pdwaggoner/amerika,7873,0,2019-11-14T20:08:36Z,NA
AmpGram,"Predicts antimicrobial peptides using random forests trained on the
n-gram encoded peptides. The implemented algorithm can be accessed from
both the command line and shiny-based GUI. The AmpGram model is too large
for CRAN and it has to be downloaded separately from the repository:
<https://github.com/michbur/AmpGramModel>.",2020-05-31,Michal Burdukiewicz,https://github.com/michbur/AmpGram,TRUE,https://github.com/michbur/ampgram,10,1,2020-05-22T09:21:48Z,10
ampir,"A toolkit to predict antimicrobial peptides from protein sequences on a genome-wide scale.
It incorporates two support vector machine models (""precursor"" and ""mature"") trained on publicly available antimicrobial peptide data using calculated
physico-chemical and compositional sequence properties described in Meher et al. (2017) <doi:10.1038/srep42362>.
In order to support genome-wide analyses, these models are designed to accept any type of protein as input
and calculation of compositional properties has been optimised for high-throughput use. For details see Fingerhut et al. 2020 <doi:10.1101/2020.05.07.082412>.",2020-05-11,Legana Fingerhut,https://github.com/Legana/ampir,TRUE,https://github.com/legana/ampir,3243,5,2020-05-11T12:00:38Z,648.6
amt,"Manage and analyze animal movement data. The functionality of 'amt' includes methods to calculate track statistics (e.g. step lengths, speed, or turning angles), prepare data for fitting habitat selection analyses (resource selection functions and step-selection functions <doi:10.1890/04-0953> and integrated step-selection functions <doi:10.1111/2041-210X.12528>), and simulation of space-use from fitted step-selection functions <doi:10.1002/ecs2.1771>.",2020-04-28,Johannes Signer,https://github.com/jmsigner/amt,TRUE,https://github.com/jmsigner/amt,19886,9,2020-05-22T11:29:08Z,2209.5555555555557
AmyloGram,"Predicts amyloid proteins using random forests trained on the
n-gram encoded peptides. The implemented algorithm can be accessed from
both the command line and shiny-based GUI.",2017-10-11,Michal Burdukiewicz,https://github.com/michbur/AmyloGram,TRUE,https://github.com/michbur/amylogram,15714,7,2020-05-21T19:41:56Z,2244.8571428571427
AnaCoDa,"Is a collection of models to analyze genome scale codon
data using a Bayesian framework. Provides visualization
routines and checkpointing for model fittings. Currently
published models to analyze gene data for selection on codon
usage based on Ribosome Overhead Cost (ROC) are: ROC (Gilchrist
et al. (2015) <doi:10.1093/gbe/evv087>), and ROC with phi
(Wallace & Drummond (2013) <doi:10.1093/molbev/mst051>). In
addition 'AnaCoDa' contains three currently unpublished models.
The FONSE (First order approximation On NonSense Error) model
analyzes gene data for selection on codon usage against of
nonsense error rates. The PA (PAusing time) and PANSE (PAusing
time + NonSense Error) models use ribosome footprinting data to
analyze estimate ribosome pausing times with and without
nonsense error rate from ribosome footprinting data.",2019-05-11,Cedric Landerer,https://github.com/clandere/AnaCoDa,TRUE,https://github.com/clandere/anacoda,13327,1,2019-06-12T11:13:15Z,13327
analogsea,"Provides a set of functions for interacting with the 'Digital
Ocean' API at <https://developers.digitalocean.com/documentation/v2>, including
creating images, destroying them, rebooting, getting details on regions, and
available images.",2020-01-30,Scott Chamberlain,https://github.com/sckott/analogsea,TRUE,https://github.com/sckott/analogsea,68685,108,2020-04-15T00:43:56Z,635.9722222222222
analogue,"Fits Modern Analogue Technique and Weighted Averaging transfer
function models for prediction of environmental data from species
data, and related methods used in palaeoecology.",2020-02-06,Gavin L. Simpson,https://github.com/gavinsimpson/analogue,TRUE,https://github.com/gavinsimpson/analogue,58809,11,2020-02-04T04:26:31Z,5346.272727272727
analogueExtra,"Provides additional functionality for the analogue package
that is not required by all users of the main package.",2016-04-10,Gavin L. Simpson,https://github.com/gavinsimpson/analogueExtra,TRUE,https://github.com/gavinsimpson/analogueextra,20394,1,2019-08-26T23:20:36Z,20394
analysisPipelines,"Enables data scientists to compose pipelines of analysis which consist of data manipulation, exploratory analysis & reporting, as well as modeling steps. Data scientists can use tools of their choice through an R interface, and compose interoperable pipelines between R, Spark, and Python.
Credits to Mu Sigma for supporting the development of the package.
Note - To enable pipelines involving Spark tasks, the package uses the 'SparkR' package.
The SparkR package needs to be installed to use Spark as an engine within a pipeline. SparkR is distributed natively with Apache Spark and is not distributed on CRAN. The SparkR version needs to directly map to the Spark version (hence the native distribution), and care needs to be taken to ensure that this is configured properly.
To install SparkR from Github, run the following command if you know the Spark version: 'devtools::install_github('apache/spark@v2.x.x', subdir='R/pkg')'.
The other option is to install SparkR by running the following terminal commands if Spark has already been installed: '$ export SPARK_HOME=/path/to/spark/directory && cd $SPARK_HOME/R/lib/SparkR/ && R -e ""devtools::install('.')""'.",2020-05-05,Mu Sigma,https://github.com/Mu-Sigma/analysis-pipelines,TRUE,https://github.com/mu-sigma/analysis-pipelines,8539,18,2020-05-05T14:06:35Z,474.3888888888889
Andromeda,"Storing very large data objects on a local drive, while still making it possible to manipulate the data in an efficient manner.",2020-06-03,Martijn Schuemie,"https://ohdsi.github.io/Andromeda/,
https://github.com/OHDSI/Andromeda",TRUE,https://github.com/ohdsi/andromeda,588,0,2020-06-03T05:11:30Z,NA
anglr,"Gives direct access to generic 3D tools and provides a full suite
of mesh-creation and 3D plotting functions. By extending the 'rgl' package
conversion and visualization functions for the 'mesh3d' class a wide variety of
complex spatial data can be brought into 3D scenes. These tools allow for
spatial raster, polygons, and lines that are common in 'GIS' contexts to be
converted into mesh forms with high flexibility and the ability to integrate
disparate data types. Vector and raster data can be seamlessly combined as
meshes, and surfaces can be set to have material properties based on data
values or with image textures. Textures and other data combinations use
projection transformations to map between coordinate systems, and objects can
be easily visualized in an interactive scene at any stage. This package relies
on the 'RTriangle' package for high-quality triangular meshing which is
licensed restrictively under 'CC BY-NC-SA 4.0'. ",2020-05-13,Michael D. Sumner,https://github.com/hypertidy/anglr,TRUE,https://github.com/hypertidy/anglr,1113,45,2020-05-21T10:39:40Z,24.733333333333334
angstroms,"Helper functions for working with Regional Ocean Modeling System 'ROMS' output. See
<https://www.myroms.org/> for more information about 'ROMS'. ",2017-05-01,Michael D. Sumner,https://github.com/mdsumner/angstroms,TRUE,https://github.com/mdsumner/angstroms,12475,2,2020-04-12T14:20:27Z,6237.5
animation,"Provides functions for animations in statistics, covering topics
in probability theory, mathematical statistics, multivariate statistics,
non-parametric statistics, sampling survey, linear models, time series,
computational statistics, data mining and machine learning. These functions
may be helpful in teaching statistics and data analysis. Also provided in this
package are a series of functions to save animations to various formats, e.g.
Flash, 'GIF', HTML pages, 'PDF' and videos. 'PDF' animations can be inserted
into 'Sweave' / 'knitr' easily.",2018-12-11,Yihui Xie,https://yihui.name/animation,TRUE,https://github.com/yihui/animation,632661,162,2020-05-20T04:36:17Z,3905.314814814815
aniview,Animate Shiny and R Markdown content when it comes into view using 'animate-css' effects thanks to 'jQuery AniView'.,2020-03-31,Félix Luginbuhl,"https://felixluginbuhl.com/aniview,
https://github.com/lgnbhl/aniview",TRUE,https://github.com/lgnbhl/aniview,1240,1,2020-04-11T14:36:36Z,1240
ANN2,"Training of neural networks for classification and regression tasks
using mini-batch gradient descent. Special features include a function for
training autoencoders, which can be used to detect anomalies, and some
related plotting functions. Multiple activation functions are supported,
including tanh, relu, step and ramp. For the use of the step and ramp
activation functions in detecting anomalies using autoencoders, see
Hawkins et al. (2002) <doi:10.1007/3-540-46145-0_17>. Furthermore,
several loss functions are supported, including robust ones such as Huber
and pseudo-Huber loss, as well as L1 and L2 regularization. The possible
options for optimization algorithms are RMSprop, Adam and SGD with momentum.
The package contains a vectorized C++ implementation that facilitates
fast training through mini-batch learning.",2020-03-14,Bart Lammers,https://github.com/bflammers/ANN2,TRUE,https://github.com/bflammers/ann2,29442,6,2020-03-14T21:49:29Z,4907
AnnotationBustR,Extraction of subsequences into FASTA files from GenBank annotations where gene names may vary among accessions.,2018-04-09,Samuel R. Borstein,"https://github.com/sborstein/AnnotationBustR,
https://www.ncbi.nlm.nih.gov/nuccore,
https://en.wikipedia.org/wiki/FASTA_format",TRUE,https://github.com/sborstein/annotationbustr,15667,0,2019-11-12T18:51:33Z,NA
anomalize,"
The 'anomalize' package enables a ""tidy"" workflow for detecting anomalies in data.
The main functions are time_decompose(), anomalize(), and time_recompose().
When combined, it's quite simple to decompose time series, detect anomalies,
and create bands separating the ""normal"" data from the anomalous data at scale (i.e. for multiple time series).
Time series decomposition is used to remove trend and seasonal components via the time_decompose() function
and methods include seasonal decomposition of time series by Loess (""stl"") and
seasonal decomposition by piecewise medians (""twitter""). The anomalize() function implements
two methods for anomaly detection of residuals including using an inner quartile range (""iqr"")
and generalized extreme studentized deviation (""gesd""). These methods are based on
those used in the 'forecast' package and the Twitter 'AnomalyDetection' package.
Refer to the associated functions for specific references for these methods. ",2019-09-21,Matt Dancho,https://github.com/business-science/anomalize,TRUE,https://github.com/business-science/anomalize,58802,222,2020-04-24T20:06:48Z,264.8738738738739
antaresProcessing,"
Process results generated by 'Antares', a powerful open source software developed by
RTE (Réseau de Transport d’Électricité) to simulate and study electric power systems (more information about
'Antares' here: <https://github.com/AntaresSimulatorTeam/Antares_Simulator>).
This package provides functions to create new columns like net load, load factors, upward and
downward margins or to compute aggregated statistics like economic surpluses
of consumers, producers and sectors.",2020-02-26,Veronique Bachelier,https://github.com/rte-antares-rpackage/antaresProcessing,TRUE,https://github.com/rte-antares-rpackage/antaresprocessing,29552,8,2020-02-28T14:38:34Z,3694
antaresRead,"Import, manipulate and explore results generated by 'Antares', a
powerful open source software developed by RTE (Réseau de Transport d’Électricité) to simulate and study electric power systems
(more information about 'Antares' here : <https://antares-simulator.org/>).",2020-03-18,Veronique Bachelier,https://github.com/rte-antares-rpackage/antaresRead,TRUE,https://github.com/rte-antares-rpackage/antaresread,38639,9,2020-03-04T08:58:03Z,4293.222222222223
antaresViz,"Visualize results generated by Antares, a powerful open source software
developed by RTE to simulate and study electric power systems
(more information about Antares here: <https://github.com/AntaresSimulatorTeam/Antares_Simulator>).
This package provides functions that create interactive charts to help
Antares users visually explore the results of their simulations.",2020-05-26,Veronique Bachelier,https://github.com/rte-antares-rpackage/antaresViz,TRUE,https://github.com/rte-antares-rpackage/antaresviz,21816,14,2020-05-26T08:56:49Z,1558.2857142857142
anthro,"Provides WHO Child Growth Standards (z-scores) with
confidence intervals and standard errors around the
prevalence estimates, taking into account complex sample designs.
More information on the methods is
available online:
<http://www.who.int/childgrowth/standards/en/>.",2020-05-21,Dirk Schumacher,https://github.com/dirkschumacher/anthro,TRUE,https://github.com/dirkschumacher/anthro,9660,11,2020-05-21T11:01:43Z,878.1818181818181
AntWeb,"A complete programmatic interface to the AntWeb database from the
California Academy of Sciences.",2014-08-14,Karthik Ram,https://github.com/ropensci/AntWeb,TRUE,https://github.com/ropensci/antweb,26568,8,2019-12-09T12:00:33Z,3321
anyflights,"Supplies a set of functions to query air travel data for user-
specified years and airports. Datasets include on-time flights, airlines,
airports, planes, and weather.",2020-04-27,Simon P. Couch,http://github.com/simonpcouch/anyflights,TRUE,https://github.com/simonpcouch/anyflights,3021,4,2020-05-01T18:11:34Z,755.25
anytime,"Convert input in any one of character, integer, numeric, factor,
or ordered type into 'POSIXct' (or 'Date') objects, using one of a number of
predefined formats, and relying on Boost facilities for date and time parsing.",2020-01-20,Dirk Eddelbuettel,http://dirk.eddelbuettel.com/code/anytime.html,TRUE,https://github.com/eddelbuettel/anytime,445286,124,2020-04-14T21:29:59Z,3591.016129032258
aof,"A breakpoint-based method to detect ontogenetic shifts in
univariate time-activity budget series of central-place foraging insects.
The method finds a single breakpoint according to the likelihood function.
The method was developed with honey bees in order to detect the Age at
Onset of Foraging (AOF), but can be used for the detection of other
ontogenetic shifts in other central-place foraging insects. ",2020-03-09,Fabrice Requier,https://github.com/frareb/aof/,TRUE,https://github.com/frareb/aof,1643,1,2020-05-07T09:38:35Z,1643
aos,"Trigger animation effects on scroll on any HTML element
of 'shiny' and 'rmarkdown', such as any text or plot, thanks to
the 'AOS' Animate On Scroll jQuery library.",2020-04-29,Félix Luginbuhl,"https://felixluginbuhl.com/aos, https://github.com/lgnbhl/aos",TRUE,https://github.com/lgnbhl/aos,708,0,2020-04-25T16:12:17Z,NA
apa,"Formatter functions in the 'apa' package take the return value of a
statistical test function, e.g. a call to chisq.test() and return a string
formatted according to the guidelines of the APA (American Psychological
Association).",2020-04-21,Daniel Gromer,https://github.com/dgromer/apa,TRUE,https://github.com/dgromer/apa,25777,23,2020-04-21T12:43:47Z,1120.7391304347825
apaTables,"A common task faced by researchers is the creation of APA style
(i.e., American Psychological Association style) tables from statistical
output. In R a large number of function calls are often needed to obtain all of
the desired information for a single APA style table. As well, the process of
manually creating APA style tables in a word processor is prone to transcription
errors. This package creates Word files (.doc files) containing APA style tables
for several types of analyses. Using this package minimizes transcription errors
and reduces the number commands needed by the user.",2018-08-29,David Stanley,https://github.com/dstanley4/apaTables,TRUE,https://github.com/dstanley4/apatables,83807,32,2020-04-27T14:44:46Z,2618.96875
apcf,"The adapted pair correlation function transfers the concept of the
pair correlation function from point patterns to patterns of objects of
finite size and irregular shape (e.g. lakes within a country). This is a
reimplementation of the method suggested by Nuske et al. (2009)
<doi:10.1016/j.foreco.2009.09.050> using the libraries 'GEOS' and 'GDAL'
directly instead of through 'PostGIS'. ",2020-02-04,Robert Nuske,https://github.com/rnuske/apcf,TRUE,https://github.com/rnuske/apcf,8446,6,2020-04-14T07:22:41Z,1407.6666666666667
apex,"Toolkit for the analysis of multiple gene data (Jombart et al. 2017) <doi:10.1111/1755-0998.12567>.
Apex implements the new S4 classes 'multidna', 'multiphyDat' and associated methods to handle aligned DNA sequences from multiple genes.",2020-04-11,Klaus Schliep,https://github.com/thibautjombart/apex,TRUE,https://github.com/thibautjombart/apex,36653,4,2020-05-06T05:52:57Z,9163.25
apexcharter,"Provides an 'htmlwidgets' interface to 'apexcharts.js'.
'Apexcharts' is a modern JavaScript charting library to build interactive charts and visualizations with simple API.
'Apexcharts' examples and documentation are available here: <https://apexcharts.com/>.",2020-03-31,Victor Perrier,"https://github.com/dreamRs/apexcharter,
https://dreamrs.github.io/apexcharter",TRUE,https://github.com/dreamrs/apexcharter,7488,68,2020-06-09T15:03:49Z,110.11764705882354
aplot,"For many times, we are not just aligning plots as what 'cowplot' and 'patchwork' did. Users would like to align associated information that requires axes to be exactly matched in subplots, e.g. hierarchical clustering with a heatmap. This package provides utilities to aligns associated subplots to a main plot at different sides (left, right, top and bottom) with axes exactly matched. ",2020-04-07,Guangchuang Yu,https://github.com/YuLab-SMU/aplot,TRUE,https://github.com/yulab-smu/aplot,2976,32,2020-04-15T15:21:04Z,93
applicable,"A modeling package compiling applicability domain methods in R.
It combines different methods to measure the amount of extrapolation new
samples can have from the training set. See Netzeva et al (2005)
<doi:10.1177/026119290503300209> for an overview of applicability domains. ",2020-05-25,Marly Gotti,https://github.com/tidymodels/applicable,TRUE,https://github.com/tidymodels/applicable,172,23,2020-05-26T07:18:05Z,7.478260869565218
aprof,"Assists the evaluation of whether and
where to focus code optimization, using Amdahl's law and visual aids
based on line profiling. Amdahl's profiler organizes profiling output
files (including memory profiling) in a visually appealing way.
It is meant to help to balance development
vs. execution time by helping to identify the most promising sections
of code to optimize and projecting potential gains. The package is
an addition to R's standard profiling tools and is not a wrapper for them.",2018-05-22,Marco D. Visser,http://github.com/MarcoDVisser/aprof,TRUE,https://github.com/marcodvisser/aprof,29453,22,2020-01-18T16:45:45Z,1338.7727272727273
apyramid,"Provides a quick method for visualizing non-aggregated line-list
or aggregated census data stratified by age and one or two categorical
variables (e.g. gender and health status) with any number of values. It
returns a 'ggplot' object, allowing the user to further customize the
output. This package is part of the 'R4Epis' project
<https://r4epis.netlify.com>.",2020-05-08,Zhian N. Kamvar,"https://github.com/R4EPI/apyramid, https://r4epis.netlify.com",TRUE,https://github.com/r4epi/apyramid,3199,4,2020-05-08T14:44:23Z,799.75
aqp,"The Algorithms for Quantitative Pedology (AQP) project was started in 2009 to organize a loosely-related set of concepts and source code on the topic of soil profile visualization, aggregation, and classification into this package (aqp). Over the past 8 years, the project has grown into a suite of related R packages that enhance and simplify the quantitative analysis of soil profile data. Central to the AQP project is a new vocabulary of specialized functions and data structures that can accommodate the inherent complexity of soil profile information; freeing the scientist to focus on ideas rather than boilerplate data processing tasks <doi:10.1016/j.cageo.2012.10.020>. These functions and data structures have been extensively tested and documented, applied to projects involving hundreds of thousands of soil profiles, and deeply integrated into widely used tools such as SoilWeb <https://casoilresource.lawr.ucdavis.edu/soilweb-apps/>. Components of the AQP project (aqp, soilDB, sharpshootR, soilReports packages) serve an important role in routine data analysis within the USDA-NRCS Soil Science Division. The AQP suite of R packages offer a convenient platform for bridging the gap between pedometric theory and practice.",2020-01-24,Dylan Beaudette,https://github.com/ncss-tech/aqp,TRUE,https://github.com/ncss-tech/aqp,146911,18,2020-06-09T19:28:39Z,8161.722222222223
ArchaeoPhases,"Provides a list of functions for the statistical analysis of archaeological dates and groups of dates (see <doi:10.18637/jss.v093.c01> for a description). It is based on the post-processing of the Markov Chains whose stationary distribution is the posterior distribution of a series of dates. Such output can be simulated by different applications as for instance 'ChronoModel' (see <http://www.chronomodel.fr>), 'Oxcal' (see <https://c14.arch.ox.ac.uk/oxcal.html>) or 'BCal' (see <http://bcal.shef.ac.uk/>). The only requirement is to have a csv file containing a sample from the posterior distribution.",2020-05-29,Anne Philippe,NA,TRUE,https://github.com/archaeostat/archaeophases,19960,2,2020-06-09T13:41:13Z,9980
archivist,"Data exploration and modelling is a process in which a lot of data
artifacts are produced. Artifacts like: subsets, data aggregates, plots,
statistical models, different versions of data sets and different versions
of results. The more projects we work with the more artifacts are produced
and the harder it is to manage these artifacts. Archivist helps to store
and manage artifacts created in R. Archivist allows you to store selected
artifacts as a binary files together with their metadata and relations.
Archivist allows to share artifacts with others, either through shared
folder or github. Archivist allows to look for already created artifacts by
using it's class, name, date of the creation or other properties. Makes it
easy to restore such artifacts. Archivist allows to check if new artifact
is the exact copy that was produced some time ago. That might be useful
either for testing or caching.",2019-08-31,Przemyslaw Biecek,https://pbiecek.github.io/archivist/,TRUE,https://github.com/pbiecek/archivist,101647,73,2019-08-26T21:27:49Z,1392.4246575342465
arcos,"A wrapper for the 'ARCOS API' <https://arcos-api.ext.nile.works/__swagger__/>
that returns raw and summarized data frames from the
Drug Enforcement Administration’s Automation of Reports and Consolidated Orders System,
a database that monitors controlled substances transactions between manufacturers and
distributors which was made public by The Washington Post and The Charleston Gazette-Mail.",2020-05-18,Andrew Ba Tran,https://github.com/wpinvestigative/arcos,TRUE,https://github.com/wpinvestigative/arcos,4654,14,2020-04-20T03:44:32Z,332.42857142857144
ARDL,"Creates complex autoregressive distributed lag (ARDL) models
providing just the order and automatically constructs the underlying
unrestricted and restricted error correction model (ECM). It also performs
the bounds-test for cointegration as described in Pesaran et al. (2001) <doi:10.1002/jae.616> and provides the multipliers and the cointegrating
equation.",2020-04-10,Kleanthis Natsiopoulos,https://github.com/Natsiopoulos/ARDL,TRUE,https://github.com/natsiopoulos/ardl,1279,2,2020-04-08T23:44:43Z,639.5
areal,"A pipeable, transparent implementation of areal weighted interpolation
with support for interpolating multiple variables in a single function call.
These tools provide a full-featured workflow for validation and estimation
that fits into both modern data management (e.g. tidyverse) and spatial
data (e.g. sf) frameworks.",2020-05-12,Christopher Prener,https://github.com/slu-openGIS/areal,TRUE,https://github.com/slu-opengis/areal,10582,64,2020-05-12T11:48:12Z,165.34375
argonDash,"Create awesome 'Bootstrap 4' dashboards powered by 'Argon'.
See more here <https://rinterface.github.io/argonDash/>.",2019-11-27,David Granjon,https://github.com/RinteRface/argonDash,TRUE,https://github.com/rinterface/argondash,42173,84,2019-11-27T08:13:48Z,502.0595238095238
argonR,"R wrapper around the argon HTML library.
More at <https://demos.creative-tim.com/argon-design-system/>.",2019-11-27,David Granjon,https://github.com/RinteRface/argonR,TRUE,https://github.com/rinterface/argonr,43416,38,2019-11-27T08:01:44Z,1142.5263157894738
argparse,"A command line parser to
be used with Rscript to write ""#!"" shebang scripts that gracefully
accept positional and optional arguments and automatically generate usage.",2019-03-08,Trevor L Davis,https://github.com/trevorld/r-argparse,TRUE,https://github.com/trevorld/r-argparse,582771,47,2020-02-01T09:24:08Z,12399.382978723404
ari,"Create videos from 'R Markdown' documents, or images and audio
files. These images can come from image files or HTML slides, and the audio
files can be provided by the user or computer voice narration can be created
using 'Amazon Polly'. The purpose of this package is to allow users to create
accessible, translatable, and reproducible lecture videos. See
<https://aws.amazon.com/polly/> for more information.",2020-02-08,Sean Kross,http://github.com/seankross/ari,TRUE,https://github.com/seankross/ari,14529,83,2020-05-29T16:53:30Z,175.04819277108433
aricode,"Implements an efficient O(n) algorithm based on bucket-sorting for
fast computation of standard clustering comparison measures. Available measures
include adjusted Rand index (ARI), normalized information distance (NID),
normalized mutual information (NMI), adjusted mutual information (AMI),
normalized variation information (NVI) and entropy, as described in Vinh et al (2009)
<doi:10.1145/1553374.1553511>.",2019-06-29,Julien Chiquet,https://github.com/jchiquet/aricode (dev version),TRUE,https://github.com/jchiquet/aricode,11730,8,2019-06-29T06:50:55Z,1466.25
arkdb,"Flat text files provide a robust, compressible, and portable
way to store tables from databases. This package provides convenient
functions for exporting tables from relational database connections
into compressed text files and streaming those text files back into
a database without requiring the whole table to fit in working memory.",2018-10-31,Carl Boettiger,https://github.com/ropensci/arkdb,TRUE,https://github.com/ropensci/arkdb,15086,54,2020-03-11T22:18:24Z,279.3703703703704
arkhe,"A collection of classes that represent
archaeological data. This package provides a set of S4 classes that
extend the basic matrix data type (absolute/relative frequency,
presence/absence data, co-occurrence matrix, etc.) upon which package
developers can build subclasses. It also provides a set of generic
methods (mutators and coercion mechanisms) and functions (e.g.
predicates). In addition, a few classes of general interest (e.g. that
represent stratigraphic relationships) are implemented.",2020-03-23,Nicolas Frerebeau,"http://arkhe.archaeo.science, https://github.com/nfrerebeau/arkhe,
https://cran.r-project.org/package=arkhe",TRUE,https://github.com/nfrerebeau/arkhe,4176,0,2020-05-20T16:51:21Z,NA
arm,"Functions to accompany A. Gelman and J. Hill, Data Analysis Using Regression and Multilevel/Hierarchical Models, Cambridge University Press, 2007.",2020-04-27,Yu-Sung Su,https://CRAN.R-project.org/package=arm,TRUE,https://github.com/suyusung/arm,1577259,16,2020-04-27T02:34:39Z,98578.6875
aroma.affymetrix,A cross-platform R framework that facilitates processing of any number of Affymetrix microarray samples regardless of computer system. The only parameter that limits the number of chips that can be processed is the amount of available disk space. The Aroma Framework has successfully been used in studies to process tens of thousands of arrays. This package has actively been used since 2006.,2019-06-23,Henrik Bengtsson,"https://www.aroma-project.org/,
https://github.com/HenrikBengtsson/aroma.affymetrix",TRUE,https://github.com/henrikbengtsson/aroma.affymetrix,69721,5,2019-12-16T05:47:10Z,13944.2
aroma.cn,"Methods for analyzing DNA copy-number data. Specifically,
this package implements the multi-source copy-number normalization (MSCN)
method for normalizing copy-number data obtained on various platforms and
technologies. It also implements the TumorBoost method for normalizing
paired tumor-normal SNP data.",2015-10-28,Henrik Bengtsson,"http://www.aroma-project.org/,
https://github.com/HenrikBengtsson/aroma.cn",TRUE,https://github.com/henrikbengtsson/aroma.cn,24651,1,2019-12-15T01:58:27Z,24651
aroma.core,"Core methods and classes used by higher-level 'aroma.*' packages
part of the Aroma Project, e.g. 'aroma.affymetrix' and 'aroma.cn'.",2020-02-04,Henrik Bengtsson,"https://github.com/HenrikBengtsson/aroma.core,
https://www.aroma-project.org/",TRUE,https://github.com/henrikbengtsson/aroma.core,79930,1,2020-02-04T18:11:00Z,79930
arsenal,"An Arsenal of 'R' functions for large-scale statistical summaries,
which are streamlined to work within the latest reporting tools in 'R' and
'RStudio' and which use formulas and versatile summary statistics for summary
tables and models. The primary functions include tableby(), a Table-1-like
summary of multiple variable types 'by' the levels of one or more categorical
variables; paired(), a Table-1-like summary of multiple variable types paired across
two time points; modelsum(), which performs simple model fits on one or more endpoints
for many variables (univariate or adjusted for covariates);
freqlist(), a powerful frequency table across many categorical variables;
comparedf(), a function for comparing data.frames; and
write2(), a function to output tables to a document.",2020-02-15,Ethan Heinzen,"https://github.com/eheinzen/arsenal,
https://cran.r-project.org/package=arsenal,
https://eheinzen.github.io/arsenal/",TRUE,https://github.com/eheinzen/arsenal,71898,144,2020-05-28T22:29:58Z,499.2916666666667
ARTool,"The Aligned Rank Transform for nonparametric
factorial ANOVAs as described by J. O. Wobbrock,
L. Findlater, D. Gergle, & J. J. Higgins, ""The Aligned
Rank Transform for nonparametric factorial analyses
using only ANOVA procedures"", CHI 2011 <DOI:10.1145/1978942.1978963>.",2020-03-20,Matthew Kay,https://github.com/mjskay/ARTool,TRUE,https://github.com/mjskay/artool,36708,22,2020-03-11T17:59:09Z,1668.5454545454545
ARTP2,Pathway and gene level association test using raw data or summary statistics.,2018-11-30,Han Zhang,https://github.com/zhangh12/ARTP2,TRUE,https://github.com/zhangh12/artp2,19596,4,2019-08-15T21:39:12Z,4899
arules,"Provides the infrastructure for representing,
manipulating and analyzing transaction data and patterns (frequent
itemsets and association rules). Also provides
C implementations of the association mining algorithms Apriori and Eclat.
Hahsler, Gruen and Hornik (2005) <doi:10.18637/jss.v014.i15>.",2020-05-15,Michael Hahsler,https://github.com/mhahsler/arules,TRUE,https://github.com/mhahsler/arules,1539641,119,2020-06-08T14:57:43Z,12938.159663865546
arulesCBA,Provides the infrastructure for association rule-based classification including algorithms like Classification Based on Associations (CBA).,2020-04-20,Michael Hahsler,https://github.com/ianjjohnson/arulesCBA,TRUE,https://github.com/ianjjohnson/arulescba,41233,27,2020-05-09T03:48:08Z,1527.148148148148
arulesNBMiner,NBMiner is an implementation of the model-based mining algorithm for mining NB-frequent itemsets and NB-precise rules. Michael Hahsler (2006) <doi:10.1007/s10618-005-0026-2>. ,2020-04-26,Michael Hahsler,https://github.com/mhahsler/arulesNBMiner,TRUE,https://github.com/mhahsler/arulesnbminer,31586,3,2020-04-26T20:04:43Z,10528.666666666666
arulesViz,Extends package 'arules' with various visualization techniques for association rules and itemsets. The package also includes several interactive visualizations for rule exploration.,2019-05-20,Michael Hahsler,https://github.com/mhahsler/arulesViz,TRUE,https://github.com/mhahsler/arulesviz,733638,33,2020-04-27T16:58:20Z,22231.454545454544
aRxiv,"An interface to the API for 'arXiv'
(<https://arxiv.org>), a repository of electronic preprints for
computer science, mathematics, physics, quantitative biology,
quantitative finance, and statistics.",2019-08-08,Karthik Ram,https://github.com/ropensci/aRxiv,TRUE,https://github.com/ropensci/arxiv,38466,40,2019-12-09T12:01:20Z,961.65
asciiSetupReader,"Lets you open a fixed-width ASCII file (.txt or
.dat) that has an accompanying setup file (.sps or .sas). These file
combinations are sometimes referred to as .txt+.sps, .txt+.sas,
.dat+.sps, or .dat+.sas. This will only run in a txt-sps or txt-sas
pair in which the setup file contains instructions to open that text
file. It will NOT open other text files, .sav, .sas, or .por data
files. Fixed-width ASCII files with setup files are common in older
(pre-2000) government data.",2020-03-21,Jacob Kaplan,https://github.com/jacobkap/asciiSetupReader,TRUE,https://github.com/jacobkap/asciisetupreader,16489,3,2020-03-20T18:33:32Z,5496.333333333333
ashr,"The R package 'ashr' implements an Empirical Bayes
approach for large-scale hypothesis testing and false discovery
rate (FDR) estimation based on the methods proposed in
M. Stephens, 2016, ""False discovery rates: a new deal"",
<DOI:10.1093/biostatistics/kxw041>. These methods can be applied
whenever two sets of summary statistics---estimated effects and
standard errors---are available, just as 'qvalue' can be applied
to previously computed p-values. Two main interfaces are
provided: ash(), which is more user-friendly; and ash.workhorse(),
which has more options and is geared toward advanced users. The
ash() and ash.workhorse() also provides a flexible modeling
interface that can accommodate a variety of likelihoods (e.g.,
normal, Poisson) and mixture priors (e.g., uniform, normal).",2020-02-20,Peter Carbonetto,https://github.com/stephens999/ashr,TRUE,https://github.com/stephens999/ashr,35356,62,2020-04-08T14:01:08Z,570.258064516129
AsioHeaders,"'Asio' is a cross-platform C++ library for network and low-level
I/O programming that provides developers with a consistent asynchronous model
using a modern C++ approach. It is also included in Boost but requires linking
when used with Boost. Standalone it can be used header-only (provided a recent
compiler). 'Asio' is written and maintained by Christopher M. Kohlhoff, and
released under the 'Boost Software License', Version 1.0.",2020-03-11,Dirk Eddelbuettel,NA,TRUE,https://github.com/eddelbuettel/asioheaders,56810,9,2020-05-12T23:09:01Z,6312.222222222223
aslib,"Provides an interface to the algorithm selection benchmark library
at <http://www.aslib.net> and the 'LLAMA' package
(<https://cran.r-project.org/package=llama>) for building
algorithm selection models; see Bischl et al. (2016)
<doi:10.1016/j.artint.2016.04.003>.",2020-05-24,Bernd Bischl,https://github.com/coseal/aslib-r/,TRUE,https://github.com/coseal/aslib-r,12741,6,2020-05-22T19:56:08Z,2123.5
aSPU,"R codes for the (adaptive) Sum of Powered Score ('SPU' and 'aSPU')
tests, inverse variance weighted Sum of Powered score ('SPUw' and 'aSPUw') tests
and gene-based and some pathway based association tests (Pathway based Sum of
Powered Score tests ('SPUpath'), adaptive 'SPUpath' ('aSPUpath') test, 'GEEaSPU'
test for multiple traits - single 'SNP' (single nucleotide polymorphism)
association in generalized estimation equations, 'MTaSPUs' test for multiple
traits - single 'SNP' association with Genome Wide Association Studies ('GWAS')
summary statistics, Gene-based Association Test that uses an extended 'Simes'
procedure ('GATES'), Hybrid Set-based Test ('HYST') and extended version
of 'GATES' test for pathway-based association testing ('GATES-Simes'). ).
The tests can be used with genetic and other data sets with covariates. The
response variable is binary or quantitative. Summary; (1) Single trait-'SNP' set
association with individual-level data ('aSPU', 'aSPUw', 'aSPUr'), (2) Single trait-'SNP'
set association with summary statistics ('aSPUs'), (3) Single trait-pathway
association with individual-level data ('aSPUpath'), (4) Single trait-pathway
association with summary statistics ('aSPUsPath'), (5) Multiple traits-single
'SNP' association with individual-level data ('GEEaSPU'), (6) Multiple traits-
single 'SNP' association with summary statistics ('MTaSPUs'), (7) Multiple traits-'SNP' set association with summary statistics('MTaSPUsSet'), (8) Multiple traits-pathway association with summary statistics('MTaSPUsSetPath').",2020-05-13,Il-Youp Kwak and others,https://github.com/ikwak2/aSPU,TRUE,https://github.com/ikwak2/aspu,29550,5,2020-05-13T04:58:29Z,5910
assignPOP,"Use Monte-Carlo and K-fold cross-validation coupled with machine-
learning classification algorithms to perform population assignment, with
functionalities of evaluating discriminatory power of independent training
samples, identifying informative loci, reducing data dimensionality for genomic
data, integrating genetic and non-genetic data, and visualizing results.",2020-03-16,Kuan-Yu (Alex) Chen,https://github.com/alexkychen/assignPOP,TRUE,https://github.com/alexkychen/assignpop,19011,10,2020-03-16T13:36:36Z,1901.1
ASSISTant,"Clinical trial design for subgroup selection in three-stage group
sequential trial. Includes facilities for design, exploration and analysis of
such trials. An implementation of the initial DEFUSE-3 trial is also provided
as a vignette.",2019-05-03,Balasubramanian Narasimhan,https://github.com/bnaras/ASSISTant,TRUE,https://github.com/bnaras/assistant,16799,0,2019-11-22T03:30:19Z,NA
astsa,"Data sets and scripts to accompany Time Series Analysis and Its Applications: With R Examples (4th ed), by R.H. Shumway and D.S. Stoffer. Springer Texts in Statistics, 2017, <DOI:10.1007/978-3-319-52452-8>, and Time Series: A Data Analysis Approach Using R. Chapman-Hall, 2019, <ISBN: 978-0367221096>. ",2020-05-01,David Stoffer,"https://github.com/nickpoison/astsa,
http://www.stat.pitt.edu/stoffer/tsa4/,
http://www.stat.pitt.edu/stoffer/tsda/",TRUE,https://github.com/nickpoison/astsa,300866,44,2020-06-09T19:51:46Z,6837.863636363636
atable,"Create Tables for Reporting Clinical Trials.
Calculates descriptive statistics and hypothesis tests,
arranges the results in a table ready for reporting with LaTeX, HTML or Word.",2020-04-13,Armin Ströbel,https://github.com/arminstroebel/atable,TRUE,https://github.com/arminstroebel/atable,10065,2,2020-04-13T12:09:08Z,5032.5
attachment,"Tools to help manage dependencies during package
development. This can retrieve all dependencies that are used in R
files in the ""R"" directory, in Rmd files in ""vignettes"" directory and
in 'roxygen2' documentation of functions. There is a function to
update the Description file of your package and a function to create a
file with the R commands to install all dependencies of your package.
All functions to retrieve dependencies of R scripts and Rmd files can
be used independently of a package development.",2020-03-15,Vincent Guyader,https://github.com/Thinkr-open/attachment,TRUE,https://github.com/thinkr-open/attachment,15143,54,2020-06-03T10:01:45Z,280.4259259259259
attempt,"Tools for defensive programming, inspired by 'purrr' mappers and
based on 'rlang'.'attempt' extends and facilitates defensive programming by
providing a consistent grammar, and provides a set of easy to use functions
for common tests and conditions. 'attempt' only depends on 'rlang', and
focuses on speed, so it can be easily integrated in other functions and
used in data analysis. ",2020-05-03,Colin Fay,https://github.com/ColinFay/attempt,TRUE,https://github.com/colinfay/attempt,46918,85,2020-04-17T10:38:35Z,551.9764705882353
attenuation,"Confidence curves, confidence intervals and p-values for
correlation coefficients corrected for attenuation due to measurement error.
Implements the methods described in Moss (2019, <arxiv:1911.01576>).",2019-11-08,Jonas Moss,https://github.com/JonasMoss/attenuation/,TRUE,https://github.com/jonasmoss/attenuation,3183,0,2019-11-08T14:26:00Z,NA
auditor,"Provides an easy to use unified interface for creating validation plots for any model.
The 'auditor' helps to avoid repetitive work consisting of writing code needed to create residual plots.
This visualizations allow to asses and compare the goodness of fit, performance, and similarity of models. ",2020-05-28,Alicja Gosiewska,https://github.com/ModelOriented/auditor,TRUE,https://github.com/modeloriented/auditor,18470,47,2020-05-28T13:26:48Z,392.97872340425533
auk,"Extract and process bird sightings records from
eBird (<http://ebird.org>), an online tool for recording bird
observations. Public access to the full eBird database is via the
eBird Basic Dataset (EBD; see <http://ebird.org/ebird/data/download>
for access), a downloadable text file. This package is an interface to
AWK for extracting data from the EBD based on taxonomic, spatial, or
temporal filters, to produce a manageable file size that can be
imported into R.",2020-04-03,Matthew Strimas-Mackey,"https://github.com/CornellLabofOrnithology/auk,
http://CornellLabofOrnithology.github.io/auk/",TRUE,https://github.com/cornelllabofornithology/auk,24565,68,2020-06-09T17:24:17Z,361.25
auth0,"Uses Auth0 API (see <https://auth0.com> for more
information) to use a simple and secure authentication system. It provides
tools to log in and out a shiny application using social networks or a list
of e-mails.",2019-09-26,Julio Trecenti,NA,TRUE,https://github.com/curso-r/auth0,10817,78,2020-04-21T21:52:36Z,138.67948717948718
autocogs,Automatically calculates cognostic groups for plot objects and list column plot objects. Results are returned in a nested data frame.,2020-04-03,Barret Schloerke,https://github.com/schloerke/autocogs,TRUE,https://github.com/schloerke/autocogs,17603,3,2020-04-03T01:11:27Z,5867.666666666667
AutoDeskR,"An interface to the 'AutoDesk' 'API' Platform including the Authentication
'API' for obtaining authentication to the 'AutoDesk' Forge Platform, Data Management
'API' for managing data across the platform's cloud services, Design Automation 'API'
for performing automated tasks on design files in the cloud, Model
Derivative 'API' for translating design files into different formats, sending
them to the viewer app, and extracting design data, and Viewer for rendering
2D and 3D models (see <https://developer.autodesk.com> for more information).",2017-07-10,Paul Govan,https://github.com/paulgovan/autodeskr,TRUE,https://github.com/paulgovan/autodeskr,15800,5,2020-04-01T00:47:32Z,3160
autoimage,"Functions for displaying multiple images or scatterplots with a color
scale, i.e., heat maps, possibly with projected coordinates. The
package relies on the base graphics system, so graphics are
rendered rapidly.",2020-05-27,Joshua French,NA,TRUE,https://github.com/jpfrench81/autoimage,24036,5,2020-05-26T20:13:21Z,4807.2
autokeras,"R Interface to 'AutoKeras' <https://autokeras.com/>.
'AutoKeras' is an open source software library for Automated Machine
Learning (AutoML). The ultimate goal of AutoML is to provide easily
accessible deep learning tools to domain experts with limited data science
or machine learning background. 'AutoKeras' provides functions to
automatically search for architecture and hyperparameters of deep
learning models.",2020-02-20,Juan Cruz Rodriguez,https://github.com/r-tensorflow/autokeras,TRUE,https://github.com/r-tensorflow/autokeras,2239,62,2020-02-11T00:05:25Z,36.11290322580645
automultinomial,"Fits the autologistic model described in Besag's famous 1974 paper on auto- models <http://www.jstor.org/stable/2984812>. Fits a multicategory generalization of the autologistic model when there are more than 2 response categories. Provides support for both asymptotic and bootstrap confidence intervals. For full model descriptions and a guide to the use of this package, please see the vignette.",2018-10-31,Stephen Berg,NA,TRUE,https://github.com/stephenberg/automultinomial,14891,4,2019-10-23T21:29:38Z,3722.75
autoplotly,"Functionalities to automatically generate interactive visualizations for
statistical results supported by 'ggfortify', such as time series, PCA,
clustering and survival analysis, with 'plotly.js' <https://plot.ly/> and
'ggplot2' style. The generated visualizations can also be easily extended
using 'ggplot2' and 'plotly' syntax while staying interactive.",2018-04-21,Yuan Tang,https://github.com/terrytangyuan/autoplotly,TRUE,https://github.com/terrytangyuan/autoplotly,15715,55,2020-01-23T16:04:32Z,285.72727272727275
autoTS,"Offers a set of functions to easily make predictions for univariate time series.
'autoTS' is a wrapper of existing functions of the 'forecast' and 'prophet' packages,
harmonising their outputs in tidy dataframes and using default values for each.
The core function getBestModel() allows the user to effortlessly benchmark seven
algorithms along with a bagged estimator to identify which one performs the best
for a given time series.",2020-06-05,Vivien Roussez,https://github.com/vivienroussez/autoTS,TRUE,https://github.com/vivienroussez/autots,0,6,2020-06-05T12:31:14Z,0
av,"Bindings to 'FFmpeg' <http://www.ffmpeg.org/> AV library for working with
audio and video in R. Generates high quality video from images or R graphics with
custom audio. Also offers high performance tools for reading raw audio, creating
'spectrograms', and converting between countless audio / video formats. This package
interfaces directly to the C API and does not require any command line utilities.",2020-01-29,Jeroen Ooms,"https://docs.ropensci.org/av (website),
https://github.com/ropensci/av (devel)",TRUE,https://github.com/ropensci/av,208420,66,2020-05-16T09:48:52Z,3157.878787878788
available,"Check if a given package name is available to use. It checks the
name's validity. Checks if it is used on 'GitHub', 'CRAN' and 'Bioconductor'. Checks
for unintended meanings by querying Urban Dictionary, 'Wiktionary' and Wikipedia.",2019-07-19,Jim Hester,https://github.com/ropenscilabs/available,TRUE,https://github.com/ropenscilabs/available,20287,112,2020-05-15T12:01:09Z,181.13392857142858
avar,"Implements the allan variance and allan variance linear regression estimator for latent time series models. More details about the method can be found, for example, in Guerrier, S., Molinari, R., & Stebler, Y. (2016) <doi:10.1109/LSP.2016.2541867>. ",2020-01-15,Stéphane Guerrier,https://github.com/SMAC-Group/avar,TRUE,https://github.com/smac-group/avar,4585,0,2020-01-26T20:39:26Z,NA
AWAPer,"NetCDF files of the Bureau of Meteorology Australian Water Availability Project daily national climate grids are built and used for the efficient extraction of daily point and catchment area weighted precipitation, daily minimum temperature, daily maximum temperature, vapour pressure deficit, solar radiation and various measures of evapotranspiration. For details on the source climate data see <http://www.bom.gov.au/jsp/awap/>.",2020-02-01,Tim Peterson,https://github.com/peterson-tim-j/AWAPer,TRUE,https://github.com/peterson-tim-j/awaper,2364,4,2020-06-02T05:18:06Z,591
aweek,"Which day a week starts depends heavily on the either the local or
professional context. This package is designed to be a lightweight solution
to easily switching between week-based date definitions. ",2020-04-29,Zhian N. Kamvar,https://www.repidemicsconsortium.org/aweek,TRUE,https://github.com/reconhub/aweek,30433,10,2019-06-21T14:31:22Z,3043.3
aws.comprehend,"Client for 'AWS Comprehend' <https://aws.amazon.com/comprehend>, a cloud natural language processing service that can perform a number of quantitative text analyses, including language detection, sentiment analysis, and feature extraction.",2020-03-18,Thomas J. Leeper,https://github.com/cloudyr/aws.comprehend,TRUE,https://github.com/cloudyr/aws.comprehend,9276,11,2020-03-18T14:58:34Z,843.2727272727273
aws.ec2metadata,Retrieve Amazon EC2 instance metadata from within the running instance.,2019-07-15,Thomas J. Leeper,https://github.com/cloudyr/aws.ec2metadata,TRUE,https://github.com/cloudyr/aws.ec2metadata,37038411,9,2019-07-15T14:25:30Z,4115379
aws.iam,"A simple client for the Amazon Web Services ('AWS') Identity
and Access Management ('IAM') 'API' <https://aws.amazon.com/iam/>.",2020-04-07,Thomas J. Leeper,https://github.com/cloudyr/aws.iam,TRUE,https://github.com/cloudyr/aws.iam,22115,10,2020-05-11T04:54:42Z,2211.5
aws.kms,"Client package for the 'AWS Key Management Service' <https://aws.amazon.com/kms/>, a cloud service for managing encryption keys.",2020-04-14,Thomas J. Leeper,https://github.com/cloudyr/aws.kms,TRUE,https://github.com/cloudyr/aws.kms,7226,0,2020-04-13T23:22:47Z,NA
aws.lambda,"A simple client package for the Amazon Web Services ('AWS') Lambda
API <https://aws.amazon.com/lambda/>.",2020-04-15,Thomas J. Leeper,https://github.com/cloudyr/aws.lambda,TRUE,https://github.com/cloudyr/aws.lambda,17596,21,2020-04-29T15:53:42Z,837.9047619047619
aws.polly,"A client for AWS Polly <http://aws.amazon.com/documentation/polly>, a speech synthesis service.",2020-03-11,Thomas J. Leeper,https://github.com/cloudyr/aws.polly,TRUE,https://github.com/cloudyr/aws.polly,23628,19,2020-03-18T11:17:18Z,1243.578947368421
aws.s3,"A simple client package for the Amazon Web Services ('AWS') Simple
Storage Service ('S3') 'REST' 'API' <https://aws.amazon.com/s3/>.",2020-04-07,Simon Urbanek,https://github.com/cloudyr/aws.s3,TRUE,https://github.com/cloudyr/aws.s3,32914965,274,2020-05-27T21:58:33Z,120127.6094890511
aws.signature,"Generates version 2 and version 4 request signatures for Amazon Web Services ('AWS') <https://aws.amazon.com/> Application Programming Interfaces ('APIs') and provides a mechanism for retrieving credentials from environment variables, 'AWS' credentials files, and 'EC2' instance metadata. For use on 'EC2' instances, users will need to install the suggested package 'aws.ec2metadata' <https://cran.r-project.org/package=aws.ec2metadata>.",2020-06-01,Thomas J. Leeper,https://github.com/cloudyr/aws.signature,TRUE,https://github.com/cloudyr/aws.signature,1759157,21,2020-06-01T09:50:45Z,83769.38095238095
aws.transcribe,"Client for 'AWS Transcribe' <https://aws.amazon.com/documentation/transcribe>, a cloud transcription service that can convert an audio media file in English and other languages into a text transcript.",2020-03-11,Thomas J. Leeper,https://github.com/cloudyr/aws.transcribe,TRUE,https://github.com/cloudyr/aws.transcribe,8686,4,2020-03-18T13:11:10Z,2171.5
aws.translate,"A client for 'AWS Translate' <https://aws.amazon.com/documentation/translate>, a machine translation service that will convert a text input in one language into a text output in another language.",2020-03-11,Thomas J. Leeper,https://github.com/cloudyr/aws.translate,TRUE,https://github.com/cloudyr/aws.translate,8735,3,2020-03-18T12:58:06Z,2911.6666666666665
AzureAuth,"Provides Azure Active Directory (AAD) authentication functionality for R users of Microsoft's 'Azure' cloud <https://azure.microsoft.com/>. Use this package to obtain 'OAuth' 2.0 tokens for services including Azure Resource Manager, Azure Storage and others. It supports both AAD v1.0 and v2.0, as well as multiple authentication methods, including device code and resource owner grant. Tokens are cached in a user-specific directory obtained using the 'rappdirs' package. The interface is based on the 'OAuth' framework in the 'httr' package, but customised and streamlined for Azure. Part of the 'AzureR' family of packages.",2020-05-23,Hong Ooi,https://github.com/Azure/AzureAuth https://github.com/Azure/AzureR,TRUE,https://github.com/azure/azureauth,108428,19,2020-05-23T01:25:11Z,5706.736842105263
azuremlsdk,"Interface to the 'Azure Machine Learning' Software Development Kit
('SDK'). Data scientists can use the 'SDK' to train, deploy, automate, and
manage machine learning models on the 'Azure Machine Learning' service. To
learn more about 'Azure Machine Learning' visit the website:
<https://docs.microsoft.com/en-us/azure/machine-learning/service/overview-what-is-azure-ml>.",2020-02-05,Heemanshu Suri,https://github.com/azure/azureml-sdk-for-r,TRUE,https://github.com/azure/azureml-sdk-for-r,11969,43,2020-05-20T23:27:58Z,278.3488372093023
AzureRMR,"A lightweight but powerful R interface to the 'Azure Resource Manager' REST API. The package exposes a comprehensive class framework and related tools for creating, updating and deleting 'Azure' resource groups, resources and templates. While 'AzureRMR' can be used to manage any 'Azure' service, it can also be extended by other packages to provide extra functionality for specific services. Part of the 'AzureR' family of packages.",2020-05-15,Hong Ooi,https://github.com/Azure/AzureRMR https://github.com/Azure/AzureR,TRUE,https://github.com/azure/azurermr,111172,7,2020-06-09T11:58:54Z,15881.714285714286
AzureStor,"Manage storage in Microsoft's 'Azure' cloud: <https://azure.microsoft.com/services/storage>. On the admin side, 'AzureStor' includes features to create, modify and delete storage accounts. On the client side, it includes an interface to blob storage, file storage, and 'Azure Data Lake Storage Gen2': upload and download files and blobs; list containers and files/blobs; create containers; and so on. Authenticated access to storage is supported, via either a shared access key or a shared access signature (SAS). Part of the 'AzureR' family of packages.",2020-06-05,Hong Ooi,https://github.com/Azure/AzureStor https://github.com/Azure/AzureR,TRUE,https://github.com/azure/azurestor,109014,18,2020-06-09T13:58:24Z,6056.333333333333
AzureVM,"Functionality for working with virtual machines (VMs) in Microsoft's 'Azure' cloud: <https://azure.microsoft.com/en-us/services/virtual-machines/>. Includes facilities to deploy, startup, shutdown, and cleanly delete VMs and VM clusters. Deployment configurations can be highly customised, and can make use of existing resources as well as creating new ones. A selection of predefined configurations is provided to allow easy deployment of commonly used Linux and Windows images, including Data Science Virtual Machines. With a running VM, execute scripts and install optional extensions. Part of the 'AzureR' family of packages.",2020-02-06,Hong Ooi,https://github.com/Azure/AzureVM https://github.com/Azure/AzureR,TRUE,https://github.com/azure/azurevm,11425,7,2020-02-06T18:11:01Z,1632.142857142857
babelwhale,"Provides a unified interface to interact with 'docker' and 'singularity' containers.
You can execute a command inside a container, mount a volume or copy a file.",2019-10-03,Robrecht Cannoodt (<https://orcid.org/0000-0003-3641-729X>,https://github.com/dynverse/babelwhale,TRUE,https://github.com/dynverse/babelwhale,7312,10,2019-10-03T13:01:36Z,731.2
BacArena,"Can be used for simulation of organisms living in
communities (Bauer and Zimmermann (2017) <doi:10.1371/journal.pcbi.1005544>).
Each organism is represented individually and genome scale
metabolic models determine the uptake and release of compounds. Biological
processes such as movement, diffusion, chemotaxis and kinetics are available
along with data analysis techniques.",2020-05-20,Johannes Zimmermann,https://BacArena.github.io/,TRUE,https://github.com/euba/bacarena,21783,14,2020-05-20T15:49:49Z,1555.9285714285713
backbone,"Provides methods for extracting from a weighted graph
a binary or signed backbone that retains only the significant edges.
The user may input a weighted graph, or a bipartite graph
from which a weighted graph is first constructed via projection.
Backbone extraction methods include the stochastic degree sequence model (Neal, Z. P. (2014). <doi:10.1016/j.socnet.2014.06.001>),
hypergeometric model (Neal, Z. (2013). <doi:10.1007/s13278-013-0107-y>),
the fixed degree sequence model (Zweig, K. A., and Kaufmann, M. (2011). <doi:10.1007/s13278-011-0021-0>),
as well as a universal threshold method. ",2020-05-15,Rachel Domagalski,"https://github.com/domagal9/backbone,
https://www.zacharyneal.com/backbone",TRUE,https://github.com/domagal9/backbone,4293,11,2020-05-19T15:18:54Z,390.27272727272725
backports,"
Functions introduced or changed since R v3.0.0 are re-implemented in this
package. The backports are conditionally exported in order to let R resolve
the function name to either the implemented backport, or the respective base
version, if available. Package developers can make use of new functions or
arguments by selectively importing specific backports to
support older installations.",2020-05-13,Michel Lang,https://github.com/r-lib/backports,TRUE,https://github.com/r-lib/backports,16755091,45,2020-06-06T20:20:07Z,372335.35555555555
badger,Query information and generate badge for using in README and GitHub Pages.,2019-11-15,Guangchuang Yu,https://github.com/GuangchuangYu/badger,TRUE,https://github.com/guangchuangyu/badger,15235,89,2020-06-05T03:43:33Z,171.17977528089887
baggr,"Running and comparing meta-analyses of data with hierarchical
Bayesian models in Stan, including convenience functions for formatting
data, plotting and pooling measures specific to meta-analysis.",2020-02-28,Witold Wiecek,https://github.com/wwiecek/baggr,TRUE,https://github.com/wwiecek/baggr,5302,9,2020-02-27T14:09:39Z,589.1111111111111
baguette,"Tree- and rule-based models can be bagged using
this package and their predictions equations are stored
in an efficient format to reduce the model objects size
and speed. ",2020-04-14,Max Kuhn,https://github.com/tidymodels/baguette,TRUE,https://github.com/tidymodels/baguette,1097,8,2020-04-24T19:44:45Z,137.125
balance,"Balances have become a cornerstone of compositional data analysis. However,
conceptualizing balances is difficult, especially for high-dimensional data. Most often,
investigators visualize balances with ""balance dendrograms"". However, this visualization
tool does not scale well for large data. This package provides an alternative scheme for
visualizing balances, described in [Quinn (2018) <DOI:10.12688/f1000research.15858.1>].
This package also provides a method for principal balance analysis.",2019-07-10,Thomas Quinn,http://github.com/tpq/balance,TRUE,https://github.com/tpq/balance,9967,2,2019-07-10T04:47:48Z,4983.5
Ball,"Hypothesis tests and sure independence screening (SIS) procedure based on ball statistics, including ball divergence <doi:10.1214/17-AOS1579>, ball covariance <doi:10.1080/01621459.2018.1543600>, and ball correlation <doi:10.1080/01621459.2018.1462709>, are developed to analyze complex data in metric spaces, e.g, shape, directional, compositional and symmetric positive definite matrix data. The ball divergence and ball covariance based distribution-free tests are implemented to detecting distribution difference and association in metric spaces <arXiv:1811.03750>. Furthermore, several generic non-parametric feature selection procedures based on ball correlation, BCor-SIS and all of its variants, are implemented to tackle the challenge in the context of ultra high dimensional data.",2019-12-17,Xueqin Wang,https://github.com/Mamba413/Ball,TRUE,https://github.com/mamba413/ball,22081,12,2020-05-16T03:00:12Z,1840.0833333333333
bama,"Perform mediation analysis in the presence of high-dimensional
mediators based on the potential outcome framework. Bayesian Mediation
Analysis (BAMA), developed by Song et al (2019) <doi:10.1111/biom.13189>,
relies on two Bayesian sparse linear mixed models to simultaneously analyze
a relatively large number of mediators for a continuous exposure and outcome
assuming a small number of mediators are truly active. This sparsity
assumption also allows the extension of univariate mediator analysis by
casting the identification of active mediators as a variable selection
problem and applying Bayesian methods with continuous shrinkage priors on
the effects.",2020-05-02,Alexander Rix,https://github.com/umich-cphds/bama,TRUE,https://github.com/umich-cphds/bama,3970,0,2020-05-01T14:45:33Z,NA
BAMBI,Fit (using Bayesian methods) and simulate mixtures of univariate and bivariate angular distributions. Chakraborty and Wong (2017) <arXiv:1708.07804> .,2019-12-18,Saptarshi Chakraborty,https://arxiv.org/abs/1708.07804,TRUE,https://github.com/c7rishi/bambi,22297,1,2020-04-23T06:32:07Z,22297
bamboo,"Implementation of the Bamboo methods described in Li, Dahl, Vannucci, Joo, and Tsai (2014) <DOI:10.1371/journal.pone.0109832>.",2020-04-02,David B. Dahl,https://github.com/dbdahl/bamboo,TRUE,https://github.com/dbdahl/bamboo,19345,3,2020-04-02T21:40:17Z,6448.333333333333
bamp,"Bayesian Age-Period-Cohort Modeling and Prediction using efficient Markov Chain Monte Carlo Methods. This is the R version of the previous BAMP software as described in Volker Schmid and Leonhard Held (2007) <DOI:10.18637/jss.v021.i08> Bayesian Age-Period-Cohort Modeling and Prediction - BAMP, Journal of Statistical Software 21:8. This package includes checks of convergence using Gelman's R.",2020-01-23,Volker Schmid,https://volkerschmid.github.io/bamp/,TRUE,https://github.com/volkerschmid/bamp,8608,3,2020-01-21T21:50:12Z,2869.3333333333335
bang,"Provides functions for the Bayesian analysis of some simple
commonly-used models, without using Markov Chain Monte Carlo (MCMC)
methods such as Gibbs sampling. The 'rust' package
<https://cran.r-project.org/package=rust> is used to simulate a random
sample from the required posterior distribution, using the generalized
ratio-of-uniforms method. See Wakefield, Gelfand and Smith (1991)
<DOI:10.1007/BF01889987> for details. At the moment three conjugate
hierarchical models are available: beta-binomial, gamma-Poisson and a 1-way
analysis of variance (ANOVA).",2020-02-24,Paul J. Northrop,"https://paulnorthrop.github.io/bang/,
http://github.com/paulnorthrop/bang",TRUE,https://github.com/paulnorthrop/bang,14158,3,2020-02-25T10:20:04Z,4719.333333333333
banR,"A client for the ""Base Adresses Nationale"" (BAN) API, which allows to (batch)
geocode and reverse-geocode French addresses. For more information about the BAN and its API, please see <https://adresse.data.gouv.fr/api>. ",2020-05-11,Joel Gombin,"http://joelgombin.github.io/banR/,
http://github.com/joelgombin/banR/",TRUE,https://github.com/joelgombin/banr,13837,18,2020-05-11T08:54:10Z,768.7222222222222
BARIS,"Allows the user to access and import data from the rich French open data portal through the provided free API <https://doc.data.gouv.fr/api/reference/>.
The portal is free, and no credential is required for extracting datasets. ",2020-05-25,Mohamed El Fodil Ihaddaden,https://github.com/feddelegrand7/BARIS,TRUE,https://github.com/feddelegrand7/baris,1953,13,2020-06-03T12:46:38Z,150.23076923076923
bartCause,Contains a variety of methods to generate typical causal inference estimates using Bayesian Additive Regression Trees (BART) as the underlying regression model (Hill (2012) <doi:10.1198/jcgs.2010.08162>).,2020-04-02,Vincent Dorie,https://github.com/vdorie/bartCause,TRUE,https://github.com/vdorie/bartcause,1163,28,2020-03-31T21:00:54Z,41.535714285714285
baRulho,"Intended to facilitate acoustic analysis of (animal) sound transmission experiments, which typically aim to quantify changes in signal structure when transmitted in a given habitat by broadcasting and re-recording animal sounds at increasing distances. The package offers a workflow with functions to prepare the data set for analysis as well as to calculate and visualize several degradation metrics, including blur ratio, signal-to-noise ratio, excess attenuation and envelope correlation among others (Dabelsteen et al 1993 <doi:10.1121/1.406682>).",2020-06-07,Marcelo Araya-Salas,https://github.com/maRce10/baRulho,TRUE,https://github.com/marce10/barulho,2117,0,2020-05-11T21:35:16Z,NA
BAS,"Package for Bayesian Variable Selection and Model Averaging
in linear models and generalized linear models using stochastic or
deterministic sampling without replacement from posterior
distributions. Prior distributions on coefficients are
from Zellner's g-prior or mixtures of g-priors
corresponding to the Zellner-Siow Cauchy Priors or the
mixture of g-priors from Liang et al (2008)
<DOI:10.1198/016214507000001337>
for linear models or mixtures of g-priors from Li and Clyde
(2019) <DOI:10.1080/01621459.2018.1469992> in generalized linear models.
Other model selection criteria include AIC, BIC and Empirical Bayes
estimates of g. Sampling probabilities may be updated based on the sampled
models using sampling w/out replacement or an efficient MCMC algorithm which
samples models using a tree structure of the model space
as an efficient hash table. See Clyde, Ghosh and Littman (2010)
<DOI:10.1198/jcgs.2010.09049> for details on the sampling algorithms.
Uniform priors over all models or beta-binomial prior distributions on
model size are allowed, and for large p truncated priors on the model
space may be used to enforce sampling models that are full rank.
The user may force variables to always be included in addition to imposing
constraints that higher order interactions are included only if their
parents are included in the model.
This material is based upon work supported by the National Science
Foundation under Division of Mathematical Sciences grant 1106891.
Any opinions, findings, and
conclusions or recommendations expressed in this material are those of
the author(s) and do not necessarily reflect the views of the
National Science Foundation.",2020-01-24,Merlise Clyde,"https://www.r-project.org, https://github.com/merliseclyde/BAS",TRUE,https://github.com/merliseclyde/bas,90339,26,2020-03-09T00:48:42Z,3474.576923076923
base64url,"In contrast to RFC3548, the 62nd character (""+"") is replaced with
""-"", the 63rd character (""/"") is replaced with ""_"". Furthermore, the encoder
does not fill the string with trailing ""="". The resulting encoded strings
comply to the regular expression pattern ""[A-Za-z0-9_-]"" and thus are
safe to use in URLs or for file names.
The package also comes with a simple base32 encoder/decoder suited for
case insensitive file systems.",2018-05-14,Michel Lang,https://github.com/mllg/base64url,TRUE,https://github.com/mllg/base64url,79794,10,2020-01-11T00:16:27Z,7979.4
basetheme,Functions to create and select graphical themes for the base plotting system. Contains: 1) several custom pre-made themes 2) mechanism for creating new themes by making persistent changes to the graphical parameters of base plots.,2019-10-17,Karolis Koncevičius,https://github.com/KKPMW/basetheme,TRUE,https://github.com/kkpmw/basetheme,7445,84,2019-10-17T23:35:30Z,88.63095238095238
basf,"Resurrects the standard plot for shapes established by the
'base' and 'graphics' packages. This is suited to workflows that require
plotting using the established and traditional idioms of plotting spatially
coincident data where it belongs. This package depends on 'sf' and only replaces
the plot method. ",2020-04-15,Michael Sumner,https://github.com/mdsumner/basf,TRUE,https://github.com/mdsumner/basf,1007,0,2020-04-12T13:57:11Z,NA
basictabler,"Easily create tables from data
frames/matrices. Create/manipulate tables
row-by-row, column-by-column or cell-by-cell.
Use common formatting/styling to output
rich tables as 'HTML', 'HTML widgets' or to
'Excel'. ",2020-03-07,Christopher Bailiss,https://github.com/cbailiss/basictabler,TRUE,https://github.com/cbailiss/basictabler,16490,23,2020-03-07T10:04:51Z,716.9565217391304
basket,"Implementation of multisource exchangeability models for Bayesian analyses of prespecified subgroups arising in the context of basket trial design and monitoring. The R 'basket' package facilitates implementation of the binary, symmetric multi-source exchangeability model (MEM) with posterior inference arising from both exact computation and Markov chain Monte Carlo sampling. Analysis output includes full posterior samples as well as posterior probabilities, highest posterior density (HPD) interval boundaries, effective sample sizes (ESS), mean and median estimations, posterior exchangeability probability matrices, and maximum a posteriori MEMs. In addition to providing ""basketwise"" analyses, the package includes similar calculations for ""clusterwise"" analyses for which subgroups are combined into meta-baskets, or clusters, using graphical clustering algorithms that treat the posterior exchangeability probabilities as edge weights. In addition plotting tools are provided to visualize basket and cluster densities as well as their exchangeability. References include Hyman, D.M., Puzanov, I., Subbiah, V., Faris, J.E., Chau, I., Blay, J.Y., Wolf, J., Raje, N.S., Diamond, E.L., Hollebecque, A. and Gervais, R (2015) <doi:10.1056/NEJMoa1502309>; Hobbs, B.P. and Landin, R. (2018) <doi:10.1002/sim.7893>; Hobbs, B.P., Kane, M.J., Hong, D.S. and Landin, R. (2018) <doi:10.1093/annonc/mdy457>; and Kaizer, A.M., Koopmeiners, J.S. and Hobbs, B.P. (2017) <doi:10.1093/biostatistics/kxx031>.",2020-04-07,Michael J. Kane,https://github.com/kaneplusplus/basket,TRUE,https://github.com/kaneplusplus/basket,5769,1,2020-04-22T22:40:59Z,5769
batata,"
Allows the user to manage easily R packages removal. It offers many functions to display installed packages according to
specific dates and removes them if needed. The user is always prompted when running the removal functions in order to confirm
the required action. It offers also a function that removes all the installed packages in case one wants to switch from one R version
to another and start fresh. ",2020-06-09,Mohamed El Fodil Ihaddaden,https://github.com/feddelegrand7/batata,TRUE,https://github.com/feddelegrand7/batata,0,16,2020-06-09T14:07:19Z,0
BatchExperiments,"Extends the BatchJobs package to run statistical experiments on
batch computing clusters. For further details see the project web page.",2017-11-30,Bernd Bischl,https://github.com/tudo-r/BatchExperiments,TRUE,https://github.com/tudo-r/batchexperiments,33444,15,2020-05-19T20:00:48Z,2229.6
BatchJobs,"Provides Map, Reduce and Filter variants to generate jobs on batch
computing systems like PBS/Torque, LSF, SLURM and Sun Grid Engine.
Multicore and SSH systems are also supported. For further details see the
project web page.",2019-05-14,Bernd Bischl,https://github.com/tudo-r/BatchJobs,TRUE,https://github.com/tudo-r/batchjobs,139938,74,2020-05-19T19:59:59Z,1891.054054054054
batchtools,"As a successor of the packages 'BatchJobs' and 'BatchExperiments',
this package provides a parallel implementation of the Map function for high
performance computing systems managed by schedulers 'IBM Spectrum LSF'
(<https://www.ibm.com/us-en/marketplace/hpc-workload-management>),
'OpenLava' (<http://www.openlava.org/>), 'Univa Grid Engine'/'Oracle Grid
Engine' (<http://www.univa.com/>), 'Slurm' (<http://slurm.schedmd.com/>),
'TORQUE/PBS'
(<https://adaptivecomputing.com/cherry-services/torque-resource-manager/>),
or 'Docker Swarm' (<https://docs.docker.com/swarm/>).
A multicore and socket mode allow the parallelization on a local machines,
and multiple machines can be hooked up via SSH to create a makeshift
cluster. Moreover, the package provides an abstraction mechanism to define
large-scale computer experiments in a well-organized and reproducible way.",2020-03-19,Michel Lang,https://github.com/mllg/batchtools,TRUE,https://github.com/mllg/batchtools,93065,113,2020-05-03T19:35:23Z,823.5840707964602
bayes4psy,Contains several Bayesian models for data analysis of psychological tests. A user friendly interface for these models should enable students and researchers to perform professional level Bayesian data analysis without advanced knowledge in programming and Bayesian statistics. This package is based on the Stan platform (Carpenter et el. 2017 <doi:10.18637/jss.v076.i01>).,2020-02-20,Jure Demšar,https://github.com/bstatcomp/bayes4psy,TRUE,https://github.com/bstatcomp/bayes4psy,5754,3,2020-02-21T10:15:20Z,1918
bayesAB,"A suite of functions that allow the user to analyze A/B test
data in a Bayesian framework. Intended to be a drop-in replacement for
common frequentist hypothesis test such as the t-test and chi-sq test.",2019-07-02,Frank Portman,https://github.com/FrankPortman/bayesAB,TRUE,https://github.com/frankportman/bayesab,36276,253,2019-11-13T21:25:11Z,143.38339920948616
bayescopulareg,"Tools for Bayesian copula generalized linear models (GLMs).
The sampling scheme is based on Pitt, Chan, and Kohn (2006) <doi:10.1093/biomet/93.3.537>.
Regression parameters (including coefficients and dispersion parameters) are
estimated via the adaptive random walk Metropolis approach developed by
Haario, Saksman, and Tamminen (1999) <doi:10.1007/s001800050022>.
The prior for the correlation matrix is based on Hoff (2007) <doi:10.1214/07-AOAS107>.",2020-05-28,Ethan Alt,https://github.com/ethan-alt/bayescopulareg,TRUE,https://github.com/ethan-alt/bayescopulareg,658,0,2020-05-28T00:42:48Z,NA
BayesCTDesign,"A set of functions to help clinical trial researchers calculate power and sample size for two-arm Bayesian randomized clinical trials that do or do not incorporate historical control data. At some point during the design process, a clinical trial researcher who is designing a basic two-arm Bayesian randomized clinical trial needs to make decisions about power and sample size within the context of hypothesized treatment effects. Through simulation, the simple_sim() function will estimate power and other user specified clinical trial characteristics at user specified sample sizes given user defined scenarios about treatment effect,control group characteristics, and outcome. If the clinical trial researcher has access to historical control data, then the researcher can design a two-arm Bayesian randomized clinical trial that incorporates the historical data. In such a case, the researcher needs to work through the potential consequences of historical and randomized control differences on trial characteristics, in addition to working through issues regarding power in the context of sample size, treatment effect size, and outcome. If a researcher designs a clinical trial that will incorporate historical control data, the researcher needs the randomized controls to be from the same population as the historical controls. What if this is not the case when the designed trial is implemented? During the design phase, the researcher needs to investigate the negative effects of possible historic/randomized control differences on power, type one error, and other trial characteristics. Using this information, the researcher should design the trial to mitigate these negative effects. Through simulation, the historic_sim() function will estimate power and other user specified clinical trial characteristics at user specified sample sizes given user defined scenarios about historical and randomized control differences as well as treatment effects and outcomes. 
The results from historic_sim() and simple_sim() can be printed with print_table() and graphed with plot_table() methods. Outcomes considered are Gaussian, Poisson, Bernoulli, Lognormal, Weibull, and Piecewise Exponential. ",2019-08-02,Barry Eggleston,http://github.com/begglest/BayesCTDesign,TRUE,https://github.com/begglest/bayesctdesign,7694,0,2020-04-21T15:16:58Z,NA
bayesdfa,"Implements Bayesian dynamic factor analysis with 'Stan'. Dynamic
factor analysis is a dimension reduction tool for multivariate time series.
'bayesdfa' extends conventional dynamic factor models in several ways.
First, extreme events may be estimated in the latent trend by modeling
process error with a student-t distribution. Second, autoregressive and
moving average components can be optionally included. Third, the estimated
dynamic factors can be analyzed with hidden Markov models to evaluate
support for latent regimes.",2019-05-22,Eric J. Ward,https://github.com/fate-ewi/bayesdfa,TRUE,https://github.com/fate-ewi/bayesdfa,10562,17,2020-06-07T20:42:05Z,621.2941176470588
bayesDP,"Functions for data augmentation using the
Bayesian discount prior function for 1 arm and 2 arm clinical trials.",2020-02-03,Hickey Graeme L.,https://github.com/graemeleehickey/bayesDP,TRUE,https://github.com/graemeleehickey/bayesdp,23200,0,2020-03-16T16:38:09Z,NA
bayesGARCH,"Provides the bayesGARCH() function which performs the
Bayesian estimation of the GARCH(1,1) model with Student's t innovations as described in Ardia (2008) <doi:10.1007/978-3-540-78657-3>.",2020-04-20,David Ardia,https://github.com/ArdiaD/bayesGARCH,TRUE,https://github.com/ardiad/bayesgarch,40792,4,2020-04-19T20:50:32Z,10198
BayesianNetwork,"A 'Shiny' web application for creating interactive Bayesian Network models,
learning the structure and parameters of Bayesian networks, and utilities for classic
network analysis.",2018-12-02,Paul Govan,https://github.com/paulgovan/bayesiannetwork,TRUE,https://github.com/paulgovan/bayesiannetwork,20088,77,2020-03-09T15:33:11Z,260.8831168831169
BayesianTools,"General-purpose MCMC and SMC samplers, as well as plot and
diagnostic functions for Bayesian statistics, with a particular focus on
calibrating complex system models. Implemented samplers include various
Metropolis MCMC variants (including adaptive and/or delayed rejection MH), the
T-walk, two differential evolution MCMCs, two DREAM MCMCs, and a sequential
Monte Carlo (SMC) particle filter.",2019-12-09,Florian Hartig,https://github.com/florianhartig/BayesianTools,TRUE,https://github.com/florianhartig/bayesiantools,31878,58,2020-06-03T19:07:47Z,549.6206896551724
BayesLogit,"Tools for sampling from the PolyaGamma distribution based on Polson, Scott, and Windle (2013) <doi:10.1080/01621459.2013.829001>. Useful for logistic regression.",2019-09-26,Jesse Windle,https://github.com/jwindle/BayesLogit,TRUE,https://github.com/jwindle/bayeslogit,25943,3,2019-09-25T15:20:06Z,8647.666666666666
BayesMallows,"An implementation of the Bayesian version of the Mallows rank model (Vitelli et al., Journal of Machine Learning Research, 2018 <http://jmlr.org/papers/v18/15-481.html>; Crispino et al., to appear in Annals of Applied Statistics). Both Cayley, footrule, Hamming, Kendall, Spearman, and Ulam distances are supported in the models. The rank data to be analyzed can be in the form of complete rankings, top-k rankings, partially missing rankings, as well as consistent and inconsistent pairwise preferences. Several functions for plotting and studying the posterior distributions of parameters are provided. The package also provides functions for estimating the partition function (normalizing constant) of the Mallows rank model, both with the importance sampling algorithm of Vitelli et al. and asymptotic approximation with the IPFP algorithm (Mukherjee, Annals of Statistics, 2016 <doi:10.1214/15-AOS1389>).",2020-03-23,Oystein Sorensen,https://github.com/ocbe-uio/BayesMallows,TRUE,https://github.com/ocbe-uio/bayesmallows,11213,5,2020-05-11T08:38:09Z,2242.6
BayesNetBP,"Belief propagation methods in Bayesian Networks to propagate evidence through the network. The implementation of these methods are based on the article: Cowell, RG (2005). Local Propagation in Conditional Gaussian Bayesian Networks <http://www.jmlr.org/papers/volume6/cowell05a/>. The optional 'cyjShiny' package for running the Shiny app is available at <https://github.com/cytoscape/cyjShiny>. Please see the example in the documentation of 'runBayesNetApp' function for installing 'cyjShiny' package from GitHub.",2020-04-14,Han Yu,NA,TRUE,https://github.com/hyu-ub/bayesnetbp,15588,6,2020-04-10T14:31:37Z,2598
bayesplot,"Plotting functions for posterior analysis, MCMC diagnostics,
prior and posterior predictive checks, and other visualizations
to support the applied Bayesian workflow advocated in
Gabry, Simpson, Vehtari, Betancourt, and Gelman (2019) <doi:10.1111/rssa.12378>.
The package is designed not only to provide convenient functionality
for users, but also a common set of functions that can be easily used by
developers working on a variety of R packages for Bayesian modeling,
particularly (but not exclusively) packages interfacing with 'Stan'.",2020-05-28,Jonah Gabry,https://mc-stan.org/bayesplot,TRUE,https://github.com/stan-dev/bayesplot,570039,225,2020-06-03T22:25:37Z,2533.5066666666667
BayesPostEst,"An implementation of functions to generate and plot postestimation quantities after estimating Bayesian regression models using Markov chain Monte Carlo (MCMC). Functionality includes the estimation of the Precision-Recall curves (see Beger, 2016 <doi:10.2139/ssrn.2765419>), the implementation of the observed values method of calculating predicted probabilities by Hanmer and Kalkan (2013) <doi:10.1111/j.1540-5907.2012.00602.x>, the implementation of the average value method of calculating predicted probabilities (see King, Tomz, and Wittenberg, 2000 <doi:10.2307/2669316>), and the generation and plotting of first differences to summarize typical effects across covariates (see Long 1997, ISBN:9780803973749; King, Tomz, and Wittenberg, 2000 <doi:10.2307/2669316>). This package can be used with MCMC output generated by any Bayesian estimation tool including 'JAGS', 'BUGS', 'MCMCpack', and 'Stan'.",2020-05-28,Johannes Karreth,https://github.com/ShanaScogin/BayesPostEst,TRUE,https://github.com/shanascogin/bayespostest,4379,6,2020-06-09T23:26:05Z,729.8333333333334
BayesSampling,"Allows the user to apply the Bayes Linear approach to finite population with the Simple Random Sampling - BLE_SRS() - and
the Stratified Simple Random Sampling design - BLE_SSRS() - (both without replacement) and to the Ratio estimator (using auxiliary
information) - BLE_Ratio().
The Bayes linear estimation approach is applied to a general linear regression model for finite population prediction in BLE_Reg()
and it is also possible to achieve the design based estimators using vague prior distributions.
Based on Gonçalves, K.C.M, Moura, F.A.S and Migon, H.S.(2014) <https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886>.",2020-04-24,Pedro Soares Figueiredo,"https://www150.statcan.gc.ca/n1/en/catalogue/12-001-X201400111886,
https://github.com/pedrosfig/BayesSampling",TRUE,https://github.com/pedrosfig/bayessampling,684,1,2020-04-20T13:25:01Z,684
bayestestR,"Provides utilities to describe posterior distributions and Bayesian models. It includes point-estimates such as Maximum A Posteriori (MAP), measures of dispersion (Highest Density Interval - HDI; Kruschke, 2015 <doi:10.1016/C2012-0-00477-2>) and indices used for null-hypothesis testing (such as ROPE percentage, pd and Bayes factors).",2020-04-20,Dominique Makowski,https://easystats.github.io/bayestestR/,TRUE,https://github.com/easystats/bayestestr,264586,219,2020-05-29T08:35:47Z,1208.1552511415525
BayesVarSel,"Conceived to calculate Bayes factors in Linear models and then to provide a formal Bayesian answer to testing and variable selection problems. From a theoretical side, the emphasis in this package is placed on the prior distributions and it allows a wide range of them: Jeffreys (1961); Zellner and Siow(1980)<DOI:10.1007/bf02888369>; Zellner and Siow(1984); Zellner (1986)<DOI:10.2307/2233941>; Fernandez et al. (2001)<DOI:10.1016/s0304-4076(00)00076-2>; Liang et al. (2008)<DOI:10.1198/016214507000001337> and Bayarri et al. (2012)<DOI:10.1214/12-aos1013>. The interaction with the package is through a friendly interface that syntactically mimics the well-known lm() command of R. The resulting objects can be easily explored providing the user very valuable information (like marginal, joint and conditional inclusion probabilities of potential variables; the highest posterior probability model, HPM; the median probability model, MPM) about the structure of the true -data generating- model. Additionally, this package incorporates abilities to handle problems with a large number of potential explanatory variables through parallel and heuristic versions of the main commands, Garcia-Donato and Martinez-Beneito (2013)<DOI:10.1080/01621459.2012.742443>. It also allows problems with p>n and p>>n and also incorporates routines to handle problems with variable selection with factors.",2020-02-18,Anabel Forte,https://github.com/comodin19/BayesVarSel,TRUE,https://github.com/comodin19/bayesvarsel,32299,5,2020-05-19T09:12:54Z,6459.8
bayesvl,"Provides users with its associated functions for pedagogical purposes in visually learning Bayesian networks and Markov chain Monte Carlo (MCMC) computations. It enables users to: a) Create and examine the (starting) graphical structure of Bayesian networks; b) Create random Bayesian networks using a dataset with customized constraints; c) Generate 'Stan' code for structures of Bayesian networks for sampling the data and learning parameters; d) Plot the network graphs; e) Perform Markov chain Monte Carlo computations and produce graphs for posteriors checks. The package refers to one reference item, which describes the methods and algorithms: Vuong, Quan-Hoang and La, Viet-Phuong (2019) <doi:10.31219/osf.io/w5dx6> The 'bayesvl' R package. Open Science Framework (May 18).",2019-05-24,Viet-Phuong La,https://github.com/sshpa/bayesvl,TRUE,https://github.com/sshpa/bayesvl,4929,4,2020-05-09T11:01:32Z,1232.25
BayLum,"Bayesian analysis of luminescence data and C-14 age estimates. Bayesian models are based on the following publications: Combes, B. & Philippe, A. (2017) <doi:10.1016/j.quageo.2017.02.003> and Combes et al (2015) <doi:10.1016/j.quageo.2015.04.001>. This includes, amongst others, data import, export, application of age models and palaeodose model.",2018-09-19,Anne Philippe,NA,TRUE,https://github.com/r-lum/baylum,13738,3,2020-05-27T20:59:26Z,4579.333333333333
baymedr,"BAYesian inference for MEDical designs in R. Convenience functions
for the computation of Bayes factors for common biomedical research
designs. Implemented are functions to test the equivalence (equiv_bf),
non-inferiority (infer_bf), and superiority (super_bf) of an experimental
group compared to a control group. Bayes factors for these three tests can
be computed based on raw data (x, y) or summary statistics (n_x, n_y,
mean_x, mean_y, sd_x, sd_y [or ci_margin and ci_level]), making it possible
to reanalyse findings (e.g., from publications) without the need to obtain
the raw data.",2019-10-21,Maximilian Linde,https://github.com/maxlinde/baymedr,TRUE,https://github.com/maxlinde/baymedr,3322,0,2019-10-17T12:04:48Z,NA
baytrends,"Enable users to evaluate long-term trends using a Generalized
Additive Modeling (GAM) approach. The model development includes selecting a
GAM structure to describe nonlinear seasonally-varying changes over time,
incorporation of hydrologic variability via either a river flow or salinity,
the use of an intervention to deal with method or laboratory changes
suspected to impact data values, and representation of left- and
interval-censored data. The approach has been applied to water quality data
in the Chesapeake Bay, a major estuary on the east coast of the United
States to provide insights to a range of management- and research-focused
questions.",2020-03-31,Rebecca Murphy,https://github.com/tetratech/baytrends,TRUE,https://github.com/tetratech/baytrends,9346,3,2020-03-31T16:03:19Z,3115.3333333333335
bazar,"A collection of miscellaneous functions for
copying objects to the clipboard ('Copy');
manipulating strings ('concat', 'mgsub', 'trim', 'verlan');
loading or showing packages ('library_with_dep', 'require_with_dep',
'sessionPackages');
creating or testing for named lists ('nlist', 'as.nlist', 'is.nlist'),
formulas ('is.formula'), empty objects ('as.empty', 'is.empty'),
whole numbers ('as.wholenumber', 'is.wholenumber');
testing for equality ('almost.equal', 'almost.zero') and computing
uniqueness ('almost.unique');
getting modified versions of usual functions ('rle2', 'sumNA');
making a pause or a stop ('pause', 'stopif');
converting into a function ('as.fun');
providing a C like ternary operator ('condition %?% true %:% false');
finding packages and functions ('get_all_pkgs', 'get_all_funs');
and others ('erase', '%nin%', 'unwhich', 'top', 'bot', 'normalize'). ",2019-03-15,Paul Poncet,https://github.com/paulponcet/bazar,TRUE,https://github.com/paulponcet/bazar,78202,0,2019-07-13T23:51:42Z,NA
BBmisc,"Miscellaneous helper functions for and from B. Bischl and
some other guys, mainly for package development.",2017-03-10,Bernd Bischl,https://github.com/berndbischl/BBmisc,TRUE,https://github.com/berndbischl/bbmisc,561546,13,2020-05-25T08:07:27Z,43195.846153846156
bbmle,Methods and functions for fitting maximum likelihood models in R.,2020-02-03,Ben Bolker,https://github.com/bbolker/bbmle,TRUE,https://github.com/bbolker/bbmle,366916,14,2020-04-29T00:21:36Z,26208.285714285714
bbricks,"A set of frequently used Bayesian parametric and nonparametric model structures, as well as a set of tools for common analytical tasks. Structures include linear Gaussian systems, Gaussian and Normal-Inverse-Wishart conjugate structure, Gaussian and Normal-Inverse-Gamma conjugate structure, Categorical and Dirichlet conjugate structure, Dirichlet Process on positive integers, Dirichlet Process in general, Hierarchical Dirichlet Process ... Tasks include updating posteriors, sampling from posteriors, calculating marginal likelihood, calculating posterior predictive densities, sampling from posterior predictive distributions, calculating ""Maximum A Posteriori"" (MAP) estimates ... See <https://chenhaotian.github.io/Bayesian-Bricks/> to get started.",2020-05-07,Haotian Chen,https://github.com/chenhaotian/Bayesian-Bricks,TRUE,https://github.com/chenhaotian/bayesian-bricks,1790,3,2020-05-07T19:04:49Z,596.6666666666666
bbsBayes,"The North American Breeding Bird Survey (BBS) is a long-running
program that seeks to monitor the status and trends of the breeding birds in
North America. Since its start in 1966, the BBS has accumulated over 50 years
of data for over 300 species of North American Birds. Given the temporal and
spatial structure of the data, hierarchical Bayesian models are used to assess
the status and trends of these 300+ species of birds. 'bbsBayes' allows you to perform
hierarchical Bayesian analysis of BBS data. You can run a full
model analysis for one or more species that you choose, or you can take
more control and specify how the data should be stratified, prepared
for 'JAGS', or modelled. The functions provided here allow you to replicate
analyses performed by the United State Geological Survey (USGS, see Link
and Sauer (2011) <doi:10.1525/auk.2010.09220>) and Canadian Wildlife Service
(CWS, see Smith and Edwards (2020) <doi:10.1101/2020.03.26.010215>).",2020-05-31,Brandon P.M. Edwards,https://github.com/BrandonEdwards/bbsBayes,TRUE,https://github.com/brandonedwards/bbsbayes,136,12,2020-05-30T20:04:14Z,11.333333333333334
bbw,"The blocked weighted bootstrap (BBW) is an estimation technique
for use with data from two-stage cluster sampled surveys in which either
prior weighting (e.g. population-proportional sampling or PPS as used in
Standardized Monitoring and Assessment of Relief and Transitions or SMART
surveys) or posterior weighting (e.g. as used in rapid assessment method or
RAM and simple spatial sampling method or S3M surveys). The method was
developed by Accion Contra la Faim, Brixton Health, Concern Worldwide,
Global Alliance for Improved Nutrition, UNICEF Sierra Leone, UNICEF Sudan
and Valid International. It has been tested by the Centers for Disease
Control (CDC) using infant and young child feeding (IYCF) data. See Cameron
et al (2008) <doi:10.1162/rest.90.3.414> for application of bootstrap
to cluster samples. See Aaron et al (2016) <doi:10.1371/journal.pone.0163176>
and Aaron et al (2016) <doi:10.1371/journal.pone.0162462> for application
of the blocked weighted bootstrap to estimate indicators from two-stage
cluster sampled surveys.",2018-01-17,Mark Myatt,https://github.com/validmeasures/bbw,TRUE,https://github.com/validmeasures/bbw,10257,2,2020-01-05T22:36:22Z,5128.5
bcdata,"Search, query, and download tabular and
'geospatial' data from the British Columbia Data Catalogue
(<https://catalogue.data.gov.bc.ca/>). Search catalogue data records
based on keywords, data licence, sector, data format, and B.C.
government organization. View metadata directly in R, download many
data formats, and query 'geospatial' data available via the B.C.
government Web Feature Service ('WFS') using 'dplyr' syntax.",2019-12-17,Andy Teucher,"https://bcgov.github.io/bcdata/,
https://catalogue.data.gov.bc.ca/,
https://github.com/bcgov/bcdata",TRUE,https://github.com/bcgov/bcdata,3949,45,2020-06-04T23:32:36Z,87.75555555555556
Bchron,"Enables quick calibration of radiocarbon dates under various
calibration curves (including user generated ones); age-depth modelling
as per the algorithm of Haslett and Parnell (2008) <DOI:10.1111/j.1467-9876.2008.00623.x>; Relative sea level
rate estimation incorporating time uncertainty in polynomial regression
models (Parnell and Gehrels 2015) <DOI:10.1002/9781118452547.ch32>; non-parametric phase modelling via
Gaussian mixtures as a means to determine the activity of a site
(and as an alternative to the Oxcal function SUM; currently
unpublished), and reverse calibration of dates from calibrated into
un-calibrated years (also unpublished).",2020-04-13,Andrew Parnell,http://andrewcparnell.github.io/Bchron/,TRUE,https://github.com/andrewcparnell/bchron,48248,17,2020-04-13T12:00:31Z,2838.1176470588234
bcmaps,"Provides access to various spatial layers for B.C., such as
administrative boundaries, natural resource management boundaries, etc.
All layers are imported from the 'bcmapsdata' package as 'sf' or 'Spatial' objects
through function calls in this package. All layers are in B.C. 'Albers' equal-area projection
<http://spatialreference.org/ref/epsg/nad83-bc-albers/>, which is the B.C.
government standard.",2020-04-29,Andy Teucher,https://github.com/bcgov/bcmaps,TRUE,https://github.com/bcgov/bcmaps,13558,42,2020-04-28T16:41:00Z,322.8095238095238
bcrm,"Implements a wide variety of one- and two-parameter Bayesian CRM
designs. The program can run interactively, allowing the user to enter outcomes
after each cohort has been recruited, or via simulation to assess operating
characteristics. See Sweeting et al. (2013): <doi:10.18637/jss.v054.i13>.",2019-08-23,Graham Wheeler,https://github.com/mikesweeting/bcrm,TRUE,https://github.com/mikesweeting/bcrm,30575,0,2019-08-19T09:40:49Z,NA
bcTSNE,"Implements the projected t-SNE method for batch correction of
high-dimensional data. Please see Aliverti et al. (2020)
<doi:10.1093/bioinformatics/btaa189> for more information.",2020-04-28,Dayne L Filer,https://github.com/emanuelealiverti/BC_tSNE,TRUE,https://github.com/emanuelealiverti/bc_tsne,660,4,2020-04-15T14:38:19Z,165
bdchecks,Supplies a Shiny app and a set of functions to perform and managing data checks for biodiversity data. ,2019-02-18,Povilas Gibas,https://github.com/bd-R/bdchecks,TRUE,https://github.com/bd-r/bdchecks,6698,1,2020-03-28T04:33:26Z,6698
bdclean,"Provides features to manage the complete workflow for biodiversity data cleaning. Uploading data, gathering input from users (in order to adjust cleaning procedures), cleaning data and finally, generating various reports and several versions of the data. Facilitates user-level data cleaning, designed for the inexperienced R user. T Gueta et al (2018) <doi:10.3897/biss.2.25564>. T Gueta et al (2017) <doi:10.3897/tdwgproceedings.1.20311>.",2019-04-11,Thiloshon Nagarajah,"https://github.com/bd-R/bdclean,
https://bd-r.github.io/The-bdverse/index.html",TRUE,https://github.com/bd-r/bdclean,5786,5,2020-05-16T10:51:58Z,1157.2
bdl,"Interface to Local Data Bank ('Bank Danych Lokalnych' - 'bdl') API
<https://api.stat.gov.pl/Home/BdlApi?lang=en> with set of useful tools like
quick plotting and map generating using data from bank. ",2020-04-01,Artur Sławomirski,https://github.com/statisticspoland/R_Package_to_API_BDL,TRUE,https://github.com/statisticspoland/r_package_to_api_bdl,3656,10,2020-03-29T21:54:24Z,365.6
bdpar,"
Provide a tool to easily build customized data flows to pre-process large volumes
of information from different sources. To this end, 'bdpar' allows to (i) easily use and
create new functionalities and (ii) develop new data source extractors according to the
user needs. Additionally, the package provides by default a predefined data flow
to extract and pre-process the most relevant information (tokens, dates, ... ) from some textual
sources (SMS, Email, tweets, YouTube comments).",2020-02-20,Miguel Ferreiro-Díaz,https://github.com/miferreiro/bdpar,TRUE,https://github.com/miferreiro/bdpar,4928,4,2020-02-20T09:47:40Z,1232
beakr,"A minimalist web framework for developing application programming
interfaces in R that provides a flexible framework for handling common
HTTP-requests, errors, logging, and an ability to integrate any R code as
server middle-ware.",2020-02-10,Jonathan Callahan,https://github.com/MazamaScience/beakr,TRUE,https://github.com/mazamascience/beakr,2069,55,2020-02-21T22:35:38Z,37.61818181818182
beam,"Fast Bayesian inference of marginal and conditional independence structures from high-dimensional data. Leday and Richardson (2019), Biometrics, <doi:10.1111/biom.13064>.",2020-05-28,Gwenael G.R. Leday,https://github.com/gleday/beam,TRUE,https://github.com/gleday/beam,11032,0,2020-05-28T17:52:32Z,NA
beats,"Import and process electrocardiogram (ECG) data.
Reads binary data files from UFI devices (.ube files) and provides a
Shiny app for finding and exporting heart beats.",2020-02-28,Max Czapanskiy,https://github.com/FlukeAndFeather/beats,TRUE,https://github.com/flukeandfeather/beats,1812,1,2020-02-20T15:59:05Z,1812
beautier,"'BEAST2' (<https://www.beast2.org>) is a widely used
Bayesian phylogenetic tool, that uses DNA/RNA/protein data
and many model priors to create a posterior of jointly estimated
phylogenies and parameters.
'BEAUti 2' (which is part of 'BEAST2') is a GUI tool
that allows users to specify the many possible setups
and generates the XML file 'BEAST2' needs to run.
This package provides a way to create 'BEAST2' input
files without active user input, but using
R function calls instead.",2020-05-06,Richèl J.C. Bilderbeek,"https://docs.ropensci.org/beautier,
https://github.com/ropensci/beautier",TRUE,https://github.com/ropensci/beautier,9420,6,2020-04-22T11:56:07Z,1570
BEDMatrix,"A matrix-like data structure that allows for efficient,
convenient, and scalable subsetting of binary genotype/phenotype files
generated by PLINK (<https://www.cog-genomics.org/plink2>), the whole
genome association analysis toolset, without loading the entire file into
memory.",2020-03-11,Alexander Grueneberg,https://github.com/QuantGen/BEDMatrix,TRUE,https://github.com/quantgen/bedmatrix,26565,10,2020-03-11T17:54:51Z,2656.5
beezdemand,"Facilitates many of the analyses performed in studies of
behavioral economic demand. The package supports commonly-used options for
modeling operant demand including (1) data screening proposed by Stein,
Koffarnus, Snider, Quisenberry, & Bickel (2015; <doi:10.1037/pha0000020>),
(2) fitting models of demand such as linear (Hursh, Raslear, Bauman,
& Black, 1989, <doi:10.1007/978-94-009-2470-3_22>), exponential (Hursh & Silberberg, 2008,
<doi:10.1037/0033-295X.115.1.186>) and modified exponential (Koffarnus,
Franck, Stein, & Bickel, 2015, <doi:10.1037/pha0000045>), and (3) calculating
numerous measures relevant to applied behavioral economists (Intensity,
Pmax, Omax). Also supports plotting and comparing data.",2018-07-31,Brent Kaplan,https://github.com/brentkaplan/beezdemand,TRUE,https://github.com/brentkaplan/beezdemand,8349,8,2020-06-08T15:57:20Z,1043.625
beginr,"Useful functions for R beginners, including hints for the arguments of the 'plot()' function, self-defined functions for error bars, user-customized pair plots and hist plots, enhanced linear regression figures, etc.. This package could be helpful to R experts as well.",2019-05-02,Peng Zhao,https://github.com/pzhaonet/beginr,TRUE,https://github.com/pzhaonet/beginr,16187,12,2020-02-10T08:52:24Z,1348.9166666666667
behavr,Implements an S3 class based on 'data.table' to store and process efficiently ethomics (high-throughput behavioural) data.,2019-01-03,Quentin Geissmann,https://github.com/rethomics/behavr,TRUE,https://github.com/rethomics/behavr,12218,4,2020-06-09T01:45:46Z,3054.5
belg,"Calculates the Boltzmann entropy of a landscape gradient.
This package uses the analytical method created by Gao, P., Zhang, H.
and Li, Z., 2018 (<doi:10.1111/tgis.12315>) and by Gao, P. and Li, Z., 2019
(<doi:10.1007/s10980-019-00854-3>). It also extend the original ideas by
allowing calculations on data with missing values.",2020-04-01,Jakub Nowosad,https://r-spatialecology.github.io/belg/,TRUE,https://github.com/r-spatialecology/belg,12220,9,2020-04-29T12:16:21Z,1357.7777777777778
bench,Tools to accurately benchmark and analyze execution times for R expressions.,2020-01-13,Jim Hester,https://github.com/r-lib/bench,TRUE,https://github.com/r-lib/bench,72927,176,2020-05-20T12:43:06Z,414.35795454545456
benchmarkme,"Benchmark your CPU and compare against other CPUs.
Also provides functions for obtaining system specifications, such as
RAM, CPU type, and R version.",2020-05-09,Colin Gillespie,https://github.com/csgillespie/benchmarkme,TRUE,https://github.com/csgillespie/benchmarkme,52838,27,2020-05-09T20:59:15Z,1956.962962962963
benchmarkmeData,"Crowd sourced benchmarks from running the
'benchmarkme' package.",2020-04-23,Colin Gillespie,https://github.com/csgillespie/benchmarkme-data,TRUE,https://github.com/csgillespie/benchmarkme-data,52627,1,2020-04-23T14:31:48Z,52627
benford.analysis,Provides tools that make it easier to validate data using Benford's Law.,2018-12-21,Carlos Cinelli,http://github.com/carloscinelli/benford.analysis,TRUE,https://github.com/carloscinelli/benford.analysis,36968,28,2019-08-24T05:04:03Z,1320.2857142857142
berryFunctions,"Draw horizontal histograms, color scattered points by 3rd dimension,
enhance date- and log-axis plots, zoom in X11 graphics, trace errors and warnings,
use the unit hydrograph in a linear storage cascade, convert lists to data.frames and arrays,
fit multiple functions.",2020-06-06,Berry Boessenkool,https://github.com/brry/berryFunctions,TRUE,https://github.com/brry/berryfunctions,61949,8,2020-06-09T12:04:45Z,7743.625
BEST,"An alternative to t-tests, producing posterior estimates
for group means and standard deviations and their differences and
effect sizes.",2020-05-18,John K. Kruschke and Mike Meredith,NA,TRUE,https://github.com/mikemeredith/best,50153,15,2020-05-22T06:35:25Z,3343.5333333333333
bestNormalize,"Estimate a suite of normalizing transformations, including
a new adaptation of a technique based on ranks which can guarantee
normally distributed transformed data if there are no ties: ordered
quantile normalization (ORQ). ORQ normalization combines a rank-mapping
approach with a shifted logit approximation that allows
the transformation to work on data outside the original domain. It is
also able to handle new data within the original domain via linear
interpolation. The package is built to estimate the best normalizing
transformation for a vector consistently and accurately. It implements
the Box-Cox transformation, the Yeo-Johnson transformation, three types
of Lambert WxF transformations, and the ordered quantile normalization
transformation. It also estimates the normalization efficacy of other
commonly used transformations, and finally it allows users to specify
custom transformations or normalization statistics.",2020-06-08,Ryan Andrew Peterson,https://github.com/petersonR/bestNormalize,TRUE,https://github.com/petersonr/bestnormalize,95532,17,2020-06-09T14:14:52Z,5619.529411764706
bets.covid19,"Implements likelihood inference for early epidemic analysis. BETS is short for the four key epidemiological events being modeled: Begin of exposure, End of exposure, time of Transmission, and time of Symptom onset. The package contains a dataset of the trajectory of confirmed cases during the coronavirus disease (COVID-19) early outbreak. More detail of the statistical methods can be found in Zhao et al. (2020) <arXiv:2004.07743>.",2020-05-12,Qingyuan Zhao,https://github.com/qingyuanzhao/bets.covid19,TRUE,https://github.com/qingyuanzhao/bets.covid19,463,26,2020-06-01T18:24:53Z,17.807692307692307
BFpack,"Implementation of various default Bayes factors
for testing statistical hypotheses. The package is
intended for applied quantitative researchers in the
social and behavioral sciences, medical research,
and related fields. The Bayes factor tests can be
executed for statistical models such as
univariate and multivariate normal linear models,
generalized linear models, special cases of
linear mixed models, survival models, relational
event models. Parameters that can be tested are
location parameters (e.g., regression coefficients),
variances (e.g., group variances), and measures of
association (e.g,. bivariate correlations).
The statistical underpinnings are
described in
Mulder, Hoijtink, and Xin (2019) <arXiv:1904.00679>,
Mulder and Gelissen (2019) <arXiv:1807.05819>,
Mulder (2016) <DOI:10.1016/j.jmp.2014.09.004>,
Mulder and Fox (2019) <DOI:10.1214/18-BA1115>,
Mulder and Fox (2013) <DOI:10.1007/s11222-011-9295-3>,
Boeing-Messing, van Assen, Hofman, Hoijtink, and Mulder <DOI:10.1037/met0000116>,
Hoijtink, Mulder, van Lissa, and Gu, (2018) <DOI:10.31234/osf.io/v3shc>,
Gu, Mulder, and Hoijtink, (2018) <DOI:10.1111/bmsp.12110>,
Hoijtink, Gu, and Mulder, (2018) <DOI:10.1111/bmsp.12145>, and
Hoijtink, Gu, Mulder, and Rosseel, (2018) <DOI:10.1037/met0000187>.",2020-05-11,Joris Mulder,https://github.com/jomulder/BFpack,TRUE,https://github.com/jomulder/bfpack,4090,6,2020-05-22T15:03:18Z,681.6666666666666
BFS,Search and download data from the Swiss Federal Statistical Office <https://www.bfs.admin.ch/>.,2020-03-25,Félix Luginbuhl,"https://felixluginbuhl.com/BFS, https://github.com/lgnbhl/BFS",TRUE,https://github.com/lgnbhl/bfs,3996,4,2020-03-30T16:58:08Z,999
bfsl,"Provides the solution from York (1968) <doi:10.1016/S0012-821X(68)80059-7>
for fitting a straight line to bivariate data with errors in both coordinates.
It gives unbiased estimates of the intercept, slope and standard errors of the
best-fit straight line to independent points with (possibly correlated)
normally distributed errors in both x and y. Other commonly used
errors-in-variables methods, such as orthogonal distance regression, geometric
mean regression or Deming regression are special cases of York’s solution.",2018-12-16,Patrick Sturm,https://github.com/pasturm/bfsl,TRUE,https://github.com/pasturm/bfsl,6165,0,2020-04-17T10:26:03Z,NA
bfsMaps,"At the Swiss Federal Statistical Office (SFSO), spatial maps of Switzerland are available free of charge as 'Cartographic bases for small-scale thematic mapping'. This package contains convenience functions to import ESRI (Environmental Systems Research Institute) shape files using the package 'rgdal' and to plot them easily and quickly without having to worry too much about the technical details.
It contains utilities to combine multiple areas to one single polygon and to find neighbours for single regions. For any point on a map, a special locator can be used to determine to which municipality, district or canton it belongs.",2020-04-17,Andri Signorell,https://github.com/AndriSignorell/bfsMaps/,TRUE,https://github.com/andrisignorell/bfsmaps,1275,0,2020-04-28T15:45:34Z,NA
bfw,"Derived from the work of Kruschke (2015, <ISBN:9780124058880>),
the present package aims to provide a framework for conducting Bayesian
analysis using Markov chain Monte Carlo (MCMC) sampling utilizing the
Just Another Gibbs Sampler ('JAGS', Plummer, 2003, <http://mcmc-jags.sourceforge.net/>).
The initial version includes several modules for conducting Bayesian
equivalents of chi-squared tests, analysis of variance (ANOVA),
multiple (hierarchical) regression, softmax regression, and for fitting data
(e.g., structural equation modeling).",2019-11-25,Øystein Olav Skaar,https://github.com/oeysan/bfw/,TRUE,https://github.com/oeysan/bfw,11096,9,2019-11-25T08:02:17Z,1232.888888888889
BGData,"An umbrella package providing a phenotype/genotype data structure
and scalable and efficient computational methods for large genomic datasets
in combination with several other packages: 'BEDMatrix', 'LinkedMatrix',
and 'symDMatrix'.",2019-01-25,Alexander Grueneberg,https://github.com/QuantGen/BGData,TRUE,https://github.com/quantgen/bgdata,14333,17,2020-05-12T21:18:25Z,843.1176470588235
BGGM,"Fit Bayesian Gaussian graphical models. The methods are separated into
two Bayesian approaches for inference: hypothesis testing and estimation. There are
extensions for confirmatory hypothesis testing, comparing Gaussian graphical models,
and node wise predictability. These methods were recently introduced in the Gaussian
graphical model literature, including
Williams (2019) <doi:10.31234/osf.io/x8dpr>,
Williams and Mulder (2019) <doi:10.31234/osf.io/ypxd8>,
Williams, Rast, Pericchi, and Mulder (2019) <doi:10.31234/osf.io/yt386>.",2020-05-31,Donald Williams,NA,TRUE,https://github.com/donaldrwilliams/bggm,2572,20,2020-06-08T22:17:02Z,128.6
bggum,"Provides a Metropolis-coupled Markov chain Monte Carlo sampler,
post-processing and parameter estimation functions, and plotting utilities
for the generalized graded unfolding model of Roberts, Donoghue, and
Laughlin (2000) <doi:10.1177/01466216000241001>.",2020-01-19,JBrandon Duck-Mayr,https://github.com/duckmayr/bggum,TRUE,https://github.com/duckmayr/bggum,2731,2,2020-01-19T13:37:45Z,1365.5
BH,"Boost provides free peer-reviewed portable C++ source
libraries. A large part of Boost is provided as C++ template code
which is resolved entirely at compile-time without linking. This
package aims to provide the most useful subset of Boost libraries
for template use among CRAN packages. By placing these libraries in
this package, we offer a more efficient distribution system for CRAN
as replication of this code in the sources of other packages is
avoided. As of release 1.72.0-3, the following Boost libraries are
included: 'accumulators' 'algorithm' 'align' 'any' 'atomic' 'bimap'
'bind' 'circular_buffer' 'compute' 'concept' 'config' 'container'
'date_time' 'detail' 'dynamic_bitset' 'exception' 'flyweight'
'foreach' 'functional' 'fusion' 'geometry' 'graph' 'heap' 'icl'
'integer' 'interprocess' 'intrusive' 'io' 'iostreams' 'iterator'
'math' 'move' 'mp11' 'mpl' 'multiprecision' 'numeric' 'pending'
'phoenix' 'polygon' 'preprocessor' 'property_tree' 'random' 'range'
'scope_exit' 'smart_ptr' 'sort' 'spirit' 'tuple' 'type_traits'
'typeof' 'unordered' 'utility' 'uuid'.",2020-01-08,Dirk Eddelbuettel,https://github.com/eddelbuettel/bh,TRUE,https://github.com/eddelbuettel/bh,19932259,66,2020-05-03T19:22:25Z,302003.92424242425
bib2df,Parse a BibTeX file to a data.frame to make it accessible for further analysis and visualization.,2019-05-22,Philipp Ottolinger,https://github.com/ropensci/bib2df,TRUE,https://github.com/ropensci/bib2df,16578,81,2019-12-09T12:07:40Z,204.66666666666666
bibliometrix,"Tool for quantitative research in scientometrics and bibliometrics.
It provides various routines for importing bibliographic data from 'SCOPUS' (<http://scopus.com>),
'Clarivate Analytics Web of Science' (<http://www.webofknowledge.com/>), 'Digital Science Dimensions'
(<https://www.dimensions.ai/>), 'Cochrane Library' (<http://www.cochranelibrary.com/>)
and 'PubMed' (<https://www.ncbi.nlm.nih.gov/pubmed/>) databases, performing bibliometric analysis
and building networks for co-citation, coupling, scientific collaboration and co-word analysis.",2020-05-25,Massimo Aria,"https://www.bibliometrix.org,
https://github.com/massimoaria/bibliometrix",TRUE,https://github.com/massimoaria/bibliometrix,181549,148,2020-06-01T19:01:45Z,1226.6824324324325
biclique,"A tool for enumerating maximal complete bipartite graphs. The input should be an edge list file or a binary matrix file.
The output are maximal complete bipartite graphs. Algorithms used can be found in this paper Y. Lu et al. BMC Res Notes 13, 88 (2020) <doi:10.1186/s13104-020-04955-0>.",2020-03-03,Yuping Lu,https://github.com/YupingLu/biclique,TRUE,https://github.com/yupinglu/biclique,10895,14,2020-03-03T21:51:21Z,778.2142857142857
biclustermd,"Biclustering is a statistical learning technique that simultaneously
partitions and clusters rows and columns of a data matrix. Since the solution
space of biclustering is infeasible to completely search with current
computational mechanisms, this package uses a greedy heuristic. The algorithm
featured in this package is, to the best of our knowledge, the first biclustering
algorithm to work on data with missing values. Li, J., Reisner, J., Pham, H.,
Olafsson, S., and Vardeman, S. (2020) Biclustering with Missing Data. Information
Sciences, 510, 304–316.",2020-04-15,John Reisner,http://github.com/jreisner/biclustermd,TRUE,https://github.com/jreisner/biclustermd,4669,3,2020-04-15T01:03:16Z,1556.3333333333333
BifactorIndicesCalculator,"The calculator computes bifactor indices such as explained common variance (ECV), hierarchical Omega (OmegaH), percentage of uncontaminated correlations (PUC), item explained common variance (I-ECV), and more. This package is an R version of the 'Excel' based 'Bifactor Indices Calculator' (Dueber, 2017) <doi:10.13023/edp.tool.01> with added convenience features for directly utilizing output from several programs that can fit confirmatory factor analysis or item response models.",2020-04-11,David Dueber,https://github.com/ddueber/BifactorIndicesCalculator,TRUE,https://github.com/ddueber/bifactorindicescalculator,3501,2,2020-04-10T01:44:49Z,1750.5
bife,"Estimates fixed effects binary choice models (logit and probit) with potentially many
individual fixed effects and computes average partial effects. Incidental parameter bias can be
reduced with an asymptotic bias-correction proposed by Fernandez-Val (2009)
<doi:10.1016/j.jeconom.2009.02.007>.",2020-01-12,Amrei Stammann,https://github.com/amrei-stammann/bife,TRUE,https://github.com/amrei-stammann/bife,38053,1,2020-01-19T12:42:15Z,38053
BIFIEsurvey,"
Contains tools for survey statistics (especially in educational
assessment) for datasets with replication designs (jackknife,
bootstrap, replicate weights; see Kolenikov, 2010;
Pfefferman & Rao, 2009a, 2009b, <doi:10.1016/S0169-7161(09)70003-3>,
<doi:10.1016/S0169-7161(09)70037-9>); Shao, 1996,
<doi:10.1080/02331889708802523>).
Descriptive statistics, linear and logistic regression,
path models for manifest variables with measurement error
correction and two-level hierarchical regressions for weighted
samples are included. Statistical inference can be conducted for
multiply imputed datasets and nested multiply imputed datasets
and is in particularly suited for the analysis of plausible values
(for details see George, Oberwimmer & Itzlinger-Bruneforth, 2016;
Bruneforth, Oberwimmer & Robitzsch, 2016; Robitzsch, Pham &
Yanagida, 2016; <doi:10.17888/fdb-demo:bistE813I-16a>).
The package development was supported by BIFIE (Federal Institute for
Educational Research, Innovation and Development of the Austrian
School System; Salzburg, Austria).",2019-06-12,Alexander Robitzsch,"http://www.bifie.at,
https://www.bifie.at/bildungsforschung/forschungsdatenbibliothek,
https://www.bifie.at/large-scale-assessment-mit-r-methodische-grundlagen-der-oesterreichischen-bildungsstandardueberpruefung,
https://github.com/alexanderrobitzsch/BIFIEsurvey,
https://sites.google.com/site/alexanderrobitzsch2/software",TRUE,https://github.com/alexanderrobitzsch/bifiesurvey,125753,1,2019-06-12T15:24:00Z,125753
bigassertr,"
Enhanced message functions (cat() / message() / warning() / error())
using wrappers around sprintf(). Also, multiple assertion functions
(e.g. to check class, length, values, files, arguments, etc.).",2020-04-01,Florian Privé,https://github.com/privefl/bigassertr,TRUE,https://github.com/privefl/bigassertr,16977,1,2020-04-01T07:55:39Z,16977
BIGDAWG,"Data sets and functions for chi-squared Hardy-Weinberg and case-control association tests of highly polymorphic genetic data [e.g., human leukocyte antigen (HLA) data]. Performs association tests at multiple levels of polymorphism (haplotype, locus and HLA amino-acids) as described in Pappas DJ, Marin W, Hollenbach JA, Mack SJ (2016) <doi:10.1016/j.humimm.2015.12.006>. Combines rare variants to a common class to account for sparse cells in tables as described by Hollenbach JA, Mack SJ, Thomson G, Gourraud PA (2012) <doi:10.1007/978-1-61779-842-9_14>.",2019-11-12,Derek Pappas,"http://tools.immunogenomics.org/,
https://github.com/IgDAWG/BIGDAWG",TRUE,https://github.com/igdawg/bigdawg,30306,2,2019-10-18T01:52:44Z,15153
BIGL,"Response surface methods for drug synergy analysis. Available
methods include generalized and classical Loewe formulations as well as Highest
Single Agent methodology. Response surfaces can be plotted in an interactive
3-D plot and formal statistical tests for presence of synergistic effects are
available. Implemented methods and tests are described in the article
""BIGL: Biochemically Intuitive Generalized Loewe null model for prediction
of the expected combined effect compatible with partial agonism and antagonism""
by Koen Van der Borght, Annelies Tourny, Rytis Bagdziunas, Olivier Thas,
Maxim Nazarov, Heather Turner, Bie Verbist & Hugo Ceulemans (2017)
<doi:10.1038/s41598-017-18068-5>.",2020-02-20,Heather Turner,https://github.com/openanalytics/BIGL,TRUE,https://github.com/openanalytics/bigl,22107,4,2020-02-04T14:29:06Z,5526.75
biglasso,"Extend lasso and elastic-net model fitting for ultrahigh-dimensional,
multi-gigabyte data sets that cannot be loaded into memory. It's much more
memory- and computation-efficient as compared to existing lasso-fitting packages
like 'glmnet' and 'ncvreg', thus allowing for very powerful big data analysis
even with an ordinary laptop.",2019-09-09,Yaohui Zeng,"https://github.com/YaohuiZeng/biglasso,
https://arxiv.org/abs/1701.05936",TRUE,https://github.com/yaohuizeng/biglasso,43694,70,2020-02-13T17:56:46Z,624.2
bigmemory,"Create, store, access, and manipulate massive matrices.
Matrices are allocated to shared memory and may use memory-mapped
files. Packages 'biganalytics', 'bigtabulate', 'synchronicity', and
'bigalgebra' provide advanced functionality.",2019-12-23,Michael J. Kane,https://github.com/kaneplusplus/bigmemory,TRUE,https://github.com/kaneplusplus/bigmemory,431107,87,2019-12-23T00:36:01Z,4955.252873563219
bigparallelr,"Utility functions for easy parallelism in R. Include some reexports
from other packages, utility functions for splitting and parallelizing over
blocks, and choosing and setting the number of cores used.",2020-01-09,Florian Privé,https://github.com/privefl/bigparallelr,TRUE,https://github.com/privefl/bigparallelr,12175,1,2020-02-29T17:23:47Z,12175
bigQueryR,"Interface with 'Google BigQuery',
see <https://cloud.google.com/bigquery/> for more information.
This package uses 'googleAuthR' so is compatible with similar packages,
including 'Google Cloud Storage' (<https://cloud.google.com/storage/>) for result extracts. ",2019-10-09,Mark Edmondson,http://code.markedmondson.me/bigQueryR/,TRUE,https://github.com/cloudyr/bigqueryr,47510,31,2020-03-12T11:53:12Z,1532.5806451612902
bigreadr,"Read large text files by splitting them in smaller files.
Package 'bigreadr' also provides some convenient wrappers around fread()
and fwrite() from package 'data.table'. ",2019-10-18,Florian Privé,https://github.com/privefl/bigreadr,TRUE,https://github.com/privefl/bigreadr,23452,27,2019-11-02T08:25:29Z,868.5925925925926
bigrquery,Easily talk to Google's 'BigQuery' database from R.,2020-05-15,Hadley Wickham,https://github.com/rstats-db/bigrquery,TRUE,https://github.com/rstats-db/bigrquery,431039,393,2020-05-15T16:09:57Z,1096.7913486005089
bigsnpr,"Easy-to-use, efficient, flexible and scalable tools
for analyzing massive SNP arrays <doi:10.1093/bioinformatics/bty185>.",2020-03-09,Florian Privé,https://privefl.github.io/bigsnpr,TRUE,https://github.com/privefl/bigsnpr,3693,55,2020-06-01T05:40:05Z,67.14545454545454
bigsparser,"Provides a sparse matrix format with data stored on disk, to be
used in both R and C++. This is intended for more efficient use of sparse
data in C++ and also when parallelizing, since data on disk does not need
copying. Only a limited number of features will be implemented. For now,
conversion can be performed from a 'dgCMatrix' of R package 'Matrix'.",2020-05-25,Florian Privé,https://github.com/privefl/bigsparser,TRUE,https://github.com/privefl/bigsparser,1281,2,2020-05-15T14:54:49Z,640.5
bigstatsr,"Easy-to-use, efficient, flexible and scalable statistical tools.
Package bigstatsr provides and uses Filebacked Big Matrices via memory-mapping.
It provides for instance matrix operations, Principal Component Analysis,
sparse linear supervised models, utility functions and more
<doi:10.1093/bioinformatics/bty185>.",2020-03-12,Florian Privé,https://privefl.github.io/bigstatsr,TRUE,https://github.com/privefl/bigstatsr,23776,101,2020-03-11T17:15:11Z,235.40594059405942
bigstep,"Selecting linear and generalized linear models for large data sets
using modified stepwise procedure and modern selection criteria (like
modifications of Bayesian Information Criterion). Selection can be
performed on data which exceed RAM capacity.",2019-07-25,Piotr Szulc,http://github.com/pmszulc/bigstep,TRUE,https://github.com/pmszulc/bigstep,16186,1,2019-07-23T06:55:23Z,16186
bigutilsr,"Utility functions for large-scale data. For now, package 'bigutilsr'
mainly includes functions for outlier detection and PCA projection.",2020-05-15,Florian Privé,https://github.com/privefl/bigutilsr,TRUE,https://github.com/privefl/bigutilsr,9604,6,2020-03-30T16:06:58Z,1600.6666666666667
BigVAR,Estimates VAR and VARX models with structured Lasso Penalties.,2019-12-02,Will Nicholson,http://www.github.com/wbnicholson/BigVAR,TRUE,https://github.com/wbnicholson/bigvar,25067,31,2020-03-07T17:53:16Z,808.6129032258065
billboarder,"Provides an 'htmlwidgets' interface to 'billboard.js',
a re-usable easy interface JavaScript chart library, based on D3 v4+.
Chart types include line charts, scatterplots, bar/lollipop charts, histogram/density plots, pie/donut charts and gauge charts.
All charts are interactive, and a proxy method is implemented to smoothly update a chart without rendering it again in 'shiny' apps. ",2020-01-09,Victor Perrier,https://github.com/dreamRs/billboarder,TRUE,https://github.com/dreamrs/billboarder,43810,145,2020-05-18T10:32:01Z,302.13793103448273
binb,"A collection of 'LaTeX' styles using 'Beamer' customization for
pdf-based presentation slides in 'RMarkdown'. At present it contains
'RMarkdown' adaptations of the LaTeX themes 'Metropolis' (formerly 'mtheme')
theme by Matthias Vogelgesang and others (now included in 'TeXLive'), the
'IQSS' by Ista Zahn (which is included here), and the 'Monash' theme by
Rob J Hyndman. Additional (free) fonts may be needed: 'Metropolis' prefers
'Fira', and 'IQSS' requires 'Libertinus'.",2019-11-02,Dirk Eddelbuettel,https://github.com/eddelbuettel/binb,TRUE,https://github.com/eddelbuettel/binb,13082,136,2020-06-09T03:03:27Z,96.19117647058823
binman,"Tools and functions for managing the download of binary files.
Binary repositories are defined in 'YAML' format. Defining new
pre-download, download and post-download templates allow additional
repositories to be added.",2018-07-18,John Harrison,https://github.com/ropensci/binman,TRUE,https://github.com/ropensci/binman,135582,12,2019-12-09T12:08:28Z,11298.5
binmapr,"The raw NGS (Next Generation Sequencing) variants called
from GBS (Genotyping by Sequencing) / WES (Whole Exon Sequencing)/
WGS (Whole Genome Sequencing) may include many error sites. The
'binmapr' could fix the potential error sites and generate highly
confident markers for downstream analysis, such as QTL (quantitative
trait locus) mapping, genetic map construction.
Davey, J.W. (2011) <doi:10.1038/nrg3012>.",2019-10-20,Zhougeng Xu,https://github.com/xuzhougeng/binmapr,TRUE,https://github.com/xuzhougeng/binmapr,3130,8,2019-10-15T07:51:47Z,391.25
bioacoustics,"Contains all the necessary tools to process audio recordings of
various formats (e.g., WAV, WAC, MP3, ZC), filter noisy files,
display audio signals, detect and extract automatically acoustic
features for further analysis such as classification.",2020-05-24,Jean Marchal,https://github.com/wavx/bioacoustics/,TRUE,https://github.com/wavx/bioacoustics,22254,23,2020-05-23T18:34:05Z,967.5652173913044
bioC.logs,Download stats reports from the BioConductor.org stats website.,2020-02-13,Marcelo Ponce,https://github.com/mponce0/bioC.logs,TRUE,https://github.com/mponce0/bioc.logs,2155,0,2020-02-26T18:41:15Z,NA
BiocManager,A convenient tool to install and update Bioconductor packages.,2019-11-16,Martin Morgan,NA,TRUE,https://github.com/bioconductor/biocmanager,1444786,28,2020-05-23T20:05:55Z,51599.5
biocompute,"Tools to create, validate, and export BioCompute Objects
described in King et al. (2019) <doi:10.17605/osf.io/h59uh>.
Users can encode information in data frames, and compose
BioCompute Objects from the domains defined by the standard.
A checksum validator and a JSON schema validator are provided.
This package also supports exporting BioCompute Objects as JSON,
PDF, HTML, or 'Word' documents, and exporting to cloud-based platforms.",2019-11-28,Nan Xiao,"https://sbg.github.io/biocompute/,
https://github.com/sbg/biocompute",TRUE,https://github.com/sbg/biocompute,3071,1,2020-04-23T16:11:09Z,3071
biogram,"Tools for extraction and analysis of various
n-grams (k-mers) derived from biological sequences (proteins
or nucleic acids). Contains QuiPT (quick permutation test) for fast
feature-filtering of the n-gram data.",2020-03-31,Michal Burdukiewicz,https://github.com/michbur/biogram,TRUE,https://github.com/michbur/biogram,25558,6,2020-04-04T19:59:18Z,4259.666666666667
bioimagetools,"Tools for 3D imaging, mostly for biology/microscopy.
Read and write TIFF stacks. Functions for segmentation, filtering and analyzing 3D point patterns.",2020-05-29,Volker Schmid,https://bioimaginggroup.github.io/bioimagetools,TRUE,https://github.com/bioimaginggroup/bioimagetools,19344,3,2020-05-29T11:12:05Z,6448
BioInstaller,"
Can be used to integrate massive bioinformatics resources, such as tool/script and database. It provides the R functions and Shiny web application. Hundreds of bioinformatics tool/script and database have been included.",2018-11-20,Jianfeng Li,https://github.com/JhuangLab/BioInstaller,TRUE,https://github.com/jhuanglab/bioinstaller,38832,34,2019-11-28T07:59:31Z,1142.1176470588234
biomartr,"Perform large scale genomic data retrieval and functional annotation retrieval. This package aims to provide users with a standardized
way to automate genome, proteome, 'RNA', coding sequence ('CDS'), 'GFF', and metagenome
retrieval from 'NCBI RefSeq', 'NCBI Genbank', 'ENSEMBL', 'ENSEMBLGENOMES',
and 'UniProt' databases. Furthermore, an interface to the 'BioMart' database
(Smedley et al. (2009) <doi:10.1186/1471-2164-10-22>) allows users to retrieve
functional annotation for genomic loci. In addition, users can download entire databases such
as 'NCBI RefSeq' (Pruitt et al. (2007) <doi:10.1093/nar/gkl842>), 'NCBI nr',
'NCBI nt', 'NCBI Genbank' (Benson et al. (2013) <doi:10.1093/nar/gks1195>), etc. as
well as 'ENSEMBL' and 'ENSEMBLGENOMES' with only one command.",2020-01-10,Hajk-Georg Drost,"https://docs.ropensci.org/biomartr,
https://github.com/ropensci/biomartr",TRUE,https://github.com/ropensci/biomartr,58748,127,2020-06-04T07:21:25Z,462.5826771653543
BIOMASS,"Contains functions to estimate aboveground biomass/carbon and its uncertainty in tropical forests.
These functions allow to (1) retrieve and to correct taxonomy, (2) estimate wood density and its uncertainty,
(3) construct height-diameter models, (4) manage tree and plot coordinates,
(5) estimate the aboveground biomass/carbon at the stand level with associated uncertainty.
To cite BIOMASS, please use citation(""BIOMASS"").
See more in the article of Réjou-Méchain et al. (2017) <doi:10.1111/2041-210X.12753>.",2019-05-03,Maxime Réjou-Méchain,https://github.com/AMAP-dev/BIOMASS,TRUE,https://github.com/amap-dev/biomass,21519,5,2020-04-02T13:00:27Z,4303.8
BioMedR,"Calculating 293 chemical descriptors and 14 kinds of chemical fingerprints, 9920 protein descriptors based on protein sequences, more than 6000 DNA/RNA descriptors from nucleotide sequences, and six types of interaction descriptors using three different combining strategies. ",2019-07-05,Min-feng Zhu,https://github.com/wind22zhu/BioMedR,TRUE,https://github.com/wind22zhu/biomedr,6654,4,2019-10-19T11:02:19Z,1663.5
bioRad,"Extract, visualize and summarize aerial movements of birds and
insects from weather radar data. See <doi:10.1111/ecog.04028>
for a software paper describing package and methodologies.",2020-05-11,Adriaan M. Dokter,"https://github.com/adokter/bioRad,
https://adokter.github.io/bioRad",TRUE,https://github.com/adokter/biorad,8126,11,2020-05-27T18:27:10Z,738.7272727272727
bipartite,"Functions to visualise webs and calculate a series of indices commonly used to describe pattern in (ecological) webs. It focuses on webs consisting of only two levels (bipartite), e.g. pollination webs or predator-prey-webs. Visualisation is important to get an idea of what we are actually looking at, while the indices summarise different aspects of the web's topology. ",2020-04-03,Carsten F. Dormann,https://github.com/biometry/bipartite,TRUE,https://github.com/biometry/bipartite,119467,15,2020-05-29T12:57:01Z,7964.466666666666
BIRDS,"It helps making the evaluation and preparation of biodiversity data
easy, systematic and reproducible. It also helps the users to overlay the
point observations into a custom grid that is useful for further analysis.
The review summarise statistics that helps evaluate whether a set of species
observations is fit-for-use and take decisions upon its use of on further
analyses. It does so by quantifying the sampling effort (amount of effort
expended during an event) and data completeness (data gaps) to help judge
whether the data is representative, valid and fit for any intended purpose.
The 'BIRDS' package is most useful when working with heterogeneous data sets
with variation in the sampling process, i.e. where data have been collected
and reported in various ways and therefore varying in sampling effort
and data completeness (i.e. how well the reported observations describe the
true state). Primary biodiversity data (PBD) combining data from different
data sets, like e.g. Global Biodiversity Information Facility (GBIF) mediated
data, commonly vary in the ways data has been generated - containing
opportunistically collected presence-only data together with data from
systematic monitoring programs. The set of tools provided is aimed at
understanding the process that generated the data (i.e. observing, recording
and reporting species into databases). There is a non-vital function on this
package (makeDggrid()) that depends the package 'dggridR' that is no longer on CRAN.
You can find it here <https://github.com/r-barnes/dggridR>. References:
Ruete (2015) <doi:10.3897/BDJ.3.e5361>; Szabo, Vesk, Baxter & Possingham (2010)
<doi:10.1890/09-0877.1>; Telfer, Preston & Rothery (2002) <doi:10.1016/S0006-3207(02)00050-2>.",2020-03-20,Debora Arlt,https://github.com/greensway/BIRDS,TRUE,https://github.com/greensway/birds,1614,3,2020-06-04T21:19:56Z,538
biscale,"Provides a 'ggplot2' centric approach to bivariate mapping. This is a
technique that maps two quantities simultaneously rather than the single value
that most thematic maps display. The package provides a suite of tools
for calculating breaks using multiple different approaches, a selection of
palettes appropriate for bivariate mapping and a scale function for 'ggplot2'
calls that adds those palettes to maps. A tool for creating bivariate legends
is also included.",2020-05-06,Christopher Prener,https://github.com/slu-openGIS/biscale,TRUE,https://github.com/slu-opengis/biscale,5627,58,2020-05-06T17:02:54Z,97.01724137931035
BisqueRNA,"Provides tools to accurately estimate cell type abundances
from heterogeneous bulk expression. A reference-based method utilizes
single-cell information to generate a signature matrix and transformation
of bulk expression for accurate regression based estimates. A marker-based
method utilizes known cell-specific marker genes to measure relative
abundances across samples.
For more details, see Jew and Alvarez et al (2019) <doi:10.1101/669911>.",2020-05-04,Brandon Jew,https://www.biorxiv.org/content/10.1101/669911v1,TRUE,https://github.com/cozygene/bisque,5749,23,2020-05-04T04:24:22Z,249.95652173913044
bitmexr,"A client for cryptocurrency exchange BitMEX
<https://www.bitmex.com/> including the ability to obtain historic
trade data and place, edit and cancel orders. BitMEX's Testnet and
live API are both supported.",2020-05-25,Harry Fisher,"https://github.com/hfshr/bitmexr, https://hfshr.github.io/bitmexr",TRUE,https://github.com/hfshr/bitmexr,718,2,2020-06-02T17:53:55Z,359
bitsqueezr,"Provides an implementation of floating-point quantization algorithms for use in precision-preserving
compression, similar to the approach taken in the 'netCDF operators' (NCO) software package and
described in Zender (2016) <doi:10.5194/gmd-2016-63>.",2020-01-17,Daniel Baston,https://github.com/dbaston/bitsqueezr,TRUE,https://github.com/dbaston/bitsqueezr,6571,0,2019-09-30T17:35:23Z,NA
BivRec,"A collection of models for bivariate alternating recurrent event data analysis.
Includes non-parametric and semi-parametric methods.",2020-01-15,Sandra Castro-Pearson,https://github.com/SandraCastroPearson/BivRec,TRUE,https://github.com/sandracastropearson/bivrec,7594,1,2020-01-19T18:18:52Z,7594
bjscrapeR,"Drawing heavy influence from 'blscrapeR', this package scrapes crime data from <https://www.bjs.gov/>. Specifically, it scrapes data from the National Crime Victimization Survey which tracks personal and household crime in the USA. The idea is to utilize the 'tidyverse' methodology to create an efficient work flow when dealing with crime statistics.",2018-06-06,Dylan McDowell,https://github.com/dylanjm/bjscrapeR,TRUE,https://github.com/dylanjm/bjscraper,8124,4,2019-06-28T04:46:29Z,2031
bkmr,"Implementation of a statistical approach
for estimating the joint health effects of multiple
concurrent exposures.",2017-03-24,Jennifer F. Bobb,https://github.com/jenfb/bkmr,TRUE,https://github.com/jenfb/bkmr,16825,12,2020-05-25T20:57:54Z,1402.0833333333333
blandr,"Carries out Bland Altman analyses (also known as a Tukey
mean-difference plot) as described by JM Bland and DG Altman in
1986 <doi:10.1016/S0140-6736(86)90837-8>. This package was created in
2015 as existing Bland-Altman analysis functions did not calculate
confidence intervals. This package was created to rectify this,
and create reproducible plots. This package is also available as a module
for the 'jamovi' statistical spreadsheet (see <https://www.jamovi.org>
for more information).",2018-05-10,Deepankar Datta,https://github.com/deepankardatta/blandr/,TRUE,https://github.com/deepankardatta/blandr,15914,10,2020-03-28T07:15:04Z,1591.4
blastula,"Compose and send out responsive HTML email messages that render
perfectly across a range of email clients and device sizes. Helper functions
let the user insert embedded images, web link buttons, and 'ggplot2' plot
objects into the message body. Messages can be sent through an 'SMTP'
server, through the 'RStudio Connect' service, or through the 'Mailgun' API
service <http://mailgun.com/>.",2020-05-19,Richard Iannone,https://github.com/rich-iannone/blastula,TRUE,https://github.com/rich-iannone/blastula,37627,297,2020-05-19T16:43:28Z,126.6902356902357
blindrecalc,"Computation of key characteristics and plots for blinded sample size recalculation.
Continuous as well as binary endpoints are supported in superiority and non-inferiority trials.
The implemented methods include the approaches by
Lu, K. (2019) <doi:10.1002/pst.1737>,
Kieser, M. and Friede, T. (2000) <doi:10.1002/(SICI)1097-0258(20000415)19:7%3C901::AID-SIM405%3E3.0.CO;2-L>,
Friede, T. and Kieser, M. (2004) <doi:10.1002/pst.140>,
Friede, T., Mitchell, C., Mueller-Veltern, G. (2007) <doi:10.1002/bimj.200610373>, and
Friede, T. and Kieser, M. (2011) <doi:10.3414/ME09-01-0063>.",2020-05-11,Maximilian Pilz,https://github.com/imbi-heidelberg/blindrecalc,TRUE,https://github.com/imbi-heidelberg/blindrecalc,389,2,2020-05-11T14:35:39Z,194.5
blme,"Maximum a posteriori estimation for linear and generalized
linear mixed-effects models in a Bayesian setting. Extends
'lme4' by Douglas Bates, Martin Maechler, Ben Bolker, and Steve Walker.",2015-06-14,Vincent Dorie,https://github.com/vdorie/blme,TRUE,https://github.com/vdorie/blme,169550,25,2020-02-26T19:20:44Z,6782
blob,"R's raw vector is useful for storing a single
binary object. What if you want to put a vector of them in a data
frame? The 'blob' package provides the blob object, a list of raw
vectors, suitable for use as a column in data frame.",2020-01-20,Kirill Müller,https://github.com/tidyverse/blob,TRUE,https://github.com/tidyverse/blob,3104956,28,2020-01-23T12:25:21Z,110891.28571428571
blockCV,"Creating spatially or environmentally separated folds for cross-validation to provide a robust error estimation in spatially structured environments; Investigating and visualising the effective range of spatial autocorrelation in continuous raster covariates to find an initial realistic distance band to separate training and testing datasets spatially described in Valavi, R. et al. (2019) <doi:10.1111/2041-210X.13107>.",2020-02-23,Roozbeh Valavi,https://github.com/rvalavi/blockCV,TRUE,https://github.com/rvalavi/blockcv,2912,58,2020-04-22T13:30:58Z,50.206896551724135
blockForest,"A random forest variant 'block forest' ('BlockForest') tailored to the
prediction of binary, survival and continuous outcomes using block-structured
covariate data, for example, clinical covariates plus measurements of a certain
omics data type or multi-omics data, that is, data for which measurements of
different types of omics data and/or clinical data for each patient exist. Examples
of different omics data types include gene expression measurements, mutation data
and copy number variation measurements.
Block forest are presented in Hornung & Wright (2019). The package includes four
other random forest variants for multi-omics data: 'RandomBlock', 'BlockVarSel',
'VarProb', and 'SplitWeights'. These were also considered in Hornung & Wright (2019),
but performed worse than block forest in their comparison study based on 20 real
multi-omics data sets. Therefore, we recommend to use block forest ('BlockForest')
in applications. The other random forest variants can, however, be consulted for
academic purposes, for example, in the context of further methodological
developments.
Reference: Hornung, R. & Wright, M. N. (2019) Block Forests: random forests for blocks of clinical and omics covariate data. BMC Bioinformatics 20:358. <doi:10.1186/s12859-019-2942-y>.",2019-12-06,Roman Hornung,https://github.com/bips-hb/blockForest,TRUE,https://github.com/bips-hb/blockforest,8250,3,2019-12-06T08:01:03Z,2750
blockRAR,"Computes power for response-adaptive randomization with a block design that captures both the time and treatment effect. T. Chandereng, R. Chappell (2019) <arXiv:1904.07758>.",2020-01-21,Thevaa Chandereng,https://github.com/thevaachandereng/blockRAR/,TRUE,https://github.com/thevaachandereng/blockrar,5652,2,2020-06-07T17:51:49Z,2826
blogdown,"Write blog posts and web pages in R Markdown. This package supports
the static site generator 'Hugo' (<https://gohugo.io>) best, and it also
supports 'Jekyll' (<http://jekyllrb.com>) and 'Hexo' (<https://hexo.io>).",2020-05-22,Yihui Xie,https://github.com/rstudio/blogdown,TRUE,https://github.com/rstudio/blogdown,144961,1156,2020-05-28T15:33:27Z,125.39878892733564
blorr,"Tools designed to make it easier for beginner and intermediate users to build and validate
binary logistic regression models. Includes bivariate analysis, comprehensive regression output,
model fit statistics, variable selection procedures, model validation techniques and a 'shiny'
app for interactive model building.",2020-05-28,Aravind Hebbali,"URL: https://blorr.rsquaredacademy.com/,
https://github.com/rsquaredacademy/blorr",TRUE,https://github.com/rsquaredacademy/blorr,15594,12,2020-05-28T13:25:52Z,1299.5
blscrapeR,"Scrapes various data from <https://www.bls.gov/>. The U.S. Bureau of Labor Statistics is the statistical branch of the United States Department of Labor. The package has additional functions to help parse, analyze and visualize the data.",2019-12-17,Kris Eberwein,https://github.com/keberwein/blscrapeR,TRUE,https://github.com/keberwein/blscraper,34028,70,2019-12-17T16:56:17Z,486.1142857142857
bltm,"Fits latent threshold model for simulated data
and describes how to adjust model using real data. Implements algorithm
proposed by Nakajima and West (2013) <doi:10.1080/07350015.2012.747847>.
This package has a function to generate data, a function to configure
priors and a function to fit the model. Examples may be checked inside
the demonstration files.",2019-07-18,Julio Trecenti,https://github.com/curso-r/bltm,TRUE,https://github.com/curso-r/bltm,4238,1,2019-07-13T18:39:36Z,4238
BMA,"Package for Bayesian model averaging and variable selection for linear models,
generalized linear models and survival models (cox
regression).",2020-03-11,Adrian Raftery,"http://stats.research.att.com/volinsky/bma.html,
https://github.com/hanase/BMA",TRUE,https://github.com/hanase/bma,195487,6,2020-03-10T23:13:48Z,32581.166666666668
bmass,"Multivariate tool for analyzing genome-wide association
study results in the form of univariate summary statistics. The
goal of 'bmass' is to comprehensively test all possible multivariate
models given the phenotypes and datasets provided. Multivariate
models are determined by assigning each phenotype to being either
Unassociated (U), Directly associated (D) or Indirectly associated
(I) with the genetic variant of interest. Test results for each model
are presented in the form of Bayes factors, thereby allowing direct
comparisons between models. The underlying framework implemented
here is based on the modeling developed in ""A Unified Framework
for Association Analysis with Multiple Related Phenotypes"",
M. Stephens (2013) <doi:10.1371/journal.pone.0065245>.",2019-05-17,Michael Turchin,https://github.com/mturchin20/bmass,TRUE,https://github.com/mturchin20/bmass,4654,8,2020-05-17T00:57:35Z,581.75
BMTME,"Genomic selection and prediction models with the capacity to use multiple traits and environments, through ready-to-use Bayesian models. It consists a group of functions
that help to create regression models for some genomic models proposed by Montesinos-López, et al. (2016) <doi:10.1534/g3.116.032359>
also in Montesinos-López et al. (2018) <doi:10.1534/g3.118.200728> and Montesinos-López et al. (2018) <doi:10.2134/agronj2018.06.0362>.",2020-05-26,Francisco Javier Luna-Vazquez,https://github.com/frahik/BMTME,TRUE,https://github.com/frahik/bmtme,7983,6,2019-10-17T21:30:09Z,1330.5
bnclassify,"State-of-the art algorithms for learning discrete Bayesian network classifiers from data, including a number of those described in Bielza & Larranaga (2014) <doi:10.1145/2576868>, with functions for prediction, model evaluation and inspection.",2020-03-12,Mihaljevic Bojan,http://github.com/bmihaljevic/bnclassify,TRUE,https://github.com/bmihaljevic/bnclassify,29535,15,2020-04-02T14:17:15Z,1969
bnpsd,"The Pritchard-Stephens-Donnelly (PSD) admixture model has k intermediate subpopulations from which n individuals draw their alleles dictated by their individual-specific admixture proportions. The BN-PSD model additionally imposes the Balding-Nichols (BN) allele frequency model to the intermediate populations, which therefore evolved independently from a common ancestral population T with subpopulation-specific FST (Wright's fixation index) parameters. The BN-PSD model can be used to yield complex population structures. Method described in Ochoa and Storey (2016) <doi:10.1101/083923>.",2020-01-10,Alejandro Ochoa,https://github.com/StoreyLab/bnpsd/,TRUE,https://github.com/storeylab/bnpsd,10854,6,2020-05-28T20:34:05Z,1809
BNrich,"Maleknia et al. (2020) <doi:10.1101/2020.01.13.905448>. A novel pathway enrichment analysis package based on Bayesian network to investigate the topology features of the pathways. firstly, 187 kyoto encyclopedia of genes and genomes (KEGG) human non-metabolic pathways which their cycles were eliminated by biological approach, enter in analysis as Bayesian network structures. The constructed Bayesian network were optimized by the Least Absolute Shrinkage Selector Operator (lasso) and the parameters were learned based on gene expression data. Finally, the impacted pathways were enriched by Fisher’s Exact Test on significant parameters.",2020-04-04,Samaneh Maleknia,https://github.com/Samaneh-Bioinformatics/BNrich,TRUE,https://github.com/samaneh-bioinformatics/bnrich,1581,0,2020-04-04T07:37:58Z,NA
bnspatial,"Allows spatial implementation of Bayesian networks and mapping in geographical space. It makes maps of expected value (or most likely state) given known and unknown conditions, maps of uncertainty measured as coefficient of variation or Shannon index (entropy), maps of probability associated to any states of any node of the network. Some additional features are provided as well: parallel processing options, data discretization routines and function wrappers designed for users with minimal knowledge of the R language. Outputs can be exported to any common GIS format. ",2020-01-17,Dario Masante,http://github.com/dariomasante/bnspatial,TRUE,https://github.com/dariomasante/bnspatial,21968,13,2020-01-30T12:10:29Z,1689.8461538461538
bold,"A programmatic interface to the Web Service methods provided by
Bold Systems (<http://www.boldsystems.org/>) for genetic 'barcode' data.
Functions include methods for searching by sequences by taxonomic names,
ids, collectors, and institutions; as well as a function for searching
for specimens, and downloading trace files.",2020-05-01,Scott Chamberlain,"https://docs.ropensci.org/bold, https://github.com/ropensci/bold",TRUE,https://github.com/ropensci/bold,148265,12,2020-05-01T21:15:40Z,12355.416666666666
bomrang,"Provides functions to interface with Australian Government Bureau
of Meteorology ('BOM') data, fetching data and returning a tidy data frame
of precis forecasts, historical and current weather data from stations,
agriculture bulletin data, 'BOM' 0900 or 1500 weather bulletins and
downloading and importing radar and satellite imagery files. Data (c)
Australian Government Bureau of Meteorology Creative Commons (CC)
Attribution 3.0 licence or Public Access Licence (PAL) as appropriate. See
<http://www.bom.gov.au/other/copyright.shtml> for further details.",2020-01-20,Adam H. Sparks,"https://github.com/ropensci/bomrang,
https://docs.ropensci.org/bomrang/",TRUE,https://github.com/ropensci/bomrang,30733,66,2020-01-20T22:31:56Z,465.6515151515151
bookdown,Output formats and utilities for authoring books and technical documents with R Markdown.,2020-05-15,Yihui Xie,https://github.com/rstudio/bookdown,TRUE,https://github.com/rstudio/bookdown,620701,1955,2020-05-23T19:10:02Z,317.49411764705883
bookdownplus,"A collection and selector of R 'bookdown' templates. 'bookdownplus' helps you write academic journal articles, guitar books, chemical equations, mails, calendars, and diaries. R 'bookdownplus' extends the features of 'bookdown', and simplifies the procedure. Users only have to choose a template, clarify the book title and author name, and then focus on writing the text. No need to struggle in 'YAML' and 'LaTeX'.",2020-02-26,Peng Zhao,https://github.com/pzhaonet/bookdownplus,TRUE,https://github.com/pzhaonet/bookdownplus,31933,173,2020-03-17T21:21:46Z,184.58381502890174
boot.heterogeneity,"Implements a bootstrap-based heterogeneity test for standardized mean differences (d), Fisher-transformed Pearson's correlations (r), and natural-logarithm-transformed odds ratio (or) in meta-analysis studies. Depending on the presence of moderators, this Monte Carlo based test can be implemented in the random- or mixed-effects model. This package uses rma() function from the R package 'metafor' to obtain parameter estimates and likelihoods, so installation of R package 'metafor' is required. This approach refers to the studies of Anscombe (1956) <doi:10.2307/2332926>, Haldane (1940) <doi:10.2307/2332614>, Hedges (1981) <doi:10.3102/10769986006002107>, Hedges & Olkin (1985, ISBN:978-0123363800), Silagy, Lancaster, Stead, Mant, & Fowler (2004) <doi:10.1002/14651858.CD000146.pub2>, Viechtbauer (2010) <doi:10.18637/jss.v036.i03>, and Zuckerman (1994, ISBN:978-0521432009). ",2020-05-08,Ge Jiang,https://github.com/gabriellajg/boot.heterogeneity/,TRUE,https://github.com/gabriellajg/boot.heterogeneity,429,0,2020-05-07T06:31:49Z,NA
bootstrapFP,"Finite Population bootstrap algorithms to estimate the variance
of the Horvitz-Thompson estimator for single-stage sampling.
For a survey of bootstrap methods for finite populations, see Mashreghi et Al. (2016) <doi:10.1214/16-SS113>.",2019-02-24,Roberto Sichera,NA,TRUE,https://github.com/rhobis/bootstrapfp,6378,0,2019-12-04T11:38:48Z,NA
BOSSreg,"Best orthogonalized subset selection (BOSS) is a least-squares (LS) based subset selection method, that performs best subset selection upon an orthogonalized basis of ordered predictors, with the computational effort of a single ordinary LS fit. This package provides a highly optimized implementation of BOSS and estimates a heuristic degrees of freedom for BOSS, which can be plugged into an information criterion (IC) such as AICc in order to select the subset from candidates. It provides various choices of IC, including AIC, BIC, AICc, Cp and GCV. It also implements the forward stepwise selection (FS) with no additional computational cost, where the subset of FS is selected via cross-validation (CV). CV is also an option for BOSS. For details see: Tian, Hurvich and Simonoff (2019), ""On the Use of Information Criteria for Subset Selection in Least Squares Regression"", <arXiv:1911.10191>.",2019-12-06,Sen Tian,https://github.com/sentian/BOSSreg,TRUE,https://github.com/sentian/bossreg,2688,1,2020-01-15T04:16:54Z,2688
botor,"Fork-safe, raw access to the 'Amazon Web Services' ('AWS') 'SDK' via the 'boto3' 'Python' module, and convenient helper functions to query the 'Simple Storage Service' ('S3') and 'Key Management Service' ('KMS'), partial support for 'IAM', the 'Systems Manager Parameter Store' and 'Secrets Manager'.",2020-02-16,Gergely Daróczi,https://daroczig.github.io/botor,TRUE,https://github.com/daroczig/botor,4658,22,2020-05-22T19:07:41Z,211.72727272727272
boundingbox,"Generate ground truth cases for object localization algorithms.
Cycle through a list of images, select points around which to generate bounding
boxes and assign classifiers. Output the coordinates, and images annotated with
boxes and labels. For an example study that uses bounding boxes for image
localization and classification see Ibrahim, Badr, Abdallah, and Eissa (2012)
""Bounding Box Object Localization Based on Image Superpixelization""
<doi:10.1016/j.procs.2012.09.119>.",2020-06-09,David Stomski,<https://github.com/stomperusa/boundingbox>,TRUE,https://github.com/stomperusa/boundingbox,0,1,2020-06-06T00:15:06Z,0
boxr,"An R interface for the remote file hosting service 'Box'
(<https://www.box.com/>). In addition to uploading and downloading files,
this package includes functions which mirror base R operations for local
files, (e.g. box_load(), box_save(), box_read(), box_setwd(), etc.), as well
as 'git' style functions for entire directories (e.g. box_fetch(),
box_push()).",2019-11-19,Ian Lyttle,https://github.com/r-box/boxr/,TRUE,https://github.com/r-box/boxr,32003,43,2020-04-27T16:32:38Z,744.2558139534884
bpbounds,"Implementation of the nonparametric bounds for the average causal
effect under an instrumental variable model by Balke and Pearl (Bounds on
Treatment Effects from Studies with Imperfect Compliance, JASA, 1997, 92,
439, 1171-1176). The package can calculate bounds for a binary outcome, a
binary treatment/phenotype, and an instrument with either 2 or 3
categories. The package implements bounds for situations where these 3
variables are measured in the same dataset (trivariate data) or where the
outcome and instrument are measured in one study and the
treatment/phenotype and instrument are measured in another study
(bivariate data).",2020-01-21,Tom Palmer,https://github.com/remlapmot/bpbounds,TRUE,https://github.com/remlapmot/bpbounds,7782,0,2020-06-07T09:13:36Z,NA
bpnreg,"Fitting Bayesian multiple and mixed-effect regression models for
circular data based on the projected normal distribution. Both continuous
and categorical predictors can be included. Sampling from the posterior is
performed via an MCMC algorithm. Posterior descriptives of all parameters,
model fit statistics and Bayes factors for hypothesis tests for inequality
constrained hypotheses are provided. See Cremers, Mulder & Klugkist (2018)
<doi:10.1111/bmsp.12108> and Nuñez-Antonio & Guttiérez-Peña (2014)
<doi:10.1016/j.csda.2012.07.025>.",2020-02-04,Jolien Cremers,https://github.com/joliencremers/bpnreg,TRUE,https://github.com/joliencremers/bpnreg,9477,2,2020-02-05T08:11:37Z,4738.5
bracer,"Performs brace expansions on strings. Made popular by Unix shells, brace expansion allows users to concisely generate certain character vectors by taking a single string and (recursively) expanding the comma-separated lists and double-period-separated integer and character sequences enclosed within braces in that string. The double-period-separated numeric integer expansion also supports padding the resulting numbers with zeros.",2019-09-03,Trevor Davis,https://github.com/trevorld/bracer,TRUE,https://github.com/trevorld/bracer,4153,1,2019-11-24T19:09:12Z,4153
brainGraph,"A set of tools for performing graph theory analysis of brain MRI
data. It works with data from a Freesurfer analysis (cortical thickness,
volumes, local gyrification index, surface area), diffusion tensor
tractography data (e.g., from FSL) and resting-state fMRI data (e.g., from
DPABI). It contains a graphical user interface for graph visualization and
data exploration, along with several functions for generating useful
figures.",2019-11-07,Christopher G. Watson,https://github.com/cwatson/brainGraph,TRUE,https://github.com/cwatson/braingraph,27726,79,2019-11-06T05:31:18Z,350.9620253164557
BRDT,"This is an implementation of design methods for binomial reliability demonstration tests (BRDTs) with failure count data.
The acceptance decision uncertainty of BRDT has been quantified and the impacts of the uncertainty on related reliability assurance activities such as reliability growth (RG) and warranty services (WS) are evaluated.
This package is associated with the work from the published paper ""Optimal Binomial Reliability Demonstration Tests Design under Acceptance Decision Uncertainty"" by Suiyao Chen et al. (2020) <doi:10.1080/08982112.2020.1757703>.",2020-06-09,Suiyao Chen,https://github.com/ericchen12377/BRDT,TRUE,https://github.com/ericchen12377/brdt,0,2,2020-06-09T19:08:00Z,0
breakDown,"Model agnostic tool for decomposition of predictions from black boxes.
Break Down Table shows contributions of every variable to a final prediction.
Break Down Plot presents variable contributions in a concise graphical way.
This package work for binary classifiers and general regression models. ",2020-04-05,Przemyslaw Biecek,https://pbiecek.github.io/breakDown/,TRUE,https://github.com/pbiecek/breakdown,29388,87,2020-04-04T23:57:50Z,337.7931034482759
breathtestcore,"Reads several formats of 13C data (IRIS/Wagner,
BreathID) and CSV. Creates artificial sample data for testing. Fits
Maes/Ghoos, Bluck-Coward self-correcting formula using 'nls', 'nlme'.
Methods to fit breath test curves with Bayesian Stan methods are
refactored to package 'breathteststan'. For a Shiny GUI, see package
'dmenne/breathtestshiny' on github.",2020-03-22,Dieter Menne,https://github.com/dmenne/breathtestcore,TRUE,https://github.com/dmenne/breathtestcore,14985,1,2020-06-08T08:03:44Z,14985
breathteststan,"Stan-based curve-fitting function
for use with package 'breathtestcore' by the same author.
Stan functions are refactored here for easier testing.",2020-03-22,Dieter Menne,https://github.com/dmenne/breathteststan,TRUE,https://github.com/dmenne/breathteststan,17500,3,2020-04-13T07:36:34Z,5833.333333333333
brglm2,"Estimation and inference from generalized linear models based on various methods for bias reduction and maximum penalized likelihood with powers of the Jeffreys prior as penalty. The 'brglmFit' fitting method can achieve reduction of estimation bias by solving either the mean bias-reducing adjusted score equations in Firth (1993) <doi:10.1093/biomet/80.1.27> and Kosmidis and Firth (2009) <doi:10.1093/biomet/asp055>, or the median bias-reduction adjusted score equations in Kenne et al. (2016) <arXiv:1604.04768>, or through the direct subtraction of an estimate of the bias of the maximum likelihood estimator from the maximum likelihood estimates as in Cordeiro and McCullagh (1991) <http://www.jstor.org/stable/2345592>. See Kosmidis et al (2019) <doi:10.1007/s11222-019-09860-6> for more details. Estimation in all cases takes place via a quasi Fisher scoring algorithm, and S3 methods for the construction of of confidence intervals for the reduced-bias estimates are provided. In the special case of generalized linear models for binomial and multinomial responses (both ordinal and nominal), the adjusted score approaches return estimates with improved frequentist properties, that are also always finite, even in cases where the maximum likelihood estimates are infinite (e.g. complete and quasi-complete separation). 'brglm2' also provides pre-fit and post-fit methods for detecting separation and infinite maximum likelihood estimates in binomial response generalized linear models.",2020-03-19,Ioannis Kosmidis,https://github.com/ikosmidis/brglm2,TRUE,https://github.com/ikosmidis/brglm2,27917,5,2020-03-19T15:35:26Z,5583.4
brickr,"
Generate digital LEGO models using 'tidyverse' functions.
Convert image files into 2D and 3D LEGO mosaics, complete with piece counts and instructions.
Render 3D models using simple data frame instructions.
Developed under the LEGO Group's Fair Play policy <https://www.lego.com/en-us/legal/notices-and-policies/fair-play/>.",2020-05-09,Ryan Timpe,https://github.com/ryantimpe/brickr,TRUE,https://github.com/ryantimpe/brickr,1930,312,2020-05-09T20:02:55Z,6.185897435897436
bridgesampling,"Provides functions for estimating marginal likelihoods, Bayes
factors, posterior model probabilities, and normalizing constants in general,
via different versions of bridge sampling (Meng & Wong, 1996,
<http://www3.stat.sinica.edu.tw/statistica/j6n4/j6n43/j6n43.htm>).
Gronau, Singmann, & Wagenmakers (2020) <doi:10.18637/jss.v092.i10>.",2020-02-26,Quentin F. Gronau,https://github.com/quentingronau/bridgesampling,TRUE,https://github.com/quentingronau/bridgesampling,208871,19,2020-02-24T23:15:25Z,10993.21052631579
brio,"Functions to handle basic input output, these functions always
read and write UTF-8 (8-bit Unicode Transformation Format) files and provide
more explicit control over line endings.",2020-03-26,Jim Hester,https://github.com/r-lib/brio,TRUE,https://github.com/r-lib/brio,1670,20,2020-04-20T13:08:53Z,83.5
BRISC,Fits Bootstrap with univariate spatial regression models using Bootstrap for Rapid Inference on Spatial Covariances (BRISC) for large datasets using Nearest Neighbor Gaussian Processes detailed in Saha and Datta (2018) <doi:10.1002/sta4.184>.,2019-08-19,Arkajyoti Saha,https://github.com/ArkajyotiSaha/BRISC,TRUE,https://github.com/arkajyotisaha/brisc,8546,1,2019-08-22T18:41:21Z,8546
BRL,"Implementation of the record linkage methodology proposed by Sadinle (2017) <doi:10.1080/01621459.2016.1148612>. It handles the bipartite record linkage problem, where two duplicate-free datafiles are to be merged.",2020-01-13,Mauricio Sadinle,https://github.com/msadinle/BRL,TRUE,https://github.com/msadinle/brl,3256,3,2020-01-11T01:25:32Z,1085.3333333333333
brms,"Fit Bayesian generalized (non-)linear multivariate multilevel models
using 'Stan' for full Bayesian inference. A wide range of distributions
and link functions are supported, allowing users to fit -- among others --
linear, robust linear, count data, survival, response times, ordinal,
zero-inflated, hurdle, and even self-defined mixture models all in a
multilevel context. Further modeling options include non-linear and
smooth terms, auto-correlation structures, censored data, meta-analytic
standard errors, and quite a few more. In addition, all parameters of the
response distribution can be predicted in order to perform distributional
regression. Prior specifications are flexible and explicitly encourage
users to apply prior distributions that actually reflect their beliefs.
Model fit can easily be assessed and compared with posterior predictive
checks and leave-one-out cross-validation. References: Bürkner (2017)
<doi:10.18637/jss.v080.i01>; Bürkner (2018) <doi:10.32614/RJ-2018-017>;
Carpenter et al. (2017) <doi:10.18637/jss.v076.i01>.",2020-05-27,Paul-Christian Bürkner,"https://github.com/paul-buerkner/brms,
http://discourse.mc-stan.org",TRUE,https://github.com/paul-buerkner/brms,411359,714,2020-06-09T16:24:28Z,576.1330532212885
Brobdingnag,"Handles very large numbers in R. Real numbers are held
using their natural logarithms, plus a logical flag indicating
sign. The package includes a vignette that gives a
step-by-step introduction to using S4 methods.",2018-08-13,Robin K. S. Hankin,https://github.com/RobinHankin/Brobdingnag.git,TRUE,https://github.com/robinhankin/brobdingnag,211351,1,2020-04-30T08:43:11Z,211351
broman,"Miscellaneous R functions, including functions related to
graphics (mostly for base graphics), permutation tests, running
mean/median, and general utilities.",2020-05-22,Karl W Broman,https://github.com/kbroman/broman,TRUE,https://github.com/kbroman/broman,43235,157,2020-05-21T15:29:32Z,275.38216560509557
broom,"Summarizes key information about statistical
objects in tidy tibbles. This makes it easy to report results, create
plots and consistently work with large numbers of models at once.
Broom provides three verbs that each provide different types of
information about a model. tidy() summarizes information about model
components such as coefficients of a regression. glance() reports
information about an entire model, such as goodness of fit measures
like AIC and BIC. augment() adds information about individual
observations to a dataset, such as fitted values or influence
measures.",2020-04-20,Alex Hayes,http://github.com/tidyverse/broom,TRUE,https://github.com/tidyverse/broom,10361715,953,2020-06-09T16:03:05Z,10872.733473242393
broom.mixed,"Convert fitted objects from various R mixed-model packages
into tidy data frames along the lines of the 'broom' package.
The package provides three
S3 generics for each model: tidy(), which summarizes a model's statistical findings such as
coefficients of a regression; augment(), which adds columns to the original
data such as predictions, residuals and cluster assignments; and glance(), which
provides a one-row summary of model-level statistics.",2020-05-17,Ben Bolker,http://github.com/bbolker/broom.mixed,TRUE,https://github.com/bbolker/broom.mixed,73266,158,2020-05-29T00:44:08Z,463.7088607594937
broomExtra,"Provides helper functions that assist in data
analysis workflows involving regression analyses. The goal is to
combine the functionality offered by different set of packages
('broom', 'broom.mixed', 'parameters', and 'performance') through a
common syntax to return tidy dataframes containing model parameters
and performance measure summaries. The 'grouped_' variants of the
generics provides a convenient way to execute functions across a
combination of grouping variable(s) in a dataframe.",2020-05-11,Indrajeet Patil,"https://indrajeetpatil.github.io/broomExtra/,
https://github.com/IndrajeetPatil/broomExtra",TRUE,https://github.com/indrajeetpatil/broomextra,52301,30,2020-05-30T21:16:09Z,1743.3666666666666
brranching,"Includes methods for fetching 'phylogenies' from a variety
of sources, including the 'Phylomatic' web service
(<http://phylodiversity.net/phylomatic>), and 'Phylocom'
(<https://github.com/phylocom/phylocom/>).",2019-07-27,Scott Chamberlain,https://github.com/ropensci/brranching,TRUE,https://github.com/ropensci/brranching,27516,13,2020-06-03T23:30:36Z,2116.6153846153848
brunnermunzel,"Provides the functions for Brunner-Munzel test and
permuted Brunner-Munzel test,
which enable to use formula, matrix, and table as argument.
These functions are based on Brunner and Munzel (2000)
<doi:10.1002/(SICI)1521-4036(200001)42:1%3C17::AID-BIMJ17%3E3.0.CO;2-U>
and Neubert and Brunner (2007) <doi:10.1016/j.csda.2006.05.024>,
and are written with FORTRAN.",2020-01-08,Toshiaki Ara,https://github.com/toshi-ara/brunnermunzel,TRUE,https://github.com/toshi-ara/brunnermunzel,8462,3,2020-01-07T13:07:33Z,2820.6666666666665
bs4Dash,"Make 'Bootstrap 4' dashboards. Use the full power
of 'AdminLTE3', a dashboard template built on top of 'Bootstrap 4'
<https://github.com/ColorlibHQ/AdminLTE>.",2019-11-27,David Granjon,"https://rinterface.github.io/bs4Dash/index.html,
https://github.com/RinteRface/bs4Dash",TRUE,https://github.com/rinterface/bs4dash,52042,189,2020-05-27T23:08:18Z,275.35449735449737
bsam,"Tools to fit Bayesian state-space models to animal tracking data. Models are provided for location
filtering, location filtering and behavioural state estimation, and their hierarchical versions.
The models are primarily intended for fitting to ARGOS satellite tracking data but options exist to fit
to other tracking data types. For Global Positioning System data, consider the 'moveHMM' package.
Simplified Markov Chain Monte Carlo convergence diagnostic plotting is provided but users are encouraged
to explore tools available in packages such as 'coda' and 'boa'.",2017-07-01,Ian Jonsen,https://github.com/ianjonsen/bsam,TRUE,https://github.com/ianjonsen/bsam,17634,13,2020-01-24T13:07:27Z,1356.4615384615386
bSims,"A highly scientific and utterly addictive
bird point count simulator
to test statistical assumptions, aid survey design,
and have fun while doing it.
The simulations follow time-removal and distance sampling models
based on Matsuoka et al. (2012) <doi:10.1525/auk.2012.11190>,
Solymos et al. (2013) <doi:10.1111/2041-210X.12106>,
and Solymos et al. (2018) <doi:10.1650/CONDOR-18-32.1>,
and sound attenuation experiments by
Yip et al. (2017) <doi:10.1650/CONDOR-16-93.1>.",2019-12-20,Peter Solymos,https://github.com/psolymos/bSims,TRUE,https://github.com/psolymos/bsims,2686,1,2020-05-28T21:54:21Z,2686
bsplus,"The Bootstrap framework lets you add some JavaScript functionality to your web site by
adding attributes to your HTML tags - Bootstrap takes care of the JavaScript
<https://getbootstrap.com/javascript>. If you are using R Markdown or Shiny, you can
use these functions to create collapsible sections, accordion panels, modals, tooltips,
popovers, and an accordion sidebar framework (not described at Bootstrap site).",2018-04-05,Ian Lyttle,https://github.com/ijlyttle/bsplus,TRUE,https://github.com/ijlyttle/bsplus,26670,118,2020-05-16T18:38:47Z,226.01694915254237
bssm,"Efficient methods for Bayesian inference of state space models
via particle Markov chain Monte Carlo (MCMC) and MCMC based on parallel
importance sampling type weighted estimators
(Vihola, Helske, and Franks, 2020, <arXiv:1609.02541>).
Gaussian, Poisson, binomial, negative binomial, and Gamma
observation densities and basic stochastic volatility models with Gaussian state
dynamics, as well as general non-linear Gaussian models and discretised
diffusion models are supported.",2020-06-09,Jouni Helske,NA,TRUE,https://github.com/helske/bssm,28538,15,2020-06-09T13:49:25Z,1902.5333333333333
BSW,Implements a modified Newton-type algorithm (BSW algorithm) for solving the maximum likelihood estimation problem in fitting a log-binomial model under linear inequality constraints.,2020-03-25,Adam Bekhit,https://github.com/adam-bec/BSW,TRUE,https://github.com/adam-bec/bsw,1157,0,2020-03-24T14:24:27Z,NA
btergm,"Temporal Exponential Random Graph Models (TERGM) estimated by maximum pseudolikelihood with bootstrapped confidence intervals or Markov Chain Monte Carlo maximum likelihood. Goodness of fit assessment for ERGMs, TERGMs, and SAOMs. Micro-level interpretation of ERGMs and TERGMs.",2020-04-07,Philip Leifeld,http://github.com/leifeld/btergm,TRUE,https://github.com/leifeld/btergm,128371,6,2020-04-06T19:19:02Z,21395.166666666668
BTM,"Biterm Topic Models find topics in collections of short texts.
It is a word co-occurrence based topic model that learns topics by modeling word-word co-occurrences patterns which are called biterms.
This in contrast to traditional topic models like Latent Dirichlet Allocation and Probabilistic Latent Semantic Analysis
which are word-document co-occurrence topic models.
A biterm consists of two words co-occurring in the same short text window.
This context window can for example be a twitter message, a short answer on a survey, a sentence of a text or a document identifier.
The techniques are explained in detail in the paper 'A Biterm Topic Model For Short Text' by Xiaohui Yan, Jiafeng Guo, Yanyan Lan, Xueqi Cheng (2013) <https://github.com/xiaohuiyan/xiaohuiyan.github.io/blob/master/paper/BTM-WWW13.pdf>.",2020-05-02,Jan Wijffels,https://github.com/bnosac/BTM,TRUE,https://github.com/bnosac/btm,10324,47,2020-05-27T18:01:45Z,219.6595744680851
bucky,"Provides functions for various statistical techniques commonly used in the social sciences, including functions to compute clustered robust standard errors, combine results across multiply-imputed data sets, and simplify the addition of robust and clustered robust standard errors.",2019-12-17,Alexander Tahk,http://github.com/atahk/bucky,TRUE,https://github.com/atahk/bucky,13011,6,2019-12-17T19:00:36Z,2168.5
buildmer,"Finds the largest possible regression model that will still converge
for various types of regression analyses (including mixed models and generalized
additive models) and then optionally performs stepwise elimination similar to the
forward and backward effect-selection methods in SAS, based on the change in
log-likelihood or its significance, Akaike's Information Criterion, the Bayesian
Information Criterion, or the explained deviance.",2020-05-27,Cesko C. Voeten,NA,TRUE,https://github.com/cvoeten/buildmer,10016,1,2020-06-07T18:18:51Z,10016
buildr,Working with reproducible reports or any other similar projects often requires to run the script that builds the output file in a specified way. One can become tired from repeatedly switching to the build script and sourcing it. The 'buildr' package does this one simple thing via 'RStudio' addin – user can set up the keyboard shortcut and run the build script with one keystroke anywhere anytime. The second way is to pass buildr() command to console which does the same thing. Both ways source the build.R (case insensitive) file present in the current working directory.,2020-05-12,Jan Netik,https://github.com/netique/buildr,TRUE,https://github.com/netique/buildr,669,1,2020-05-12T10:53:05Z,669
bunching,"Implementation of the bunching estimator for kinks and notches.
Allows for flexible estimation of counterfactual (e.g. controlling for round number bunching, accounting for other bunching masses within bunching window, fixing bunching point to be minimum, maximum or median value in its bin, etc.).
It produces publication-ready plots in the style followed since Chetty et al. (2011) <DOI:10.1093/qje/qjr013>, with lots of functionality to set plot options.",2019-09-23,Panos Mavrokonstantis,http://github.com/mavpanos/bunching,TRUE,https://github.com/mavpanos/bunching,3713,1,2020-05-19T09:48:45Z,3713
bupaR,"Comprehensive Business Process Analysis toolkit. Creates S3-class for event log objects, and related handler functions. Imports related packages for filtering event data, computation of descriptive statistics, handling of 'Petri Net' objects and visualization of process maps. See also packages 'edeaR','processmapR', 'eventdataR' and 'processmonitR'.",2020-01-22,Gert Janssenswillen,"https://www.bupar.net, https://github.com/bupaverse/bupaR",TRUE,https://github.com/bupaverse/bupar,42098,14,2020-04-30T06:52:58Z,3007
burnr,"Tools to read, write, parse, and analyze forest fire history data (e.g. FHX). Described in Malevich et al. (2018) <doi:10.1016/j.dendro.2018.02.005>.",2019-08-21,Steven Malevich,https://github.com/ltrr-arizona-edu/burnr/,TRUE,https://github.com/ltrr-arizona-edu/burnr,23656,8,2020-03-30T18:25:04Z,2957
butcher,Provides a set of five S3 generics to axe components of fitted model objects and help reduce the size of model objects saved to disk.,2020-01-23,Joyce Cahoon,"https://tidymodels.github.io/butcher,
https://github.com/tidymodels/butcher",TRUE,https://github.com/tidymodels/butcher,6314,62,2020-05-14T17:40:15Z,101.83870967741936
BuyseTest,"Implementation of the Generalized Pairwise Comparisons (GPC)
as defined in Buyse (2010) <doi:10.1002/sim.3923> for complete observations,
and extended in Peron (2018) <doi:10.1177/0962280216658320> to deal with right-censoring.
GPC compare two groups of observations (intervention vs. control group)
regarding several prioritized endpoints to estimate the probability that a random observation drawn from
one group performs better than a random observation drawn from the other group (Mann-Whitney parameter).
The net benefit and win ratio statistics,
i.e. the difference and ratio between the probabilities relative to the intervention and control groups,
can then also be estimated. Confidence intervals and p-values are obtained using permutations, a non-parametric bootstrap, or the asymptotic theory.
The software enables the use of thresholds of minimal importance difference,
stratification, non-prioritized endpoints (O'Brien test), and can handle right-censoring and competing-risks.",2020-05-07,Brice Ozenne,https://github.com/bozenne/BuyseTest,TRUE,https://github.com/bozenne/buysetest,20311,1,2020-05-27T13:27:15Z,20311
BVAR,"Estimation of hierarchical Bayesian vector autoregressive models.
Implements hierarchical prior selection for conjugate priors in the fashion
of Giannone, Lenza & Primiceri (2015) <doi:10.1162/REST_a_00483>. Functions
to compute and identify impulse responses, calculate forecasts,
forecast error variance decompositions and scenarios are available.
Several methods to print, plot and summarise results facilitate analysis.",2020-05-05,Nikolas Kuschnig,https://github.com/nk027/bvar,TRUE,https://github.com/nk027/bvar,8466,9,2020-05-11T10:50:51Z,940.6666666666666
bvartools,"Assists in the set-up of algorithms for Bayesian inference of vector autoregressive (VAR) models. Functions for posterior simulation, forecasting, impulse response analysis and forecast error variance decomposition are largely based on the introductory texts of Koop and Korobilis (2010) <doi:10.1561/0800000013> and Luetkepohl (2007, ISBN: 9783540262398). ",2019-08-20,Franz X. Mohr,https://github.com/franzmohr/bvartools,TRUE,https://github.com/franzmohr/bvartools,6318,6,2020-06-03T20:57:04Z,1053
BWStest,"Performs the 'Baumgartner-Weiss-Schindler' two-sample test of equal
probability distributions, <doi:10.2307/2533862>. Also performs
similar rank-based tests for equal probability distributions due to
Neuhauser <doi:10.1080/10485250108832874> and
Murakami <doi:10.1080/00949655.2010.551516>.",2018-10-18,Steven E. Pav,https://github.com/shabbychef/BWStest,TRUE,https://github.com/shabbychef/bwstest,41480,0,2019-09-02T16:25:04Z,NA
bwsTools,"Tools to design best-worst scaling designs (i.e., balanced incomplete block designs) and
to analyze data from these designs, using aggregate and individual methods such as: difference
scores, Louviere, Lings, Islam, Gudergan, & Flynn (2013) <doi:10.1016/j.ijresmar.2012.10.002>;
analytical estimation, Lipovetsky & Conklin (2014) <doi:10.1016/j.jocm.2014.02.001>; empirical
Bayes, Lipovetsky & Conklin (2015) <doi:10.1142/S1793536915500028>; Elo, Hollis (2018)
<doi:10.3758/s13428-017-0898-2>; and network-based measures.",2020-03-19,Mark White,https://github.com/markhwhiteii/bwsTools,TRUE,https://github.com/markhwhiteii/bwstools,2598,3,2020-06-09T01:35:57Z,866
bysykkel,"Functions to get and download city bike data from
the website and API service of each city bike service in Norway. The
package aims to reduce time spent on getting Norwegian city bike data,
and lower barriers to start analyzing it. The data is retrieved from
Oslo City Bike, Bergen City Bike, and Trondheim City Bike. The data is
made available under NLOD 2.0 <https://data.norge.no/nlod/en/2.0>.",2020-04-19,Iman Ghayoornia,http://github.com/imangR/bysykkel,TRUE,https://github.com/imangr/bysykkel,6294,0,2020-04-19T14:02:25Z,NA
c14bazAAR,"Query different C14 date databases and apply basic data cleaning, merging and calibration steps.",2020-01-12,Clemens Schmid,"https://docs.ropensci.org/c14bazAAR,
https://github.com/ropensci/c14bazAAR",TRUE,https://github.com/ropensci/c14bazaar,8334,19,2020-04-23T13:07:10Z,438.63157894736844
c3,"Create interactive charts with the 'C3.js' <http://c3js.org/> charting library. All plot
types in 'C3.js' are available and include line, bar, scatter, and mixed geometry plots. Plot
annotations, labels and axis are highly adjustable. Interactive web based charts can be embedded
in R Markdown documents or Shiny web applications. ",2020-03-16,Matt Johnson,https://github.com/mrjoh3/c3,TRUE,https://github.com/mrjoh3/c3,10613,36,2020-03-16T13:02:49Z,294.80555555555554
C50,"C5.0 decision trees and rule-based models for pattern recognition that extend the work of Quinlan (1993, ISBN:1-55860-238-0).",2020-05-26,Max Kuhn,https://topepo.github.io/C5.0,TRUE,https://github.com/topepo/c5.0,525329,40,2020-01-09T20:20:51Z,13133.225
CAISEr,"Functions for performing experimental comparisons of algorithms
using adequate sample sizes for power and accuracy. Implements the
methodology originally presented in Campelo and Takahashi (2019)
<doi:10.1007/s10732-018-9396-7>
for the comparison of two algorithms, and later generalised in
Campelo and Wanner (Submitted, 2019) <arxiv:1908.01720>.",2020-02-04,Felipe Campelo,https://fcampelo.github.io/CAISEr/,TRUE,https://github.com/fcampelo/caiser,12204,1,2020-02-04T10:08:04Z,12204
calculus,"Efficient C++ optimized functions for numerical and symbolic calculus. It includes basic symbolic arithmetic, tensor calculus, Einstein summing convention, fast computation of the Levi-Civita symbol and generalized Kronecker delta, Taylor series expansion, multivariate Hermite polynomials, accurate high-order derivatives, differential operators (Gradient, Jacobian, Hessian, Divergence, Curl, Laplacian) and numerical integration in arbitrary orthogonal coordinate systems: cartesian, polar, spherical, cylindrical, parabolic or user defined by custom scale factors. ",2020-03-23,Emanuele Guidotti,https://github.com/emanuele-guidotti/calculus,TRUE,https://github.com/emanuele-guidotti/calculus,4391,24,2020-05-20T23:42:33Z,182.95833333333334
calcUnique,"This is a one-function package that will pass only unique values to a computationally-expensive function that returns an output of the same length as the input.
In importing and working with tidy data, it is common to have index columns, often including time stamps that are far from unique. Some functions to work with these such as text conversion to other variable types (e.g. as.POSIXct()), various grep()-based functions, and often the cut() function are relatively slow when working with tens of millions of rows or more.",2020-05-04,Stephen Froehlich,https://github.com/stephenbfroehlich/calcUnique,TRUE,https://github.com/stephenbfroehlich/calcunique,691,0,2020-05-04T18:45:11Z,NA
calibrar,"Automated parameter estimation for complex (ecological) models in R.
This package allows the parameter estimation or calibration of complex models,
including stochastic ones. It is a generic tool that can be used for fitting
any type of models, especially those with non-differentiable objective functions.
It supports multiple phases and constrained optimization.
It implements maximum likelihood estimation methods and automated construction
of the objective function from simulated model outputs.
See <http://roliveros-ramos.github.io/calibrar> for more details.",2016-02-17,Ricardo Oliveros-Ramos,http://roliveros-ramos.github.io/calibrar,TRUE,https://github.com/roliveros-ramos/calibrar,14720,4,2020-02-04T01:50:10Z,3680
calibrator,"Performs Bayesian calibration of computer models as per
Kennedy and O'Hagan 2001. The package includes routines to find the
hyperparameters and parameters; see the help page for stage1() for a
worked example using the toy dataset. A tutorial is provided in the
calex.Rnw vignette; and a suite of especially simple one dimensional
examples appears in inst/doc/one.dim/.",2019-03-07,Robin K. S. Hankin,https://github.com/RobinHankin/calibrator.git,TRUE,https://github.com/robinhankin/calibrator,35397,1,2020-05-05T21:26:27Z,35397
calmate,A multi-array post-processing method of allele-specific copy-number estimates (ASCNs).,2015-10-27,Henrik Bengtsson,https://github.com/HenrikBengtsson/calmate/,TRUE,https://github.com/henrikbengtsson/calmate,20755,0,2019-12-09T00:29:10Z,NA
camsRad,"Copernicus Atmosphere Monitoring Service (CAMS) radiations service
provides time series of global, direct, and diffuse irradiations on horizontal
surface, and direct irradiation on normal plane for the actual weather
conditions as well as for clear-sky conditions.
The geographical coverage is the field-of-view of the Meteosat satellite,
roughly speaking Europe, Africa, Atlantic Ocean, Middle East. The time coverage
of data is from 2004-02-01 up to 2 days ago. Data are available with a time step
ranging from 15 min to 1 month. For license terms and to create an account,
please see <http://www.soda-pro.com/web-services/radiation/cams-radiation-service>. ",2016-11-30,Lukas Lundstrom,https://github.com/ropenscilabs/camsRad,TRUE,https://github.com/ropenscilabs/camsrad,12475,8,2019-12-09T12:16:03Z,1559.375
camtrapR,"Management of and data extraction from camera trap data in wildlife studies. The package provides a workflow for storing and sorting camera trap photos (and videos), tabulates records of species and individuals, and creates detection/non-detection matrices for occupancy and spatial capture-recapture analyses with great flexibility. In addition, it can visualise species activity data and provides simple mapping functions with GIS export.",2020-04-23,Juergen Niedballa,"https://github.com/jniedballa/camtrapR,
https://jniedballa.github.io/camtrapR,
https://groups.google.com/forum/#!forum/camtrapr",TRUE,https://github.com/jniedballa/camtrapr,47112,2,2020-05-25T19:10:24Z,23556
cancensus,"Integrated, convenient, and uniform access to Canadian
Census data and geography retrieved using the 'CensusMapper' API. This package produces analysis-ready
tidy data frames and spatial data in multiple formats, as well as convenience functions
for working with Census variables, variable hierarchies, and region selection. API
keys are freely available with free registration at <https://censusmapper.ca/api>.
Census data and boundary geometries are reproduced and distributed on an ""as
is"" basis with the permission of Statistics Canada (Statistics Canada 2001; 2006;
2011; 2016).",2020-05-12,Jens von Bergmann,"https://github.com/mountainMath/cancensus,
https://mountainmath.github.io/cancensus/,
https://censusmapper.ca/api",TRUE,https://github.com/mountainmath/cancensus,15416,45,2020-05-30T09:12:48Z,342.5777777777778
candisc,"Functions for computing and visualizing
generalized canonical discriminant analyses and canonical correlation analysis
for a multivariate linear model.
Traditional canonical discriminant analysis is restricted to a one-way 'MANOVA'
design and is equivalent to canonical correlation analysis between a set of quantitative
response variables and a set of dummy variables coded from the factor variable.
The 'candisc' package generalizes this to higher-way 'MANOVA' designs
for all factors in a multivariate linear model,
computing canonical scores and vectors for each term. The graphic functions provide low-rank (1D, 2D, 3D)
visualizations of terms in an 'mlm' via the 'plot.candisc' and 'heplot.candisc' methods. Related plots are
now provided for canonical correlation analysis when all predictors are quantitative.",2020-04-22,Michael Friendly,NA,TRUE,https://github.com/friendly/candisc,159436,2,2020-05-17T17:06:12Z,79718
Canopy,"A statistical framework and computational procedure for identifying
the sub-populations within a tumor, determining the mutation profiles of each
subpopulation, and inferring the tumor's phylogenetic history. The input are
variant allele frequencies (VAFs) of somatic single nucleotide alterations
(SNAs) along with allele-specific coverage ratios between the tumor and matched
normal sample for somatic copy number alterations (CNAs). These quantities can
be directly taken from the output of existing software. Canopy provides a
general mathematical framework for pooling data across samples and sites to
infer the underlying parameters. For SNAs that fall within CNA regions, Canopy
infers their temporal ordering and resolves their phase. When there are
multiple evolutionary configurations consistent with the data, Canopy outputs
all configurations along with their confidence assessment.",2017-12-18,Yuchao Jiang,https://github.com/yuchaojiang/Canopy,TRUE,https://github.com/yuchaojiang/canopy,17935,42,2019-06-19T14:45:10Z,427.0238095238095
canprot,"Compositional analysis of differentially expressed proteins in
cancer and cell culture proteomics experiments. The data include lists of up-
and down-regulated proteins in different cancer types (breast, colorectal,
liver, lung, pancreatic, prostate) and laboratory conditions (hypoxia,
hyperosmotic stress, high glucose, 3D cell culture, and proteins secreted in
hypoxia), together with amino acid compositions computed for protein sequences
obtained from UniProt. Functions are provided to calculate compositional metrics
including protein length, carbon oxidation state, and stoichiometric hydration
state. In addition, phylostrata (evolutionary ages) of protein-coding genes are
compiled using data from Liebeskind et al. (2016) <doi:10.1093/gbe/evw113> or
Trigos et al. (2017) <doi:10.1073/pnas.1617743114>. The vignettes contain
plots of compositional differences, phylostrata for human proteins, and
references for all datasets.",2020-05-11,Jeffrey Dick,http://github.com/jedick/canprot,TRUE,https://github.com/jedick/canprot,12453,2,2020-06-08T01:02:46Z,6226.5
cansim,"Searches for, accesses, and retrieves new-format and old-format Statistics Canada data
tables, as well as individual vectors, as tidy data frames. This package deals with encoding issues, allows for
bilingual English or French language data retrieval, and bundles convenience functions
to make it easier to work with retrieved table data. Optional caching features are provided.",2020-03-13,Jens von Bergmann,"https://github.com/mountainMath/cansim,
https://mountainmath.github.io/cansim/",TRUE,https://github.com/mountainmath/cansim,10903,19,2020-05-13T01:51:44Z,573.8421052631579
canvasXpress,"Enables creation of visualizations using the CanvasXpress framework
in R. CanvasXpress is a standalone JavaScript library for reproducible research
with complete tracking of data and end-user modifications stored in a single
PNG image that can be played back. See <https://www.canvasxpress.org> for more
information.",2020-04-11,Connie Brett,https://github.com/neuhausi/canvasXpress.git,TRUE,https://github.com/neuhausi/canvasxpress,47364,233,2020-06-02T19:58:25Z,203.27896995708156
canvasXpress.data,"Contains the prepared data that is needed for the 'shiny' application examples in the
'canvasXpress' package. This package also includes datasets used for automated 'testthat' tests.
Scotto L, Narayan G, Nandula SV, Arias-Pulido H et al. (2008) <doi:10.1002/gcc.20577>.
Davis S, Meltzer PS (2007) <doi:10.1093/bioinformatics/btm254>.",2020-05-19,Connie Brett,https://github.com/neuhausi/canvasXpress.data.git,TRUE,https://github.com/neuhausi/canvasxpress.data,1725,0,2020-05-19T21:40:42Z,NA
captioner,"Provides a method for automatically numbering figures,
tables, or other objects. Captions can be displayed in full, or as citations.
This is especially useful for adding figures and tables to R markdown
documents without having to numbering them manually.",2015-07-16,Letaw Alathea,https://github.com/adletaw/captioner,TRUE,https://github.com/adletaw/captioner,35044,101,2020-02-13T19:26:46Z,346.970297029703
caracas,"Computer algebra via the 'SymPy' library (<https://www.sympy.org/>).
This makes it possible to solve equations symbolically,
find symbolic integrals, symbolic sums and other important quantities. ",2020-05-21,Mikkel Meyer Andersen,https://github.com/r-cas/caracas,TRUE,https://github.com/r-cas/caracas,2095,6,2020-06-08T13:03:28Z,349.1666666666667
caRamel,"Multi-objective optimizer initially developed for the calibration of hydrological models.
The algorithm is a hybrid of the MEAS algorithm (Efstratiadis and Koutsoyiannis (2005) <doi:10.13140/RG.2.2.32963.81446>) by using the directional search method based on the simplexes of the objective space
and the epsilon-NGSA-II algorithm with the method of classification of the parameter vectors archiving management by epsilon-dominance (Reed and Devireddy <doi:10.1142/9789812567796_0004>).",2019-05-28,Fabrice Zaoui,https://github.com/fzao/caRamel,TRUE,https://github.com/fzao/caramel,10586,1,2019-09-30T13:10:04Z,10586
CARBayes,"Implements a class of univariate and multivariate spatial generalised linear mixed models for areal unit data, with inference in a Bayesian setting using Markov chain Monte Carlo (MCMC) simulation. The response variable can be binomial, Gaussian, multinomial, Poisson or zero-inflated Poisson (ZIP), and spatial autocorrelation is modelled by a set of random effects that are assigned a conditional autoregressive (CAR) prior distribution. A number of different models are available for univariate spatial data, including models with no random effects as well as random effects modelled by different types of CAR prior, including the BYM model (Besag et al. (1991) <doi:10.1007/BF00116466>), the Leroux model (Leroux et al. (2000) <doi:10.1007/978-1-4612-1284-3_4>) and the localised model (Lee et al. (2015) <doi:10.1002/env.2348>). Additionally, a multivariate CAR (MCAR) model for multivariate spatial data is available, as is a two-level hierarchical model for modelling data relating to individuals within areas. Full details are given in the vignette accompanying this package. The initial creation of this package was supported by the Economic and Social Research Council (ESRC) grant RES-000-22-4256, and on-going development has been supported by the Engineering and Physical Science Research Council (EPSRC) grant EP/J017442/1, ESRC grant ES/K006460/1, Innovate UK / Natural Environment Research Council (NERC) grant NE/N007352/1 and the TB Alliance. ",2020-03-13,Duncan Lee,http://github.com/duncanplee/CARBayes,TRUE,https://github.com/duncanplee/carbayes,103977,3,2020-03-13T08:29:50Z,34659
CARBayesST,"Implements a class of spatio-temporal generalised linear mixed models for areal unit data, with inference in a Bayesian setting using Markov chain Monte Carlo (MCMC) simulation. The response variable can be binomial, Gaussian, or Poisson, but for some models only the binomial and Poisson data likelihoods are available. The spatio-temporal autocorrelation is modelled by random effects, which are assigned conditional autoregressive (CAR) style prior distributions. A number of different random effects structures are available, including models similar to Bernardinelli et al. (1995) <doi:10.1002/sim.4780142112>, Rushworth et al. (2014) <doi:10.1016/j.sste.2014.05.001> and Lee et al. (2016) <doi:10.1214/16-AOAS941>. Full details are given in the vignette accompanying this package. The creation of this package was supported by the Engineering and Physical Sciences Research Council (EPSRC) grant EP/J017442/1 and the Medical Research Council (MRC) grant MR/L022184/1.",2020-03-09,Duncan Lee,http://github.com/duncanplee/CARBayesST,TRUE,https://github.com/duncanplee/carbayesst,36116,4,2020-03-06T12:50:52Z,9029
carbonate,"Create beautiful images of source code using
'carbon.js'<https://carbon.now.sh/about>.",2020-02-07,Jonathan Sidi,https://github.com/yonicd/carbonate,TRUE,https://github.com/yonicd/carbonate,9488,143,2020-05-30T04:56:55Z,66.34965034965035
caret,"Misc functions for training and plotting classification and
regression models.",2020-03-20,Max Kuhn,https://github.com/topepo/caret/,TRUE,https://github.com/topepo/caret,6106514,1212,2020-03-20T03:07:25Z,5038.377887788779
caretEnsemble,"Functions for creating ensembles of caret models: caretList()
and caretStack(). caretList() is a convenience function for fitting multiple
caret::train() models to the same dataset. caretStack() will make linear or
non-linear combinations of these models, using a caret::train() model as a
meta-model, and caretEnsemble() will make a robust linear combination of
models using a GLM.",2019-12-12,Zachary A. Deane-Mayer,https://github.com/zachmayer/caretEnsemble,TRUE,https://github.com/zachmayer/caretensemble,131548,210,2020-05-01T11:15:12Z,626.4190476190477
Carlson,"Evaluation of the Carlson elliptic integrals and the incomplete elliptic integrals with complex arguments. The implementations use Carlson's algorithms <doi.org/10.1007/BF02198293>. Applications of elliptic integrals include probability distributions, geometry, physics, mechanics, electrodynamics, statistical mechanics, astronomy, geodesy, geodesics on conics, and magnetic field calculations.",2020-03-04,Stéphane Laurent,https://github.com/stla/Carlson,TRUE,https://github.com/stla/carlson,1681,0,2020-02-26T09:40:27Z,NA
cartograflow,"Functions to prepare and filter an origin-destination matrix for thematic flow mapping purposes.
This comes after Bahoken, Francoise (2016), Mapping flow matrix a contribution, PhD in Geography - Territorial sciences. See Bahoken (2017) <doi:10.4000/netcom.2565>.",2020-06-03,Sylvain Blondeau,https://github.com/fbahoken/cartogRaflow,TRUE,https://github.com/fbahoken/cartograflow,5109,6,2020-06-05T20:59:34Z,851.5
cartogram,Construct continuous and non-contiguous area cartograms.,2019-12-07,Sebastian Jeworutzki,https://github.com/sjewo/cartogram,TRUE,https://github.com/sjewo/cartogram,118226,91,2020-02-18T19:58:56Z,1299.1868131868132
cartography,"Create and integrate maps in your R workflow. This package helps
to design cartographic representations such as proportional symbols,
choropleth, typology, flows or discontinuities maps. It also offers several
features that improve the graphic presentation of maps, for instance, map
palettes, layout elements (scale, north arrow, title...), labels or legends.
See Giraud and Lambert (2017) <doi:10.1007/978-3-319-57336-6_13>.",2020-04-20,Timothée Giraud,https://github.com/riatelab/cartography/,TRUE,https://github.com/riatelab/cartography,103149,329,2020-06-09T11:07:13Z,313.5227963525836
Cascade,"A modeling tool allowing gene selection, reverse engineering, and prediction in cascade networks. Jung, N., Bertrand, F., Bahram, S., Vallat, L., and Maumy-Bertrand, M. (2014) <doi:10.1093/bioinformatics/btt705>.",2019-08-24,Frederic Bertrand,"http://www-irma.u-strasbg.fr/~fbertran/,
https://github.com/fbertran/Cascade",TRUE,https://github.com/fbertran/cascade,7323,1,2019-10-01T10:30:34Z,7323
CascadeData,"These experimental expression data (5 leukemic 'CLL' B-lymphocyte of aggressive form from 'GSE39411', <doi:10.1073/pnas.1211130110>), after B-cell receptor stimulation, are used as examples by packages such as the 'Cascade' one, a modeling tool allowing gene selection, reverse engineering, and prediction in cascade networks. Jung, N., Bertrand, F., Bahram, S., Vallat, L., and Maumy-Bertrand, M. (2014) <doi:10.1093/bioinformatics/btt705>.",2019-02-07,Frederic Bertrand,"http://www-irma.u-strasbg.fr/~fbertran/,
https://github.com/fbertran/CascadeData",TRUE,https://github.com/fbertran/cascadedata,7343,1,2019-10-01T10:35:07Z,7343
casebase,"Implements the case-base sampling approach of Hanley and Miettinen (2009) <DOI:10.2202/1557-4679.1125>,
Saarela and Arjas (2015) <DOI:10.1111/sjos.12125>, and Saarela (2015) <DOI:10.1007/s10985-015-9352-x>, for fitting flexible hazard
regression models to survival data with single event type or multiple competing causes via logistic and multinomial regression.
From the fitted hazard function, cumulative incidence, risk functions of time, treatment and profile
can be derived. This approach accommodates any log-linear hazard function of prognostic time, treatment,
and covariates, and readily allows for non-proportionality. We also provide a plot method for visualizing
incidence density via population time plots.",2017-04-28,Sahir Bhatnagar,http://sahirbhatnagar.com/casebase/,TRUE,https://github.com/sahirbhatnagar/casebase,11289,4,2020-05-28T15:59:42Z,2822.25
casen,"Funciones para realizar estadistica descriptiva e inferencia con el
disenio complejo de la Encuesta CASEN (Encuesta de Caracterizacion
Socio-Economica). Incluye datasets que permiten armonizar los codigos de
comunas que cambian entre anios y permite convertir a los codigos oficiales de
SUBDERE.
(Functions to compute descriptive and inferential statistics with CASEN
Survey [Socio-Economic Characterization Survey] complex design. Includes
datasets to harmonize commune codes that change across years and allows to
convert to official SUBDERE codes.)",2020-04-08,Mauricio Vargas,https://pachamaltese.github.io/casen/,TRUE,https://github.com/pachamaltese/casen,1755,3,2020-04-08T04:39:39Z,585
CAST,"Supporting functionality to run 'caret' with spatial or spatial-temporal data. 'caret' is a frequently used package for model training and prediction using machine learning. This package includes functions to improve spatial-temporal modelling tasks using 'caret'. It prepares data for Leave-Location-Out and Leave-Time-Out cross-validation which are target-oriented validation strategies for spatial-temporal models. To decrease overfitting and improve model performances, the package implements a forward feature selection that selects suitable predictor variables in view to their contribution to the target-oriented performance.",2020-05-19,Hanna Meyer,https://github.com/HannaMeyer/CAST,TRUE,https://github.com/hannameyer/cast,17071,29,2020-06-05T12:29:45Z,588.6551724137931
cat.dt,"Implements the Merged Tree-CAT method (Javier Rodriguez-Cuadrado et al., 2020, <doi:10.1016/j.eswa.2019.113066>) to generate Computerized Adaptive Tests (CATs) based on a decision tree. The tree growth is controlled by merging branches with similar trait distributions and estimations. This package has the necessary tools for creating CATs and estimate the subject's ability level. ",2020-04-23,Javier Rodriguez-Cuadrado,https://github.com/jlaria/cat.dt,TRUE,https://github.com/jlaria/cat.dt,4353,1,2020-05-02T16:20:13Z,4353
categoryEncodings,"Simple, fast, and automatic encodings for category data using
a data.table backend. Most of the methods are an implementation
of ""Sufficient Representation for Categorical Variables"" by
Johannemann, Hadad, Athey, Wager (2019) <arXiv:1908.09874>,
particularly their mean, sparse principal component analysis,
low rank representation, and multinomial logit encodings.",2020-03-02,Juraj Szitas,https://github.com/JSzitas/categoryEncodings,TRUE,https://github.com/jszitas/categoryencodings,1714,0,2020-01-30T17:08:53Z,NA
cati,"Detect and quantify community assembly processes using trait values of individuals or populations, the T-statistics (Violle et al. (2012) <doi:10.1016/j.tree.2011.11.014>) and other metrics, and dedicated null models described in Taudiere & Violle (2016) <doi:10.1111/ecog.01433>.",2020-03-02,Adrien Taudiere,https://github.com/adrientaudiere/cati,TRUE,https://github.com/adrientaudiere/cati,23166,7,2019-12-16T14:34:36Z,3309.4285714285716
catsim,"Computes a structural similarity metric (after the style of
MS-SSIM for images) for binary and categorical 2D and 3D images. Can be
based on accuracy (simple matching), Cohen's kappa, Rand index, adjusted
Rand index, Jaccard index, Dice index, normalized mutual information, or
adjusted mutual information. In addition, has fast computation
of Cohen's kappa, the Rand indices, and the two mutual informations.
Implements the methods of Thompson and Maitra (2020) <arXiv:2004.09073>.",2020-05-06,Geoffrey Thompson,"http://github.com/gzt/catsim, https://gzt.github.io/catsim",TRUE,https://github.com/gzt/catsim,793,2,2020-05-06T03:46:17Z,396.5
catSurv,"Provides methods of computerized adaptive testing for survey researchers. See Montgomery and Rossiter (2019) <doi:10.1093/jssam/smz027>. Includes functionality for data fit with the classic item response methods including the latent trait model, Birnbaum's three parameter model, the graded response, and the generalized partial credit model. Additionally, includes several ability parameter estimation and item selection routines. During item selection, all calculations are done in compiled C++ code.",2019-12-09,Erin Rossiter,NA,TRUE,https://github.com/erossiter/catsurv,11590,6,2019-12-09T20:29:30Z,1931.6666666666667
cattonum,"Functions for aggregate encoding, dummy encoding,
frequency encoding, label encoding, leave-one-out encoding,
mean encoding, median encoding, and one-hot encoding.",2020-02-09,Bernie Gray,https://github.com/bfgray3/cattonum,TRUE,https://github.com/bfgray3/cattonum,11674,29,2020-06-07T00:30:51Z,402.55172413793105
causaloptim,"When causal quantities are not identifiable from the observed data, it still may be possible
to bound these quantities using the observed data. We outline a class of problems for which the
derivation of tight bounds is always a linear programming problem and can therefore, at least
theoretically, be solved using a symbolic linear optimizer. We extend and generalize the
approach of Balke and Pearl (1994) <doi:10.1016/B978-1-55860-332-5.50011-0> and we provide
a user friendly graphical interface for setting up such problems via directed acyclic
graphs (DAG), which only allow for problems within this class to be depicted. The user can
then define linear constraints to further refine their assumptions to meet their specific
problem, and then specify a causal query using a text interface. The program converts this
user defined DAG, query, and constraints, and returns tight bounds. The bounds can be
converted to R functions to evaluate them for specific datasets, and to latex code for
publication. The methods and proofs of tightness and validity of the bounds are described in
a preprint by Sachs, Gabriel, and Sjölander (2020)
<https://sachsmc.github.io/causaloptim/articles/CausalBoundsMethods.pdf>.",2020-05-07,Michael C Sachs,https://github.com/sachsmc/causaloptim,TRUE,https://github.com/sachsmc/causaloptim,1746,7,2020-05-07T14:36:26Z,249.42857142857142
CAWaR,"Tools to process ground-truth data on crop types and perform a phenology based crop type classification. These tools were developed in the scope of the CAWa project and extend on the work of Conrad et al. (2011) <doi:10.1080/01431161.2010.550647>. Moreover, they introduce an innovative classification and validation scheme that utilizes spatially independent samples as proposed by Remelgado et al. (2017) <doi:10.1002/rse2.70>.",2020-06-04,Ruben Remelgado,https://github.com/RRemelgado/fieldRS/,TRUE,https://github.com/rremelgado/fieldrs,2724,10,2020-06-02T13:27:19Z,272.4
CBDA,"Classification performed on Big Data. It uses concepts from compressive sensing, and implements ensemble predictor (i.e., 'SuperLearner') and knockoff filtering as the main machine learning and feature mining engines.",2018-04-16,Simeone Marino,https://github.com/SOCR/CBDA,TRUE,https://github.com/socr/cbda,8576,12,2020-01-23T00:51:37Z,714.6666666666666
cbsodataR,"The data and meta data from Statistics
Netherlands (<https://www.cbs.nl>) can be browsed and downloaded. The client uses
the open data API of Statistics Netherlands.",2020-02-20,Edwin de Jonge,https://github.com/edwindj/cbsodataR,TRUE,https://github.com/edwindj/cbsodatar,26311,15,2020-05-27T21:51:48Z,1754.0666666666666
ccafs,"Client for Climate Change, Agriculture, and Food Security ('CCAFS')
General Circulation Models ('GCM') data. Data is stored in Amazon 'S3', from
which we provide functions to fetch data.",2017-02-24,Scott Chamberlain,https://github.com/ropensci/ccafs,TRUE,https://github.com/ropensci/ccafs,12556,10,2019-12-09T12:18:13Z,1255.6
CCAMLRGIS,"Loads and creates spatial data, including layers and tools that are relevant
to the activities of the Commission for the Conservation of Antarctic Marine Living
Resources. Provides two categories of functions: load functions and create functions.
Load functions are used to import existing spatial layers from the online CCAMLR GIS
such as the ASD boundaries. Create functions are used to create layers from user data
such as polygons and grids.",2020-06-07,Stephane Thanassekos,https://github.com/ccamlr/CCAMLRGIS,TRUE,https://github.com/ccamlr/ccamlrgis,2600,3,2020-06-06T11:11:19Z,866.6666666666666
cchsflow,"Supporting the use of the Canadian Community Health Survey
(CCHS) by transforming variables from each cycle into harmonized,
consistent versions that span survey cycles (currently, 2001 to
2014). CCHS data used in this library is accessed and adapted in
accordance to the Statistics Canada Open Licence Agreement. This
package uses rec_with_table(), which was developed from 'sjmisc'
rec(). Lüdecke D (2018). ""sjmisc: Data and Variable Transformation
Functions"". Journal of Open Source Software, 3(26), 754.
<doi:10.21105/joss.00754>.",2020-03-30,Doug Manuel,https://github.com/Big-Life-Lab/cchsflow,TRUE,https://github.com/big-life-lab/cchsflow,2334,8,2020-03-30T16:10:11Z,291.75
cdata,"Supplies higher-order coordinatized data specification and fluid transform operators that include pivot and anti-pivot as special cases.
The methodology is described in 'Zumel', 2018, ""Fluid data reshaping with 'cdata'"", <http://winvector.github.io/FluidData/FluidDataReshapingWithCdata.html> , doi:10.5281/zenodo.1173299 .
This package introduces the idea of explicit control table specification of data transforms.
Works on in-memory data or on remote data using 'rquery' and 'SQL' database interfaces.",2020-02-01,John Mount,"https://github.com/WinVector/cdata/,
https://winvector.github.io/cdata/",TRUE,https://github.com/winvector/cdata,69573,40,2020-02-15T17:51:40Z,1739.325
cdcfluview,"The 'U.S.' Centers for Disease Control and Prevention (CDC) maintain
a portal <https://gis.cdc.gov/grasp/fluview/fluportaldashboard.html> for
accessing state, regional and national influenza statistics as well as
mortality surveillance data. The web interface makes it difficult and
time-consuming to select and retrieve influenza data. Tools are provided
to access the data provided by the portal's underlying 'API'.",2020-04-02,Bob Rudis,https://github.com/hrbrmstr/cdcfluview,TRUE,https://github.com/hrbrmstr/cdcfluview,23424,44,2020-04-01T19:57:14Z,532.3636363636364
cdcsis,"Conditional distance correlation <doi:10.1080/01621459.2014.993081> is a novel conditional dependence measurement of two multivariate random variables given a confounding variable. This package provides conditional distance correlation, performs the conditional distance correlation sure independence screening procedure for ultrahigh dimensional data <http://www3.stat.sinica.edu.tw/statistica/J28N1/J28N114/J28N114.html>, and conducts conditional distance covariance test for conditional independence assumption of two multivariate variable.",2019-07-10,Wenhao Hu,https://github.com/Mamba413/cdcsis,TRUE,https://github.com/mamba413/cdcsis,20105,1,2019-07-11T02:07:51Z,20105
cde,"Facilitates searching, download and plotting of Water Framework
Directive (WFD) reporting data for all waterbodies within the UK Environment
Agency area. The types of data that can be downloaded are: WFD status
classification data, Reasons for Not Achieving Good (RNAG) status,
objectives set for waterbodies, measures put in place to improve water
quality and details of associated protected areas. The site accessed is
<https://environment.data.gov.uk/catchment-planning/>. The data are made
available under the Open Government Licence v3.0
<https://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/>.
This package has been peer-reviewed by rOpenSci (v. 0.4.0).",2019-09-04,Rob Briers,https://github.com/ropensci/cde,TRUE,https://github.com/ropensci/cde,3845,4,2020-02-06T12:57:53Z,961.25
cder,"Connect to the California Data Exchange Center (CDEC)
Web Service <http://cdec.water.ca.gov/>. 'CDEC' provides a centralized
database to store, process, and exchange real-time hydrologic information
gathered by various cooperators throughout California. The 'CDEC' Web Service
<http://cdec.water.ca.gov/dynamicapp/wsSensorData> provides a data download
service for accessing historical records. ",2020-01-24,Michael Koohafkan,https://github.com/mkoohafkan/cder,TRUE,https://github.com/mkoohafkan/cder,4643,1,2020-01-24T17:59:37Z,4643
CDM,"
Functions for cognitive diagnosis modeling and multidimensional item response modeling
for dichotomous and polytomous item responses. This package enables the estimation of
the DINA and DINO model (Junker & Sijtsma, 2001, <doi:10.1177/01466210122032064>),
the multiple group (polytomous) GDINA model (de la Torre, 2011,
<doi:10.1007/s11336-011-9207-7>), the multiple choice DINA model (de la Torre, 2009,
<doi:10.1177/0146621608320523>), the general diagnostic model (GDM; von Davier, 2008,
<doi:10.1348/000711007X193957>), the structured latent class model (SLCA; Formann, 1992,
<doi:10.1080/01621459.1992.10475229>) and regularized latent class analysis
(Chen, Li, Liu, & Ying, 2017, <doi:10.1007/s11336-016-9545-6>).
See George, Robitzsch, Kiefer, Gross, and Uenlue (2017) <doi:10.18637/jss.v074.i02>
or Robitzsch and George (2019, <doi:10.1007/978-3-030-05584-4_26>)
for further details on estimation and the package structure.
For tutorials on how to use the CDM package see
George and Robitzsch (2015, <doi:10.20982/tqmp.11.3.p189>) as well as
Ravand and Robitzsch (2015).",2020-03-10,Alexander Robitzsch,"https://github.com/alexanderrobitzsch/CDM,
https://sites.google.com/site/alexanderrobitzsch2/software",TRUE,https://github.com/alexanderrobitzsch/cdm,231327,9,2020-03-11T14:58:36Z,25703
cdom,Wrapper functions to model and extract various quantitative information from absorption spectra of chromophoric dissolved organic matter (CDOM).,2016-03-04,Philippe Massicotte,https://github.com/PMassicotte/cdom,TRUE,https://github.com/pmassicotte/cdom,15408,4,2020-04-13T23:21:58Z,3852
censusapi,"A wrapper for the U.S. Census Bureau APIs that returns data frames of
Census data and metadata. Available datasets include the
Decennial Census, American Community Survey, Small Area Health Insurance Estimates,
Small Area Income and Poverty Estimates, Population Estimates and Projections, and more.",2019-04-13,Hannah Recht,https://github.com/hrecht/censusapi,TRUE,https://github.com/hrecht/censusapi,54265,100,2020-03-28T14:37:14Z,542.65
censusxy,"Provides access to the U.S. Census Bureau's A.P.I for matching American
street addresses with their longitude and latitude. This includes both single address matching
as well as batch functionality for multiple addresses. Census geographies can be appended to
addresses if desired, and reverse geocoding of point locations to census geographies is also
supported. ",2020-05-28,Christopher Prener,https://github.com/slu-openGIS/censusxy,TRUE,https://github.com/slu-opengis/censusxy,4609,6,2020-05-28T18:46:28Z,768.1666666666666
CePa,Use pathway topology information to assign weight to pathway nodes.,2020-02-25,Zuguang Gu,https://github.com/jokergoo/CePa,TRUE,https://github.com/jokergoo/cepa,25818,0,2020-05-23T20:01:40Z,NA
cepR,"
Retorna detalhes de dados de CEPs brasileiros, bairros, logradouros
e tal. (Returns info of Brazilian postal codes, city names, addresses
and so on.)",2020-06-02,Robert Myles McDonnell,https://github.com/RobertMyles/cepR,TRUE,https://github.com/robertmyles/cepr,10812,13,2020-06-02T15:29:11Z,831.6923076923077
cepreader,"Read Condensed Cornell Ecology Program ('CEP') and legacy
'CANOCO' files into R data frames.",2019-05-08,Jari Oksanen,"https://cran.r-project.org/,
https://github.com/vegandevs/cepreader/",TRUE,https://github.com/vegandevs/cepreader,9737,0,2020-02-06T12:46:34Z,NA
ceramic,"Download imagery tiles to a standard cache and load the data into raster objects.
Facilities for 'AWS' terrain <https://aws.amazon.com/public-datasets/terrain/> and 'Mapbox'
<https://www.mapbox.com/> servers are provided. ",2019-07-20,Michael Sumner,https://github.com/hypertidy/ceramic,TRUE,https://github.com/hypertidy/ceramic,4260,64,2020-06-07T08:05:54Z,66.5625
cetcolor,"Collection of perceptually uniform colour maps made by Peter Kovesi
(2015) ""Good Colour Maps: How to Design Them"" <arXiv:1509.03700>
at the Centre for Exploration Targeting (CET).",2018-07-10,James Balamuta,"https://github.com/coatless/cetcolor,
http://thecoatlessprofessor.com/projects/cetcolor/,
http://peterkovesi.com/projects/colourmaps/",TRUE,https://github.com/coatless/cetcolor,11226,22,2020-01-07T19:51:51Z,510.27272727272725
ceterisParibus,"Ceteris Paribus Profiles (What-If Plots) are designed to present model
responses around selected points in a feature space.
For example around a single prediction for an interesting observation.
Plots are designed to work in a model-agnostic fashion, they are working
for any predictive Machine Learning model and allow for model comparisons.
Ceteris Paribus Plots supplement the Break Down Plots from 'breakDown' package.",2020-03-28,Przemyslaw Biecek,https://pbiecek.github.io/ceterisParibus/,TRUE,https://github.com/pbiecek/ceterisparibus,11941,38,2020-03-26T10:31:57Z,314.2368421052632
cgdsr,"Provides a basic set of R functions for querying the Cancer
Genomics Data Server (CGDS), hosted by the Computational Biology Center at
Memorial-Sloan-Kettering Cancer Center (MSKCC) at <www.cbioportal.org>.",2019-06-26,Anders Jacobsen,https://github.com/cBioPortal/cgdsr,TRUE,https://github.com/cbioportal/cgdsr,61715,17,2019-06-25T20:17:02Z,3630.294117647059
CGE,"Developing general equilibrium models, computing general equilibrium and simulating economic dynamics with structural dynamic models in LI (2019, ISBN: 9787521804225) ""General Equilibrium and Structural Dynamics: Perspectives of New Structural Economics. Beijing: Economic Science Press"". When developing complex general equilibrium models, GE package should be used in addition to this package.",2020-05-24,LI Wu,NA,TRUE,https://github.com/liwur/cge,13465,0,2020-01-31T02:42:46Z,NA
CGGP,"Run computer experiments using the adaptive composite grid
algorithm with a Gaussian process model.
The algorithm works best when running an experiment that can evaluate thousands
of points from a deterministic computer simulation.
This package is an implementation of a forthcoming paper by Plumlee,
Erickson, Ankenman, et al. For a preprint of the paper,
contact the maintainer of this package.",2020-03-29,Collin Erickson,https://github.com/CollinErickson/CGGP,TRUE,https://github.com/collinerickson/cggp,4674,1,2020-03-31T00:06:03Z,4674
cghRA,"Provides functions to import data from Agilent CGH arrays and process them according to the cghRA workflow. Implements several algorithms such as WACA, STEPS and cnvScore and an interactive graphical interface.",2017-03-03,Sylvain Mareschal,http://www.ovsa.fr/cghRA,TRUE,https://github.com/maressyl/r.cghra,13310,0,2020-05-03T10:33:49Z,NA
CGPfunctions,Miscellaneous functions useful for teaching statistics as well as actually practicing the art. They typically are not new methods but rather wrappers around either base R or other packages.,2020-05-27,Chuck Powell,https://github.com/ibecav/CGPfunctions,TRUE,https://github.com/ibecav/cgpfunctions,18777,10,2020-05-27T17:58:24Z,1877.7
cgraph,"Allows to create, evaluate, and differentiate computational graphs in R. A computational graph is a graph representation of a multivariate function decomposed by its (elementary) operations. Nodes in the graph represent arrays while edges represent dependencies among the arrays. An advantage of expressing a function as a computational graph is that this enables to differentiate the function by automatic differentiation. The 'cgraph' package supports various operations including basic arithmetic, trigonometry operations, and linear algebra operations. It differentiates computational graphs by reverse automatic differentiation. The flexible architecture of the package makes it applicable to solve a variety of problems including local sensitivity analysis, gradient-based optimization, and machine learning.",2020-02-09,Ron Triepels,https://cgraph.org/,TRUE,https://github.com/triepels/cgraph,18971,11,2020-04-16T12:19:50Z,1724.6363636363637
chandwich,"Performs adjustments of a user-supplied independence loglikelihood
function using a robust sandwich estimator of the parameter covariance
matrix, based on the methodology in Chandler and Bate (2007)
<doi:10.1093/biomet/asm015>. This can be used for cluster correlated data
when interest lies in the parameters of the marginal distributions or for
performing inferences that are robust to certain types of model
misspecification. Functions for profiling the adjusted loglikelihoods are
also provided, as are functions for calculating and plotting confidence
intervals, for single model parameters, and confidence regions, for pairs
of model parameters. Nested models can be compared using an adjusted
likelihood ratio test.",2019-07-11,Paul J. Northrop,http://github.com/paulnorthrop/chandwich,TRUE,https://github.com/paulnorthrop/chandwich,10354,1,2019-11-26T22:58:02Z,10354
changepoint,"Implements various mainstream and specialised changepoint methods for finding single and multiple changepoints within data. Many popular non-parametric and frequentist methods are included. The cpt.mean(), cpt.var(), cpt.meanvar() functions should be your first point of call.",2016-10-04,Rebecca Killick,https://github.com/rkillick/changepoint/,TRUE,https://github.com/rkillick/changepoint,167781,79,2019-07-19T09:31:01Z,2123.8101265822784
changepoint.geo,Implements the high-dimensional changepoint detection method GeomCP and the related mappings used for changepoint detection. These methods view the changepoint problem from a geometrical viewpoint and aim to extract relevant geometrical features in order to detect changepoints. The geomcp() function should be your first point of call. References: Grundy et al. (2020) <doi:10.1007/s11222-020-09940-y>. ,2020-03-31,Thomas Grundy,https://github.com/grundy95/changepoint.geo/,TRUE,https://github.com/grundy95/changepoint.geo,1026,2,2020-03-31T13:12:13Z,513
changer,Changing the name of an existing R package is annoying but common task especially in the early stages of package development. This package (mostly) automates this task.,2018-10-21,Jouni Helske,https://github.com/helske/changer,TRUE,https://github.com/helske/changer,7866,13,2020-02-17T16:03:08Z,605.0769230769231
cheatR,"A set of functions to compare texts for similarity, and plot a graph of similarities among the compared texts. These functions were originally developed for detection of overlap in course hand-in.",2020-05-06,Mattan S. Ben-Shachar,https://mattansb.github.io/cheatR,TRUE,https://github.com/mattansb/cheatr,525,16,2020-05-06T19:48:15Z,32.8125
chebpol,"Contains methods for creating multivariate/multidimensional
interpolations of functions on a hypercube. If available through fftw3, the DCT-II/FFT
is used to compute coefficients for a Chebyshev interpolation.
Other interpolation methods for arbitrary Cartesian grids are also provided, a piecewise multilinear,
and the Floater-Hormann barycenter method. For scattered data polyharmonic splines with a linear term
is provided. The time-critical parts are written in C for speed. All interpolants are parallelized if
used to evaluate more than one point.",2019-12-09,Simen Gaure,https://github.com/sgaure/chebpol,TRUE,https://github.com/sgaure/chebpol,41808,5,2019-12-09T11:45:16Z,8361.6
checkdown,"Creates auto checking check-fields and check-boxes for 'rmarkdown' html. It could be used in class, when teacher share materials and tasks, so student can solve some problems and check themselves. In contrast with the 'learnr' package the 'checkdown' package works without 'shiny'.",2020-05-17,George Moroz,https://agricolamz.github.io/checkdown/,TRUE,https://github.com/agricolamz/checkdown,2193,15,2020-05-20T13:01:06Z,146.2
checkLuhn,"Confirms if the number is Luhn compliant.
Can check if credit card, IMEI number or any other Luhn based number is correct.
For more info see: <https://en.wikipedia.org/wiki/Luhn_algorithm>.",2018-09-24,Adam Deacon,https://github.com/adamjdeacon/checkLuhn,TRUE,https://github.com/adamjdeacon/checkluhn,9871,2,2020-05-19T14:21:59Z,4935.5
checkmate,"Tests and assertions to perform frequent argument checks. A
substantial part of the package was written in C to minimize any worries
about execution time overhead.",2020-02-06,Michel Lang,https://github.com/mllg/checkmate,TRUE,https://github.com/mllg/checkmate,6056273,151,2020-06-06T20:14:30Z,40107.76821192053
checkpoint,"The goal of checkpoint is to solve the problem of package
reproducibility in R. Specifically, checkpoint allows you to install packages
as they existed on CRAN on a specific snapshot date as if you had a CRAN time
machine. To achieve reproducibility, the checkpoint() function installs the
packages required or called by your project and scripts to a local library
exactly as they existed at the specified point in time. Only those packages
are available to your project, thereby avoiding any package updates that came
later and may have altered your results. In this way, anyone using checkpoint's
checkpoint() can ensure the reproducibility of your scripts or projects at any
time. To create the snapshot archives, once a day (at midnight UTC) Microsoft
refreshes the Austria CRAN mirror on the ""Microsoft R Archived Network""
server (<https://mran.microsoft.com/>). Immediately after completion
of the rsync mirror process, the process takes a snapshot, thus creating the
archive. Snapshot archives exist starting from 2014-09-17.",2020-02-23,Hong Ooi,https://github.com/RevolutionAnalytics/checkpoint,TRUE,https://github.com/revolutionanalytics/checkpoint,103366,136,2020-04-24T09:03:37Z,760.0441176470588
checkr,"Expressive, assertive, pipe-friendly functions
to check the properties of common R objects.
In the case of failure the functions issue informative error messages.",2019-04-25,Joe Thorley,https://github.com/poissonconsulting/checkr,TRUE,https://github.com/poissonconsulting/checkr,21015,9,2020-05-12T19:54:40Z,2335
cheddar,"Provides a flexible, extendable representation of an ecological community and a range of functions for analysis and visualisation, focusing on food web, body mass and numerical abundance data. Allows inter-web comparisons such as examining changes in community structure over environmental, temporal or spatial gradients.",2020-02-13,Lawrence Hudson with contributions from Dan Reuman and Rob Emerson,https://github.com/quicklizard99/cheddar/,TRUE,https://github.com/quicklizard99/cheddar,30470,12,2020-02-12T20:37:21Z,2539.1666666666665
cheese,"Contains tools for working with data during statistical analysis, promoting flexible, intuitive, and reproducible workflows. There are functions designated for specific statistical tasks such building a custom univariate descriptive table, computing pairwise association statistics, etc. These are built on a collection of data manipulation tools designed for general use that are motivated by the functional programming concept.",2020-04-30,Alex Zajichek,"https://zajichek.github.io/cheese,
https://github.com/zajichek/cheese",TRUE,https://github.com/zajichek/cheese,6629,0,2020-04-30T13:07:00Z,NA
chemCal,"Simple functions for plotting linear
calibration functions and estimating standard errors for measurements
according to the Handbook of Chemometrics and Qualimetrics: Part A
by Massart et al. There are also functions estimating the limit
of detection (LOD) and limit of quantification (LOQ).
The functions work on model objects from - optionally weighted - linear
regression (lm) or robust linear regression ('rlm' from the 'MASS' package).",2018-07-17,Johannes Ranke,"https://pkgdown.jrwb.de/chemCal,
https://cgit.jrwb.de/chemCal/about",TRUE,https://github.com/jranke/chemcal,29528,2,2020-05-20T06:44:47Z,14764
ChemometricsWithR,"Functions and scripts used in the book ""Chemometrics with R - Multivariate Data Analysis in the Natural Sciences and Life Sciences"" by Ron Wehrens, Springer (2011). Data used in the package are available from github.",2019-01-07,Ron Wehrens,https://github.com/rwehrens/CWR,TRUE,https://github.com/rwehrens/cwr,42082,4,2019-12-18T11:24:57Z,10520.5
ChemoSpec,"A collection of functions for top-down exploratory data analysis
of spectral data including nuclear magnetic resonance (NMR), infrared (IR),
Raman, X-ray fluorescence (XRF) and other similar types of spectroscopy.
Includes functions for plotting and inspecting spectra, peak alignment,
hierarchical cluster analysis (HCA), principal components analysis (PCA) and
model-based clustering. Robust methods appropriate for this type of
high-dimensional data are available. ChemoSpec is designed for structured
experiments, such as metabolomics investigations, where the samples fall into
treatment and control groups. Graphical output is formatted consistently for
publication quality plots. ChemoSpec is intended to be very user friendly and
to help you get usable results quickly. A vignette covering typical operations
is available.",2020-01-24,Bryan A. Hanson,https://bryanhanson.github.io/ChemoSpec/,TRUE,https://github.com/bryanhanson/chemospec,58648,32,2020-01-24T20:03:09Z,1832.75
ChemoSpec2D,"A collection of functions for exploratory chemometrics of 2D spectroscopic data sets such as COSY (correlated spectroscopy) and HSQC (heteronuclear single quantum coherence) 2D NMR (nuclear magnetic resonance) spectra. 'ChemoSpec2D' deploys methods aimed primarily at classification of samples and the identification of spectral features which are important in distinguishing samples from each other. Each 2D spectrum (a matrix) is treated as the unit of observation, and thus the physical sample in the spectrometer corresponds to the sample from a statistical perspective. In addition to chemometric tools, a few tools are provided for plotting 2D spectra, but these are not intended to replace the functionality typically available on the spectrometer. 'ChemoSpec2D' takes many of its cues from 'ChemoSpec' and tries to create consistent graphical output and to be very user friendly.",2020-02-19,Bryan A. Hanson,https://github.com/bryanhanson/ChemoSpec2D,TRUE,https://github.com/bryanhanson/chemospec2d,8109,1,2020-02-19T16:50:01Z,8109
ChemoSpecUtils,Functions supporting the common needs of packages 'ChemoSpec' and 'ChemoSpec2D'.,2020-04-20,Bryan A. Hanson,https://github.com/bryanhanson/ChemoSpecUtils,TRUE,https://github.com/bryanhanson/chemospecutils,18241,0,2020-04-20T13:12:01Z,NA
childesr,"Tools for connecting to 'CHILDES', an open repository for
transcripts of parent-child interaction. For more information on the
underlying data, see <http://childes-db.stanford.edu>.",2019-10-17,Mika Braginsky,https://github.com/langcog/childesr,TRUE,https://github.com/langcog/childesr,9761,7,2019-10-16T23:04:44Z,1394.4285714285713
chilemapas,"Mapas terrestres con topologias simplificadas. Estos mapas no
tienen precision geodesica, por lo que aplica el DFL-83 de 1979 de la Republica
de Chile y se consideran referenciales sin validez legal.
No se incluyen los territorios antarticos y bajo ningun evento estos mapas
significan que exista una cesion u ocupacion de territorios soberanos en
contra del Derecho Internacional por parte de Chile. Esta paquete esta
documentado intencionalmente en castellano asciificado para que funcione sin
problema en diferentes plataformas.
(Terrestrial maps with simplified topologies. These maps lack geodesic
precision, therefore DFL-83 1979 of the Republic of Chile applies and are
considered to have no legal validity.
Antartic territories are excluded and under no event these maps mean
there is a cession or occupation of sovereign territories against International
Laws from Chile. This package was intentionally documented in asciified
spanish to make it work without problem on different platforms.)",2020-03-28,Mauricio Vargas,https://pachamaltese.github.io/chilemapas/,TRUE,https://github.com/pachamaltese/chilemapas,2876,11,2020-04-23T17:32:41Z,261.45454545454544
chisq.posthoc.test,Perform post hoc analysis based on residuals of Pearson's Chi-squared Test for Count Data based on T. Mark Beasley & Randall E. Schumacker (1995) <doi: 10.1080/00220973.1995.9943797>.,2019-10-25,Daniel Ebbert,http://chisq-posthoc-test.ebbert.nrw/,TRUE,https://github.com/ebbertd/chisq.posthoc.test,4588,0,2019-11-06T11:01:56Z,NA
chk,"For developers to check user-supplied function
arguments. It is designed to be simple, fast and customizable. Error
messages follow the tidyverse style guide.",2020-05-29,Joe Thorley,https://github.com/poissonconsulting/chk,TRUE,https://github.com/poissonconsulting/chk,11567,23,2020-05-29T17:36:36Z,502.9130434782609
chlorpromazineR,"As different antipsychotic medications have different potencies,
the doses of different medications cannot be directly compared. Various
strategies are used to convert doses into a common reference so that
comparison is meaningful. Chlorpromazine (CPZ) has historically been used
as a reference medication into which other antipsychotic doses can be
converted, as ""chlorpromazine-equivalent doses"". Using conversion keys
generated from widely-cited scientific papers (Gardner et. al 2010
<doi:10.1176/appi.ajp.2009.09060802>, Leucht et al. 2016
<doi:10.1093/schbul/sbv167>), antipsychotic doses are converted
to CPZ (or any specified antipsychotic) equivalents. The use of the package
is described in the included vignette. Not for clinical use.",2019-10-11,Eric Brown,https://github.com/ropensci/chlorpromazineR,TRUE,https://github.com/ropensci/chlorpromaziner,3354,5,2020-02-12T01:47:27Z,670.8
cholera,"Amends errors, augments data and aids analysis of John Snow's map
of the 1854 London cholera outbreak.",2019-08-28,Peter Li,https://github.com/lindbrook/cholera,TRUE,https://github.com/lindbrook/cholera,15617,111,2020-06-05T21:37:29Z,140.6936936936937
chorrrds,"Extracts music chords from the 'CifraClub' website <https://www.cifraclub.com.br/>.
The package also has functions for cleaning the extracted data and
feature extraction.",2020-03-16,Bruna Wundervald,https://github.com/r-music/chorrrds,TRUE,https://github.com/r-music/chorrrds,14576,78,2020-03-16T12:24:23Z,186.87179487179486
chromer,"A programmatic interface to the Chromosome Counts Database
(http://ccdb.tau.ac.il/). This package is part of the rOpenSci suite
(http://ropensci.org)",2015-01-13,Matthew Pennell,http://www.github.com/ropensci/chromer,TRUE,https://github.com/ropensci/chromer,18400,6,2019-11-29T04:55:48Z,3066.6666666666665
chromseq,"Chromosome files in the 'Fasta' format usually contain large sequences like human genome.
Sometimes users have to split these chromosomes into different files according to their
chromosome number. The 'chromseq' can help to handle this. So the selected chromosome sequence can be
used for downstream analysis like motif finding. Howard Y. Chang(2019)
<doi:10.1038/s41587-019-0206-z>.",2020-05-11,Shaoqian Ma,https://github.com/MSQ-123/chromseq,TRUE,https://github.com/msq-123/chromseq,439,0,2020-05-19T08:34:02Z,NA
chunked,"Data stored in text file can be processed chunkwise using 'dplyr' commands. These
are recorded and executed per data chunk, so large files can be processed with
limited memory using the 'LaF' package.",2020-03-24,Edwin de Jonge,https://github.com/edwindj/chunked,TRUE,https://github.com/edwindj/chunked,20865,145,2020-05-14T13:58:52Z,143.89655172413794
cicerone,Provide step by step guided tours of 'Shiny' applications.,2020-02-29,John Coene,https://cicerone.john-coene.com/,TRUE,https://github.com/johncoene/cicerone,1758,69,2020-04-03T16:57:53Z,25.47826086956522
cimir,"Connect to the California Irrigation Management
Information System (CIMIS) Web API. See the CIMIS main page
<https://cimis.water.ca.gov> and web API documentation
<https://et.water.ca.gov> for more information.",2020-01-22,Michael Koohafkan,https://github.com/mkoohafkan/cimir,TRUE,https://github.com/mkoohafkan/cimir,7188,3,2020-01-24T18:00:33Z,2396
circlize,"Circular layout is an efficient way for the visualization of huge
amounts of information. Here this package provides an implementation
of circular layout generation in R as well as an enhancement of available
software. The flexibility of the package is based on the usage of low-level
graphics functions such that self-defined high-level graphics can be easily
implemented by users for specific purposes. Together with the seamless
connection between the powerful computational and visual environment in R,
it gives users more convenience and freedom to design figures for
better understanding complex patterns behind multiple dimensional data.
The package is described in Gu et al. 2014 <doi:10.1093/bioinformatics/btu393>.",2020-04-30,Zuguang Gu,"https://github.com/jokergoo/circlize,
http://jokergoo.github.io/circlize_book/book/",TRUE,https://github.com/jokergoo/circlize,562765,545,2020-06-09T12:21:15Z,1032.5963302752293
circumplex,"Tools for analyzing and visualizing circular data,
including scoring functions for relevant instruments and a
generalization of the bootstrapped structural summary method from
Zimmermann & Wright (2017) <doi:10.1177/1073191115621795> and
functions for creating publication-ready tables and figures from the
results. Future versions will include tools for circular fit and
reliability analyses, as well as visualization enhancements.",2020-04-29,Jeffrey Girard,https://github.com/jmgirard/circumplex,TRUE,https://github.com/jmgirard/circumplex,15600,6,2020-06-03T02:40:49Z,2600
cIRT,"Jointly model the accuracy of cognitive responses and item choices
within a Bayesian hierarchical framework as described by Culpepper and
Balamuta (2015) <doi:10.1007/s11336-015-9484-7>. In addition, the package
contains the datasets used within the analysis of the paper.",2020-03-23,Steven Andrew Culpepper,"https://tmsalab.github.io/cIRT, https://github.com/tmsalab/cIRT",TRUE,https://github.com/tmsalab/cirt,19292,3,2020-03-22T20:03:15Z,6430.666666666667
citecorp,"Client for the Open Citations Corpus (<http://opencitations.net/>).
Includes a set of functions for getting one identifier type from another,
as well as getting references and citations for a given identifier.",2020-04-16,Scott Chamberlain,"https://github.com/ropenscilabs/citecorp (devel),
https://docs.ropensci.org/citecorp/ (docs)",TRUE,https://github.com/ropenscilabs/citecorp,4740,10,2020-04-15T16:48:07Z,474
ciTools,"Functions to append confidence intervals, prediction intervals,
and other quantities of interest to data frames. All appended quantities
are for the response variable, after conditioning on the model and covariates.
This package has a data frame first syntax that allows for easy piping.
Currently supported models include (log-) linear, (log-) linear mixed,
generalized linear models, generalized linear mixed models, and
accelerated failure time models.",2019-01-08,John Haman,https://github.com/jthaman/ciTools,TRUE,https://github.com/jthaman/citools,30413,93,2019-07-10T22:42:16Z,327.02150537634407
citr,"Functions and an 'RStudio' add-in that search 'Bib(La)TeX'-files or
'Zotero' libraries (via the 'Better BibTeX' plugin) to insert formatted Markdown
citations into the current document.",2019-08-19,Frederik Aust,https://github.com/crsh/citr,TRUE,https://github.com/crsh/citr,42918,286,2020-06-04T08:42:08Z,150.06293706293707
civis,"A convenient interface for making
requests directly to the 'Civis Platform API' <https://www.civisanalytics.com/platform/>.
Full documentation available 'here' <https://civisanalytics.github.io/civis-r/>.",2020-02-24,Patrick Miller,https://github.com/civisanalytics/civis-r,TRUE,https://github.com/civisanalytics/civis-r,96642,13,2020-05-20T20:18:26Z,7434
classInt,Selected commonly used methods for choosing univariate class intervals for mapping or other graphics purposes.,2020-04-07,Roger Bivand,"https://r-spatial.github.io/classInt/,
https://github.com/r-spatial/classInt/",TRUE,https://github.com/r-spatial/classint,2930670,20,2020-03-27T19:03:29Z,146533.5
classyfireR,Access to the ClassyFire RESTful API <http://classyfire.wishartlab.com>. Retrieve existing entity classifications and submit new entities for classification. ,2020-02-18,Tom Wilson,https://github.com/aberHRML/classyfireR,TRUE,https://github.com/aberhrml/classyfirer,11564,2,2020-02-18T10:42:33Z,5782
cld2,"Bindings to Google's C++ library Compact Language Detector 2
(see <https://github.com/cld2owners/cld2#readme> for more information). Probabilistically
detects over 80 languages in plain text or HTML. For mixed-language input it returns the
top three detected languages and their approximate proportion of the total classified
text bytes (e.g. 80% English and 20% French out of 1000 bytes). There is also a 'cld3'
package on CRAN which uses a neural network model instead.",2018-05-11,Jeroen Ooms,"https://github.com/ropensci/cld2 (devel)
https://github.com/cld2owners/cld2 (upstream)",TRUE,https://github.com/ropensci/cld2,25117,31,2019-12-08T22:40:50Z,810.2258064516129
cld3,"Google's Compact Language Detector 3 is a neural network model for language
identification and the successor of 'cld2' (available from CRAN). The algorithm is still
experimental and takes a novel approach to language detection with different properties
and outcomes. It can be useful to combine this with the Bayesian classifier results
from 'cld2'. See <https://github.com/google/cld3#readme> for more information.",2020-01-31,Jeroen Ooms,"https://docs.ropensci.org/cld3, https://github.com/ropensci/cld3
(devel) https://github.com/google/cld3 (upstream)",TRUE,https://github.com/ropensci/cld3,20096,23,2020-01-31T11:29:27Z,873.7391304347826
clean,"A wrapper around the new 'cleaner' package, that allows
data cleaning functions for classes 'logical', 'factor', 'numeric',
'character', 'currency' and 'Date' to make data cleaning fast and
easy. Relying on very few dependencies, it provides smart guessing,