# Most Starred R Packages
# Analysis script: pulls CRAN package metadata, resolves GitHub repositories,
# collects star counts and download totals, then summarizes and plots them.
# load packages & custom functions ---------------------------------------------
library(tidyverse)
library(httr)
library(cranlogs)
library(ggrepel)
library(scales)
library(lubridate)
library(knitr)
library(stringr)
gh_from_url <- function(x){
  # Extract and normalize a GitHub repository URL from a DESCRIPTION URL field.
  # The field may list several URLs separated by commas (preferred) or spaces.
  # Returns NA_character_ (without warnings) when no GitHub URL is present.
  gh_pattern <- 'http://github.com|https://github.com|http://www.github.com'
  # Comma-separated fields take precedence; otherwise split on whitespace.
  sep <- if (grepl(',', x)) ',' else ' '
  parts <- trimws(strsplit(x, sep, fixed = TRUE)[[1]])
  hits <- which(grepl(gh_pattern, parts, ignore.case = TRUE))
  if (length(hits) == 0) {
    # Original code indexed with min(integer(0)) here, emitting a warning
    # before yielding NA; return NA_character_ explicitly instead.
    return(NA_character_)
  }
  x <- parts[min(hits)]
  # Normalize: lowercase, https scheme, canonical host, no trailing slash,
  # strip /issues paths and a trailing .git suffix (anchored so repository
  # names merely containing ".git" are not mangled).
  x <- gsub("http://", "https://", tolower(x))
  x <- gsub("www\\.github\\.com", "github.com", x)
  x <- gsub("/$", "", x)
  x <- gsub("^github.com", "https://github.com", x)
  x <- gsub("/issues", "", x)
  x <- gsub("\\.git$", "", x)
  return(x)
}
aut_maintainer_from_details <- function(x){
  # Pull the maintainer (author-creator) name out of a DESCRIPTION Author
  # field, dropping role annotations, parentheticals, and email addresses.
  cleaned <- gsub("'|\"", "", x)
  if (grepl(',', cleaned)) {
    # Multiple people listed: split on the "]," that closes each person's
    # role annotation and prefer the entry tagged [aut, cre] / [cre].
    people <- strsplit(cleaned, "\\],")[[1]]
    cre_pattern <- '\\[aut, cre|\\[cre, aut|\\[cre'
    is_cre <- grepl(pattern = cre_pattern, people, ignore.case = TRUE)
    if (any(is_cre)) {
      cleaned <- gsub(cre_pattern, "", people[min(which(is_cre))])
    } else {
      cleaned <- people
    }
    # Keep only the text before the first remaining comma, then strip
    # leftover bracket annotations.
    cleaned <- strsplit(cleaned, ",")[[1]][1]
    cleaned <- trimws(gsub("\\]", "", cleaned))
    cleaned <- trimws(gsub(" \\[aut", "", cleaned))
  }
  # Drop a trailing parenthetical (e.g. an ORCID) and a trailing <email>.
  cleaned <- trimws(gsub(" \\(.*\\)$", "", cleaned))
  cleaned <- trimws(gsub(" <.*>$", "", cleaned))
  return(cleaned)
}
gh_star_count <- function(url){
  # Look up the stargazer count for a GitHub repository via the GitHub API.
  # Relies on the globally configured `gtoken` auth (set up below).
  # Returns NA_integer_ on any request or parsing failure.
  api_url <- gsub("https://github.com/", "https://api.github.com/repos/", url)
  tryCatch({
    resp <- GET(api_url, gtoken)
    stop_for_status(resp)
    content(resp)$stargazers_count
  }, error = function(e) NA_integer_)
}
gh_last_commit_date <- function(url){
  # Fetch the committer date of the most recent commit for a GitHub repo
  # (one-item page from the commits endpoint), using the global `gtoken`.
  # Returns NA_character_ on any request or parsing failure.
  api_url <- gsub("https://github.com/", "https://api.github.com/repos/", url)
  tryCatch({
    resp <- GET(paste0(api_url, "/commits?page=1&per_page=1"), gtoken)
    stop_for_status(resp)
    content(resp)[[1]]$commit$committer$date
  }, error = function(e) NA_character_)
}
# authenticate to github -------------------------------------------------------
# SECURITY NOTE(review): the literals below are the demo key/secret shipped
# with the httr examples ("Hadley's key"). Hard-coding credentials in source
# is unsafe; prefer supplying your own via the GITHUB_KEY / GITHUB_SECRET
# environment variables. The demo values remain as fallbacks so existing
# behavior is unchanged when the variables are unset.
gh_key <- Sys.getenv("GITHUB_KEY", unset = "56b637a5baffac62cad9")
gh_secret <- Sys.getenv("GITHUB_SECRET", unset = "8e107541ae1791259e9987d544ca568633da2ebf")
myapp <- oauth_app("github", key = gh_key, secret = gh_secret)
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
gtoken <- config(token = github_token)
# pull list of packages --------------------------------------------------------
# snapshot of all packages currently available on CRAN
pkgs <- tools::CRAN_package_db()
# the raw db repeats the MD5sum column; tibbles reject duplicate names,
# so keep only the first occurrence of each column, then de-duplicate rows
pkgs <- pkgs[, unique(names(pkgs))] %>%
  rename(Name = Package) %>%
  distinct(Name, .keep_all = TRUE)
# get details for each package -------------------------------------------------
# WARNING: This takes awhile to complete — one cranlogs call (plus up to two
# GitHub API calls) per package.
gh_url_pattern <- 'http://github.com|https://github.com|http://www.github.com'
n_pkgs <- nrow(pkgs)
# Preallocate a list of one-row tibbles instead of growing the result with
# rbind() inside the loop (which copies the whole frame every iteration).
pkg_rows <- vector("list", n_pkgs)
for(i in seq_len(n_pkgs)){
  if(i %% 100 == 0){
    message(sprintf("Processing package #%s out of %s", i, n_pkgs))
  }
  this_url <- pkgs[i,]$URL
  # defaults for packages with no resolvable GitHub presence
  on_github <- FALSE
  this_github_url <- NA_character_
  gh_stars <- NA_integer_
  gh_last_commit <- NA_character_
  if(!is.null(this_url)){
    # grepl() on an NA URL returns FALSE, so NA fields fall through safely
    on_github <- grepl(gh_url_pattern, this_url)
    if(on_github){
      this_github_url <- gh_from_url(this_url)
      gh_stars <- gh_star_count(this_github_url)
      gh_last_commit <- gh_last_commit_date(this_github_url)
    } else {
      # check the BugReports URL as a backup (e.g. shiny references GitHub this way)
      issues_on_github <- grepl(gh_url_pattern, pkgs[i,]$BugReports)
      # isTRUE() guards against both zero-length and NA results
      if(length(issues_on_github) > 0 && isTRUE(issues_on_github)){
        this_github_url <- gh_from_url(pkgs[i,]$BugReports)
        gh_stars <- gh_star_count(this_github_url)
        gh_last_commit <- gh_last_commit_date(this_github_url)
        on_github <- TRUE
      }
    }
  } else {
    this_url <- NA_character_
  }
  downloads <- cran_downloads(pkgs[i,]$Name, from = "2014-01-01", to = "2019-04-05")
  pkg_rows[[i]] <- tibble(name = pkgs[i,]$Name,
                          description = pkgs[i,]$Description,
                          published = pkgs[i,]$Published,
                          author = aut_maintainer_from_details(pkgs[i,]$Author),
                          url = this_url,
                          github_ind = on_github,
                          github_url = this_github_url,
                          downloads = sum(downloads$count),
                          stars = gh_stars,
                          last_commit = gh_last_commit)
}
all_pkg_details <- bind_rows(pkg_rows)
# basic summary stats ----------------------------------------------------------
# drop packages whose GitHub URL points at a multi-language monorepo and
# would therefore carry an inflated star count relative to the R package
non_r_specific <- c('xgboost', 'h2o', 'feather', 'prophet', 'mlflow')
all_pkg_details_clean <- all_pkg_details %>%
  filter(!(name %in% non_r_specific)) %>%
  #filter(as_datetime(last_commit) >= today() - years(1)) %>% # MUST BE RECENTLY BEING WORKED ON IN LAST YEAR!!!
  mutate(downloads_per_star = downloads / stars,
         # division by zero stars yields Inf; treat those as missing
         downloads_per_star = replace(downloads_per_star,
                                      !is.finite(downloads_per_star),
                                      NA_real_))
# proportion of all packages listing github
sum(all_pkg_details$github_ind)
mean(all_pkg_details$github_ind)
# proportion of packages with stars
mean(!is.na(all_pkg_details$stars))
# typical number of stars per package
with(all_pkg_details_clean, mean(stars, na.rm = TRUE))
with(all_pkg_details_clean, median(stars, na.rm = TRUE))
with(all_pkg_details_clean, max(stars, na.rm = TRUE))
# typical number of downloads per package
with(all_pkg_details_clean, mean(downloads, na.rm = TRUE))
with(all_pkg_details_clean, median(downloads, na.rm = TRUE))
# percent of packages over 10 stars
with(all_pkg_details_clean, mean(stars > 10, na.rm = TRUE))
with(all_pkg_details_clean, mean(downloads_per_star, na.rm = TRUE))
with(all_pkg_details_clean, median(downloads_per_star, na.rm = TRUE))
# stars histogram --------------------------------------------------------------
# log1p x-scale so the many zero/low-star packages remain visible
ggplot(all_pkg_details_clean, aes(stars)) +
  geom_histogram(aes(fill = ..count..), bins = 60) +
  scale_x_continuous(trans = "log1p", breaks = c(0, 1, 2, 3, 10, 100, 1000, 3000)) +
  ggtitle("Distribution of GitHub Stars on R Packages") +
  labs(x = "Stars",
       y = "Count",
       fill = "Count",
       caption = "Source: api.github.com as of 6/16/18") +
  theme_bw() +
  theme(panel.grid.minor = element_blank(),
        plot.caption = element_text(hjust = 0))
# stars to downloads scatterplot -----------------------------------------------
plot_dat <- all_pkg_details_clean
# back up every name, blank all labels, then restore labels only for the
# standout packages (which() drops NA comparisons, so NA rows stay blank)
plot_dat$name2 <- plot_dat$name
plot_dat$name <- ""
standouts <- which(with(plot_dat, downloads > 10000000 | stars > 1000))
plot_dat$name[standouts] <- plot_dat$name2[standouts]
ggplot(plot_dat, aes(stars, downloads, label = name)) +
  geom_point(color = ifelse(plot_dat$name == "", "grey50", "red")) +
  geom_text_repel(box.padding = .5) +
  scale_x_continuous(labels = comma) +
  scale_y_continuous(labels = comma) +
  ggtitle("Relationship Between CRAN Downloads and GitHub Stars") +
  labs(x = "GitHub Stars",
       y = "CRAN Downloads",
       caption = "Sources:\napi.github.com as of 6/16/18\ncranlogs as of 1/1/14 - 6/15/18") +
  theme_bw() +
  theme(plot.caption = element_text(hjust = 0))
# author stats -----------------------------------------------------------------
# aggregate downloads and stars per maintainer, most-downloaded first
authors_detail <- all_pkg_details_clean %>%
  group_by(author) %>%
  summarize(downloads = sum(downloads, na.rm = TRUE),
            stars = sum(stars, na.rm = TRUE)) %>%
  mutate(downloads_per_star = downloads / stars,
         # zero-star authors would divide to Inf; store NA instead
         downloads_per_star = replace(downloads_per_star,
                                      !is.finite(downloads_per_star),
                                      NA_real_)) %>%
  arrange(desc(downloads))
# popular authors
# tribble keeps each author on the same line as their notable packages,
# avoiding the positional alignment of two parallel vectors
pop_authors <- tribble(
  ~author,             ~notable_packages,
  'Hadley Wickham',    'ggplot2, dplyr, httr',
  'Dirk Eddelbuettel', 'Rcpp, BH',
  'Yihui Xie',         'knitr, rmarkdown, bookdown',
  'Winston Chang',     'R6, shiny',
  'Jennifer Bryan',    'readxl, gapminder, googlesheets',
  'JJ Allaire',        'rstudioapi, reticulate, tensorflow',
  'Jeroen Ooms',       'jsonlite, curl, openssl',
  'Scott Chamberlain', 'geojsonio, taxize',
  'Jim Hester',        'devtools, memoise, readr',
  'Kirill Müller',     'tibble, DBI'
)
# join in the aggregated stats and prettify the column headers
author_stats <- pop_authors %>%
  inner_join(authors_detail, by = 'author') %>%
  select(author, notable_packages, downloads, stars, downloads_per_star) %>%
  mutate(downloads_per_star = round(downloads_per_star, 1)) %>%
  rename_all(~ str_to_title(gsub("_", " ", .x)))
# single author
#all_pkg_details_clean %>% filter(author == 'Dirk Eddelbuettel') %>% arrange(desc(downloads))
# top 10 lists -----------------------------------------------------------------
# Top 10 Most Starred Packages
top_starred <- all_pkg_details_clean %>%
  select(name, author, downloads, stars, downloads_per_star) %>%
  arrange(desc(stars)) %>%
  slice(1:10) %>%
  mutate(downloads_per_star = round(downloads_per_star, 1)) %>%
  rename_all(~ str_to_title(gsub("_", " ", .x)))
# Top 10 Most Downloaded Packages with stars
top_downloaded <- all_pkg_details_clean %>%
  filter(!is.na(stars)) %>%            # only packages we found on GitHub
  select(name, author, downloads, stars, downloads_per_star) %>%
  arrange(desc(downloads)) %>%
  slice(1:10) %>%
  mutate(downloads_per_star = round(downloads_per_star, 1)) %>%
  rename_all(~ str_to_title(gsub("_", " ", .x)))
# Bottom 10 Packages by Downloads per Star (frequently starred)
frequently_starred <- all_pkg_details_clean %>%
  filter(downloads > 100) %>%          # screen out barely-downloaded packages
  select(name, author, downloads, stars, downloads_per_star) %>%
  arrange(downloads_per_star) %>%
  slice(1:10) %>%
  mutate(downloads_per_star = round(downloads_per_star, 2)) %>%
  rename_all(~ str_to_title(gsub("_", " ", .x)))
# Top 10 Packages by Downloads per Star (infrequently starred)
infrequently_starred <- all_pkg_details_clean %>%
  select(name, author, downloads, stars, downloads_per_star) %>%
  arrange(desc(downloads_per_star)) %>%
  slice(1:10) %>%
  rename_all(~ str_to_title(gsub("_", " ", .x)))
# --- Data appendix: CSV of collected package details produced by the script above ---
name,description,published,author,url,github_ind,github_url,downloads,stars,last_commit
abbyyR,"Get text from images of text using Abbyy Cloud Optical Character
Recognition (OCR) API. Easily OCR images, barcodes, forms, documents with
machine readable zones, e.g. passports. Get the results in a variety of formats
including plain text and XML. To learn more about the Abbyy OCR API, see
<http://ocrsdk.com/>.",2018-05-30,Gaurav Sood,http://github.com/soodoku/abbyyR,TRUE,https://github.com/soodoku/abbyyr,25953,36,1549384577
ABCoptim,"An implementation of Karaboga (2005) Artificial Bee Colony
Optimization algorithm <http://mf.erciyes.edu.tr/abc/pub/tr06_2005.pdf>.
This (working) version is a Work-in-progress, which is
why it has been implemented using pure R code. This was developed upon the basic
version programmed in C and distributed at the algorithm's official website.",2017-11-06,George Vega Yon,"http://github.com/gvegayon/ABCoptim, http://mf.erciyes.edu.tr/abc/",TRUE,https://github.com/gvegayon/abcoptim,27147,16,1531033423
abctools,Tools for approximate Bayesian computation including summary statistic selection and assessing coverage.,2018-07-17,Matt Nunes,https://github.com/dennisprangle/abctools,TRUE,https://github.com/dennisprangle/abctools,130057,5,1531835371
ABHgenotypeR,"Easy to use functions to visualize marker data
from biparental populations. Useful for both analyzing and
presenting genotypes in the ABH format.",2016-02-04,Stefan Reuscher,http://github.com/StefanReuscher/ABHgenotypeR,TRUE,https://github.com/stefanreuscher/abhgenotyper,12634,4,1528676655
abjutils,"The Brazilian Jurimetrics Association (ABJ in
Portuguese, see <http://www.abjur.org.br/en/> for more information) is
a non-profit organization which aims to investigate and promote the
use of statistics and probability in the study of Law and its
institutions. This package implements general purpose tools used by
ABJ, such as functions for sampling and basic manipulation of
Brazilian lawsuits identification number. It also implements functions
for text cleaning, such as accentuation removal.",2019-02-07,Caio Lente,https://github.com/abjur/abjutils,TRUE,https://github.com/abjur/abjutils,10232,11,1551129780
ACMEeqtl,"We use a non-linear model, termed ACME,
that reflects a parsimonious biological model for
allelic contributions of cis-acting eQTLs.
With non-linear least-squares algorithm we
estimate maximum likelihood parameters. The ACME model
provides interpretable effect size estimates and
p-values with well controlled Type-I error.
Includes both R and (much faster) C implementations.
For more details see Palowitch et al. (2017) <doi:10.1111/biom.12810>.",2018-03-06,Andrey A Shabalin (<https://orcid.org/0000-0003-0309-6821>),https://github.com/andreyshabalin/ACMEeqtl,TRUE,https://github.com/andreyshabalin/acmeeqtl,8925,0,1550594311
adapr,"Tracks reading and writing within R scripts that are organized into
a directed acyclic graph. Contains an interactive shiny application adaprApp().
Uses git2r package, Git and file hashes to track version histories of input
and output. See package vignette for how to get started. V1.02 adds parallel
execution of project scripts and function map in vignette. Makes project
specification argument last in order. V2.0 adds project specific libraries, packrat option, and adaprSheet().",2017-11-30,Jon Gelfond,NA,TRUE,https://github.com/gelfondjal/adapr,10871,8,1553178019
adaptMCMC,Enables sampling from arbitrary distributions if the log density is known up to a constant; a common situation in the context of Bayesian inference. The implemented sampling algorithm was proposed by Vihola (2012) <DOI:10.1007/s11222-011-9269-5> and achieves often a high efficiency by tuning the proposal distributions to a user defined acceptance rate.,2018-01-14,Andreas Scheidegger,https://github.com/scheidan/adaptMCMC,TRUE,https://github.com/scheidan/adaptmcmc,25336,6,1547481671
adaptMT,"Implementation of adaptive p-value thresholding (AdaPT), including both a framework that allows the user to specify any
algorithm to learn local false discovery rate and a pool of convenient functions that implement specific
algorithms. See Lei, Lihua and Fithian, William (2016) <arXiv:1609.06035>.",2018-07-31,Lihua Lei,"https://arxiv.org/abs/1609.06035,
https://github.com/lihualei71/adaptMT",TRUE,https://github.com/lihualei71/adaptmt,3029,6,1541176886
AdaSampling,"Implements the adaptive sampling procedure, a framework for both positive unlabeled learning and learning with class label noise. Yang, P., Ormerod, J., Liu, W., Ma, C., Zomaya, A., Yang, J. (2018) <doi:10.1109/TCYB.2018.2816984>.",2018-06-27,Pengyi Yang & Dinuka Perera,https://github.com/PengyiYang/AdaSampling/,TRUE,https://github.com/pengyiyang/adasampling,4273,4,1530146361
addinslist,"Browse through a continuously updated list of existing RStudio
addins and install/uninstall their corresponding packages.",2016-09-29,Dean Attali,https://github.com/daattali/addinslist,TRUE,https://github.com/daattali/addinslist,25213,450,1545104433
ade4,"Tools for multivariate data analysis. Several methods are provided for the analysis (i.e., ordination) of one-table (e.g., principal component analysis, correspondence analysis), two-table (e.g., coinertia analysis, redundancy analysis), three-table (e.g., RLQ analysis) and K-table (e.g., STATIS, multiple coinertia analysis). The philosophy of the package is described in Dray and Dufour (2007) <doi:10.18637/jss.v022.i04>.",2018-08-31,Stéphane Dray <stephane.dray@univ-lyon1.fr>,"http://pbil.univ-lyon1.fr/ADE-4, Mailing list:
http://listes.univ-lyon1.fr/wws/info/adelist",TRUE,https://github.com/sdray/ade4,1087403,8,1539853609
adegenet,"Toolset for the exploration of genetic and genomic data. Adegenet
provides formal (S4) classes for storing and handling various genetic data,
including genetic markers with varying ploidy and hierarchical population
structure ('genind' class), alleles counts by populations ('genpop'), and
genome-wide SNP data ('genlight'). It also implements original multivariate
methods (DAPC, sPCA), graphics, statistical tests, simulation tools, distance
and similarity measures, and several spatial methods. A range of both empirical
and simulated datasets is also provided to illustrate various methods.",2018-02-02,Thibaut Jombart,https://github.com/thibautjombart/adegenet,TRUE,https://github.com/thibautjombart/adegenet,205368,69,1549425918
adegraphics,Graphical functionalities for the representation of multivariate data. It is a complete re-implementation of the functions available in the 'ade4' package.,2018-12-18,Stéphane Dray <stephane.dray@univ-lyon1.fr> and Aurélie Siberchicot <aurelie.siberchicot@univ-lyon1.fr>,"http://pbil.univ-lyon1.fr/ADE-4, Mailing list:
http://listes.univ-lyon1.fr/wws/info/adelist",TRUE,https://github.com/sdray/adegraphics,82848,5,1553534056
AdhereR,"Computation of adherence to medications from Electronic Health care
Data and visualization of individual medication histories and adherence
patterns. The package implements a set of S3 classes and
functions consistent with current adherence guidelines and definitions.
It allows the computation of different measures of
adherence (as defined in the literature, but also several original ones),
their publication-quality plotting,
the estimation of event duration and time to initiation,
the interactive exploration of patient medication history and
the real-time estimation of adherence given various parameter settings.
It scales from very small datasets stored in flat CSV files to very large
databases and from single-thread processing on mid-range consumer
laptops to parallel processing on large heterogeneous computing clusters.
It exposes a standardized interface allowing it to be used from other
programming languages and platforms, such as Python.",2019-02-11,Dan Dediu,https://github.com/ddediu/AdhereR,TRUE,https://github.com/ddediu/adherer,10205,11,1549891268
adjclust,"Implements a constrained version of hierarchical agglomerative
clustering, in which each observation is associated to a position, and
only adjacent clusters can be merged. Typical application fields in
bioinformatics include Genome-Wide Association Studies or Hi-C data
analysis, where the similarity between items is a decreasing function of
their genomic distance. Taking advantage of this feature, the implemented
algorithm is time and memory efficient. This algorithm is described in
Chapter 4 of Alia Dehman (2015)
<https://hal.archives-ouvertes.fr/tel-01288568v1>.",2018-09-26,Pierre Neuvial,https://github.com/pneuvial/adjclust,TRUE,https://github.com/pneuvial/adjclust,5695,10,1541523365
ADMMsigma,"Estimates a penalized precision matrix via the alternating direction method of multipliers (ADMM) algorithm. It currently supports a general elastic-net penalty that allows for both ridge and lasso-type penalties as special cases. This package is an alternative to the 'glasso' package.
See Boyd et al (2010) <doi:10.1561/2200000016> for details regarding the estimation method.",2018-08-02,Matt Galloway,https://github.com/MGallow/ADMMsigma,TRUE,https://github.com/mgallow/admmsigma,5984,2,1533172670
adnuts,"Bayesian inference using the no-U-turn (NUTS) algorithm by
Hoffman and Gelman (2014) <http://www.jmlr.org/papers/v15/hoffman14a.html>.
Designed for 'AD Model Builder' ('ADMB') models,
or when R functions for log-density and log-density gradient
are available, such as 'Template Model Builder' ('TMB')
models and other special cases. Functionality is similar to 'Stan',
and the 'rstan' and 'shinystan' packages are used for diagnostics and
inference.",2019-04-04,Cole Monnahan,https://github.com/colemonnahan/adnuts,TRUE,https://github.com/colemonnahan/adnuts,3904,12,1554391527
adoptr,"Optimize one or two-arm, two-stage designs for clinical trials with respect to several
pre-implemented objective criteria or implement custom objectives.
Optimization under uncertainty and conditional (given stage-one outcome) constraints are supported.",2019-04-01,Kevin Kunzmann,https://github.com/kkmann/adoptr,TRUE,https://github.com/kkmann/adoptr,389,2,1554300504
adpss,"Provides the functions for planning and conducting a
clinical trial with adaptive sample size determination. Maximal statistical
efficiency will be exploited even when dramatic or multiple adaptations
are made. Such a trial consists of adaptive determination of sample size
at an interim analysis and implementation of frequentist statistical test at the
interim and final analysis with a prefixed significance level. The required
assumptions for the stage-wise test statistics are independent and stationary
increments and normality. Predetermination of adaptation rule is not required.",2018-09-20,Kosuke Kashiwabara,https://github.com/ca4wa/R-adpss,TRUE,https://github.com/ca4wa/r-adpss,3114,0,1537434072
afex,"Convenience functions for analyzing factorial experiments using ANOVA or
mixed models. aov_ez(), aov_car(), and aov_4() allow specification of
between, within (i.e., repeated-measures), or mixed (i.e., split-plot)
ANOVAs for data in long format (i.e., one observation per row),
automatically aggregating multiple observations per individual and cell
of the design. mixed() fits mixed models using lme4::lmer() and computes
p-values for all fixed effects using either Kenward-Roger or Satterthwaite
approximation for degrees of freedom (LMM only), parametric bootstrap
(LMMs and GLMMs), or likelihood ratio tests (LMMs and GLMMs).
afex_plot() provides a high-level interface for interaction or one-way
plots using ggplot2, combining raw data and model estimates. afex uses
type 3 sums of squares as default (imitating commercial statistical software).",2019-02-19,Henrik Singmann (<https://orcid.org/0000-0002-4842-3657>),"http://afex.singmann.science/, https://github.com/singmann/afex",TRUE,https://github.com/singmann/afex,124129,51,1553249597
aftgee,"A collection of methods for both the rank-based estimates and least-square estimates
to the Accelerated Failure Time (AFT) model.
For rank-based estimation, it provides approaches that include the computationally
efficient Gehan's weight and the general's weight such as the logrank weight.
Details of the rank-based estimation can be found in
Chiou et al. (2014) <doi:10.1007/s11222-013-9388-2> and
Chiou et al. (2015) <doi:10.1002/sim.6415>.
For the least-square estimation, the estimating equation is solved with
generalized estimating equations (GEE).
Moreover, in multivariate cases, the dependence working correlation structure
can be specified in GEE's setting.
Details on the least-squares estimation can be found in
Chiou et al. (2014) <doi:10.1007/s10985-014-9292-x>.",2018-07-24,Sy Han Chiou,http://github.com/stc04003/aftgee,TRUE,https://github.com/stc04003/aftgee,28456,0,1545867174
AGD,"Tools for the analysis of growth data: to extract an
LMS table from a gamlss object, to calculate the standard
deviation scores and its inverse, and to superpose two wormplots
from different models. The package contains a some varieties of
reference tables, especially for The Netherlands.",2018-05-29,Stef van Buuren <stef.vanbuuren@tno.nl>,https://github.com/stefvanbuuren/AGD,TRUE,https://github.com/stefvanbuuren/agd,87381,1,1527715724
AGHmatrix,"Computation of A (pedigree), G (genomic-base), and H (A corrected
by G) relationship matrices for diploid and autopolyploid species. Several methods
are implemented considering additive and non-additive models.",2019-03-26,Rodrigo Amadeu,http://github.com/prmunoz/AGHmatrix,TRUE,https://github.com/prmunoz/aghmatrix,4100,1,1553206261
agop,"Tools supporting multi-criteria and group decision making,
including variable number of criteria, by means of
aggregation operators, spread measures,
fuzzy logic connectives, fusion functions,
and preordered sets. Possible applications include,
but are not limited to, quality management, scientometrics,
software engineering, etc.",2019-03-08,Marek Gagolewski,http://www.gagolewski.com/software/,TRUE,https://github.com/gagolews/agop,19318,3,1552474311
AGread,"Standardize the process of bringing various modes of output files
into R. For more information, see:
<https://actigraph.desk.com/customer/en/portal/articles/2515800-what-do-the-different-mode-numbers-mean-in-a-csv-or-dat-file->.
Additionally, processes are provided to read and minimally pre-
process raw data from primary accelerometer and inertial measurement unit files,
as well as binary .gt3x files. ActiGraph monitors are used to estimate physical
activity outcomes via body-worn sensors that measure (e.g.) acceleration or
rotational velocity.",2019-03-13,Paul R. Hibbing,https://github.com/paulhibbing/AGread,TRUE,https://github.com/paulhibbing/agread,4814,1,1552529341
agridat,"Datasets from books, papers, and websites related to agriculture.
Example graphics and analyses are included. Data come from small-plot trials,
multi-environment trials, uniformity trials, yield monitors, and more.",2018-07-06,Kevin Wright (<https://orcid.org/0000-0002-0617-8673>),https://github.com/kwstat/agridat,TRUE,https://github.com/kwstat/agridat,41094,47,1554409853
agriwater,"Spatial modeling of energy balance and actual
evapotranspiration using satellite images and meteorological data.
Options of satellite are: Landsat-8 (with and without thermal bands),
Sentinel-2 and MODIS. Respectively spatial resolutions are 30, 100,
10 and 250 meters. User can use data from a single meteorological
station or a grid of meteorological stations (using any spatial
interpolation method). Teixeira (2010) <doi:10.3390/rs0251287>.
Teixeira et al. (2015) <doi:10.3390/rs71114597>.
Silva, Manzione, and Albuquerque Filho (2018) <doi:10.3390/horticulturae4040044>.",2019-01-30,"Cesar de Oliveira Ferreira Silva
(<https://orcid.org/0000-0002-5152-6497>)",NA,TRUE,https://github.com/cesarofs/agriwater,846,1,1547247824
ahnr,"Implementation of the Artificial Hydrocarbon Networks for data
modeling.",2018-06-18,Jose Roberto Ayala Solares,https://github.com/jroberayalas/ahnr,TRUE,https://github.com/jroberayalas/ahnr,7370,1,1532034689
aire.zmvm,"Tools for downloading hourly averages, daily maximums and minimums from each of the
pollution, wind, and temperature measuring stations or geographic zones in the Mexico City
metro area. The package also includes the locations of each of the stations and zones. See
<http://aire.cdmx.gob.mx/> for more information.",2019-03-30,Diego Valle-Jones,"https://hoyodesmog.diegovalle.net/aire.zmvm/,
https://github.com/diegovalle/aire.zmvm",TRUE,https://github.com/diegovalle/aire.zmvm,8810,5,1554405358
airportr,"Retrieves open source airport data and provides tools to look up information, translate names into codes and vice-verse, as well as some basic calculation functions for measuring distances.",2018-10-06,Dmitry Shkolnik,https://github.com/dshkol/airportr,TRUE,https://github.com/dshkol/airportr,2256,4,1538849707
airr,"Schema definitions and read, write and validation tools for data
formatted in accordance with the AIRR Data Representation schemas defined
by the AIRR Community <http://docs.airr-community.org>.",2018-08-17,Jason Vander Heiden,http://docs.airr-community.org,TRUE,https://github.com/airr-community/airr-standards,3887,10,1554392190
ALA4R,"The Atlas of Living Australia (ALA) provides tools to enable users
of biodiversity information to find, access, combine and visualise data on
Australian plants and animals; these have been made available from
<https://ala.org.au/>. ALA4R provides a subset of the tools to be
directly used within R. It enables the R community to directly access data
and resources hosted by the ALA.",2019-04-02,Peggy Newman,https://github.com/AtlasOfLivingAustralia/ALA4R,TRUE,https://github.com/atlasoflivingaustralia/ala4r,12430,29,1554182431
albopictus,Implements discrete time deterministic and stochastic age-structured population dynamics models described in Erguler and others (2016) <doi:10.1371/journal.pone.0149282> and Erguler and others (2017) <doi:10.1371/journal.pone.0174293>.,2018-11-29,Kamil Erguler,https://github.com/kerguler/albopictusR,TRUE,https://github.com/kerguler/albopictusr,7574,0,1549446934
alfred,"Provides direct access to the ALFRED (<https://alfred.stlouisfed.org>) and FRED (<https://fred.stlouisfed.org>) databases.
Its functions return tidy data frames for different releases of the specified time series.
Note that this product uses the FRED© API but is not endorsed or certified by the Federal Reserve Bank of St. Louis.",2019-04-01,Onno Kleen (<https://orcid.org/0000-0003-4731-4640>),https://github.com/onnokleen/alfred/,TRUE,https://github.com/onnokleen/alfred,13415,6,1554108449
alignfigR,"Create extensible figures of multiple sequence alignments, using the 'ggplot2' plotting engine. 'alignfigr' will create a baseline figure of a multiple sequence alignment which can be fully customized to the user's liking with standard 'ggplot2' features.",2018-07-05,Stephanie J. Spielman,https://github.com/sjspielman/alignfigR,TRUE,https://github.com/sjspielman/alignfigr,3017,6,1532355701
alpaca,"Provides a routine to concentrate out factors with many levels during the
optimization of the log-likelihood function of the corresponding generalized linear model (glm).
The package is based on the algorithm proposed by Stammann (2018) <arXiv:1707.01815> and is
restricted to glm's that are based on maximum likelihood estimation and non-linear. It also offers
an efficient algorithm to recover estimates of the fixed effects in a post-estimation routine.
The package also includes robust and multi-way clustered standard errors.",2018-07-31,Amrei Stammann,https://github.com/amrei-stammann/alpaca,TRUE,https://github.com/amrei-stammann/alpaca,3137,9,1541927992
alphavantager,"
Alpha Vantage has free historical financial information.
All you need to do is get a free API key at <https://www.alphavantage.co>.
Then you can use the R interface to retrieve free equity information.
Refer to the Alpha Vantage website for more information.",2019-03-11,Matt Dancho,https://github.com/business-science/alphavantager,TRUE,https://github.com/business-science/alphavantager,19760,28,1552596113
alterryx,"A tool to access each of the 'Alteryx' Gallery 'API' endpoints.
Users can queue jobs, poll job status, and retrieve application output as
a data frame. You will need an 'Alteryx' Server license and have 'Alteryx'
Gallery running to utilize this package. The 'API' is accessed through the
'URL' that you setup for the server running 'Alteryx' Gallery and more
information on the endpoints can be found at
<https://gallery.alteryx.com/api-docs/>.",2018-11-21,Michael Treadwell,"https://github.com/mtreadwell/alterryx,
https://gallery.alteryx.com/api-docs/",TRUE,https://github.com/mtreadwell/alterryx,8815,2,1542824694
ambient,"Generation of natural looking noise has many application within
simulation, procedural generation, and art, to name a few. The 'ambient'
package provides an interface to the 'FastNoise' C++ library and allows for
efficient generation of perlin, simplex, worley, cubic, value, and white
noise with optional pertubation in either 2, 3, or 4 (in case of simplex and
white noise) dimensions.",2018-08-30,Thomas Lin Pedersen,https://github.com/thomasp85/ambient,TRUE,https://github.com/thomasp85/ambient,2465,30,1535554163
ameco,Annual macro-economic database provided by the European Commission.,2018-05-04,Eric Persson,http://github.com/expersso/ameco,TRUE,https://github.com/expersso/ameco,19586,3,1547638079
amt,"Manage and analyze animal movement data. The functionality of 'amt' includes methods to calculate track statistics (e.g. step lengths, speed, or turning angles), prepare data for fitting habitat selection analyses (resource and step-selection functions), and simulation of space-use from fitted step-selection functions.",2019-03-19,Johannes Signer,https://github.com/jmsigner/amt,TRUE,https://github.com/jmsigner/amt,8313,4,1553759254
AmyloGram,"Predicts amyloid proteins using random forests trained on the
n-gram encoded peptides. The implemented algorithm can be accessed from
both the command line and shiny-based GUI.",2017-10-11,Michal Burdukiewicz,https://github.com/michbur/AmyloGram,TRUE,https://github.com/michbur/amylogram,8446,4,1554366129
analogsea,"Provides a set of functions for interacting with the 'Digital
Ocean' API at <https://developers.digitalocean.com/documentation/v2>, including
creating images, destroying them, rebooting, getting details on regions, and
available images.",2018-01-04,Scott Chamberlain,https://github.com/sckott/analogsea,TRUE,https://github.com/sckott/analogsea,27040,99,1553629515
analogue,"Fits Modern Analogue Technique and Weighted Averaging transfer
function models for prediction of environmental data from species
data, and related methods used in palaeoecology.",2018-10-23,Gavin L. Simpson (<https://orcid.org/0000-0002-9084-8413>),https://github.com/gavinsimpson/analogue,TRUE,https://github.com/gavinsimpson/analogue,47691,9,1539121291
analysisPipelines,"Enables data scientists to compose pipelines of analysis which consist of data manipulation, exploratory analysis & reporting, as well as modeling steps. Data scientists can use tools of their choice through an R interface, and compose interoperable pipelines between R, Spark, and Python.
Credits to Mu Sigma for supporting the development of the package.
Note - To enable pipelines involving Spark tasks, the package uses the 'SparkR' package.
The SparkR package needs to be installed to use Spark as an engine within a pipeline. SparkR is distributed natively with Apache Spark and is not distributed on CRAN. The SparkR version needs to directly map to the Spark version (hence the native distribution), and care needs to be taken to ensure that this is configured properly.
To install SparkR from Github, run the following command if you know the Spark version: 'devtools::install_github('apache/spark@v2.x.x', subdir='R/pkg')'.
The other option is to install SparkR by running the following terminal commands if Spark has already been installed: '$ export SPARK_HOME=/path/to/spark/directory && cd $SPARK_HOME/R/lib/SparkR/ && R -e ""devtools::install('.')""'.",2019-01-08,Mu Sigma,https://github.com/Mu-Sigma/analysis-pipelines,TRUE,https://github.com/mu-sigma/analysis-pipelines,1312,10,1552218953
angstroms,"Helper functions for working with Regional Ocean Modeling System 'ROMS' output. See
<https://www.myroms.org/> for more information about 'ROMS'. ",2017-05-01,Michael D. Sumner,https://github.com/mdsumner/angstroms,TRUE,https://github.com/mdsumner/angstroms,5490,2,1553489875
animation,"Provides functions for animations in statistics, covering topics
in probability theory, mathematical statistics, multivariate statistics,
non-parametric statistics, sampling survey, linear models, time series,
computational statistics, data mining and machine learning. These functions
may be helpful in teaching statistics and data analysis. Also provided in this
package are a series of functions to save animations to various formats, e.g.
Flash, 'GIF', HTML pages, 'PDF' and videos. 'PDF' animations can be inserted
into 'Sweave' / 'knitr' easily.",2018-12-11,Yihui Xie,https://yihui.name/animation,TRUE,https://github.com/yihui/animation,471249,139,1553106824
ANN2,"Training of neural networks for classification and regression tasks
using mini-batch gradient descent. Special features include a function for
training autoencoders, which can be used to detect anomalies, and some
related plotting functions. Multiple activation functions are supported,
including tanh, relu, step and ramp. For the use of the step and ramp
activation functions in detecting anomalies using autoencoders, see
Hawkins et al. (2002) <doi:10.1007/3-540-46145-0_17>. Furthermore,
several loss functions are supported, including robust ones such as Huber
and pseudo-Huber loss, as well as L1 and L2 regularization. The possible
options for optimization algorithms are RMSprop, Adam and SGD with momentum.
The package contains a vectorized C++ implementation that facilitates
fast training through mini-batch learning.",2019-03-30,Bart Lammers,https://github.com/bflammers/ANN2,TRUE,https://github.com/bflammers/ann2,18351,4,1554461429
AnnotationBustR,Extraction of subsequences into FASTA files from GenBank annotations where gene names may vary among accessions.,2018-04-09,Samuel R. Borstein <sam@borstein.com>,"https://github.com/sborstein/AnnotationBustR,
https://www.ncbi.nlm.nih.gov/nuccore,
https://en.wikipedia.org/wiki/FASTA_format",TRUE,https://github.com/sborstein/annotationbustr,7471,0,1536774614
anomalize,"
The 'anomalize' package enables a ""tidy"" workflow for detecting anomalies in data.
The main functions are time_decompose(), anomalize(), and time_recompose().
When combined, it's quite simple to decompose time series, detect anomalies,
and create bands separating the ""normal"" data from the anomalous data at scale (i.e. for multiple time series).
Time series decomposition is used to remove trend and seasonal components via the time_decompose() function
and methods include seasonal decomposition of time series by Loess (""stl"") and
seasonal decomposition by piecewise medians (""twitter""). The anomalize() function implements
two methods for anomaly detection of residuals including using an inner quartile range (""iqr"")
and generalized extreme studentized deviation (""gesd""). These methods are based on
those used in the 'forecast' package and the Twitter 'AnomalyDetection' package.
Refer to the associated functions for specific references for these methods. ",2018-04-17,Matt Dancho,https://github.com/business-science/anomalize,TRUE,https://github.com/business-science/anomalize,17802,160,1523961692
antaresProcessing,"
Process results generated by 'Antares', a powerful open source software developed by
RTE (Réseau de Transport d’Électricité) to simulate and study electric power systems (more information about
'Antares' here: <https://github.com/AntaresSimulatorTeam/Antares_Simulator>). You can see the results of several ANTARES studies here : <http://bpnumerique.rte-france.com/>.
This package provides functions to create new columns like net load, load factors, upward and
downward margins or to compute aggregated statistics like economic surpluses
of consumers, producers and sectors.",2018-12-10,Jalal-Edine ZAWAM,https://github.com/rte-antares-rpackage/antaresProcessing,TRUE,https://github.com/rte-antares-rpackage/antaresprocessing,20207,8,1538147862
antaresRead,"Import, manipulate and explore results generated by 'Antares', a
powerful open source software developed by RTE (Réseau de Transport d’Électricité) to simulate and study electric power systems
(more information about 'Antares' here : <https://github.com/AntaresSimulatorTeam/Antares_Simulator>). You can see the results of several ANTARES studies here : <http://bpnumerique.rte-france.com/>. ",2019-02-13,Frederic Breant,https://github.com/rte-antares-rpackage/antaresRead,TRUE,https://github.com/rte-antares-rpackage/antaresread,26259,7,1550048909
antaresViz,"Visualize results generated by Antares, a powerful open source software
developed by RTE to simulate and study electric power systems
(more information about Antares here: <https://github.com/AntaresSimulatorTeam/Antares_Simulator>).
This package provides functions that create interactive charts to help
Antares users visually explore the results of their simulations.
You can see the results of several ANTARES studies here : <http://bpnumerique.rte-france.com/>.",2018-10-11,Jalal-Edine ZAWAM,https://github.com/rte-antares-rpackage/antaresViz,TRUE,https://github.com/rte-antares-rpackage/antaresviz,13861,12,1539250460
anthro,"Provides WHO Child Growth Standards (z-scores) with
confidence intervals and standard errors around the
prevalence estimates, taking into account complex sample designs.
More information on the methods is
available online:
<http://www.who.int/childgrowth/standards/en/>.",2019-03-23,Dirk Schumacher,https://github.com/dirkschumacher/anthro,TRUE,https://github.com/dirkschumacher/anthro,592,6,1554405078
AntWeb,"A complete programmatic interface to the AntWeb database from the
California Academy of Sciences.",2014-08-14,Karthik Ram,https://github.com/ropensci/AntWeb,TRUE,https://github.com/ropensci/antweb,24674,7,1526049821
anytime,"Convert input in any one of character, integer, numeric, factor,
or ordered type into 'POSIXct' (or 'Date') objects, using one of a number of
predefined formats, and relying on Boost facilities for date and time parsing.",2018-11-14,Dirk Eddelbuettel,http://dirk.eddelbuettel.com/code/anytime.html,TRUE,https://github.com/eddelbuettel/anytime,134743,95,1554256626
apa,"Formatter functions in the 'apa' package take the return value of a
statistical test function, e.g. a call to chisq.test() and return a string
formatted according to the guidelines of the APA (American Psychological
Association).",2019-03-04,Daniel Gromer,https://github.com/dgromer/apa,TRUE,https://github.com/dgromer/apa,14483,23,1551703797
ApacheLogProcessor,Provides capabilities to process Apache HTTPD Log files. The main functionalities are to extract data from access and error log files to data frames.,2018-07-19,Diogo Silveira Mendonca,https://github.com/diogosmendonca/ApacheLogProcessor,TRUE,https://github.com/diogosmendonca/apachelogprocessor,17190,8,1531967229
apaTables,"A common task faced by researchers is the creation of APA style
(i.e., American Psychological Association style) tables from statistical
output. In R a large number of function calls are often needed to obtain all of
the desired information for a single APA style table. As well, the process of
manually creating APA style tables in a word processor is prone to transcription
errors. This package creates Word files (.doc files) containing APA style tables
for several types of analyses. Using this package minimizes transcription errors
and reduces the number of commands needed by the user.",2018-08-29,David Stanley,https://github.com/dstanley4/apaTables,TRUE,https://github.com/dstanley4/apatables,47726,22,1540144915
apcf,"The adapted pair correlation function transfers the concept of the
pair correlation function from point patterns to patterns of objects of
finite size and irregular shape (e.g. lakes within a country). This is a
reimplementation of the method suggested by Nuske et al. (2009)
<doi:10.1016/j.foreco.2009.09.050> using the libraries 'GEOS' and 'GDAL'
directly instead of through 'PostGIS'. ",2019-01-17,Robert Nuske (<https://orcid.org/0000-0001-9773-2061>),https://github.com/rnuske/apcf,TRUE,https://github.com/rnuske/apcf,1048,1,1553173722
aphid,"Designed for the development and application of
hidden Markov models and profile HMMs for biological sequence analysis.
Contains functions for multiple and pairwise sequence alignment,
model construction and parameter optimization, file import/export,
implementation of the forward, backward and Viterbi algorithms for
conditional sequence probabilities, tree-based sequence weighting,
and sequence simulation.
Features a wide variety of potential applications including
database searching, gene-finding and annotation, phylogenetic
analysis and sequence classification.
Based on the models and algorithms described in Durbin et
al (1998, ISBN: 9780521629713).",2019-03-15,Shaun Wilkinson,http://github.com/shaunpwilkinson/aphid,TRUE,https://github.com/shaunpwilkinson/aphid,11547,9,1552783289
aprof,"Assists the evaluation of whether and
where to focus code optimization, using Amdahl's law and visual aids
based on line profiling. Amdahl's profiler organizes profiling output
files (including memory profiling) in a visually appealing way.
It is meant to help to balance development
vs. execution time by helping to identify the most promising sections
of code to optimize and projecting potential gains. The package is
an addition to R's standard profiling tools and is not a wrapper for them.",2018-05-22,Marco D. Visser,http://github.com/MarcoDVisser/aprof,TRUE,https://github.com/marcodvisser/aprof,23594,22,1526559932
aqp,"The Algorithms for Quantitative Pedology (AQP) project was started in 2009 to organize a loosely-related set of concepts and source code on the topic of soil profile visualization, aggregation, and classification into this package (aqp). Over the past 8 years, the project has grown into a suite of related R packages that enhance and simplify the quantitative analysis of soil profile data. Central to the AQP project is a new vocabulary of specialized functions and data structures that can accommodate the inherent complexity of soil profile information; freeing the scientist to focus on ideas rather than boilerplate data processing tasks <doi:10.1016/j.cageo.2012.10.020>. These functions and data structures have been extensively tested and documented, applied to projects involving hundreds of thousands of soil profiles, and deeply integrated into widely used tools such as SoilWeb <https://casoilresource.lawr.ucdavis.edu/soilweb-apps/>. Components of the AQP project (aqp, soilDB, sharpshootR, soilReports packages) serve an important role in routine data analysis within the USDA-NRCS Soil Science Division. The AQP suite of R packages offer a convenient platform for bridging the gap between pedometric theory and practice.",2019-01-03,Dylan Beaudette,https://github.com/ncss-tech/aqp,TRUE,https://github.com/ncss-tech/aqp,118737,10,1553721632
ar.matrix,Using sparse precision matrices and Choleski factorization simulates data that is auto-regressive.,2018-12-02,Neal Marquez,NA,TRUE,https://github.com/nmmarquez/ar.matrix,1531,2,1550357398
arc,"Implements the Classification-based on
Association Rules (CBA) (Bing Liu, Wynne Hsu, Yiming Ma (1999) <http://dl.acm.org/citation.cfm?id=3000292.3000305>) algorithm for association rule classification (ARC).
The package also contains several convenience methods that allow to automatically
set CBA parameters (minimum confidence, minimum support) and it also natively
handles numeric attributes by integrating a pre-discretization step.
The rule generation phase is handled by the 'arules' package.
To further decrease the size of the CBA models produced by the 'arc' package, postprocessing by the
'qCBA' package is suggested.",2018-04-18,Tomas Kliegr,https://github.com/kliegr/arc,TRUE,https://github.com/kliegr/arc,11926,2,1535645783
archiDART,"Analysis of complex plant root system architectures (RSA) using the output files created by Data Analysis of Root Tracings (DART), an open-access software dedicated to the study of plant root architecture and development across time series (Le Bot et al (2010) ""DART: a software to analyse root system architecture and development from captured images"", Plant and Soil, <DOI:10.1007/s11104-009-0005-2>), and RSA data encoded with the Root System Markup Language (RSML) (Lobet et al (2015) ""Root System Markup Language: toward a unified root architecture description language"", Plant Physiology, <DOI:10.1104/pp.114.253625>). More information can be found in Delory et al (2016) ""archiDART: an R package for the automated computation of plant root architectural traits"", Plant and Soil, <DOI:10.1007/s11104-015-2673-4>.",2018-04-03,Benjamin M Delory,https://archidart.github.io/,TRUE,https://github.com/archidart/archidart,18931,0,1554215702
archivist,"Data exploration and modelling is a process in which a lot of data
artifacts are produced. Artifacts like: subsets, data aggregates, plots,
statistical models, different versions of data sets and different versions
of results. The more projects we work with the more artifacts are produced
and the harder it is to manage these artifacts. Archivist helps to store
and manage artifacts created in R. Archivist allows you to store selected
artifacts as a binary files together with their metadata and relations.
Archivist allows to share artifacts with others, either through shared
folder or github. Archivist allows to look for already created artifacts by
using its class, name, date of the creation or other properties. Makes it
easy to restore such artifacts. Archivist allows to check if new artifact
is the exact copy that was produced some time ago. That might be useful
either for testing or caching.",2019-01-02,Przemyslaw Biecek,https://pbiecek.github.io/archivist/,TRUE,https://github.com/pbiecek/archivist,76766,52,1550870611
areal,"A pipeable, transparent implementation of areal weighted interpolation
with support for interpolating multiple variables in a single function call.
These tools provide a full-featured workflow for validation and estimation
that fits into both modern data management (e.g. tidyverse) and spatial
data (e.g. sf) frameworks.",2018-12-31,Christopher Prener (<https://orcid.org/0000-0002-4310-9888>),https://github.com/slu-openGIS/areal,TRUE,https://github.com/slu-opengis/areal,1814,30,1553395038
arena2r,Reads Arena <https://www.arenasimulation.com/> CSV output files and generates nice tables and plots. The package contains a Shiny App that can be used to interactively visualize Arena's results.,2018-10-19,Pedro Nascimento de Lima,https://github.com/pedroliman/arena2r,TRUE,https://github.com/pedroliman/arena2r,2424,0,1539982420
argonDash,"Create awesome 'Bootstrap 4' dashboards powered by 'Argon'.
See more here <https://rinterface.github.io/argonDash/>.",2018-12-03,David Granjon,https://github.com/RinteRface/argonDash,TRUE,https://github.com/rinterface/argondash,3456,34,1553607923
argonR,"R wrapper around the argon HTML library.
More at <https://demos.creative-tim.com/argon-design-system/>.",2018-11-18,David Granjon,https://github.com/RinteRface/argonR,TRUE,https://github.com/rinterface/argonr,3597,26,1554238733
argparse,"A command line parser to
be used with Rscript to write ""#!"" shebang scripts that gracefully
accept positional and optional arguments and automatically generate usage.",2019-03-08,Trevor L Davis,https://github.com/trevorld/r-argparse,TRUE,https://github.com/trevorld/r-argparse,95354,29,1552077763
aricode,"Implements an efficient O(n) algorithm based on bucket-sorting for
fast computation of standard clustering comparison measures. Available measures
include adjusted Rand index (ARI), normalized information distance (NID),
normalized mutual information (NMI), normalized variation information (NVI) and
entropy, as described in Vinh et al (2009) <doi:10.1145/1553374.1553511>.",2018-05-02,Julien Chiquet (<https://orcid.org/0000-0002-3629-3429>),https://github.com/jchiquet/aricode (dev version),TRUE,https://github.com/jchiquet/aricode,3195,2,1530795732
arkdb,"Flat text files provide a robust, compressible, and portable
way to store tables from databases. This package provides convenient
functions for exporting tables from relational database connections
into compressed text files and streaming those text files back into
a database without requiring the whole table to fit in working memory.",2018-10-31,Carl Boettiger,https://github.com/ropensci/arkdb,TRUE,https://github.com/ropensci/arkdb,7154,45,1545679673
aroma.affymetrix,A cross-platform R framework that facilitates processing of any number of Affymetrix microarray samples regardless of computer system. The only parameter that limits the number of chips that can be processed is the amount of available disk space. The Aroma Framework has successfully been used in studies to process tens of thousands of arrays. This package has actively been used since 2006.,2018-04-16,Henrik Bengtsson,"http://www.aroma-project.org/,
https://github.com/HenrikBengtsson/aroma.affymetrix",TRUE,https://github.com/henrikbengtsson/aroma.affymetrix,54003,3,1523911260
aroma.core,"Core methods and classes used by higher-level 'aroma.*' packages
part of the Aroma Project, e.g. 'aroma.affymetrix' and 'aroma.cn'.",2018-05-03,Henrik Bengtsson,"https://github.com/HenrikBengtsson/aroma.core,
http://www.aroma-project.org/",TRUE,https://github.com/henrikbengtsson/aroma.core,66119,1,1527555941
arsenal,"An Arsenal of 'R' functions for large-scale statistical summaries,
which are streamlined to work within the latest reporting tools in 'R' and
'RStudio' and which use formulas and versatile summary statistics for summary
tables and models. The primary functions include tableby(), a Table-1-like
summary of multiple variable types 'by' the levels of one or more categorical
variables; paired(), a Table-1-like summary of multiple variable types paired across
two time points; modelsum(), which performs simple model fits on one or more endpoints
for many variables (univariate or adjusted for covariates);
freqlist(), a powerful frequency table across many categorical variables;
comparedf(), a function for comparing data.frames; and
write2(), a function to output tables to a document.",2019-03-25,Ethan Heinzen,"https://github.com/eheinzen/arsenal,
https://cran.r-project.org/package=arsenal",TRUE,https://github.com/eheinzen/arsenal,31852,45,1553785123
ARTool,"The Aligned Rank Transform for nonparametric
factorial ANOVAs as described by J. O. Wobbrock,
L. Findlater, D. Gergle, & J. J. Higgins, ""The Aligned
Rank Transform for nonparametric factorial analyses
using only ANOVA procedures"", CHI 2011 <DOI:10.1145/1978942.1978963>.",2019-02-03,Matthew Kay,https://github.com/mjskay/ARTool,TRUE,https://github.com/mjskay/artool,24153,15,1549235077
ARTP2,Pathway and gene level association test using raw data or summary statistics.,2018-11-30,Han Zhang,https://github.com/zhangh12/ARTP2,TRUE,https://github.com/zhangh12/artp2,11876,3,1550252555
arules,"Provides the infrastructure for representing,
manipulating and analyzing transaction data and patterns (frequent
itemsets and association rules). Also provides
C implementations of the association mining algorithms Apriori and Eclat.",2019-03-07,Michael Hahsler,https://github.com/mhahsler/arules,TRUE,https://github.com/mhahsler/arules,1005922,89,1551917088
arulesCBA,"Provides a function to build an association rule-based classifier for data frames, and to classify incoming data frames using such a classifier.",2018-12-16,Ian Johnson,https://github.com/ianjjohnson/arulesCBA,TRUE,https://github.com/ianjjohnson/arulescba,26741,20,1550595506
arulesViz,Extends package 'arules' with various visualization techniques for association rules and itemsets. The package also includes several interactive visualizations for rule exploration.,2018-12-05,Michael Hahsler,"https://github.com/mhahsler/arulesViz,
http://lyle.smu.edu/IDA/arules/",TRUE,https://github.com/mhahsler/arulesviz,537582,21,1550618572
aRxiv,"An interface to the API for 'arXiv'
(<https://arxiv.org>), a repository of electronic preprints for
computer science, mathematics, physics, quantitative biology,
quantitative finance, and statistics.",2017-04-28,Karl Broman,https://github.com/ropensci/aRxiv,TRUE,https://github.com/ropensci/arxiv,28044,35,1553257533
asciiSetupReader,"Lets you open a fixed-width ASCII file (.txt or
.dat) that has an accompanying setup file (.sps or .sas). These file
combinations are sometimes referred to as .txt+.sps, .txt+.sas,
.dat+.sps, or .dat+.sas. This will only run in a txt-sps or txt-sas
pair in which the setup file contains instructions to open that text
file. It will NOT open other text files, .sav, .sas, or .por data
files. Fixed-width ASCII files with setup files are common in older
(pre-2000) government data.",2019-02-05,Jacob Kaplan,https://github.com/jacobkap/asciiSetupReader,TRUE,https://github.com/jacobkap/asciisetupreader,7191,3,1551107293
ashr,"The R package 'ashr' implements an Empirical Bayes
approach for large-scale hypothesis testing and false discovery
rate (FDR) estimation based on the methods proposed in
M. Stephens, 2016, ""False discovery rates: a new deal"",
<DOI:10.1093/biostatistics/kxw041>. These methods can be applied
whenever two sets of summary statistics---estimated effects and
standard errors---are available, just as 'qvalue' can be applied
to previously computed p-values. Two main interfaces are
provided: ash(), which is more user-friendly; and ash.workhorse(),
which has more options and is geared toward advanced users. The
ash() and ash.workhorse() also provides a flexible modeling
interface that can accommodate a variety of likelihoods (e.g.,
normal, Poisson) and mixture priors (e.g., uniform, normal).",2019-02-22,Peter Carbonetto,https://github.com/stephens999/ashr,TRUE,https://github.com/stephens999/ashr,12539,55,1552591855
AsioHeaders,"'Asio' is a cross-platform C++ library for network and low-level
I/O programming that provides developers with a consistent asynchronous model
using a modern C++ approach. It is also included in Boost but requires linking
when used with Boost. Standalone it can be used header-only (provided a recent
compiler). 'Asio' is written and maintained by Christopher M. Kohlhoff, and
released under the 'Boost Software License', Version 1.0.",2018-09-10,Dirk Eddelbuettel,NA,TRUE,https://github.com/eddelbuettel/asioheaders,18442,8,1536589727
aslib,"Provides an interface to the algorithm selection benchmark library
at <http://www.aslib.net> and the 'LLAMA' package
(<https://cran.r-project.org/web/packages/llama/index.html>) for building
algorithm selection models.",2016-11-25,Bernd Bischl <bernd_bischl@gmx.net>,https://github.com/coseal/aslib-r/,TRUE,https://github.com/coseal/aslib-r,6826,5,1545686670
assertr,"Provides functionality to assert conditions
that have to be met so that errors in data used in
analysis pipelines can fail quickly. Similar to
'stopifnot()' but more powerful, friendly, and easier
for use in pipelines.",2019-01-22,Tony Fischetti,https://github.com/ropensci/assertr,TRUE,https://github.com/ropensci/assertr,31679,233,1549897311
ASSISTant,"Clinical trial design for subgroup selection in three-stage group
sequential trial. Includes facilities for design, exploration and analysis of
such trials. An implementation of the initial DEFUSE-3 trial is also provided
as a vignette.",2016-05-03,Balasubramanian Narasimhan,https://github.com/bnaras/ASSISTant,TRUE,https://github.com/bnaras/assistant,8959,0,1548825804
atsd,"Provides functions for retrieving time-series and related
meta-data such as entities, metrics, and tags from the Axibase
Time-Series Database (ATSD). ATSD is a non-relational clustered
database used for storing performance measurements from IT infrastructure
resources: servers, network devices, storage systems, and applications.",2018-01-29,Axibase Corporation,https://github.com/axibase/atsd-api-r/,TRUE,https://github.com/axibase/atsd-api-r,12437,1,1537508168
auditor,"Provides an easy to use unified interface for creating validation plots for any model.
The 'auditor' helps to avoid repetitive work consisting of writing code needed to create residual plots.
These visualizations allow users to assess and compare the goodness of fit, performance, and similarity of models. ",2018-09-19,Alicja Gosiewska,https://mi2datalab.github.io/auditor/,TRUE,https://github.com/mi2datalab/auditor,6380,18,1554395124
augmentedRCBD,"Functions for analysis of data generated from experiments in
augmented randomised complete block design according to Federer, W.T. (1961)
<doi:10.2307/2527837>. Computes analysis of variance, adjusted means,
descriptive statistics, genetic variability statistics etc. Further includes
data visualization and report generation functions.",2018-07-10,J. Aravind (<https://orcid.org/0000-0002-4791-442X>),https://github.com/aravind-j/augmentedRCBD,TRUE,https://github.com/aravind-j/augmentedrcbd,2931,1,1547726857
auk,"Extract and process bird sightings records from
eBird (<http://ebird.org>), an online tool for recording bird
observations. Public access to the full eBird database is via the
eBird Basic Dataset (EBD; see <http://ebird.org/ebird/data/download>
for access), a downloadable text file. This package is an interface to
AWK for extracting data from the EBD based on taxonomic, spatial, or
temporal filters, to produce a manageable file size that can be
imported into R.",2019-02-04,"Matthew Strimas-Mackey
(<https://orcid.org/0000-0001-8929-7776>)","https://github.com/CornellLabofOrnithology/auk,
http://CornellLabofOrnithology.github.io/auk/",TRUE,https://github.com/cornelllabofornithology/auk,12715,49,1552661071
auth0,"Uses Auth0 API (see <https://auth0.com> for more
information) to use a simple and secure authentication system. It provides
tools to log in and out a shiny application using social networks or a list
of e-mails.",2019-02-13,Julio Trecenti,NA,TRUE,https://github.com/curso-r/auth0,2390,22,1553034800
autocogs,Automatically calculates cognostic groups for plot objects and list column plot objects. Results are returned in a nested data frame.,2019-02-12,Barret Schloerke,https://github.com/schloerke/autocogs,TRUE,https://github.com/schloerke/autocogs,5516,2,1549910028
AutoDeskR,"An interface to the 'AutoDesk' 'API' Platform including the Authentication
'API' for obtaining authentication to the 'AutoDesk' Forge Platform, Data Management
'API' for managing data across the platform's cloud services, Design Automation 'API'
for performing automated tasks on design files in the cloud, Model
Derivative 'API' for translating design files into different formats, sending
them to the viewer app, and extracting design data, and Viewer for rendering
2D and 3D models (see <https://developer.autodesk.com> for more information).",2017-07-10,Paul Govan,https://github.com/paulgovan/autodeskr,TRUE,https://github.com/paulgovan/autodeskr,8909,5,1541553107
automagic,Parse R code in a given directory for R packages and attempt to install them from CRAN or GitHub. Optionally use a dependencies file for tighter control over which package versions to install.,2019-03-05,Cole Brokamp,https://github.com/cole-brokamp/automagic,TRUE,https://github.com/cole-brokamp/automagic,7263,33,1552186260
automultinomial,"Fits the autologistic model described in Besag's famous 1974 paper on auto- models <http://www.jstor.org/stable/2984812>. Fits a multicategory generalization of the autologistic model when there are more than 2 response categories. Provides support for both asymptotic and bootstrap confidence intervals. For full model descriptions and a guide to the use of this package, please see the vignette.",2018-10-31,Stephen Berg,NA,TRUE,https://github.com/stephenberg/automultinomial,7842,4,1541005003
autoplotly,"Functionalities to automatically generate interactive visualizations for
statistical results supported by 'ggfortify', such as time series, PCA,
clustering and survival analysis, with 'plotly.js' <https://plot.ly/> and
'ggplot2' style. The generated visualizations can also be easily extended
using 'ggplot2' and 'plotly' syntax while staying interactive.",2018-04-21,Yuan Tang (<https://orcid.org/0000-0001-5243-233X>),https://github.com/terrytangyuan/autoplotly,TRUE,https://github.com/terrytangyuan/autoplotly,6442,36,1524322544
autoshiny,Static code compilation of a 'shiny' app given an R function (into 'ui.R' and 'server.R' files or into a 'shiny' app object). See examples at <https://github.com/alekrutkowski/autoshiny>.,2018-06-25,Aleksander Rutkowski,https://github.com/alekrutkowski/autoshiny,TRUE,https://github.com/alekrutkowski/autoshiny,3617,4,1529313640
autovarCore,"Automatically find the best vector autoregression
models and networks for a given time series data set. 'AutovarCore'
evaluates eight kinds of models: models with and without log
transforming the data, lag 1 and lag 2 models, and models with and
without weekday dummy variables. For each of these 8 model configurations,
'AutovarCore' evaluates all possible combinations for including
outlier dummies (at 2.5x the standard deviation of the residuals)
and retains the best model. Model evaluation includes the Eigenvalue
stability test and a configurable set of residual tests. These eight
models are further reduced to four models because 'AutovarCore'
determines whether adding weekday dummies improves the model fit.",2018-06-04,Ando Emerencia,NA,TRUE,https://github.com/roqua/autovarcore,16536,3,1531407626
av,"Bindings to 'FFmpeg' <http://www.ffmpeg.org/> AV library for working with audio
and video in R. Generate high quality videos files by capturing images from the R graphics
device combined with custom audio stream. This package interfaces directly to the C API
and does not require any command line utilities.",2018-09-30,Jeroen Ooms (<https://orcid.org/0000-0002-4035-0289>),"https://github.com/ropensci/av (devel) http://www.ffmpeg.org/
(upstream)",TRUE,https://github.com/ropensci/av,15556,56,1547079798
available,"Check if a given package name is available to use. It checks the
name's validity. Checks if it is used on 'GitHub', 'CRAN' and 'Bioconductor'. Checks
for unintended meanings by querying Urban Dictionary, 'Wiktionary' and Wikipedia.",2018-11-08,Jim Hester,https://github.com/ropenscilabs/available,TRUE,https://github.com/ropenscilabs/available,7925,88,1541687975
aweek,"Which day a week starts depends heavily on the either the local or
professional context. This package is designed to be a lightweight solution
to easily switching between week-based date definitions. ",2019-03-08,Zhian N. Kamvar,https://github.com/reconhub/aweek,TRUE,https://github.com/reconhub/aweek,1875,3,1552921298
aws.cloudtrail,"A simple client package for the Amazon Web Services ('AWS') 'CloudTrail'
'API' <https://aws.amazon.com/cloudtrail/>.",2017-07-04,Thomas J. Leeper,https://github.com/cloudyr/aws.cloudtrail,TRUE,https://github.com/cloudyr/aws.cloudtrail,5805,0,1533052999
aws.comprehend,"Client for 'AWS Comprehend' <https://aws.amazon.com/comprehend>, a cloud natural language processing service that can perform a number of quantitative text analyses, including language detection, sentiment analysis, and feature extraction.",2018-04-12,Thomas J. Leeper,https://github.com/cloudyr/aws.comprehend,TRUE,https://github.com/cloudyr/aws.comprehend,3278,3,1533052886
aws.ec2metadata,Retrieve Amazon EC2 instance metadata from within the running instance.,2018-07-26,Thomas J. Leeper (<https://orcid.org/0000-0003-4097-6326>),https://github.com/cloudyr/aws.ec2metadata,TRUE,https://github.com/cloudyr/aws.ec2metadata,111777,5,1532605923
aws.iam,"A simple client for the Amazon Web Services ('AWS') Identity
and Access Management ('IAM') 'API' <https://aws.amazon.com/iam/>.",2017-07-01,Thomas J. Leeper,https://github.com/cloudyr/aws.iam,TRUE,https://github.com/cloudyr/aws.iam,7731,6,1533052967
aws.kms,"Client package for the 'AWS Key Management Service' <https://aws.amazon.com/kms/>, a cloud service for managing encryption keys.",2018-08-01,Thomas J. Leeper (<https://orcid.org/0000-0003-4097-6326>),https://github.com/cloudyr/aws.kms,TRUE,https://github.com/cloudyr/aws.kms,2659,0,1533052758
aws.lambda,A simple client package for the Amazon Web Services ('AWS') Lambda 'API' <https://aws.amazon.com/lambda/>.,2017-07-02,Thomas J. Leeper,https://github.com/cloudyr/aws.lambda,TRUE,https://github.com/cloudyr/aws.lambda,5950,16,1533053050
aws.polly,"A client for AWS Polly <http://aws.amazon.com/documentation/polly>, a speech synthesis service.",2016-12-08,Thomas J. Leeper,https://github.com/cloudyr/aws.polly,TRUE,https://github.com/cloudyr/aws.polly,8703,17,1533158731
aws.s3,"A simple client package for the Amazon Web Services ('AWS') Simple
Storage Service ('S3') 'REST' 'API' <https://aws.amazon.com/s3/>.",2018-05-25,Thomas J. Leeper (<https://orcid.org/0000-0003-4097-6326>),https://github.com/cloudyr/aws.s3,TRUE,https://github.com/cloudyr/aws.s3,736119,214,1535995202
aws.ses,"A simple client package for the Amazon Web Services (AWS) Simple
Email Service (SES) <http://aws.amazon.com/ses/> REST API.",2016-12-20,Thomas J. Leeper,https://github.com/cloudyr/aws.ses,TRUE,https://github.com/cloudyr/aws.ses,7540,5,1533053144
aws.signature,"Generates version 2 and version 4 request signatures for Amazon Web Services ('AWS') <https://aws.amazon.com/> Application Programming Interfaces ('APIs') and provides a mechanism for retrieving credentials from environment variables, 'AWS' credentials files, and 'EC2' instance metadata. For use on 'EC2' instances, users will need to install the suggested package 'aws.ec2metadata' <https://cran.r-project.org/package=aws.ec2metadata>.",2018-07-27,Thomas J. Leeper (<https://orcid.org/0000-0003-4097-6326>),https://github.com/cloudyr/aws.signature,TRUE,https://github.com/cloudyr/aws.signature,688907,14,1533739617
aws.sns,"A simple client package for the Amazon Web Services ('AWS') Simple
Notification Service ('SNS') 'API' <https://aws.amazon.com/sns/>.",2017-07-04,Thomas J. Leeper,https://github.com/cloudyr/aws.sns,TRUE,https://github.com/cloudyr/aws.sns,8935,9,1533053166
aws.sqs,"A simple client package for the Amazon Web Services ('AWS') Simple
Queue Service ('SQS') <https://aws.amazon.com/sqs/> 'API'.",2017-07-04,Thomas J. Leeper,https://github.com/cloudyr/aws.sqs,TRUE,https://github.com/cloudyr/aws.sqs,9018,12,1534001502
aws.transcribe,"Client for 'AWS Transcribe' <https://aws.amazon.com/documentation/transcribe>, a cloud transcription service that can convert an audio media file in English and other languages into a text transcript.",2018-04-09,Thomas J. Leeper (<https://orcid.org/0000-0003-4097-6326>),https://github.com/cloudyr/aws.transcribe,TRUE,https://github.com/cloudyr/aws.transcribe,3183,1,1533053224
aws.translate,"A client for 'AWS Translate' <https://aws.amazon.com/documentation/translate>, a machine translation service that will convert a text input in one language into a text output in another language.",2018-04-12,Thomas J. Leeper (<https://orcid.org/0000-0003-4097-6326>),https://github.com/cloudyr/aws.translate,TRUE,https://github.com/cloudyr/aws.translate,3171,2,1533053251
awsjavasdk,"Provides boilerplate access to all of the classes included in the
Amazon Web Services ('AWS') Java Software Development Kit (SDK) via
package:'rJava'. According to Amazon, the 'SDK helps take the complexity
out of coding by providing Java APIs for many AWS services including
Amazon S3, Amazon EC2, DynamoDB, and more'. You can read more about the
included Java code on Amazon's website:
<https://aws.amazon.com/sdk-for-java/>.",2017-01-01,Russell Pierce,https://github.com/zapier/awsjavasdk,TRUE,https://github.com/zapier/awsjavasdk,6834,4,1542898372
awspack,A bundle of all of 'cloudyr' project <http://cloudyr.github.io/> packages for Amazon Web Services ('AWS') <https://aws.amazon.com/>. It depends upon all of the 'cloudyr' project's 'AWS' packages. It is mainly useful for installing the entire suite of packages; more likely than not you will only want to load individual packages one at a time.,2017-07-05,Thomas J. Leeper,https://github.com/cloudyr/awspack,TRUE,https://github.com/cloudyr/awspack,6036,13,1533053285
AzureAuth,"Provides Azure Active Directory (AAD) authentication functionality for R users of Microsoft's 'Azure' cloud <https://azure.microsoft.com/>. Use this package to obtain 'OAuth' 2.0 tokens for services including Azure Resource Manager, Azure Storage and others. It supports both AAD v1.0 and v2.0, as well as multiple authentication methods, including device code and resource owner grant. Tokens are cached in a user-specific directory obtained using the 'rappdirs' package. The interface is based on the 'OAuth' framework in the 'httr' package, but customised and streamlined for Azure.",2019-03-22,Hong Ooi,https://github.com/cloudyr/AzureAuth,TRUE,https://github.com/cloudyr/azureauth,3099,3,1553500521
AzureContainers,"An interface to container functionality in Microsoft's 'Azure' cloud: <https://azure.microsoft.com/en-us/overview/containers/>. Manage 'Azure Container Instance' (ACI), 'Azure Container Registry' (ACR) and 'Azure Kubernetes Service' (AKS) resources, push and pull images, and deploy services. On the client side, lightweight shells to the 'docker', 'kubectl' and 'helm' commandline tools are provided.",2019-02-14,Hong Ooi,https://github.com/cloudyr/AzureContainers,TRUE,https://github.com/cloudyr/azurecontainers,1856,6,1554082368
AzureGraph,"A simple interface to the 'Microsoft Graph' API <https://docs.microsoft.com/en-us/graph/overview>. 'Graph' is a comprehensive framework for accessing data in various online Microsoft services. Currently, this package aims to provide an R interface only to the 'Azure Active Directory' part, with a view to supporting interoperability of R and 'Azure': users, groups, registered apps and service principals. However it can be easily extended to cover other services.",2019-03-31,Hong Ooi,https://github.com/cloudyr/AzureGraph,TRUE,https://github.com/cloudyr/azuregraph,79,1,1554267235
AzureRMR,"A lightweight but powerful R interface to the 'Azure Resource Manager' REST API. The package exposes classes and methods for 'OAuth' authentication and working with subscriptions and resource groups. It also provides functionality for creating and deleting 'Azure' resources and deploying templates. While 'AzureRMR' can be used to manage any 'Azure' service, it can also be extended by other packages to provide extra functionality for specific services.",2019-04-02,Hong Ooi,https://github.com/cloudyr/AzureRMR,TRUE,https://github.com/cloudyr/azurermr,4111,14,1554113364
AzureStor,"Manage storage in Microsoft's 'Azure' cloud: <https://azure.microsoft.com/services/storage>. On the admin side, 'AzureStor' includes features to create, modify and delete storage accounts. On the client side, it includes an interface to blob storage, file storage, and 'Azure Data Lake Storage Gen2': upload and download files and blobs; list containers and files/blobs; create containers; and so on. Authenticated access to storage is supported, via either a shared access key or a shared access signature (SAS).",2019-03-21,Hong Ooi,https://github.com/cloudyr/AzureStor,TRUE,https://github.com/cloudyr/azurestor,3708,12,1553172531
AzureVM,"Functionality for working with virtual machines (VMs) in Microsoft's 'Azure' cloud: <https://azure.microsoft.com/en-us/services/virtual-machines/>. Includes facilities to create, startup, shutdown, and cleanly delete VMs and VM clusters. With a running VM, execute scripts and install optional extensions. A selection of VM templates based on the 'Data Science Virtual Machine' (DSVM) is supplied; this allows fast and easy provisioning of a VM preinstalled with several software packages useful for data science. Alternatively, users can provide VM templates of their own.",2018-12-25,Hong Ooi,https://github.com/cloudyr/AzureVM,TRUE,https://github.com/cloudyr/azurevm,2499,2,1552376732
babynames,"US baby names provided by the SSA. This package contains all
names used for at least 5 children of either sex.",2019-01-12,Hadley Wickham,http://github.com/hadley/babynames,TRUE,https://github.com/hadley/babynames,89588,83,1553201016
BacArena,"Can be used for simulation of organisms living in
communities. Each organism is represented individually and genome scale
metabolic models determine the uptake and release of compounds. Biological
processes such as movement, diffusion, chemotaxis and kinetics are available
along with data analysis techniques.",2019-02-15,Johannes Zimmermann,https://BacArena.github.io/,TRUE,https://github.com/euba/bacarena,13756,8,1553094440
backpipe,"Provides a backward-pipe operator for 'magrittr' (%<%) or
'pipeR' (%<<%) that allows for a performing operations from right-to-left.
This allows writing more legible code where right-to-left ordering is
natural. This is common with hierarchies and nested structures such as
trees, directories or markup languages (e.g. HTML and XML).
The package also includes a R-Studio add-in that can be bound to a keyboard
shortcut. ",2018-06-26,Christopher Brown,https://github.com/decisionpatterns/backpipe,TRUE,https://github.com/decisionpatterns/backpipe,11918,16,1529949776
backports,"
Functions introduced or changed since R v3.0.0 are re-implemented in this
package. The backports are conditionally exported in order to let R resolve
the function name to either the implemented backport, or the respective base
version, if available. Package developers can make use of new functions or
arguments by selectively importing specific backports to
support older installations.",2018-12-14,Michel Lang (<https://orcid.org/0000-0001-9754-0393>),https://github.com/r-lib/backports,TRUE,https://github.com/r-lib/backports,7403535,38,1552899719
badgecreatr,"Tired of copy and pasting almost identical markdown for badges in
every new R-package that you create, on Github or other code-sharing sites?
This package allows you to easily paste badges. If you want to, it will also search
your DESCRIPTION file and extract the package name,
license, R-version, and current projectversion and transform that into
badges. It will also search for a "".travis.yml"" file and create a ""Travis"""" badge,
if you use ""Codecov.io"" to check your code coverage after a ""Travis"" build
this package will also build a ""Codecov.io""-badge. All the badges can be placed
individually or can be placed below the top ""YAML"""" content of your ""RMarkdown
file"" (Readme.Rmd) or ""README.md"" file.
Currently creates badges for Projectstatus (""Repostatus.org""), license
Travis Build Status, Codecov, Minimal R version, CRAN status, CRAN downloads,
Github stars and forks, Package rank, rdocumentation,
current version of your package and last change of ""README.Rmd"".",2019-01-07,Roel M. Hogervorst,"https://github.com/RMHogervorst/badgecreatr,
https://rmhogervorst.nl/badgecreatr",TRUE,https://github.com/rmhogervorst/badgecreatr,7567,47,1549708118
badger,"Query information and generate badge for using in README
and GitHub Pages.",2019-01-08,Guangchuang Yu (<https://orcid.org/0000-0002-6485-8781>),https://github.com/GuangchuangYu/badger,TRUE,https://github.com/guangchuangyu/badger,6606,42,1547805376
balance,"Balances have become a cornerstone of compositional data analysis. However,
conceptualizing balances is difficult, especially for high-dimensional data. Most often,
investigators visualize balances with ""balance dendrograms"". However, this visualization
tool does not scale well for large data. This package provides an alternative scheme for
visualizing balances, described in [Quinn (2018) <DOI:10.12688/f1000research.15858.1>].
This package also provides a method for principal balance analysis.",2018-12-10,Thomas Quinn,http://github.com/tpq/balance,TRUE,https://github.com/tpq/balance,2803,2,1554417807
BALCONY,Facilitates the evolutionary analysis and structure conservation study of specified amino acids in proteins.,2019-02-28,Michal Stolarczyk,NA,TRUE,https://github.com/michalstolarczyk/balcony,10241,1,1554563146
Ball,"Hypothesis tests and sure independence screening (SIS) procedure based on ball statistics, including ball divergence <doi:10.1214/17-AOS1579>, ball covariance, and ball correlation <doi:10.1080/01621459.2018.1462709>, are developed to analyze complex data. The ball divergence and ball covariance based distribution-free tests are implemented to detecting distribution difference and association in metric spaces <arXiv:1811.03750>. Furthermore, a generic non-parametric SIS procedure based on ball correlation and all of its variants are implemented to tackle the challenge in the context of ultra high dimensional data.",2018-12-14,Xueqin Wang,https://github.com/Mamba413/Ball,TRUE,https://github.com/mamba413/ball,12129,7,1552611292
BAMBI,Fit (using Bayesian methods) and simulate mixtures of univariate and bivariate angular distributions. Chakraborty and Wong (2017) <arXiv:1708.07804> .,2019-03-16,Saptarshi Chakraborty,https://arxiv.org/abs/1708.07804,TRUE,https://github.com/c7rishi/bambi,13702,0,1554182231
bamboo,"Implementation of the Bamboo methods described in Li, Dahl, Vannucci, Joo, and Tsai (2014) <DOI:10.1371/journal.pone.0109832>.",2018-10-19,David B. Dahl,https://github.com/dbdahl/bamboo,TRUE,https://github.com/dbdahl/bamboo,12287,2,1549412347
BaMORC,"Provides reference correction for protein NMR spectra. Bayesian Model Optimized Reference Correction (BaMORC) is utilizing Bayesian probabilistic framework to perform protein NMR referencing correction, currently for alpha and beta carbon-13 chemical shifts, without any resonance assignment and/or three-dimensional protein structure. For more detailed explanation, please refer to the paper ""Automatic 13C Chemical Shift Reference Correction for Unassigned Protein NMR Spectra"" <https://rdcu.be/4ly5> (Journal of Biomolecular NMR, Aug 2018)"" <doi:10.1007/s10858-018-0202-5>.",2019-01-02,Xi Chen (<https://orcid.org/0000-0001-7094-6748>),https://github.com/MoseleyBioinformaticsLab/BaMORC,TRUE,https://github.com/moseleybioinformaticslab/bamorc,1655,0,1541164054
bamp,"Bayesian Age-Period-Cohort Modeling and Prediction using efficient Markov Chain Monte Carlo Methods. This is the R version of the previous BAMP software as described in Volker Schmid and Leonhard Held (2007) <DOI:10.18637/jss.v021.i08> Bayesian Age-Period-Cohort Modeling and Prediction - BAMP, Journal of Statistical Software 21:8. This package includes checks of convergence using Gelman's R.",2019-01-08,Volker Schmid,https://volkerschmid.github.io/bamp/,TRUE,https://github.com/volkerschmid/bamp,2379,2,1546943112
BANEScarparkinglite,"Contains functions for importing and working with the BANES car parking
records and other related datasets. For the full version of the package, including
all datasets, see the repo at <https://github.com/owenjonesuob/BANEScarparking>.
The original dataset of parking records can be found at
<https://data.bathhacked.org/Government-and-Society/BANES-Historic-Car-Park-Occupancy/x29s-cczc>.",2018-06-30,Owen Jones,https://github.com/owenjonesuob/BANEScarparkinglite,TRUE,https://github.com/owenjonesuob/banescarparkinglite,4543,0,1530370054
bang,"Provides functions for the Bayesian analysis of some simple
commonly-used models, without using Markov Chain Monte Carlo (MCMC)
methods such as Gibbs sampling. The 'rust' package
<https://cran.r-project.org/package=rust> is used
to simulate a random sample from the required posterior distribution.
At the moment three conjugate hierarchical models are available:
beta-binomial, gamma-Poisson and a 1-way analysis of variance (ANOVA).",2017-11-20,Paul J. Northrop,http://github.com/paulnorthrop/bang,TRUE,https://github.com/paulnorthrop/bang,4486,3,1526918444
banR,"A client for the ""Base Adresses Nationale"" (BAN) API, which allows to (batch)
geocode and reverse-geocode French addresses. For more information about the BAN and its API, please see <https://adresse.data.gouv.fr/api>. ",2017-08-03,Joel Gombin,http://github.com/joelgombin/banR,TRUE,https://github.com/joelgombin/banr,5292,14,1527691415
baRcodeR,"Tools to generate unique identifiers and printable barcoded labels for sample management.
The creation of unique ID codes and printable PDF files can be initiated by standard commands,
user prompts, or through a GUI addin for R Studio. Both single-level and hierarchical labels can
be created in the command line interactively or non-interactively. ",2019-01-10,Robert Colautti,https://github.com/yihanwu/baRcodeR,TRUE,https://github.com/yihanwu/barcoder,3484,10,1552342683
BAS,"Package for Bayesian Variable Selection and Model Averaging in linear models and
generalized linear models using stochastic or
deterministic sampling without replacement from posterior
distributions. Prior distributions on coefficients are
from Zellner's g-prior or mixtures of g-priors
corresponding to the Zellner-Siow Cauchy Priors or the
mixture of g-priors from Liang et al (2008)
<DOI:10.1198/016214507000001337>
for linear models or mixtures of g-priors in GLMs of Li and Clyde (2018)
<arXiv:1503.06913>. Other model
selection criteria include AIC, BIC and Empirical Bayes estimates of g.
Sampling probabilities may be updated based on the sampled models
using Sampling w/out Replacement or an efficient MCMC algorithm
samples models using the BAS tree structure as an efficient hash table.
Uniform priors over all models or beta-binomial prior distributions on
model size are allowed, and for large p truncated priors on the model
space may be used. The user may force variables to always be included.
Details behind the sampling algorithm are provided in
Clyde, Ghosh and Littman (2010) <DOI:10.1198/jcgs.2010.09049>.
This material is based upon work supported by the National Science
Foundation under Grant DMS-1106891. Any opinions, findings, and
conclusions or recommendations expressed in this material are those of
the author(s) and do not necessarily reflect the views of the
National Science Foundation.",2018-10-30,Merlise Clyde,"https://www.r-project.org, https://github.com/merliseclyde/BAS",TRUE,https://github.com/merliseclyde/bas,75739,21,1540912868
base2grob,"Convert base plot function call (using expression or formula) to 'grob' object that compatible to the 'grid' ecosystem. With this package, we are able to e.g. using 'cowplot' to align base plots with 'ggplot' objects and using 'ggsave' to export base plot to file.",2018-04-25,Guangchuang Yu (<https://orcid.org/0000-0002-6485-8781>),https://github.com/GuangchuangYu/base2grob,TRUE,https://github.com/guangchuangyu/base2grob,6125,8,1524633768
base64url,"In contrast to RFC3548, the 62nd character (""+"") is replaced with
""-"", the 63rd character (""/"") is replaced with ""_"". Furthermore, the encoder
does not fill the string with trailing ""="". The resulting encoded strings
comply to the regular expression pattern ""[A-Za-z0-9_-]"" and thus are
safe to use in URLs or for file names.
The package also comes with a simple base32 encoder/decoder suited for
case insensitive file systems.",2018-05-14,Michel Lang (<https://orcid.org/0000-0001-9754-0393>),https://github.com/mllg/base64url,TRUE,https://github.com/mllg/base64url,30907,8,1536838772
baseballDBR,"A tool for gathering and analyzing data from the Baseball Databank <http://www.baseball-databank.org/>, which includes player performance statistics from major league baseball in the United States beginning in the year 1871.",2017-06-15,Kris Eberwein,https://github.com/keberwein/moneyball,TRUE,https://github.com/keberwein/moneyball,5027,6,1539094809
basictabler,"Easily create tables from data
frames/matrices. Create/manipulate tables
row-by-row, column-by-column or cell-by-cell.
Use common formatting/styling to output
rich tables as 'HTML', 'HTML widgets' or to
'Excel'. ",2019-03-21,Christopher Bailiss,https://github.com/cbailiss/basictabler,TRUE,https://github.com/cbailiss/basictabler,7291,11,1553163623
basicTrendline,"Plot, draw regression line and confidence interval, and show regression equation, R-square and P-value, as simple as possible, by using different models (""line2P"", ""line3P"", ""log2P"", ""exp2P"", ""exp3P"", ""power2P"", ""power3P"") built in the 'trendline()' function.",2018-07-26,Weiping Mei,https://github.com/PhDMeiwp/basicTrendline,TRUE,https://github.com/phdmeiwp/basictrendline,9018,6,1535162680
batchtools,"As a successor of the packages 'BatchJobs' and 'BatchExperiments',
this package provides a parallel implementation of the Map function for high
performance computing systems managed by schedulers 'IBM Spectrum LSF'
(<https://www.ibm.com/us-en/marketplace/hpc-workload-management>),
'OpenLava' (<http://www.openlava.org/>), 'Univa Grid Engine'/'Oracle Grid
Engine' (<http://www.univa.com/>), 'Slurm' (<http://slurm.schedmd.com/>),
'TORQUE/PBS'
(<http://www.adaptivecomputing.com/products/open-source/torque/>), or
'Docker Swarm' (<https://docs.docker.com/swarm/>).
A multicore and socket mode allow the parallelization on a local machines,
and multiple machines can be hooked up via SSH to create a makeshift
cluster. Moreover, the package provides an abstraction mechanism to define
large-scale computer experiments in a well-organized and reproducible way.",2018-08-16,Michel Lang (<https://orcid.org/0000-0001-9754-0393>),https://github.com/mllg/batchtools,TRUE,https://github.com/mllg/batchtools,42070,88,1550492098
bayesAB,"A suite of functions that allow the user to analyze A/B test
data in a Bayesian framework. Intended to be a drop-in replacement for
common frequentist hypothesis test such as the t-test and chi-sq test.",2018-07-14,Frank Portman,https://github.com/FrankPortman/bayesAB,TRUE,https://github.com/frankportman/bayesab,19819,214,1547739917
bayesboot,"Functions for performing the Bayesian bootstrap as introduced by
Rubin (1981) <doi:10.1214/aos/1176345338> and for summarizing the result.
The implementation can handle both summary statistics that works on a
weighted version of the data and summary statistics that works on a
resampled data set.",2018-06-29,Rasmus Bååth,https://github.com/rasmusab/bayesboot,TRUE,https://github.com/rasmusab/bayesboot,11465,39,1530223211
bayesCT,"Simulation and analysis of Bayesian adaptive clinical trial, incorporates historical
data and allows early stopping for futility or early success. ",2019-03-13,Thevaa Chandereng,https://github.com/thevaachandereng/bayesCT/,TRUE,https://github.com/thevaachandereng/bayesct,1387,4,1552855671
BayesCTDesign,"A set of functions to help clinical trial researchers calculate power and sample size for two-arm Bayesian randomized clinical trials that do or do not incorporate historical control data. At some point during the design process, a clinical trial researcher who is designing a basic two-arm Bayesian randomized clinical trial needs to make decisions about power and sample size within the context of hypothesized treatment effects. Through simulation, the simple_sim() function will estimate power and other user specified clinical trial characteristics at user specified sample sizes given user defined scenarios about treatment effect,control group characteristics, and outcome. If the clinical trial researcher has access to historical control data, then the researcher can design a two-arm Bayesian randomized clinical trial that incorporates the historical data. In such a case, the researcher needs to work through the potential consequences of historical and randomized control differences on trial characteristics, in addition to working through issues regarding power in the context of sample size, treatment effect size, and outcome. If a researcher designs a clinical trial that will incorporate historical control data, the researcher needs the randomized controls to be from the same population as the historical controls. What if this is not the case when the designed trial is implemented? During the design phase, the researcher needs to investigate the negative effects of possible historic/randomized control differences on power, type one error, and other trial characteristics. Using this information, the researcher should design the trial to mitigate these negative effects. Through simulation, the historic_sim() function will estimate power and other user specified clinical trial characteristics at user specified sample sizes given user defined scenarios about historical and randomized control differences as well as treatment effects and outcomes. 
The results from historic_sim() and simple_sim() can be printed with print_table() and graphed with plot_table() methods. Outcomes considered are Gaussian, Poisson, Bernoulli, Lognormal, Weibull, and Piecewise Exponential. ",2018-08-14,Barry Eggleston,http://github.com/begglest/BayesCTDesign,TRUE,https://github.com/begglest/bayesctdesign,2136,0,1534174324
bayesdfa,"Implements Bayesian dynamic factor analysis with 'Stan'. Dynamic
factor analysis is a dimension reduction tool for multivariate time series.
'bayesdfa' extends conventional dynamic factor models in several ways.
First, extreme events may be estimated in the latent trend by modeling
process error with a student-t distribution. Second, autoregressive and
moving average components can be optionally included. Third, the estimated
dynamic factors can be analyzed with hidden Markov models to evaluate
support for latent regimes.",2019-03-05,Eric J. Ward,https://github.com/fate-ewi/bayesdfa,TRUE,https://github.com/fate-ewi/bayesdfa,3345,8,1551798992
bayesDP,"Functions for data augmentation using the
Bayesian discount prior function for 1 arm and 2 arm clinical trials.",2018-07-10,Shawn Balcome,https://github.com/donaldmusgrove/bayesDP,TRUE,https://github.com/donaldmusgrove/bayesdp,14915,3,1539615039
BayesFactor,"A suite of functions for computing
various Bayes factors for simple designs, including contingency tables,
one- and two-sample designs, one-way designs, general ANOVA designs, and
linear regression.",2018-05-19,Richard D. Morey,https://richarddmorey.github.io/BayesFactor/,TRUE,https://github.com/richarddmorey/bayesfactor,139578,82,1551430715
BayesianNetwork,"A 'Shiny' web application for creating interactive Bayesian Network models,
learning the structure and parameters of Bayesian networks, and utilities for classic
network analysis.",2018-12-02,Paul Govan (<https://orcid.org/0000-0002-1821-8492>),https://github.com/paulgovan/bayesiannetwork,TRUE,https://github.com/paulgovan/bayesiannetwork,11438,53,1543716268
BayesianTools,"General-purpose MCMC and SMC samplers, as well as plot and
diagnostic functions for Bayesian statistics, with a particular focus on
calibrating complex system models. Implemented samplers include various
Metropolis MCMC variants (including adaptive and/or delayed rejection MH), the
T-walk, two differential evolution MCMCs, two DREAM MCMCs, and a sequential
Monte Carlo (SMC) particle filter.",2019-01-21,Florian Hartig,https://github.com/florianhartig/BayesianTools,TRUE,https://github.com/florianhartig/bayesiantools,17623,49,1550846657
BayesMallows,"An implementation of the Bayesian version of the Mallows rank model (Vitelli et al., Journal of Machine Learning Research, 2018 <http://jmlr.org/papers/v18/15-481.html>; Crispino et al., to appear in Annals of Applied Statistics). Both Cayley, footrule, Hamming, Kendall, Spearman, and Ulam distances are supported in the models. The rank data to be analyzed can be in the form of complete rankings, top-k rankings, partially missing rankings, as well as consistent and inconsistent pairwise preferences. Several functions for plotting and studying the posterior distributions of parameters are provided. The package also provides functions for estimating the partition function (normalizing constant) of the Mallows rank model, both with the importance sampling algorithm of Vitelli et al. and asymptotic approximation with the IPFP algorithm (Mukherjee, Annals of Statistics, 2016 <doi:10.1214/15-AOS1389>).",2019-02-22,Oystein Sorensen,https://github.com/osorensen/BayesMallows,TRUE,https://github.com/osorensen/bayesmallows,3908,2,1551082731
BayesNetBP,"Belief propagation methods in Bayesian Networks to propagate evidence through the network. The implementation of these methods are based on the article: Cowell, RG (2005). Local Propagation in Conditional Gaussian Bayesian Networks <http://www.jmlr.org/papers/volume6/cowell05a/>.",2018-08-22,Han Yu,https://github.com/hyu-ub/BayesNetBP,TRUE,https://github.com/hyu-ub/bayesnetbp,8181,5,1550525383
bayesplot,"Plotting functions for posterior analysis, model checking,
and MCMC diagnostics. The package is designed not only to provide convenient
functionality for users, but also a common set of functions that can be
easily used by developers working on a variety of R packages for Bayesian
modeling, particularly (but not exclusively) packages interfacing with 'Stan'.",2018-08-02,Jonah Gabry,http://mc-stan.org/bayesplot,TRUE,https://github.com/stan-dev/bayesplot,302667,166,1552429023
BayesTwin,"Bayesian analysis of item-level hierarchical twin data using an integrated item response theory model. Analyses are based on Schwabe & van den Berg (2014) <doi:10.1007/s10519-014-9649-7>, Molenaar & Dolan (2014) <doi:10.1007/s10519-014-9647-9>, Schwabe, Jonker & van den Berg (2016) <doi:10.1007/s10519-015-9768-9> and Schwabe, Boomsma & van den Berg (2016) <doi:10.1016/j.lindif.2017.01.018>.",2017-06-26,Inga Schwabe,http://www.ingaschwabe.com,TRUE,https://github.com/ingaschwabe/bayestwin,4423,0,1538136274
BayesVarSel,"Conceived to calculate Bayes factors in linear models and then to provide a formal Bayesian answer to testing and variable selection problems. From a theoretical side, the emphasis in this package is placed on the prior distributions and it allows a wide range of them: Jeffreys (1961); Zellner and Siow(1980)<DOI:10.1007/bf02888369>; Zellner and Siow(1984); Zellner (1986)<DOI:10.2307/2233941>; Fernandez et al. (2001)<DOI:10.1016/s0304-4076(00)00076-2>; Liang et al. (2008)<DOI:10.1198/016214507000001337> and Bayarri et al. (2012)<DOI:10.1214/12-aos1013>. The interaction with the package is through a friendly interface that syntactically mimics the well-known lm() command of R. The resulting objects can be easily explored providing the user very valuable information (like marginal, joint and conditional inclusion probabilities of potential variables; the highest posterior probability model, HPM; the median probability model, MPM) about the structure of the true -data generating- model. Additionally, this package incorporates abilities to handle problems with a large number of potential explanatory variables through parallel and heuristic versions of the main commands, Garcia-Donato and Martinez-Beneito (2013)<DOI:10.1080/01621459.2012.742443>.",2017-12-04,Anabel Forte,https://github.com/carlosvergara/BayesVarSel,TRUE,https://github.com/carlosvergara/bayesvarsel,26662,0,1540813034
bayfoxr,"A Bayesian, global planktic foraminifera core top calibration to
modern sea-surface temperatures. Includes four calibration models,
considering species-specific calibration parameters and seasonality.",2019-02-06,Steven Malevich,https://github.com/brews/bayfoxr/,TRUE,https://github.com/brews/bayfoxr,684,1,1549650077
BayLum,"Bayesian analysis of luminescence data and C-14 age estimates. Bayesian models are based on the following publications: Combes, B. & Philippe, A. (2017) <doi:10.1016/j.quageo.2017.02.003> and Combes et al (2015) <doi:10.1016/j.quageo.2015.04.001>. This includes, amongst others, data import, export, application of age models and palaeodose model.",2018-09-19,Anne Philippe,NA,TRUE,https://github.com/r-lum/baylum,6605,2,1551289173
baytrends,"Enable users to evaluate long-term trends using a Generalized
Additive Modeling (GAM) approach. The model development includes selecting a
GAM structure to describe nonlinear seasonally-varying changes over time,
incorporation of hydrologic variability via either a river flow or salinity,
the use of an intervention to deal with method or laboratory changes
suspected to impact data values, and representation of left- and
interval-censored data. The approach has been applied to water quality data
in the Chesapeake Bay, a major estuary on the east coast of the United
States to provide insights to a range of management- and research-focused
questions.",2019-03-14,Rebecca Murphy,https://github.com/tetratech/baytrends,TRUE,https://github.com/tetratech/baytrends,3102,2,1552586101
bazar,"A collection of miscellaneous functions for
copying objects to the clipboard ('Copy');
manipulating strings ('concat', 'mgsub', 'trim', 'verlan');
loading or showing packages ('library_with_dep', 'require_with_dep',
'sessionPackages');
creating or testing for named lists ('nlist', 'as.nlist', 'is.nlist'),
formulas ('is.formula'), empty objects ('as.empty', 'is.empty'),
whole numbers ('as.wholenumber', 'is.wholenumber');
testing for equality ('almost.equal', 'almost.zero') and computing
uniqueness ('almost.unique');
getting modified versions of usual functions ('rle2', 'sumNA');
making a pause or a stop ('pause', 'stopif');
converting into a function ('as.fun');
providing a C like ternary operator ('condition %?% true %:% false');
finding packages and functions ('get_all_pkgs', 'get_all_funs');
and others ('erase', '%nin%', 'unwhich', 'top', 'bot', 'normalize'). ",2019-03-15,Paul Poncet,https://github.com/paulponcet/bazar,TRUE,https://github.com/paulponcet/bazar,39251,0,1554455660
BBI,"Set of functions to calculate Benthic Biotic Indices from
composition data, obtained whether from morphotaxonomic inventories or
sequencing data. Based on reference ecological weights publicly available for
a set of commonly used marine biotic indices, such as AMBI (A Marine Biotic Index, Borja et al., 2000) <doi:10.1016/S0025-326X(00)00061-8>
NSI (Norwegian Sensitivity Index) and ISI (Indicator Species Index) (Rygg 2013, <ISBN:978-82-577-6210-0>). It provides the ecological quality status of the samples based on each BBI as well as the normalized Ecological Quality Ratio.",2018-10-17,Tristan Cordier,https://github.com/trtcrd/BBI,TRUE,https://github.com/trtcrd/bbi,3872,1,1544017201
bbw,"The blocked weighted bootstrap (BBW) is an estimation technique
for use with data from two-stage cluster sampled surveys in which either
prior weighting (e.g. population-proportional sampling or PPS as used in
Standardized Monitoring and Assessment of Relief and Transitions or SMART
surveys) or posterior weighting (e.g. as used in rapid assessment method or
RAM and simple spatial sampling method or S3M surveys). The method was
developed by Accion Contra la Faim, Brixton Health, Concern Worldwide,
Global Alliance for Improved Nutrition, UNICEF Sierra Leone, UNICEF Sudan
and Valid International. It has been tested by the Centers for Disease
Control (CDC) using infant and young child feeding (IYCF) data. See Cameron
et al (2008) <doi:10.1162/rest.90.3.414> for application of bootstrap
to cluster samples. See Aaron et al (2016) <doi:10.1371/journal.pone.0163176>
and Aaron et al (2016) <doi:10.1371/journal.pone.0162462> for application
of the blocked weighted bootstrap to estimate indicators from two-stage
cluster sampled surveys.",2018-01-17,Mark Myatt,https://github.com/validmeasures/bbw,TRUE,https://github.com/validmeasures/bbw,3648,1,1543834566
bcaboot,Computation of bootstrap confidence intervals in an almost automatic fashion.,2018-08-31,Balasubramanian Narasimhan,"https://bnaras.github.io/bcaboot,
https://github.com/bnaras/bcaboot",TRUE,https://github.com/bnaras/bcaboot,2310,5,1547767679
Bchron,"Enables quick calibration of radiocarbon dates under various
calibration curves (including user generated ones); age-depth modelling
as per the algorithm of Haslett and Parnell (2008) <DOI:10.1111/j.1467-9876.2008.00623.x>; Relative sea level
rate estimation incorporating time uncertainty in polynomial regression
models (Parnell and Gehrels 2015) <DOI:10.1002/9781118452547.ch32>; non-parametric phase modelling via
Gaussian mixtures as a means to determine the activity of a site
(and as an alternative to the Oxcal function SUM; currently
unpublished), and reverse calibration of dates from calibrated into
un-calibrated years (also unpublished).",2018-06-15,Andrew Parnell,https://github.com/andrewcparnell/Bchron,TRUE,https://github.com/andrewcparnell/bchron,39321,12,1539938915
bcmaps,"Provides access to various spatial layers for B.C., such as
administrative boundaries, natural resource management boundaries, etc.
All layers are imported from the 'bcmapsdata' package as 'sf' or 'Spatial' objects
through function calls in this package. All layers are in B.C. 'Albers' equal-area projection
<http://spatialreference.org/ref/epsg/nad83-bc-albers/>, which is the B.C.
government standard.",2019-02-15,Andy Teucher,https://github.com/bcgov/bcmaps,TRUE,https://github.com/bcgov/bcmaps,5906,40,1550254080
bdchecks,Supplies a Shiny app and a set of functions to perform and managing data checks for biodiversity data. ,2019-02-18,Povilas Gibas (<https://orcid.org/0000-0001-5311-6021>),https://github.com/bd-R/bdchecks,TRUE,https://github.com/bd-r/bdchecks,778,0,1554051128
beautier,"'BEAST2' (<http://www.beast2.org>) is a widely used
Bayesian phylogenetic tool, that uses DNA/RNA/protein data
and many model priors to create a posterior of jointly estimated
phylogenies and parameters.
'BEAUti 2' (which is part of 'BEAST2') is a GUI tool
that allows users to specify the many possible setups
and generates the XML file 'BEAST2' needs to run.
This package provides a way to create 'BEAST2' input
files without active user input, but using
R function calls instead.",2019-03-01,"Richèl J.C. Bilderbeek
(<https://orcid.org/0000-0003-1107-7049>)",https://github.com/ropensci/beautier,TRUE,https://github.com/ropensci/beautier,1116,3,1552304068
BEDMatrix,"A matrix-like data structure that allows for efficient,
convenient, and scalable subsetting of binary genotype/phenotype files
generated by PLINK (<https://www.cog-genomics.org/plink2>), the whole
genome association analysis toolset, without loading the entire file into
memory.",2018-08-06,Alexander Grueneberg,https://github.com/QuantGen/BEDMatrix,TRUE,https://github.com/quantgen/bedmatrix,13743,5,1551895939
beezdemand,"Facilitates many of the analyses performed in studies of
behavioral economic demand. The package supports commonly-used options for
modeling operant demand including (1) data screening proposed by Stein,
Koffarnus, Snider, Quisenberry, & Bickel (2015; <doi:10.1037/pha0000020>),
(2) fitting models of demand such as linear (Hursh, Raslear, Bauman,
& Black, 1989, <doi:10.1007/978-94-009-2470-3_22>), exponential (Hursh & Silberberg, 2008,
<doi:10.1037/0033-295X.115.1.186>) and modified exponential (Koffarnus,
Franck, Stein, & Bickel, 2015, <doi:10.1037/pha0000045>), and (3) calculating
numerous measures relevant to applied behavioral economists (Intensity,
Pmax, Omax). Also supports plotting and comparing data.",2018-07-31,Brent Kaplan,https://github.com/brentkaplan/beezdemand,TRUE,https://github.com/brentkaplan/beezdemand,2574,5,1553010448
beginr,"Useful functions for R beginners, including hints for the arguments of the 'plot()' function, self-defined functions for error bars, user-customized pair plots and hist plots, enhanced linear regression figures, etc.. This package could be helpful to R experts as well.",2019-02-28,Peng Zhao,https://github.com/pzhaonet/beginr,TRUE,https://github.com/pzhaonet/beginr,7478,8,1551349413
behaviorchange,"Contains specialised analyses and
visualisation tools for behavior change science.
These facilitate conducting determinant studies
(for example, using confidence interval-based
estimation of relevance, CIBER, or CIBERlite
plots), systematically developing, reporting,
and analysing interventions (for example, using
acyclic behavior change diagrams or ABCDs), and
reporting about intervention effectiveness (for
example, using the Numbers Needed for Change). This
package is especially useful for researchers in
the field of behavior change or health psychology
and to behavior change professionals such as
intervention developers and prevention workers.",2019-01-17,Gjalt-Jorn Peters,https://a-bc.eu/R/behaviorchange,TRUE,https://github.com/academy-of-behavior-change/behaviorchange,3571,0,1547674110
behavr,Implements an S3 class based on 'data.table' to store and process efficiently ethomics (high-throughput behavioural) data.,2019-01-03,Quentin Geissmann,https://github.com/rethomics/behavr,TRUE,https://github.com/rethomics/behavr,4676,2,1554501780
belg,"Calculates the Boltzmann entropy of a landscape gradient.
This package uses the analytical method created by Gao, P., Zhang, H.
and Li, Z., 2018 (<doi:10.1111/tgis.12315>). It also extend the original
idea by allowing calculations on data with missing values.",2018-06-17,Jakub Nowosad (<https://orcid.org/0000-0002-1057-3721>),https://github.com/Nowosad/belg,TRUE,https://github.com/nowosad/belg,5196,8,1550344483
bench,Tools to accurately benchmark and analyze execution times for R expressions.,2018-06-06,Jim Hester,https://github.com/r-lib/bench,TRUE,https://github.com/r-lib/bench,10821,118,1540559286
benchmarkme,"Benchmark your CPU and compare against other CPUs. Also provides
functions for obtaining system specifications, such as
RAM, CPU type, and R version.",2019-01-28,Colin Gillespie,https://github.com/csgillespie/benchmarkme,TRUE,https://github.com/csgillespie/benchmarkme,25603,15,1550652440
benchmarkmeData,Crowd sourced benchmarks from running the 'benchmarkme' package.,2019-03-08,Colin Gillespie,https://github.com/csgillespie/benchmarkme-data,TRUE,https://github.com/csgillespie/benchmarkme-data,24985,1,1552031800
benford.analysis,Provides tools that make it easier to validate data using Benford's Law.,2018-12-21,Carlos Cinelli,http://github.com/carloscinelli/benford.analysis,TRUE,https://github.com/carloscinelli/benford.analysis,29475,20,1545373160
berryFunctions,"Draw horizontal histograms, color scattered points by 3rd dimension,
enhance date- and log-axis plots, zoom in X11 graphics, trace errors and warnings,
use the unit hydrograph in a linear storage cascade, convert lists to data.frames and arrays,
fit multiple functions.",2018-03-25,Berry Boessenkool,https://github.com/brry/berryFunctions,TRUE,https://github.com/brry/berryfunctions,43998,5,1553942576
bestNormalize,"Estimate a suite of normalizing transformations, including
a new adaptation of a technique based on ranks which can guarantee
normally distributed transformed data if there are no ties: ordered
quantile normalization (ORQ). ORQ normalization combines a rank-mapping
approach with a shifted logit approximation that allows
the transformation to work on data outside the original domain. It is
also able to handle new data within the original domain via linear
interpolation. The package is built to estimate the best normalizing
transformation for a vector consistently and accurately. It implements
the Box-Cox transformation, the Yeo-Johnson transformation, three types
of Lambert WxF transformations, and the ordered quantile normalization
transformation. It also estimates the normalization efficacy of other
commonly used transformations. ",2018-09-25,Ryan Andrew Peterson,https://github.com/petersonR/bestNormalize,TRUE,https://github.com/petersonr/bestnormalize,10815,9,1537897483
BETS,"It provides access to and information about the most important
Brazilian economic time series - from the Getulio Vargas Foundation <http://portal.fgv.br/en>,
the Central Bank of Brazil <http://www.bcb.gov.br> and the Brazilian Institute of Geography
and Statistics <http://www.ibge.gov.br>. It also presents tools for managing, analysing (e.g.
generating dynamic reports with a complete analysis of a series) and exporting
these time series.",2018-09-28,Talitha Speranza,https://github.com/nmecsys/BETS,TRUE,https://github.com/nmecsys/bets,30766,8,1542832881
bfsl,"Provides the solution from York (1968) <doi:10.1016/S0012-821X(68)80059-7>
for fitting a straight line to bivariate data with errors in both coordinates.
It gives unbiased estimates of the intercept, slope and standard errors of the
best-fit straight line to independent points with (possibly correlated)
normally distributed errors in both x and y. Other commonly used
errors-in-variables methods, such as orthogonal distance regression, geometric
mean regression or Deming regression are special cases of York’s solution.",2018-12-16,Patrick Sturm,https://github.com/pasturm/bfsl,TRUE,https://github.com/pasturm/bfsl,1170,0,1545384367
bfw,"Derived from the work of Kruschke (2015, <ISBN:9780124058880>),
the present package aims to provide a framework for conducting Bayesian
analysis using Markov chain Monte Carlo (MCMC) sampling utilizing the
Just Another Gibbs Sampler ('JAGS', Plummer, 2003, <http://mcmc-jags.sourceforge.net/>).
The initial version includes several modules for conducting Bayesian
equivalents of chi-squared tests, analysis of variance (ANOVA),
multiple (hierarchical) regression, softmax regression, and for fitting data
(e.g., structural equation modeling).",2019-02-04,Øystein Olav Skaar,https://github.com/oeysan/bfw/,TRUE,https://github.com/oeysan/bfw,4989,8,1549277646
BGData,"An umbrella package providing a phenotype/genotype data structure
and scalable and efficient computational methods for large genomic datasets
in combination with several other packages: 'BEDMatrix', 'LinkedMatrix',
and 'symDMatrix'.",2019-01-25,Alexander Grueneberg,https://github.com/QuantGen/BGData,TRUE,https://github.com/quantgen/bgdata,6784,12,1550613353
BH,"Boost provides free peer-reviewed portable C++ source
libraries. A large part of Boost is provided as C++ template code
which is resolved entirely at compile-time without linking. This
package aims to provide the most useful subset of Boost libraries
for template use among CRAN package. By placing these libraries in
this package, we offer a more efficient distribution system for CRAN
as replication of this code in the sources of other packages is
avoided. As of release 1.69.0-1, the following Boost libraries are
included: 'algorithm' 'align' 'any' 'atomic' 'bimap' 'bind'
'circular_buffer' 'compute' 'concept' 'config' 'container' 'date_time'
'detail' 'dynamic_bitset' 'exception' 'filesystem' 'flyweight' 'foreach'
'functional' 'fusion' 'geometry' 'graph' 'heap' 'icl' 'integer'
'interprocess' 'intrusive' 'io' 'iostreams' 'iterator' 'math' 'move' 'mpl'
'multiprcecision' 'numeric' 'pending' 'phoenix' 'preprocessor'
'propery_tree' 'random' 'range' 'scope_exit' 'smart_ptr' 'sort' 'spirit'
'tuple' 'type_traits' 'typeof' 'unordered' 'utility' 'uuid'.",2019-01-07,Dirk Eddelbuettel,NA,TRUE,https://github.com/eddelbuettel/bh,12056296,56,1549379145
BIFIEsurvey,"
Contains tools for survey statistics (especially in educational
assessment) for datasets with replication designs (jackknife,
bootstrap, replicate weights; see Kolenikov, 2010;
Pfefferman & Rao, 2009a, 2009b, <doi:10.1016/S0169-7161(09)70003-3>,
<doi:10.1016/S0169-7161(09)70037-9>); Shao, 1996,
<doi:10.1080/02331889708802523>).
Descriptive statistics, linear and logistic regression,
path models for manifest variables with measurement error
correction and two-level hierarchical regressions for weighted
samples are included. Statistical inference can be conducted for
multiply imputed datasets and nested multiply imputed datasets
and is in particularly suited for the analysis of plausible values
(for details see George, Oberwimmer & Itzlinger-Bruneforth, 2016;
Bruneforth, Oberwimmer & Robitzsch, 2016; Robitzsch, Pham &
Yanagida, 2016; <doi:10.17888/fdb-demo:bistE813I-16a>).
The package development was supported by BIFIE (Federal Institute for
Educational Research, Innovation and Development of the Austrian
School System; Salzburg, Austria).",2019-03-20,Alexander Robitzsch,"http://www.bifie.at,
https://www.bifie.at/bildungsforschung/forschungsdatenbibliothek,
https://www.bifie.at/large-scale-assessment-mit-r-methodische-grundlagen-der-oesterreichischen-bildungsstandardueberpruefung,
https://github.com/alexanderrobitzsch/BIFIEsurvey,
https://sites.google.com/site/alexanderrobitzsch2/software",TRUE,https://github.com/alexanderrobitzsch/bifiesurvey,90660,0,1553884077
BIGDAWG,"Data sets and functions for chi-squared Hardy-Weinberg and case-control association tests of highly polymorphic genetic data [e.g., human leukocyte antigen (HLA) data]. Performs association tests at multiple levels of polymorphism (haplotype, locus and HLA amino-acids) as described in Pappas DJ, Marin W, Hollenbach JA, Mack SJ (2016) <doi:10.1016/j.humimm.2015.12.006>. Combines rare variants to a common class to account for sparse cells in tables as described by Hollenbach JA, Mack SJ, Thomson G, Gourraud PA (2012) <doi:10.1007/978-1-61779-842-9_14>.",2018-02-08,Derek Pappas <dpappas@chori.org>,"http://tools.immunogenomics.org/,
https://github.com/IgDAWG/BIGDAWG",TRUE,https://github.com/igdawg/bigdawg,21052,1,1538077802
bigdist,"Provides utilities to compute, store and access distance matrices on disk as file-backed matrices provided by the 'bigstatsr' package. File-backed distance matrices are stored as a symmetric matrix to facilitate out-of-memory operations on file-backed matrix while the in-memory 'dist' object stores only the lower diagonal elements. 'disto' provides an unified interface to work with in-memory and disk-based distance matrices.",2019-03-16,Komala Sheshachala Srikanth,https://github.com/talegari/bigdist,TRUE,https://github.com/talegari/bigdist,264,1,1552737650
bigIntegerAlgos,"Features the multiple polynomial quadratic sieve algorithm
for factoring large integers and a vectorized factoring function that
returns the complete factorization of an integer. Utilizes the C
library GMP (GNU Multiple Precision Arithmetic) and classes created
by Antoine Lucas et al. found in the 'gmp' package.",2018-04-30,Joseph Wood,"https://github.com/jwood000/bigIntegerAlgos,
http://mathworld.wolfram.com/QuadraticSieve.html",TRUE,https://github.com/jwood000/bigintegeralgos,4003,3,1525103537
bigKRLS,"Functions for Kernel-Regularized Least Squares optimized for speed and memory usage are provided along with visualization tools. For working papers, sample code, and recent presentations visit <https://sites.google.com/site/petemohanty/software/>. bigKRLS, as well its dependencies, require current versions of R and its compilers (and RStudio if used). For details, see <https://github.com/rdrr1990/bigKRLS/blob/master/INSTALL.md>.",2019-03-22,Pete Mohanty (<https://orcid.org/0000-0001-8531-3345>),https://github.com/rdrr1990/bigKRLS,TRUE,https://github.com/rdrr1990/bigkrls,14815,24,1553031005
BIGL,"Response surface methods for drug synergy analysis. Available
methods include generalized and classical Loewe formulations as well as Highest
Single Agent methodology. Response surfaces can be plotted in an interactive
3-D plot and formal statistical tests for presence of synergistic effects are
available. Implemented methods and tests are described in the article
""BIGL: Biochemically Intuitive Generalized Loewe null model for prediction
of the expected combined effect compatible with partial agonism and antagonism""
by Koen Van der Borght, Annelies Tourny, Rytis Bagdziunas, Olivier Thas,
Maxim Nazarov, Heather Turner, Bie Verbist & Hugo Ceulemans (2017)
<doi:10.1038/s41598-017-18068-5>.",2019-02-28,Heather Turner,https://github.com/openanalytics/BIGL,TRUE,https://github.com/openanalytics/bigl,10701,4,1552074397
bigmemory,"Create, store, access, and manipulate massive matrices.
Matrices are allocated to shared memory and may use memory-mapped
files. Packages 'biganalytics', 'bigtabulate', 'synchronicity', and
'bigalgebra' provide advanced functionality.",2018-01-11,Michael J. Kane <kaneplusplus@gmail.com>,https://github.com/kaneplusplus/bigmemory,TRUE,https://github.com/kaneplusplus/bigmemory,259751,73,1542397483
bigQueryR,"Interface with 'Google BigQuery',
see <https://cloud.google.com/bigquery/> for more information.
This package uses 'googleAuthR' so is compatible with similar packages,
including 'Google Cloud Storage' (<https://cloud.google.com/storage/>) for result extracts. ",2018-06-08,Mark Edmondson,http://code.markedmondson.me/bigQueryR/,TRUE,https://github.com/cloudyr/bigqueryr,26902,27,1554280979
bigreadr,Read large text files by splitting them in smaller files.,2018-08-13,Florian Privé,https://github.com/privefl/bigreadr,TRUE,https://github.com/privefl/bigreadr,4260,17,1535307245
bigrquery,Easily talk to Google's 'BigQuery' database from R.,2019-02-05,Hadley Wickham,https://github.com/rstats-db/bigrquery,TRUE,https://github.com/rstats-db/bigrquery,273020,329,1552823701
bigstatsr,"Easy-to-use, efficient, flexible and scalable statistical tools.
Package bigstatsr provides and uses Filebacked Big Matrices via memory-mapping.
It provides for instance matrix operations, Principal Component Analysis,
sparse linear supervised models, utility functions and more
<doi:10.1093/bioinformatics/bty185>.",2019-03-03,Florian Privé,https://privefl.github.io/bigstatsr,TRUE,https://github.com/privefl/bigstatsr,8640,69,1554469309
bigstep,"Selecting linear and generalized linear models for large data sets
using modified stepwise procedure and modern selection criteria (like
modifications of Bayesian Information Criterion). Selection can be
performed on data which exceed RAM capacity.",2019-03-21,Piotr Szulc,http://github.com/pmszulc/bigstep,TRUE,https://github.com/pmszulc/bigstep,8283,1,1536776764
bigtime,"Estimation of large Vector AutoRegressive (VAR), Vector AutoRegressive with Exogenous Variables X (VARX) and Vector AutoRegressive Moving Average (VARMA) Models with Structured Lasso Penalties, see Nicholson, Bien and Matteson (2017) <arXiv:1412.5250v2> and Wilms, Basu, Bien and Matteson (2017) <arXiv:1707.09208>.",2017-11-09,Ines Wilms,http://github.com/ineswilms/bigtime,TRUE,https://github.com/ineswilms/bigtime,5153,6,1535724920
BigVAR,Estimates VAR and VARX models with structured Lasso Penalties.,2019-01-22,Will Nicholson,http://www.github.com/wbnicholson/BigVAR,TRUE,https://github.com/wbnicholson/bigvar,14575,22,1548391180
bikedata,"Download and aggregate data from all public hire bicycle systems
which provide open data, currently including 'Santander' Cycles in London,
U.K., and from the U.S.A., 'citibike' in New York City NY, 'Divvy' in
Chicago IL, 'Capital Bikeshare' in Washington DC, 'Hubway' in Boston MA,
'Metro' in Los Angeles LA, and 'Indego' in Philadelphia PA.",2018-10-22,Mark Padgham (<https://orcid.org/0000-0003-2172-5265>),https://github.com/ropensci/bikedata,TRUE,https://github.com/ropensci/bikedata,14773,54,1543307367
bikeshare14,"Anonymised Bay Area bike share trip data for the year 2014.
Also contains additional metadata on stations and weather.",2019-01-03,Arunkumar Srinivasan,http://github.com/arunsrinivasan/bikeshare14,TRUE,https://github.com/arunsrinivasan/bikeshare14,6645,1,1546398851
billboarder,"Provides an 'htmlwidgets' interface to 'billboard.js',
a re-usable easy interface JavaScript chart library, based on D3 v4+.
Chart types include line charts, scatterplots, bar/lollipop charts, histogram/density plots, pie/donut charts and gauge charts.
All charts are interactive, and a proxy method is implemented to smoothly update a chart without rendering it again in 'shiny' apps. ",2019-01-03,Victor Perrier,https://github.com/dreamRs/billboarder,TRUE,https://github.com/dreamrs/billboarder,11717,101,1553031014
binb,"A collection of 'LaTeX' styles using 'Beamer' customization for
pdf-based presentation slides in 'RMarkdown'. At present it contains
'RMarkdown' adaptations of the LaTeX themes 'Metropolis' (formerly 'mtheme')
theme by Matthias Vogelgesang and others (now included in 'TeXLive'), the
'IQSS' by Ista Zahn (which is included here), and the 'Monash' theme by
Rob J Hyndman. Additional (free) fonts may be needed: 'Metropolis' prefers
'Fira', and 'IQSS' requires 'Libertinus'.",2018-10-12,Dirk Eddelbuettel,https://github.com/eddelbuettel/binb,TRUE,https://github.com/eddelbuettel/binb,4238,73,1553654598
binman,"Tools and functions for managing the download of binary files.
Binary repositories are defined in 'YAML' format. Defining new
pre-download, download and post-download templates allow additional
repositories to be added.",2018-07-18,John Harrison (original author),https://github.com/ropensci/binman,TRUE,https://github.com/ropensci/binman,81760,11,1547017532
binneR,A spectral binning approach for high resolution flow infusion mass spectrometry data.,2019-03-19,Jasen Finch,https://github.com/aberHRML/binneR,TRUE,https://github.com/aberhrml/binner,5607,0,1553006830
BinQuasi,Identify peaks in ChIP-seq data with biological replicates using a one-sided quasi-likelihood ratio test in quasi-Poisson or quasi-negative binomial models.,2018-07-27,Emily Goren,https://github.com/emilygoren/BinQuasi,TRUE,https://github.com/emilygoren/binquasi,4427,2,1532672019
bioacoustics,"Contains all the necessary tools to process audio recordings of
various formats (e.g., WAV, WAC, MP3, ZC), filter noisy files,
display audio signals, detect and extract automatically acoustic
features for further analysis such as classification.",2019-02-08,Jean Marchal,https://github.com/wavx/bioacoustics/,TRUE,https://github.com/wavx/bioacoustics,11256,15,1552584216
BioCircos,"Implement in 'R' interactive Circos-like visualizations of genomic data, to map information
such as genetic variants, genomic fusions and aberrations to a circular genome, as proposed by the
'JavaScript' library 'BioCircos.js', based on the 'JQuery' and 'D3' technologies. The output is by
default displayed in stand-alone HTML documents or in the 'RStudio' viewer pane. Moreover it can be
integrated in 'R Markdown' documents and 'Shiny' applications.",2019-03-19,Loan Vulliard [trl,https://github.com/lvulliard/BioCircos.R,TRUE,https://github.com/lvulliard/biocircos.r,6385,19,1552403505
BiocManager,A convenient tool to install and update Bioconductor packages.,2018-11-13,Martin Morgan (<https://orcid.org/0000-0002-5874-8148>),NA,TRUE,https://github.com/bioconductor/biocmanager,253427,20,1554155736
biogram,"Tools for extraction and analysis of various
n-grams (k-mers) derived from biological sequences (proteins
or nucleic acids). Contains QuiPT (quick permutation test) for fast
feature-filtering of the n-gram data.",2017-01-06,Michal Burdukiewicz,https://github.com/michbur/biogram,TRUE,https://github.com/michbur/biogram,16638,3,1553767849
BioInstaller,"
Can be used to integrate massive bioinformatics resources, such as tool/script and database. It provides the R functions and Shiny web application. Hundreds of bioinformatics tool/script and database have been included.",2018-11-20,Jianfeng Li (<https://orcid.org/0000-0003-2349-208X>),https://github.com/JhuangLab/BioInstaller,TRUE,https://github.com/jhuanglab/bioinstaller,28979,20,1542727846
biomartr,"Perform large scale genomic data retrieval and functional annotation retrieval. This package aims to provide users with a standardized
way to automate genome, proteome, 'RNA', coding sequence ('CDS'), 'GFF', and metagenome
retrieval from 'NCBI RefSeq', 'NCBI Genbank', 'ENSEMBL', 'ENSEMBLGENOMES',
and 'UniProt' databases. Furthermore, an interface to the 'BioMart' database
(Smedley et al. (2009) <doi:10.1186/1471-2164-10-22>) allows users to retrieve
functional annotation for genomic loci. In addition, users can download entire databases such
as 'NCBI RefSeq' (Pruitt et al. (2007) <doi:10.1093/nar/gkl842>), 'NCBI nr',
'NCBI nt', 'NCBI Genbank' (Benson et al. (2013) <doi:10.1093/nar/gks1195>), etc. as
well as 'ENSEMBL' and 'ENSEMBLGENOMES' with only one command.",2018-06-27,Hajk-Georg Drost (<https://orcid.org/0000-0002-1567-306X>),https://github.com/ropensci/biomartr,TRUE,https://github.com/ropensci/biomartr,37444,94,1554225582
BIOMASS,"Contains functions to estimate aboveground biomass/carbon and its uncertainty in tropical forests.
These functions allow to (1) retrieve and to correct taxonomy, (2) estimate wood density and its uncertainty,
(3) construct height-diameter models, (4) manage tree and plot coordinates,
(5) estimate the aboveground biomass/carbon at the stand level with associated uncertainty.
To cite BIOMASS, please use citation(""BIOMASS"").
See more in the article of Réjou-Méchain et al. (2017) <doi:10.1111/2041-210X.12753>.",2019-03-26,Maxime Réjou-Méchain,https://github.com/AMAP-dev/BIOMASS,TRUE,https://github.com/amap-dev/biomass,11734,1,1553589774
BioMedR,"Calculating 293 chemical descriptors and 14 kinds of chemical fingerprints, 9920 protein descriptors based on protein sequences, more than 6000 DNA/RNA descriptors from nucleotide sequences, and six types of interaction descriptors using three different combining strategies. ",2019-04-04,Min-feng Zhu <wind2zhu@163.com>,https://github.com/wind22zhu/BioMedR,TRUE,https://github.com/wind22zhu/biomedr,1219,0,1523890937
bioRad,"Extract, visualize and summarize aerial movements of birds and
insects from weather radar data.",2018-12-14,Adriaan M. Dokter (<https://orcid.org/0000-0001-6573-066X>),"https://github.com/adokter/bioRad,
https://adokter.github.io/bioRad",TRUE,https://github.com/adokter/biorad,1525,9,1544821263
bioset,"Functions to help dealing with raw data from measurements, like
reading and transforming raw values organized in matrices, calculating and
converting concentrations and calculating precision of duplicates /
triplicates / ... . It is compatible with and building on top of some
'tidyverse'-packages.",2018-11-13,Eike Christian Kühn,https://github.com/randomchars42/bioset,TRUE,https://github.com/randomchars42/bioset,6243,2,1542118356
bipartite,"Functions to visualise webs and calculate a series of indices commonly used to describe pattern in (ecological) webs. It focuses on webs consisting of only two levels (bipartite), e.g. pollination webs or predator-prey-webs. Visualisation is important to get an idea of what we are actually looking at, while the indices summarise different aspects of the web's topology. ",2018-07-11,Carsten F. Dormann,https://github.com/biometry/bipartite,TRUE,https://github.com/biometry/bipartite,90246,9,1553449061
BIS,"Provides an interface to data provided by the Bank for International
Settlements <https://www.bis.org>, allowing for programmatic retrieval of a
large quantity of (central) banking data.",2018-05-22,Eric Persson,https://www.bis.org,TRUE,https://github.com/expersso/bis,6350,5,1526979620
bisect,"An implementation of Bisect, a method for inferring cell type composition of samples based on methylation sequencing data (Whole Genome Bisulfite Sequencing and Reduced Representation Sequencing). The method is specifically tailored for sequencing data, and therefore works better than methods developed for methylation arrays. It contains a supervised mode that requires a reference (the methylation probabilities in the pure cell types), and a semi-supervised mode, that requires cell counts for a subset of the samples, but does not require a reference.",2018-04-16,Eyal Fisher,https://github.com/EyalFisher/BiSect,TRUE,https://github.com/eyalfisher/bisect,2676,0,1523904149
BivRec,"Alternating recurrent event data arise frequently in biomedical and social sciences where 2 types of events such as hospital admissions and discharge occur alternatively over time.
As such we implement a collection of non-parametric and semiparametric methods to analyze such data.
The main functions are biv.rec.fit() and biv.rec.np(). Use biv.rec.fit() for estimation of covariate effects on the two alternating event gap times (xij and yij) using semiparametric methods. The method options are ""Lee.et.al"" and ""Chang"".
Use biv.rec.np() for estimation of the joint cumulative distribution function (cdf) for the two alternating events gap times (xij and yij) as well as the marginal survival function for type I gap times (xij) and the conditional cdf of the type II gap times (yij) given an interval of type I gap times (xij) in a non-parametric fashion.
The package also provides options to simulate and visualize the data and results of analysis. ",2018-11-16,Sandra Castro-Pearson,NA,TRUE,https://github.com/sandracastropearson/bivrec,1737,1,1553790334
biwavelet,"This is a port of the WTC MATLAB package written by Aslak Grinsted
and the wavelet program written by Christopher Torrence and Gibert P.
Compo. This package can be used to perform univariate and bivariate
(cross-wavelet, wavelet coherence, wavelet clustering) analyses.",2018-05-19,Tarik C. Gouhier,https://github.com/tgouhier/biwavelet,TRUE,https://github.com/tgouhier/biwavelet,34495,18,1535549173
bizdays,"Business days calculations based on a list of holidays and
nonworking weekdays. Quite useful for fixed income and derivatives pricing.",2018-06-25,Wilson Freitas <wilson.freitas@gmail.com>,https://github.com/wilsonfreitas/R-bizdays,TRUE,https://github.com/wilsonfreitas/r-bizdays,63921,22,1554370851
bjscrapeR,"Drawing heavy influence from 'blscrapeR', this package scrapes crime data from <https://www.bjs.gov/>. Specifically, it scrapes data from the National Crime Victimization Survey which tracks personal and household crime in the USA. The idea is to utilize the 'tidyverse' methodology to create an efficient work flow when dealing with crime statistics.",2018-06-06,Dylan McDowell,https://github.com/dylanjm/bjscrapeR,TRUE,https://github.com/dylanjm/bjscraper,2704,3,1530031071
blandr,"Carries out Bland Altman analyses (also known as a Tukey
mean-difference plot) as described by JM Bland and DG Altman in
1986 <doi:10.1016/S0140-6736(86)90837-8>. This package was created in
2015 as existing Bland-Altman analysis functions did not calculate
confidence intervals. This package was created to rectify this,
and create reproducible plots. This package is also available as a module
for the 'jamovi' statistical spreadsheet (see <https://www.jamovi.org>
for more information).",2018-05-10,Deepankar Datta,https://github.com/deepankardatta/blandr/,TRUE,https://github.com/deepankardatta/blandr,6928,7,1548686068
blastula,"Compose and send out responsive HTML email messages that render
perfectly across a range of email clients and device sizes. Messages are
composed using 'Markdown' and a text interpolation system that allows for
the injection of evaluated R code within the message body, footer, and
subject line. Helper functions let the user insert embedded images, web
link buttons, and 'ggplot2' plot objects into the message body. Messages
can be sent through an 'SMTP' server or through the 'Mailgun' API service
<http://mailgun.com/>.",2018-07-19,Richard Iannone (<https://orcid.org/0000-0003-3925-190X>),https://github.com/rich-iannone/blastula,TRUE,https://github.com/rich-iannone/blastula,10566,188,1553177963
blme,"Maximum a posteriori estimation for linear and generalized
linear mixed-effects models in a Bayesian setting. Extends
'lme4' by Douglas Bates, Martin Maechler, Ben Bolker, and Steve Walker.",2015-06-14,Vincent Dorie <vjd4@nyu.edu>,https://github.com/vdorie/blme,TRUE,https://github.com/vdorie/blme,118844,14,1548384631
blob,"R's raw vector is useful for storing a single binary object.
What if you want to put a vector of them in a data frame? The 'blob'
package provides the blob object, a list of raw vectors, suitable for
use as a column in data frame.",2018-03-25,Kirill Müller,https://github.com/tidyverse/blob,TRUE,https://github.com/tidyverse/blob,1448135,21,1553334597
blockseg,"Segments a matrix in blocks with constant values.
BRAULT V, CHIQUET J. and LEVY-LEDUC C. (2017) <doi:10.1214/17-EJS1270>.",2018-07-03,"Authors@R: c(
person(Julien",https://github.com/jchiquet/blockseg (dev version),TRUE,https://github.com/jchiquet/blockseg,8978,0,1552063477
blogdown,"Write blog posts and web pages in R Markdown. This package supports
the static site generator 'Hugo' (<https://gohugo.io>) best, and it also
supports 'Jekyll' (<http://jekyllrb.com>) and 'Hexo' (<https://hexo.io>).",2019-03-11,Yihui Xie (<https://orcid.org/0000-0003-0645-5666>),https://github.com/rstudio/blogdown,TRUE,https://github.com/rstudio/blogdown,63702,930,1552582269
blorr,"Tools designed to make it easier for beginner and intermediate users to build and validate
binary logistic regression models. Includes bivariate analysis, comprehensive regression output,
model fit statistics, variable selection procedures, model validation techniques and a 'shiny'
app for interactive model building.",2019-03-12,Aravind Hebbali (<https://orcid.org/0000-0001-9220-9669>),"URL: https://blorr.rsquaredacademy.com/,
https://github.com/rsquaredacademy/blorr",TRUE,https://github.com/rsquaredacademy/blorr,4180,9,1552461177
blscrapeR,"Scrapes various data from <https://www.bls.gov/>. The U.S. Bureau of Labor Statistics is the statistical branch of the United States Department of Labor. The package has additional functions to help parse, analyze and visualize the data.",2019-01-29,Kris Eberwein,https://github.com/keberwein/blscrapeR,TRUE,https://github.com/keberwein/blscraper,23516,50,1548866413
bmlm,Easy estimation of Bayesian multilevel mediation models with Stan.,2019-02-21,Matti Vuorre (<https://orcid.org/0000-0001-5052-066X>),https://github.com/mvuorre/bmlm/,TRUE,https://github.com/mvuorre/bmlm,21403,16,1550777883
BMRBr,"Nuclear magnetic resonance (NMR) is a highly versatile analytical technique for studying molecular configuration, conformation,
and dynamics, especially those of biomacromolecules such as proteins. Biological Magnetic Resonance Data Bank ('BMRB') is a repository
for Data from NMR Spectroscopy on Proteins, Peptides, Nucleic Acids, and other Biomolecules. Currently, 'BMRB' offers an R package
'RBMRB' to fetch data, however, it doesn't easily offer individual data file downloading and storing in a local directory. When using
'RBMRB', the data will stored as an R object, which fundamentally hinders the NMR researches to access the rich information from raw
data, for example, the metadata. Here, 'BMRBr' File Downloader ('BMRBr') offers a more fundamental, low level downloader, which will
download original deposited .str format file. This type of file contains information such as entry title, authors, citation, protein
sequences, and so on.
Many factors affect NMR experiment outputs, such as temperature, resonance sensitivity and etc., approximately 40% of the entries in the 'BMRB' have
chemical shift accuracy problems [1,2] Unfortunately, current reference correction methods are heavily dependent on the availability of
assigned protein chemical shifts or protein structure. This is my current research project is going to solve, which will be included
in the future release of the package. The current version of the package is sufficient and robust enough for downloading individual
'BMRB' data file from the 'BMRB' database <http://www.bmrb.wisc.edu>. The functionalities of this package includes but not limited:
* To simplifies NMR researches by combine data downloading and results analysis together.
* To allows NMR data reaches a broader audience that could utilize more than just chemical shifts but also metadata.
* To offer reference corrected data for entries without assignment or structure information (future release).
Reference:
[1] E.L. Ulrich, H. Akutsu, J.F. Doreleijers, Y. Harano, Y.E. Ioannidis, J. Lin, et al., BioMagResBank, Nucl. Acids Res. 36 (2008) D402–8. <doi:10.1093/nar/gkm957>.
[2] L. Wang, H.R. Eghbalnia, A. Bahrami, J.L. Markley, Linear analysis of carbon-13 chemical shift differences and its application to the detection and correction of errors in referencing and spin system identifications, J. Biomol. NMR. 32 (2005) 13–22. <doi:10.1007/s10858-005-1717-0>.",2018-08-24,Xi Chen (<https://orcid.org/0000-0001-7094-6748>),https://github.com/billchenxi/BMRBr,TRUE,https://github.com/billchenxi/bmrbr,4524,1,1552016555
BMTME,"Genomic selection and prediction models with the capacity to use multiple traits and environments, through ready-to-use Bayesian models. It consists a group of functions
that help to create regression models for some genomic models proposed by Montesinos-López, et al. (2016) <doi:10.1534/g3.116.032359>
also in Montesinos-López et al. (2018) <doi:10.1534/g3.118.200728> and Montesinos-López et al. (2018) <doi:10.2134/agronj2018.06.0362>.",2019-02-12,"Francisco Javier Luna-Vazquez
(<https://orcid.org/0000-0002-5370-7152>)",https://github.com/frahik/BMTME,TRUE,https://github.com/frahik/bmtme,937,3,1553391691
bnclassify,"State-of-the art algorithms for learning discrete Bayesian network classifiers from data, including a number of those described in Bielza & Larranaga (2014) <doi:10.1145/2576868>, with functions for prediction, model evaluation and inspection.",2019-03-14,Mihaljevic Bojan,http://github.com/bmihaljevic/bnclassify,TRUE,https://github.com/bmihaljevic/bnclassify,17527,16,1554312211
bnpsd,"The Pritchard-Stephens-Donnelly (PSD) admixture model has k intermediate subpopulations from which n individuals draw their alleles dictated by their individual-specific admixture proportions. The BN-PSD model additionally imposes the Balding-Nichols (BN) allele frequency model to the intermediate populations, which therefore evolved independently from a common ancestral population T with subpopulation-specific FST (Wright's fixation index) parameters. The BN-PSD model can be used to yield complex population structures. Method described in Ochoa and Storey (2016) <doi:10.1101/083923>.",2019-02-12,Alejandro Ochoa,https://github.com/StoreyLab/bnpsd/,TRUE,https://github.com/storeylab/bnpsd,3849,4,1550087321
bnspatial,"Allows spatial implementation of Bayesian networks and mapping in geographical space. It makes maps of expected value (or most likely state) given known and unknown conditions, maps of uncertainty measured as coefficient of variation or Shannon index (entropy), maps of probability associated to any states of any node of the network. Some additional features are provided as well: parallel processing options, data discretization routines and function wrappers designed for users with minimal knowledge of the R language. Outputs can be exported to any common GIS format. Development was funded by the European Union FP7 (2007-2013), under project ROBIN (<http://robinproject.info>).",2019-03-21,Dario Masante,http://github.com/dariomasante/bnspatial,TRUE,https://github.com/dariomasante/bnspatial,13188,8,1553260076
bold,"A programmatic interface to the Web Service methods provided by
Bold Systems (<http://www.boldsystems.org/>) for genetic 'barcode' data.
Functions include methods for searching by sequences by taxonomic names,
ids, collectors, and institutions; as well as a function for searching
for specimens, and downloading trace files.",2018-12-14,Scott Chamberlain (<https://orcid.org/0000-0003-1444-9135>),https://github.com/ropensci/bold,TRUE,https://github.com/ropensci/bold,114631,10,1552004666
bomrang,"Provides functions to interface with Australian Government Bureau
of Meteorology ('BOM') data, fetching data and returning a tidy data frame
of precis forecasts, historical and current weather data from stations,
agriculture bulletin data, 'BOM' 0900 or 1500 weather bulletins and
downloading and importing radar and satellite imagery files. Data (c)
Australian Government Bureau of Meteorology Creative Commons (CC)
Attribution 3.0 licence or Public Access Licence (PAL) as appropriate. See
<http://www.bom.gov.au/other/copyright.shtml> for further details.",2019-03-21,Adam Sparks (<https://orcid.org/0000-0002-0061-8359>),"https://github.com/ropensci/bomrang,
https://ropensci.github.io/bomrang/",TRUE,https://github.com/ropensci/bomrang,17588,47,1554547089
bookdown,Output formats and utilities for authoring books and technical documents with R Markdown.,2018-12-21,Yihui Xie (<https://orcid.org/0000-0003-0645-5666>),https://github.com/rstudio/bookdown,TRUE,https://github.com/rstudio/bookdown,254749,1377,1553542138
bookdownplus,"A collection and selector of R 'bookdown' templates. 'bookdownplus' helps you write academic journal articles, guitar books, chemical equations, mails, calendars, and diaries. R 'bookdownplus' extends the features of 'bookdown', and simplifies the procedure. Users only have to choose a template, clarify the book title and author name, and then focus on writing the text. No need to struggle in 'YAML' and 'LaTeX'.",2019-03-03,Peng Zhao,https://github.com/pzhaonet/bookdownplus,TRUE,https://github.com/pzhaonet/bookdownplus,17427,136,1554364213
bootstrapFP,"Finite Population bootstrap algorithms to estimate the variance
of the Horvitz-Thompson estimator for single-stage sampling.
For a survey of bootstrap methods for finite populations, see Mashreghi et Al. (2016) <doi:10.1214/16-SS113>.",2019-02-24,Roberto Sichera,NA,TRUE,https://github.com/rhobis/bootstrapfp,845,0,1551003738
BootstrapQTL,"Identifies genome-related molecular traits with significant
evidence of genetic regulation and performs a bootstrap procedure to
correct estimated effect sizes for over-estimation present in cis-QTL
mapping studies (The ""Winner's Curse""), described in Huang QQ *et al.*
2018 <doi: 10.1093/nar/gky780>. ",2018-09-21,Scott Ritchie,NA,TRUE,https://github.com/inouyelab/bootstrapqtl,2022,3,1536941993
boxr,"An R interface for the remote file hosting service 'Box'
(<https://www.box.com/>). In addition to uploading and downloading files,
this package includes functions which mirror base R operations for local
files, (e.g. box_load(), box_save(), box_read(), box_setwd(), etc.), as well
as 'git' style functions for entire directories (e.g. box_fetch(),
box_push()).",2017-01-12,Brendan Rocks,https://github.com/brendan-r/boxr/,TRUE,https://github.com/brendan-r/boxr,18760,35,1553895826
bpbounds,"Implementation of the nonparametric bounds for the average causal
effect under an instrumental variable model by Balke and Pearl (Bounds on
Treatment Effects from Studies with Imperfect Compliance, JASA, 1997, 92,
439, 1171-1176). The package can calculate bounds for a binary outcome, a
binary treatment/phenotype, and an instrument with either 2 or 3
categories. The package implements bounds for situations where these 3
variables are measured in the same dataset (trivariate data) or where the
outcome and instrument are measured in one study and the
treatment/phenotype and instrument are measured in another study
(bivariate data).",2019-02-10,Tom Palmer (<https://orcid.org/0000-0003-4655-4511>),https://github.com/remlapmot/bpbounds,TRUE,https://github.com/remlapmot/bpbounds,2163,0,1549801092
bpnreg,"Fitting Bayesian multiple and mixed-effect regression models for
circular data based on the projected normal distribution. Both continuous
and categorical predictors can be included. Sampling from the posterior is
performed via an MCMC algorithm. Posterior descriptives of all parameters,
model fit statistics and Bayes factors for hypothesis tests for inequality
constrained hypotheses are provided. See Cremers, Mulder & Klugkist (2018)
<doi:10.1111/bmsp.12108> and Nuñez-Antonio & Guttiérez-Peña (2014)
<doi:10.1016/j.csda.2012.07.025>.",2018-02-27,Jolien Cremers,https://github.com/joliencremers/bpnreg,TRUE,https://github.com/joliencremers/bpnreg,2913,1,1547560965
BradleyTerry2,"Specify and fit the Bradley-Terry model, including structured versions in which the parameters are related to explanatory variables through a linear predictor and versions with contest-specific effects, such as a home advantage.",2019-02-25,Heather Turner,https://github.com/hturner/BradleyTerry2,TRUE,https://github.com/hturner/bradleyterry2,418687,8,1552408867
BradleyTerryScalable,"Facilities are provided for fitting the simple, unstructured Bradley-Terry model to networks of binary comparisons. The implemented methods are designed to scale well to large, potentially sparse, networks. A fairly high degree of scalability is achieved through the use of EM and MM algorithms, which are relatively undemanding in terms of memory usage (relative to some other commonly used methods such as iterative weighted least squares, for example). Both maximum likelihood and Bayesian MAP estimation methods are implemented. The package provides various standard methods for a newly defined 'btfit' model class, such as the extraction and summarisation of model parameters and the simulation of new datasets from a fitted model. Tools are also provided for reshaping data into the newly defined ""btdata"" class, and for analysing the comparison network, prior to fitting the Bradley-Terry model. This package complements, rather than replaces, the existing 'BradleyTerry2' package. (BradleyTerry2 has rather different aims, which are mainly the specification and fitting of ""structured"" Bradley-Terry models in which the strength parameters depend on covariates.)",2017-06-29,Ella Kaye,https://github.com/EllaKaye/BradleyTerryScalable,TRUE,https://github.com/ellakaye/bradleyterryscalable,5080,10,1533663278
BrailleR,"Blind users do not have access to the graphical output from R
without printing the content of graphics windows to an embosser of some kind. This
is not as immediate as is required for efficient access to statistical output.
The functions here are created so that blind people can make even better use
of R. This includes the text descriptions of graphs, convenience functions
to replace the functionality offered in many GUI front ends, and experimental
functionality for optimising graphical content to prepare it for embossing as
tactile images.",2018-08-10,A. Jonathan R. Godfrey,https://github.com/ajrgodfrey/BrailleR,TRUE,https://github.com/ajrgodfrey/brailler,26451,32,1548460027
brainGraph,"A set of tools for performing graph theory analysis of brain MRI
data. It works with data from a Freesurfer analysis (cortical thickness,
volumes, local gyrification index, surface area), diffusion tensor
tractography data (e.g., from FSL) and resting-state fMRI data (e.g., from
DPABI). It contains a graphical user interface for graph visualization and
data exploration, along with several functions for generating useful
figures.",2018-05-29,Christopher G. Watson <cgwatson@bu.edu>,https://github.com/cwatson/brainGraph,TRUE,https://github.com/cwatson/braingraph,16267,51,1544853850
brandwatchR,"Interact with the 'Brandwatch' API <https://developers.brandwatch.com/docs>.
Allows you to authenticate to the API and obtain data for projects, queries, query groups tags and categories.
Also allows you to directly obtain mentions and aggregate data for a specified query or query group.",2018-08-13,Donal Phipps,https://github.com/Phippsy/brandwatchR,TRUE,https://github.com/phippsy/brandwatchr,2350,9,1534428545
breakDown,"Model agnostic tool for decomposition of predictions from black boxes.
Break Down Table shows contributions of every variable to a final prediction.
Break Down Plot presents variable contributions in a concise graphical way.
This package work for binary classifiers and general regression models. ",2018-06-14,Przemyslaw Biecek,https://pbiecek.github.io/breakDown/,TRUE,https://github.com/pbiecek/breakdown,13615,72,1553643040
breathtestcore,"Reads several formats of 13C data (IRIS/Wagner, BreathID) and CSV.
Creates artificial sample data for testing.
Fits Maes/Ghoos, Bluck-Coward self-correcting formula using 'nls', 'nlme'.
Methods to fit breath test curves with Bayesian Stan methods are refactored to
package 'breathteststan'. For a Shiny GUI, see package
'dmenne/breathtestshiny' on github.",2018-12-18,Dieter Menne,https://github.com/dmenne/breathtestcore,TRUE,https://github.com/dmenne/breathtestcore,8186,1,1551173088
breathteststan,"Stan-based curve-fitting function
for use with package 'breathtestcore' by the same author.
Stan functions are refactored here for easier testing.",2018-11-07,Dieter Menne,https://github.com/dmenne/breathteststan,TRUE,https://github.com/dmenne/breathteststan,10919,2,1541579272
bReeze,"A collection of functions to analyse, visualize and interpret wind data
and to calculate the potential energy production of wind turbines.",2018-11-14,Christian Graul and Carsten Poppinga,https://github.com/chgrl/bReeze,TRUE,https://github.com/chgrl/breeze,22938,13,1541967721
brglm,"Fit generalized linear models with binomial responses using either an adjusted-score approach to bias reduction or maximum penalized likelihood where penalization is by Jeffreys invariant prior. These procedures return estimates with improved frequentist properties (bias, mean squared error) that are always finite even in cases where the maximum likelihood estimates are infinite (data separation). Fitting takes place by fitting generalized linear models on iteratively updated pseudo-data. The interface is essentially the same as 'glm'. More flexibility is provided by the fact that custom pseudo-data representations can be specified and used for model fitting. Functions are provided for the construction of confidence intervals for the reduced-bias estimates.",2019-04-02,Ioannis Kosmidis (<https://orcid.org/0000-0003-1556-0302>),https://github.com/ikosmidis/brglm,TRUE,https://github.com/ikosmidis/brglm,408252,0,1554218062
brglm2,"Estimation and inference from generalized linear models based on various methods for bias reduction. The 'brglmFit' fitting method can achieve reduction of estimation bias by solving either the mean bias-reducing adjusted score equations in Firth (1993) <doi:10.1093/biomet/80.1.27> and Kosmidis and Firth (2009) <doi:10.1093/biomet/asp055>, or the median bias-reduction adjusted score equations in Kenne et al. (2016) <arXiv:1604.04768>, or through the direct subtraction of an estimate of the bias of the maximum likelihood estimator from the maximum likelihood estimates as in Cordeiro and McCullagh (1991) <http://www.jstor.org/stable/2345592>. Estimation in all cases takes place via a quasi Fisher scoring algorithm, and S3 methods for the construction of of confidence intervals for the reduced-bias estimates are provided. In the special case of generalized linear models for binomial and multinomial responses (both ordinal and nominal), the adjusted score approaches return estimates with improved frequentist properties, that are also always finite, even in cases where the maximum likelihood estimates are infinite (e.g. complete and quasi-complete separation). 'brglm2' also provides pre-fit and post-fit methods for detecting separation and infinite maximum likelihood estimates in binomial response generalized linear models.",2019-02-14,Ioannis Kosmidis (<https://orcid.org/0000-0003-1556-0302>),https://github.com/ikosmidis/brglm2,TRUE,https://github.com/ikosmidis/brglm2,11192,2,1550165259
bridgesampling,"Provides functions for estimating marginal likelihoods, Bayes
factors, posterior model probabilities, and normalizing constants in general,
via different versions of bridge sampling (Meng & Wong, 1996,
<http://www3.stat.sinica.edu.tw/statistica/j6n4/j6n43/j6n43.htm>).",2018-10-21,Quentin F. Gronau (<https://orcid.org/0000-0001-5510-6943>),https://github.com/quentingronau/bridgesampling,TRUE,https://github.com/quentingronau/bridgesampling,77987,17,1546942516
BRISC,Fits Bootstrap with univariate spatial regression models using Bootstrap for Rapid Inference on Spatial Covariances (BRISC) for large datasets using Nearest Neighbor Gaussian Processes detailed in Saha and Datta (2018) <doi:10.1002/sta4.184>.,2018-07-22,Arkajyoti Saha,https://github.com/ArkajyotiSaha/BRISC,TRUE,https://github.com/arkajyotisaha/brisc,2383,0,1532036212
brms,"Fit Bayesian generalized (non-)linear multivariate multilevel models
using 'Stan' for full Bayesian inference. A wide range of distributions
and link functions are supported, allowing users to fit -- among others --
linear, robust linear, count data, survival, response times, ordinal,
zero-inflated, hurdle, and even self-defined mixture models all in a
multilevel context. Further modeling options include non-linear and
smooth terms, auto-correlation structures, censored data, meta-analytic
standard errors, and quite a few more. In addition, all parameters of the
response distribution can be predicted in order to perform distributional
regression. Prior specifications are flexible and explicitly encourage
users to apply prior distributions that actually reflect their beliefs.
Model fit can easily be assessed and compared with posterior predictive
checks and leave-one-out cross-validation. References: Bürkner (2017)
<doi:10.18637/jss.v080.i01>; Carpenter et al. (2017) <doi:10.18637/jss.v076.i01>.",2019-03-15,Paul-Christian Bürkner,"https://github.com/paul-buerkner/brms,
http://discourse.mc-stan.org",TRUE,https://github.com/paul-buerkner/brms,232606,482,1554471589
Brobdingnag,"Handles very large numbers in R. Real numbers are held
using their natural logarithms, plus a logical flag indicating
sign. The package includes a vignette that gives a
step-by-step introduction to using S4 methods.",2018-08-13,Robin K. S. Hankin,https://github.com/RobinHankin/Brobdingnag.git,TRUE,https://github.com/robinhankin/brobdingnag,102758,0,1550519799
broman,"Miscellaneous R functions, including functions related to
graphics (mostly for base graphics), permutation tests, running
mean/median, and general utilities.",2018-07-25,Karl W Broman <kbroman@biostat.wisc.edu>,https://github.com/kbroman/broman,TRUE,https://github.com/kbroman/broman,32825,133,1553092138
broom,"Summarizes key information about statistical
objects in tidy tibbles. This makes it easy to report results, create
plots and consistently work with large numbers of models at once.
Broom provides three verbs that each provide different types of
information about a model. tidy() summarizes information about model
components such as coefficients of a regression. glance() reports
information about an entire model, such as goodness of fit measures
like AIC and BIC. augment() adds information about individual
observations to a dataset, such as fitted values or influence
measures.",2018-12-05,Alex Hayes (<https://orcid.org/0000-0002-4985-5160>),http://github.com/tidyverse/broom,TRUE,https://github.com/tidyverse/broom,4171850,840,1554435363
broom.mixed,"Convert fitted objects from various R mixed-model packages
into tidy data frames along the lines of the 'broom' package.
The package provides three
S3 generics for each model: tidy(), which summarizes a model's statistical findings such as
coefficients of a regression; augment(), which adds columns to the original
data such as predictions, residuals and cluster assignments; and glance(), which
provides a one-row summary of model-level statistics.",2019-02-21,Ben Bolker (<https://orcid.org/0000-0002-2127-0443>),http://github.com/bbolker/broom.mixed,TRUE,https://github.com/bbolker/broom.mixed,10398,106,1552526379
broomExtra,"Collection of functions to assist 'broom' and
'broom.mixed' package-related data analysis workflows. In particular,
the generic functions tidy(), glance(), and augment() choose
appropriate S3 methods from these two packages depending on which
package exports the needed method. Additionally, 'grouped_' variants
of the generics provides a convenient way to execute functions across
a combination of grouping variable(s) in a dataframe.",2019-04-03,Indrajeet Patil (<https://orcid.org/0000-0003-1995-6531>),"https://indrajeetpatil.github.io/broomExtra/,
https://github.com/IndrajeetPatil/broomExtra",TRUE,https://github.com/indrajeetpatil/broomextra,1822,6,1554307717
brranching,"Includes methods for fetching 'phylogenies' from a variety
of sources, including the 'Phylomatic' web service
(<http://phylodiversity.net/phylomatic>), and 'Phylocom'
(<https://github.com/phylocom/phylocom/>).",2018-12-05,Scott Chamberlain (<https://orcid.org/0000-0003-1444-9135>),https://github.com/ropensci/brranching,TRUE,https://github.com/ropensci/brranching,15257,10,1552069168
brunnermunzel,"Provides the functions for Brunner-Munzel test and
permuted Brunner-Munzel test,
which enable to use formula, matrix, and table as argument.
These functions are based on Brunner and Munzel (2000)
<doi:10.1002/(SICI)1521-4036(200001)42:1%3C17::AID-BIMJ17%3E3.0.CO;2-U>
and Neubert and Brunner (2007) <doi:10.1016/j.csda.2006.05.024>,
and are written with FORTRAN.",2019-03-28,Toshiaki Ara,https://github.com/toshi-ara/brunnermunzel,TRUE,https://github.com/toshi-ara/brunnermunzel,1053,2,1553778759
bs4Dash,"Make 'Bootstrap 4' dashboards. Use the full power
of 'AdminLTE3', a dashboard template built on top of 'Bootstrap 4'
<https://github.com/almasaeed2010/AdminLTE/tree/v3-dev>.",2019-04-06,David Granjon,"https://rinterface.github.io/bs4Dash/index.html,
https://github.com/RinteRface/bs4Dash",TRUE,https://github.com/rinterface/bs4dash,5237,77,1554559401
bsam,"Tools to fit Bayesian state-space models to animal tracking data. Models are provided for location
filtering, location filtering and behavioural state estimation, and their hierarchical versions.
The models are primarily intended for fitting to ARGOS satellite tracking data but options exist to fit
to other tracking data types. For Global Positioning System data, consider the 'moveHMM' package.
Simplified Markov Chain Monte Carlo convergence diagnostic plotting is provided but users are encouraged
to explore tools available in packages such as 'coda' and 'boa'.",2017-07-01,Ian Jonsen,https://github.com/ianjonsen/bsam,TRUE,https://github.com/ianjonsen/bsam,9826,13,1525723049
bsplus,"The Bootstrap framework lets you add some JavaScript functionality to your web site by
adding attributes to your HTML tags - Bootstrap takes care of the JavaScript
<https://getbootstrap.com/javascript>. If you are using R Markdown or Shiny, you can
use these functions to create collapsible sections, accordion panels, modals, tooltips,
popovers, and an accordion sidebar framework (not described at Bootstrap site).",2018-04-05,Ian Lyttle,https://github.com/ijlyttle/bsplus,TRUE,https://github.com/ijlyttle/bsplus,10600,79,1550627039
bssm,"Efficient methods for Bayesian inference of state space models
via particle Markov chain Monte Carlo and parallel importance sampling type weighted
Markov chain Monte Carlo (Vihola, Helske, and Franks, 2017, <arXiv:1609.02541>).
Gaussian, Poisson, binomial, or negative binomial
observation densities and basic stochastic volatility models with Gaussian state
dynamics, as well as general non-linear Gaussian models and discretised diffusion models
are supported.",2018-11-22,Jouni Helske,NA,TRUE,https://github.com/helske/bssm,11849,8,1554476142
btergm,"Temporal Exponential Random Graph Models (TERGM) estimated by maximum pseudolikelihood with bootstrapped confidence intervals or Markov Chain Monte Carlo maximum likelihood. Goodness of fit assessment for ERGMs, TERGMs, and SAOMs. Micro-level interpretation of ERGMs and TERGMs.",2018-08-24,Philip Leifeld,http://github.com/leifeld/btergm,TRUE,https://github.com/leifeld/btergm,84770,6,1554558835
bucky,"Provides functions for various statistical techniques commonly used in the social sciences, including functions to compute clustered robust standard errors, combine results across multiply-imputed data sets, and simplify the addition of robust and clustered robust standard errors. The package was originally developed, in part, to assist porting of replication code from 'Stata' and attempts to replicate default options from 'Stata' where possible.",2018-10-29,Alexander Tahk,http://github.com/atahk/bucky,TRUE,https://github.com/atahk/bucky,5706,2,1547572771
buildmer,"Finds the largest possible regression model that will still converge
for various types of regression analyses (including mixed models and generalized
additive models) and then optionally performs stepwise elimination similar to the
forward and backward effect selection methods in SAS, based on the change in
log-likelihood, Akaike's Information Criterion, or the Bayesian Information Criterion.",2019-03-31,Cesko Voeten,NA,TRUE,https://github.com/cvoeten/buildmer,195,0,1554207798
burnr,Basic tools to analyze forest fire history data (e.g. FHX) in R.,2019-03-08,Steven Malevich,https://github.com/ltrr-arizona-edu/burnr/,TRUE,https://github.com/ltrr-arizona-edu/burnr,13846,6,1552074703
busdater,"Get a current financial year, start of current
month, End of current month, start of financial year and end of it.
Allow for offset from the date.",2019-01-30,Mick Mioduszewski,"https://mickmioduszewski.github.io/busdater/,
https://github.com/mickmioduszewski/busdater/",TRUE,https://github.com/mickmioduszewski/busdater,934,0,1548872760
BuyseTest,"Implementation of the Generalized Pairwise Comparisons (GPC).
GPC compare two groups of observations (intervention vs. control group)
regarding several prioritized endpoints.
The net benefit and win ratio statistics can then be estimated
and corresponding confidence intervals and p-values can be
estimated using resampling methods or the asymptotic U-statistic
theory. The software enables the use of thresholds of minimal
importance difference, stratification, and corrections to deal
with right-censored endpoints or missing values.",2019-01-16,Brice Ozenne (<https://orcid.org/0000-0001-9694-2956>),https://github.com/bozenne/BuyseTest,TRUE,https://github.com/bozenne/buysetest,11343,0,1554141419
BWStest,"Performs the 'Baumgartner-Weiss-Schindler' two-sample test of equal
probability distributions, <doi:10.2307/2533862>. Also performs
similar rank-based tests for equal probability distributions due to
Neuhauser <doi:10.1080/10485250108832874> and
Murakami <doi:10.1080/00949655.2010.551516>.",2018-10-18,Steven E. Pav (<https://orcid.org/0000-0002-4197-6195>),https://github.com/shabbychef/BWStest,TRUE,https://github.com/shabbychef/bwstest,19211,0,1539835005
bysykkel,"Functions to get, download, and read open data from each City Bike
website, and each City Bike API, in Norway that is made available under the
NLOD 2.0 <https://data.norge.no/nlod/en/2.0>. These functions speed up the
process of reading city bike data directly to R, and to download the data
to disk, so that the user can focus on data analysis. The data is
retrieved from the ""developer"" or ""open data"" pages of
Oslo City Bike <https://developer.oslobysykkel.no/>,
Oslo Winter Bike <https://oslovintersykkel.no/en/open-data>,
Bergen City Bike <https://bergenbysykkel.no/en/open-data>, and
Trondheim City Bike <https://trondheimbysykkel.no/en/open-data>.",2019-03-31,Iman Ghayoornia,http://github.com/PersianCatsLikeToMeow/bysykkel,TRUE,https://github.com/persiancatsliketomeow/bysykkel,184,0,1553448110
c14bazAAR,"Query different C14 date databases and apply basic data cleaning, merging and calibration steps.",2018-10-28,Clemens Schmid,https://github.com/ISAAKiel/c14bazAAR,TRUE,https://github.com/isaakiel/c14bazaar,1855,8,1540801236
c3,"Create interactive charts with the 'C3.js' <http://c3js.org/> charting library. All plot
types in 'C3.js' are available and include line, bar, scatter, and mixed geometry plots. Plot
annotations, labels and axis are highly adjustable. Interactive web based charts can be embedded
in R Markdown documents or Shiny web applications. ",2018-05-29,Matt Johnson,https://github.com/mrjoh3/c3,TRUE,https://github.com/mrjoh3/c3,3436,31,1544843486
C50,"C5.0 decision trees and rule-based models for pattern recognition that extend the work of Quinlan (1993, ISBN:1-55860-238-0).",2018-05-22,Max Kuhn,https://topepo.github.io/C5.0,TRUE,https://github.com/topepo/c5.0,362755,31,1527011871
caesar,"Encrypts and decrypts strings using either the Caesar cipher or a
pseudorandom number generation (using set.seed()) method.",2017-01-18,Jacob Kaplan,https://github.com/jacobkap/caesar,TRUE,https://github.com/jacobkap/caesar,5917,1,1545065848
caffsim,"Simulate plasma caffeine concentrations using population pharmacokinetic model described in Lee, Kim, Perera, McLachlan and Bae (2015) <doi:10.1007/s00431-015-2581-x>.",2017-08-28,Sungpil Han,https://github.com/asancpt/caffsim,TRUE,https://github.com/asancpt/caffsim,4853,2,1533625164
CAISEr,"Functions for performing experimental comparisons of algorithms
using adequate sample sizes for power and accuracy.",2018-07-24,Felipe Campelo,https://fcampelo.github.io/CAISEr/,TRUE,https://github.com/fcampelo/caiser,5255,1,1532402689
calibrar,"Automated parameter estimation for complex (ecological) models in R.
This package allows the parameter estimation or calibration of complex models,
including stochastic ones. It is a generic tool that can be used for fitting
any type of models, especially those with non-differentiable objective functions.
It supports multiple phases and constrained optimization.
It implements maximum likelihood estimation methods and automated construction
of the objective function from simulated model outputs.
See <http://roliveros-ramos.github.io/calibrar> for more details.",2016-02-17,Ricardo Oliveros-Ramos,http://roliveros-ramos.github.io/calibrar,TRUE,https://github.com/roliveros-ramos/calibrar,7708,3,1549327443
calibrator,"Performs Bayesian calibration of computer models as per
Kennedy and O'Hagan 2001. The package includes routines to find the
hyperparameters and parameters; see the help page for stage1() for a
worked example using the toy dataset. A tutorial is provided in the
calex.Rnw vignette; and a suite of especially simple one dimensional
examples appears in inst/doc/one.dim/.",2019-03-07,Robin K. S. Hankin (<https://orcid.org/0000-0001-5982-0415>),https://github.com/RobinHankin/calibrator.git,TRUE,https://github.com/robinhankin/calibrator,28498,0,1551900104
calpassapi,"Implements methods for querying data from CalPASS using its API.
CalPASS Plus. MMAP API V1. <https://mmap.calpassplus.org/docs/index.html>.",2018-08-27,Vinh Nguyen,https://github.com/vinhdizzo/calpassapi,TRUE,https://github.com/vinhdizzo/calpassapi,2573,0,1535394464
camsRad,"Copernicus Atmosphere Monitoring Service (CAMS) radiations service
provides time series of global, direct, and diffuse irradiations on horizontal
surface, and direct irradiation on normal plane for the actual weather
conditions as well as for clear-sky conditions.
The geographical coverage is the field-of-view of the Meteosat satellite,
roughly speaking Europe, Africa, Atlantic Ocean, Middle East. The time coverage
of data is from 2004-02-01 up to 2 days ago. Data are available with a time step
ranging from 15 min to 1 month. For license terms and to create an account,
please see <http://www.soda-pro.com/web-services/radiation/cams-radiation-service>. ",2016-11-30,Lukas Lundstrom,https://github.com/ropenscilabs/camsRad,TRUE,https://github.com/ropenscilabs/camsrad,6036,6,1532931982
camtrapR,"Management of and data extraction from camera trap photographs in wildlife studies. The package provides a workflow for storing and sorting camera trap photos, tabulates records of species and individuals, and creates detection/non-detection matrices for occupancy and spatial capture-recapture analyses with great flexibility. In addition, it provides simple mapping functions (number of species, number of independent species detections by station including GIS export) and can visualise species activity data.",2019-03-13,Juergen Niedballa,"https://github.com/jniedballa/camtrapR,
https://groups.google.com/forum/#!forum/camtrapr",TRUE,https://github.com/jniedballa/camtrapr,30792,0,1551542405
cancensus,"Integrated, convenient, and uniform access to Canadian
Census data and geography retrieved using the 'CensusMapper' API. This package produces analysis-ready
tidy data frames and spatial data in multiple formats, as well as convenience functions
for working with Census variables, variable hierarchies, and region selection. API
keys are freely available with free registration at <https://censusmapper.ca/api>.
Census data and boundary geometries are reproduced and distributed on an ""as
is"" basis with the permission of Statistics Canada (Statistics Canada 2006;
2011; 2016).",2018-11-20,Jens von Bergmann (API creator and maintainer),"https://github.com/mountainMath/cancensus,
https://mountainmath.github.io/cancensus/,
https://censusmapper.ca/api",TRUE,https://github.com/mountainmath/cancensus,5451,31,1542695017
Canopy,"A statistical framework and computational procedure for identifying
the sub-populations within a tumor, determining the mutation profiles of each
subpopulation, and inferring the tumor's phylogenetic history. The input are
variant allele frequencies (VAFs) of somatic single nucleotide alterations
(SNAs) along with allele-specific coverage ratios between the tumor and matched
normal sample for somatic copy number alterations (CNAs). These quantities can
be directly taken from the output of existing software. Canopy provides a
general mathematical framework for pooling data across samples and sites to
infer the underlying parameters. For SNAs that fall within CNA regions, Canopy
infers their temporal ordering and resolves their phase. When there are
multiple evolutionary configurations consistent with the data, Canopy outputs
all configurations along with their confidence assessment.",2017-12-18,Yuchao Jiang,https://github.com/yuchaojiang/Canopy,TRUE,https://github.com/yuchaojiang/canopy,10685,33,1539719674
canprot,"Datasets are collected here for differentially (up- and down-)
expressed proteins identified in proteomic studies of cancer and in cell
culture experiments. Tables of amino acid compositions of proteins are
used for calculations of chemical composition, projected into selected
basis species. Plotting functions are used to visualize the compositional
differences and thermodynamic potentials for proteomic transformations.",2019-02-26,Jeffrey Dick,http://github.com/jedick/canprot,TRUE,https://github.com/jedick/canprot,5767,2,1551095932
cansim,"Searches for, accesses, and retrieves new-format and old-format Statistics Canada data
tables, as well as individual vectors, as tidy data frames. This package deals with encoding issues, allows for
bilingual English or French language data retrieval, and bundles convenience functions
to make it easier to work with retrieved table data. Optional caching features are provided.",2019-01-07,Jens von Bergmann,"https://github.com/mountainMath/cansim,
https://mountainmath.github.io/cansim/",TRUE,https://github.com/mountainmath/cansim,1818,11,1548362054
canvasXpress,"Enables creation of visualizations using the CanvasXpress framework
in R. CanvasXpress is a standalone JavaScript library for reproducible research
with complete tracking of data and end-user modifications stored in a single
PNG image that can be played back. See <http://canvasxpress.org> for more
information.",2019-02-25,Connie Brett,https://github.com/neuhausi/canvasXpress.git,TRUE,https://github.com/neuhausi/canvasxpress,33559,213,1554494354
captr,"Get text from images of text using Captricity Optical Character
Recognition (OCR) API. Captricity allows you to get text from handwritten
forms --- think surveys --- and other structured paper documents. And it can
output data in form a delimited file keeping field information intact. For more
information, read <https://shreddr.captricity.com/developer/overview/>.",2017-04-15,Gaurav Sood,http://github.com/soodoku/captR,TRUE,https://github.com/soodoku/captr,11495,10,1523646559
caRamel,"Multi-objective optimizer initially developed for the calibration of hydrological models.
The algorithm is a hybrid of the MEAS algorithm (Efstratiadis and Koutsoyiannis (2005) <doi:10.13140/RG.2.2.32963.81446>) by using the directional search method based on the simplexes of the objective space
and the epsilon-NGSA-II algorithm with the method of classification of the parameter vectors archiving management by epsilon-dominance (Reed and Devireddy <doi:10.1142/9789812567796_0004>).",2018-03-05,Fabrice Zaoui,https://github.com/fzao/caRamel,TRUE,https://github.com/fzao/caramel,3328,0,1553847860
CARBayes,"Implements a class of univariate and multivariate spatial generalised linear mixed models for areal unit data, with inference in a Bayesian setting using Markov chain Monte Carlo (MCMC) simulation. The response variable can be binomial, Gaussian, multinomial, Poisson or zero-inflated Poisson (ZIP), and spatial autocorrelation is modelled by a set of random effects that are assigned a conditional autoregressive (CAR) prior distribution. A number of different models are available for univariate spatial data, including models with no random effects as well as random effects modelled by different types of CAR prior, including the BYM model (Besag et al. (1991) <doi:10.1007/BF00116466>), the Leroux model (Leroux et al. (2000) <doi:10.1007/978-1-4612-1284-3_4>) and the localised model (Lee et al. (2015) <doi:10.1002/env.2348>). Additionally, a multivariate CAR (MCAR) model for multivariate spatial data is available, as is a two-level hierarchical model for modelling data relating to individuals within areas. Full details are given in the vignette accompanying this package. The initial creation of this package was supported by the Economic and Social Research Council (ESRC) grant RES-000-22-4256, and on-going development has been supported by the Engineering and Physical Science Research Council (EPSRC) grant EP/J017442/1, ESRC grant ES/K006460/1, Innovate UK / Natural Environment Research Council (NERC) grant NE/N007352/1 and the TB Alliance. ",2018-12-06,Duncan Lee,http://github.com/duncanplee/CARBayes,TRUE,https://github.com/duncanplee/carbayes,92784,2,1544095345
CARBayesST,"Implements a class of spatio-temporal generalised linear mixed models for areal unit data, with inference in a Bayesian setting using Markov chain Monte Carlo (MCMC) simulation. The response variable can be binomial, Gaussian, or Poisson, but for some models only the binomial and Poisson data likelihoods are available. The spatio-temporal autocorrelation is modelled by random effects, which are assigned conditional autoregressive (CAR) style prior distributions. A number of different random effects structures are available, including Bernardinelli et al. (1995) <doi:10.1002/sim.4780142112>, Rushworth et al. (2014) <doi:10.1016/j.sste.2014.05.001> and Lee et al. (2016) <doi:10.1214/16-AOAS941>. Full details are given in the vignette accompanying this package. The creation of this package was supported by the Engineering and Physical Sciences Research Council (EPSRC) grant EP/J017442/1 and the Medical Research Council (MRC) grant MR/L022184/1.",2019-01-08,Duncan Lee,http://github.com/duncanplee/CARBayesST,TRUE,https://github.com/duncanplee/carbayesst,30725,2,1545385849
carbonate,Create beautiful images of source code using 'carbon.js'<https://carbon.now.sh/about>.,2019-02-13,Jonathan Sidi (<https://orcid.org/0000-0002-4222-1819>),https://github.com/yonicd/carbonate,TRUE,https://github.com/yonicd/carbonate,2923,95,1550020966
careless,"When taking online surveys, participants sometimes respond to items without regard to their content.
These types of responses, referred to as careless or insufficient effort responding, constitute significant problems for data quality, leading to distortions in data analysis and hypothesis testing, such as spurious correlations. The 'R' package 'careless' provides solutions designed to detect such careless / insufficient effort responses by allowing easy calculation of indices proposed in the literature. It currently supports the calculation of longstring, even-odd consistency, psychometric synonyms/antonyms, Mahalanobis distance, and intra-individual response variability (also termed inter-item standard deviation). For a review of these methods, see Curran (2016) <doi:10.1016/j.jesp.2015.07.006>.",2018-06-19,Richard Yentes,https://github.com/ryentes/careless/,TRUE,https://github.com/ryentes/careless,2647,3,1529414310
caret,"Misc functions for training and plotting classification and
regression models.",2019-03-26,Max Kuhn. Contributions from Jed Wing,https://github.com/topepo/caret/,TRUE,https://github.com/topepo/caret,3688679,1039,1554314296
carpenter,"Mainly used to build tables that are commonly presented for
bio-medical/health research, such as basic characteristic tables or
descriptive statistics.",2019-02-05,Luke Johnston (<https://orcid.org/0000-0003-4169-2616>),https://github.com/lwjohnst86/carpenter,TRUE,https://github.com/lwjohnst86/carpenter,7315,7,1549536660
carrier,"Sending functions to remote processes can be wasteful of
resources because they carry their environments with them. With
the carrier package, it is easy to create functions that are
isolated from their environment. These isolated functions, also
called crates, print at the console with their total size and can
be easily tested locally before being sent to a remote.",2018-10-16,Lionel Henry,https://github.com/r-lib/carrier,TRUE,https://github.com/r-lib/carrier,2116,22,1539263442
cartogram,Construct continuous and non-contiguous area cartograms.,2018-12-01,"Sebastian Jeworutzki
(<https://orcid.org/0000-0002-2671-5253>)",https://github.com/sjewo/cartogram,TRUE,https://github.com/sjewo/cartogram,57328,75,1543649419
cartography,"Create and integrate maps in your R workflow. This package helps to design cartographic representations such as proportional symbols, choropleth, typology, flows or discontinuities maps. It also offers several features that improve the graphic presentation of maps, for instance, map palettes, layout elements (scale, north arrow, title...), labels or legends. See Giraud and Lambert (2017) <doi:10.1007/978-3-319-57336-6_13>.",2019-02-07,Timothée Giraud,https://github.com/riatelab/cartography/,TRUE,https://github.com/riatelab/cartography,53358,241,1549631528
Cascade,"A modeling tool allowing gene selection, reverse engineering, and prediction in cascade networks. Jung, N., Bertrand, F., Bahram, S., Vallat, L., and Maumy-Bertrand, M. (2014) <doi:10.1093/bioinformatics/btt705>.",2019-02-18,Frederic Bertrand (<https://orcid.org/0000-0002-0837-8281>),"http://www-irma.u-strasbg.fr/~fbertran/,
https://github.com/fbertran/Cascade",TRUE,https://github.com/fbertran/cascade,764,1,1549746114
CascadeData,"These experimental expression data (5 leukemic 'CLL' B-lymphocyte of aggressive form from 'GSE39411', <doi:10.1073/pnas.1211130110>), after B-cell receptor stimulation, are used as examples by packages such as the 'Cascade' one, a modeling tool allowing gene selection, reverse engineering, and prediction in cascade networks. Jung, N., Bertrand, F., Bahram, S., Vallat, L., and Maumy-Bertrand, M. (2014) <doi:10.1093/bioinformatics/btt705>.",2019-02-07,Frederic Bertrand (<https://orcid.org/0000-0002-0837-8281>),"http://www-irma.u-strasbg.fr/~fbertran/,
https://github.com/fbertran/CascadeData",TRUE,https://github.com/fbertran/cascadedata,1026,1,1549746151
CaseBasedReasoning,"Given a large set of problems and their individual solutions case based reasoning seeks to solve a new problem by referring to the solution of that problem which is ""most similar"" to the new problem. Crucial in case based reasoning is the decision which problem ""most closely"" matches a given new problem. The basic idea is to define a family of distance functions and to use these distance functions as parameters of local averaging regression estimates of the final result. Then that distance function is chosen for which the resulting estimate is optimal with respect to a certain error measure used in regression estimation. The idea is based on: Dippon J. et al. (2002) <DOI:10.1016/S0167-9473(02)00058-0>. ",2018-06-12,Dr. Simon Mueller <simon.mueller@muon-stat.com>,NA,TRUE,https://github.com/sipemu/case-based-reasoning,2558,3,1535564974
casino,"Play casino games in the R console,
including poker, blackjack, and a slot machine.
Try to build your fortune before you succumb to the gambler's ruin!",2019-01-17,Anthony Pileggi,"https://anthonypileggi.github.io/casino,
https://github.com/anthonypileggi/casino",TRUE,https://github.com/anthonypileggi/casino,964,1,1549035587
CAST,"Supporting functionality to run 'caret' with spatial or spatial-temporal data. 'caret' is a frequently used package for model training and prediction using machine learning. This package includes functions to improve spatial-temporal modelling tasks using 'caret'. It prepares data for Leave-Location-Out and Leave-Time-Out cross-validation which are target-oriented validation strategies for spatial-temporal models. To decrease overfitting and improve model performances, the package implements a forward feature selection that selects suitable predictor variables in view to their contribution to the target-oriented performance.",2018-11-19,Hanna Meyer,https://github.com/environmentalinformatics-marburg/CAST,TRUE,https://github.com/environmentalinformatics-marburg/cast,8248,10,1549541952
cattonum,"Functions for dummy encoding, frequency encoding,
label encoding, leave-one-out encoding, mean encoding,
median encoding, and one-hot encoding.",2018-05-02,Bernie Gray,https://github.com/bfgray3/cattonum,TRUE,https://github.com/bfgray3/cattonum,4067,27,1547860075
cbar,"Detect contextual anomalies in time-series data with Bayesian data
analysis. It focuses on determining a normal range of target value, and
provides simple-to-use functions to abstract the outcome.",2017-10-24,Kim Seonghyun <shyeon.kim@scipi.net>,https://github.com/zedoul/cbar,TRUE,https://github.com/zedoul/cbar,6923,2,1540822257
CBDA,"Classification performed on Big Data. It uses concepts from compressive sensing, and implements ensemble predictor (i.e., 'SuperLearner') and knockoff filtering as the main machine learning and feature mining engines.",2018-04-16,Simeone Marino,https://github.com/SOCR/CBDA,TRUE,https://github.com/socr/cbda,3075,8,1552577453
cbsodataR,"The data and meta data from Statistics
Netherlands (www.cbs.nl) can be browsed and downloaded. The client uses
the open data API of Statistics Netherlands.",2019-02-21,Edwin de Jonge,https://github.com/edwindj/cbsodataR,TRUE,https://github.com/edwindj/cbsodatar,12714,9,1550603968
ccafs,"Client for Climate Change, Agriculture, and Food Security ('CCAFS')
General Circulation Models ('GCM') data. Data is stored in Amazon 'S3', from
which we provide functions to fetch data.",2017-02-24,Scott Chamberlain,https://github.com/ropensci/ccafs,TRUE,https://github.com/ropensci/ccafs,5968,9,1552069194
ccdrAlgorithm,"Implementation of the CCDr (Concave penalized Coordinate Descent with reparametrization) structure learning algorithm as described in Aragam and Zhou (2015) <http://www.jmlr.org/papers/v16/aragam15a.html>. This is a fast, score-based method for learning Bayesian networks that uses sparse regularization and block-cyclic coordinate descent.",2018-06-01,Bryon Aragam,https://github.com/itsrainingdata/ccdrAlgorithm,TRUE,https://github.com/itsrainingdata/ccdralgorithm,11076,4,1527860244
cdata,"Supplies higher-order coordinatized data specification and fluid transform operators that include pivot and anti-pivot as special cases.
The methodology is describe in 'Zumel', 2018, ""Fluid data reshaping with 'cdata'"", <http://winvector.github.io/FluidData/FluidDataReshapingWithCdata.html> , doi:10.5281/zenodo.1173299 .
This package introduces the idea of control table specification of data transforms (later also adapted from 'cdata' by 'tidyr').
Works on in-memory data or on remote data using 'rquery' and 'SQL' database interfaces.",2019-03-30,John Mount,"https://github.com/WinVector/cdata/,
https://winvector.github.io/cdata/",TRUE,https://github.com/winvector/cdata,40867,24,1554227065
cdcsis,"Conditional distance correlation <doi:10.1080/01621459.2014.993081> is a novel conditional dependence measurement of two multivariate random variables given a confounding variable. This package provides conditional distance correlation, performs the conditional distance correlation sure independence screening procedure for ultrahigh dimensional data <doi:10.5705/ss.202014.0117>, and conducts conditional distance covariance test for conditional independence assumption of two multivariate variable.",2019-01-09,Wenhao Hu,https://github.com/Mamba413/cdcsis,TRUE,https://github.com/mamba413/cdcsis,12989,0,1547361533
CDM,"
Functions for cognitive diagnosis modeling and multidimensional item response modeling
for dichotomous and polytomous item responses. This package enables the estimation of
the DINA and DINO model (Junker & Sijtsma, 2001, <doi:10.1177/01466210122032064>),
the multiple group (polytomous) GDINA model (de la Torre, 2011,
<doi:10.1007/s11336-011-9207-7>), the multiple choice DINA model (de la Torre, 2009,
<doi:10.1177/0146621608320523>), the general diagnostic model (GDM; von Davier, 2008,
<doi:10.1348/000711007X193957>), the structured latent class model (SLCA; Formann, 1992,
<doi:10.1080/01621459.1992.10475229>) and regularized latent class analysis
(Chen, Li, Liu, & Ying, 2017, <doi:10.1007/s11336-016-9545-6>).
See George, Robitzsch, Kiefer, Gross, and Uenlue (2017) <doi:10.18637/jss.v074.i02>
for further details on estimation and the package structure.
For tutorials on how to use the CDM package see
George and Robitzsch (2015, <doi:10.20982/tqmp.11.3.p189>) as well as
Ravand and Robitzsch (2015).",2019-03-18,Alexander Robitzsch,"https://github.com/alexanderrobitzsch/CDM,
https://sites.google.com/site/alexanderrobitzsch2/software",TRUE,https://github.com/alexanderrobitzsch/cdm,174580,7,1552988315
CEC,"CEC divides data into Gaussian type clusters. The implementation allows the simultaneous use of various type Gaussian mixture models, performs the reduction of unnecessary clusters and it's able to discover new groups. Based on Spurek, P. and Tabor, J. (2014) <doi:10.1016/j.patcog.2014.03.006>.",2018-07-26,Konrad Kamieniecki,https://github.com/azureblue/cec,TRUE,https://github.com/azureblue/cec,16792,6,1532636283
ceg,"Create and learn Chain Event Graph (CEG) models using a Bayesian
framework. It provides us with a Hierarchical Agglomerative algorithm to
search the CEG model space.
The package also includes several facilities for visualisations of the
objects associated with a CEG. The CEG class can represent a range of
relational data types, and supports arbitrary vertex, edge and graph
attributes. A Chain Event Graph is a tree-based graphical model that
provides a powerful graphical interface through which domain experts can
easily translate a process into sequences of observed events using plain
language. CEGs have been a useful class of graphical model especially to
capture context-specific conditional independences. References: Collazo R,
Gorgen C, Smith J. Chain Event Graph. CRC Press, ISBN 9781498729604, 2018
(forthcoming); and Barday LM, Collazo RA, Smith JQ, Thwaites PA, Nicholson AE.
The Dynamic Chain Event Graph. Electronic Journal of Statistics, 9 (2) 2130-2169
<doi:10.1214/15-EJS1068>.",2017-11-27,Pier Taranti,https://github.com/ptaranti/ceg,TRUE,https://github.com/ptaranti/ceg,3610,1,1527209112
cellranger,"Helper functions to work with spreadsheets and the ""A1:D10"" style
of cell range specification.",2016-07-27,Jennifer Bryan,https://github.com/rsheets/cellranger,TRUE,https://github.com/rsheets/cellranger,4189154,30,1523141984
censusapi,"A wrapper for the U.S. Census Bureau APIs that returns data frames of
Census data and metadata. Available datasets include the
Decennial Census, American Community Survey, Small Area Health Insurance Estimates,
Small Area Income and Poverty Estimates, and Population Estimates and Projections.
See <https://www.census.gov/data/developers/data-sets.html> for more information.",2018-08-19,Hannah Recht,https://github.com/hrecht/censusapi,TRUE,https://github.com/hrecht/censusapi,19103,64,1552832136
CePa,"Use pathway topology information to assign weight to
pathway nodes.",2018-06-04,Zuguang Gu,https://github.com/jokergoo/CePa,TRUE,https://github.com/jokergoo/cepa,20003,0,1528125384
cetcolor,"Collection of perceptually uniform colour maps made by Peter Kovesi
(2015) ""Good Colour Maps: How to Design Them"" <arXiv:1509.03700>
at the Centre for Exploration Targeting (CET).",2018-07-10,James Balamuta,"https://github.com/coatless/cetcolor,
http://thecoatlessprofessor.com/projects/cetcolor/,
http://peterkovesi.com/projects/colourmaps/",TRUE,https://github.com/coatless/cetcolor,4784,19,1531235493
ceterisParibus,"Ceteris Paribus Profiles (What-If Plots) are designed to present model
responses around selected points in a feature space.
For example around a single prediction for an interesting observation.
Plots are designed to work in a model-agnostic fashion, they are working
for any predictive Machine Learning model and allow for model comparisons.
Ceteris Paribus Plots supplement the Break Down Plots from 'breakDown' package.",2019-01-29,Przemyslaw Biecek (<https://orcid.org/0000-0001-8423-1823>),https://pbiecek.github.io/ceterisParibus/,TRUE,https://github.com/pbiecek/ceterisparibus,4561,31,1548752078
cghRA,"Provides functions to import data from Agilent CGH arrays and process them according to the cghRA workflow. Implements several algorithms such as WACA, STEPS and cnvScore and an interactive graphical interface.",2017-03-03,Sylvain Mareschal,http://www.ovsa.fr/cghRA,TRUE,https://github.com/maressyl/r.cghra,5207,0,1549294392
CGPfunctions,Miscellaneous functions useful for teaching statistics as well as actually practicing the art. They typically are not “new” methods but rather wrappers around either base R or other packages.,2019-03-22,Chuck Powell,https://github.com/ibecav/CGPfunctions,TRUE,https://github.com/ibecav/cgpfunctions,6944,9,1554319182
cgraph,"Allows to create, evaluate, and differentiate computational graphs in R. A computational graph is a graph representation of a multivariate function decomposed by its (elementary) operations. Nodes in the graph represent arrays while edges represent dependencies among the arrays. An advantage of expressing a function as a computational graph is that this enables to differentiate the function by automatic differentiation. The 'cgraph' package supports various operations including basic arithmetic, trigonometry operations, and linear algebra operations. It differentiates computational graphs by reverse automatic differentiation. The flexible architecture of the package makes it applicable to solve a variety of problems including local sensitivity analysis, gradient-based optimization, and machine learning.",2019-04-06,Ron Triepels,https://cgraph.org/,TRUE,https://github.com/triepels/cgraph,10834,6,1554539471
chandwich,"Performs adjustments of a user-supplied independence loglikelihood
function using a robust sandwich estimator of the parameter covariance
matrix, based on the methodology in Chandler and Bate (2007)
<doi:10.1093/biomet/asm015>. This can be used for cluster correlated data
when interest lies in the parameters of the marginal distributions or for
performing inferences that are robust to certain types of model
misspecification. Functions for profiling the adjusted loglikelihoods are
also provided, as are functions for calculating and plotting confidence
intervals, for single model parameters, and confidence regions, for pairs
of model parameters. Nested models can be compared using an adjusted
likelihood ratio test.",2018-11-28,Paul J. Northrop,http://github.com/paulnorthrop/chandwich,TRUE,https://github.com/paulnorthrop/chandwich,4168,0,1552415763
changepoint,"Implements various mainstream and specialised changepoint methods for finding single and multiple changepoints within data. Many popular non-parametric and frequentist methods are included. The cpt.mean(), cpt.var(), cpt.meanvar() functions should be your first point of call.",2016-10-04,Rebecca Killick,https://github.com/rkillick/changepoint/,TRUE,https://github.com/rkillick/changepoint,107003,59,1541755949
changer,Changing the name of an existing R package is annoying but common task especially in the early stages of package development. This package (mostly) automates this task.,2018-10-21,Jouni Helske (<https://orcid.org/0000-0001-7130-793X>),https://github.com/helske/changer,TRUE,https://github.com/helske/changer,2160,5,1540114774
charlatan,"Make fake data, supporting addresses, person names, dates,
times, colors, coordinates, currencies, digital object identifiers
('DOIs'), jobs, phone numbers, 'DNA' sequences, doubles and integers
from distributions and within a range.",2018-10-18,Scott Chamberlain (<https://orcid.org/0000-0003-1444-9135>),https://github.com/ropensci/charlatan,TRUE,https://github.com/ropensci/charlatan,7930,116,1550174033
chartql,"Provides a very simple syntax for the user to generate custom plot(s) without having to remember complicated 'ggplot2' syntax. The 'chartql' package uses 'ggplot2' and manages all the syntax complexities internally. As an example, to generate a bar chart of company sales faceted by product category further faceted by season of the year, we simply write: ""CHART bar X category, season Y sales"".",2019-04-04,Rohail Syed,https://github.com/rmsyed/chartql,TRUE,https://github.com/rmsyed/chartql,97,5,1554086331
chebpol,"Contains methods for creating multivariate/multidimensional
interpolations of functions on a hypercube. If available through fftw3, the DCT-II/FFT
is used to compute coefficients for a Chebyshev interpolation.
Other interpolation methods for arbitrary Cartesian grids are also provided, a piecewise multilinear,
and the Floater-Hormann barycenter method. For scattered data polyharmonic splines with a linear term
is provided. The time-critical parts are written in C for speed. All interpolants are parallelized if
used to evaluate more than one point.",2019-03-11,Simen Gaure (<https://orcid.org/0000-0001-7251-8747>),https://github.com/sgaure/chebpol,TRUE,https://github.com/sgaure/chebpol,31264,1,1552306687
checkLuhn,"Confirms if the number is Luhn compliant.
Can check if credit card, IMEI number or any other Luhn based number is correct.
For more info see: <https://en.wikipedia.org/wiki/Luhn_algorithm>.",2018-09-24,Adam Deacon,https://github.com/adamjdeacon/checkLuhn,TRUE,https://github.com/adamjdeacon/checkluhn,4400,1,1538411496
checkmate,"Tests and assertions to perform frequent argument checks. A
substantial part of the package was written in C to minimize any worries
about execution time overhead.",2019-01-15,Michel Lang (<https://orcid.org/0000-0001-9754-0393>),https://github.com/mllg/checkmate,TRUE,https://github.com/mllg/checkmate,3204871,108,1550186550
checkpoint,"The goal of checkpoint is to solve the problem of package
reproducibility in R. Specifically, checkpoint allows you to install packages
as they existed on CRAN on a specific snapshot date as if you had a CRAN time
machine. To achieve reproducibility, the checkpoint() function installs the
packages required or called by your project and scripts to a local library
exactly as they existed at the specified point in time. Only those packages
are available to your project, thereby avoiding any package updates that came
later and may have altered your results. In this way, anyone using checkpoint's
checkpoint() can ensure the reproducibility of your scripts or projects at any
time. To create the snapshot archives, once a day (at midnight UTC) Microsoft
refreshes the Austria CRAN mirror on the ""Microsoft R Archived Network""
server (<https://mran.microsoft.com/>). Immediately after completion
of the rsync mirror process, the process takes a snapshot, thus creating the
archive. Snapshot archives exist starting from 2014-09-17.",2018-09-10,Microsoft Corporation,https://github.com/RevolutionAnalytics/checkpoint,TRUE,https://github.com/revolutionanalytics/checkpoint,66197,114,1523311852
checkr,"Expressive, assertive, pipe-friendly functions
to check the properties of common R objects.
In the case of failure the functions issue informative error messages.",2018-11-01,Joe Thorley (<https://orcid.org/0000-0002-7683-4592>),https://github.com/poissonconsulting/checkr,TRUE,https://github.com/poissonconsulting/checkr,9791,8,1551991051
cheddar,"Provides a flexible, extendable representation of an ecological community and a range of functions for analysis and visualisation, focusing on food web, body mass and numerical abundance data. Allows inter-web comparisons such as examining changes in community structure over environmental, temporal or spatial gradients.",2018-06-10,Lawrence Hudson with contributions from Dan Reuman and Rob Emerson,https://github.com/quicklizard99/cheddar/,TRUE,https://github.com/quicklizard99/cheddar,23955,12,1528581121
cheese,Contains flexible and intuitive functions to assist in carrying out tasks in a statistical analysis and to get from the raw data to presentation-ready results. A user-friendly interface is used in specialized functions that are aimed at common tasks such as building a univariate descriptive table for variables in a dataset. These high-level functions are built on a collection of low(er)-level functions that may be useful for aspects of a custom statistical analysis workflow or for general programming use. ,2019-04-01,Alex Zajichek,https://github.com/zajichek/cheese,TRUE,https://github.com/zajichek/cheese,317,2,1554459367
chemCal,"Simple functions for plotting linear
calibration functions and estimating standard errors for measurements
according to the Handbook of Chemometrics and Qualimetrics: Part A
by Massart et al. There are also functions estimating the limit
of detection (LOD) and limit of quantification (LOQ).
The functions work on model objects from - optionally weighted - linear
regression (lm) or robust linear regression ('rlm' from the 'MASS' package).",2018-07-17,Johannes Ranke,"https://pkgdown.jrwb.de/chemCal,
https://cgit.jrwb.de/chemCal/about",TRUE,https://github.com/jranke/chemcal,24455,1,1550761405
chemmodlab,"Contains a set of methods for fitting models and methods for
validating the resulting models. The statistical methodologies comprise
a comprehensive collection of approaches whose validity and utility have
been accepted by experts in the Cheminformatics field. As promising new
methodologies emerge from the statistical and data-mining communities, they
will be incorporated into the laboratory. These methods are aimed at discovering
quantitative structure-activity relationships (QSARs). However, the user can
directly input their own choices of descriptors and responses, so the capability
for comparing models is effectively unlimited.",2017-04-21,Jeremy Ash,https://github.com/jrash/ChemModLab,TRUE,https://github.com/jrash/chemmodlab,4696,6,1543532467
ChemometricsWithR,"Functions and scripts used in the book ""Chemometrics with R - Multivariate Data Analysis in the Natural Sciences and Life Sciences"" by Ron Wehrens, Springer (2011). Data used in the package are available from github.",2019-01-07,Ron Wehrens,https://github.com/rwehrens/CWR,TRUE,https://github.com/rwehrens/cwr,34150,4,1546850432
ChemoSpec,"A collection of functions for top-down exploratory data analysis
of spectral data including nuclear magnetic resonance (NMR), infrared (IR),
Raman, X-ray fluorescence (XRF) and other similar types of spectroscopy.
Includes functions for plotting and inspecting spectra, peak alignment,
hierarchical cluster analysis (HCA), principal components analysis (PCA) and
model-based clustering. Robust methods appropriate for this type of
high-dimensional data are available. ChemoSpec is designed for structured
experiments, such as metabolomics investigations, where the samples fall into
treatment and control groups. Graphical output is formatted consistently for
publication quality plots. ChemoSpec is intended to be very user friendly and
to help you get usable results quickly. A vignette covering typical operations
is available.",2019-03-01,Bryan A. Hanson (ORCID 0000-0003-3536-8246),https://bryanhanson.github.io/ChemoSpec/,TRUE,https://github.com/bryanhanson/chemospec,49726,25,1551558858
ChemoSpec2D,"A collection of functions for exploratory chemometrics of 2D spectroscopic data sets such as COSY (correlated spectroscopy) and HSQC (heteronuclear single quantum coherence) 2D NMR (nuclear magnetic resonance) spectra. 'ChemoSpec2D' deploys methods aimed primarily at classification of samples and the identification of spectral features which are important in distinguishing samples from each other. Each 2D spectrum (a matrix) is treated as the unit of observation, and thus the physical sample in the spectrometer corresponds to the sample from a statistical perspective. In addition to chemometric tools, a few tools are provided for plotting 2D spectra, but these are not intended to replace the functionality typically available on the spectrometer. 'ChemoSpec2D' takes many of its cues from 'ChemoSpec' and tries to create consistent graphical output and to be very user friendly.",2019-03-01,Bryan A. Hanson (ORCID 0000-0003-3536-8246),https://github.com/bryanhanson/ChemoSpec2D,TRUE,https://github.com/bryanhanson/chemospec2d,1321,0,1551495598
ChemoSpecUtils,Functions supporting the common needs of packages 'ChemoSpec' and 'ChemoSpec2D'.,2019-03-01,Bryan A. Hanson (ORCID 0000-0003-3536-8246),https://github.com/bryanhanson/ChemoSpecUtils,TRUE,https://github.com/bryanhanson/chemospecutils,5395,0,1551449859
childesr,"Tools for connecting to 'CHILDES', an open repository for
transcripts of parent-child interaction. For more information on the
underlying data, see <http://childes-db.stanford.edu>.",2018-05-18,Mika Braginsky,https://github.com/langcog/childesr,TRUE,https://github.com/langcog/childesr,2685,7,1526402778
childsds,"Calculation of standard deviation scores and percentiles adduced from different
growth standards (WHO, UK, Germany, Italy, China, etc). Therefore, the calculation of SDS-values
for different measures like BMI, weight, height, head circumference, different
ratios, etc. are easy to carry out. Also, references for laboratory values in
children and adults are available, e.g., serum lipids, iron-related blood parameters, IGF, liver enzymes. In the
new version, there are also functions combining the lms() function from package 'gamlss' with
resampling methods for using with repeated measurements and family dependencies. A searchable list
of items can be found here: <https://github.com/mvogel78/childsds/wiki>.",2019-03-26,Mandy Vogel,NA,TRUE,https://github.com/mvogel78/childsds,22606,3,1528614331
chipPCR,"A collection of functions to pre-process amplification curve data from polymerase chain reaction (PCR) or isothermal amplification reactions. Contains functions to normalize and baseline amplification curves, to detect both the start and end of an amplification reaction, several smoothers (e.g., LOWESS, moving average, cubic splines, Savitzky-Golay), a function to detect false positive amplification reactions and a function to determine the amplification efficiency. Quantification point (Cq) methods include the first (FDM) and second approximate derivative maximum (SDM) methods (calculated by a 5-point-stencil) and the cycle threshold method. Data sets of experimental nucleic acid amplification systems (VideoScan HCU, capillary convective PCR (ccPCR)) and commercial systems are included. Amplification curves were generated by helicase dependent amplification (HDA), ccPCR or PCR. As detection system intercalating dyes (EvaGreen, SYBR Green) and hydrolysis probes (TaqMan) were used. ",2015-04-10,Stefan Roediger,https://github.com/michbur/chipPCR,TRUE,https://github.com/michbur/chippcr,18408,4,1538033561
CHMM,"An exact and a variational inference for
coupled Hidden Markov Models applied to the joint detection of copy number variations.",2017-09-29,Julie Aubert,http://github.com/julieaubert/CHMM,TRUE,https://github.com/julieaubert/chmm,5766,0,1544700308
cholera,"Amends errors, augments data and aids analysis of John Snow's map
of the 1854 London cholera outbreak.",2019-03-08,Peter Li,https://github.com/lindbrook/cholera,TRUE,https://github.com/lindbrook/cholera,7949,101,1554396513
CholWishart,"Sampling from the Cholesky factorization of a Wishart random
variable, sampling from the inverse Wishart distribution, sampling from
the Cholesky factorization of an inverse Wishart random variable, sampling
from the pseudo Wishart distribution, sampling from the generalized
inverse Wishart distribution, computing densities for the Wishart
and inverse Wishart distributions, and computing the multivariate gamma
and digamma functions.",2019-01-25,Geoffrey Thompson,https://github.com/gzt/CholWishart,TRUE,https://github.com/gzt/cholwishart,5943,0,1548528946
chorrrds,"Extracts music chords from the 'CifraClub' website <https://www.cifraclub.com.br/>.
The package also has functions for cleaning the extracted data and
feature extraction. ",2019-01-28,Bruna Wundervald,https://github.com/r-music/chorrrds,TRUE,https://github.com/r-music/chorrrds,5080,53,1553471072
chromer,"A programmatic interface to the Chromosome Counts Database
(http://ccdb.tau.ac.il/). This package is part of the rOpenSci suite
(http://ropensci.org)",2015-01-13,Matthew Pennell,http://www.github.com/ropensci/chromer,TRUE,https://github.com/ropensci/chromer,12240,3,1554162536
chunked,"Text data can be processed chunkwise using 'dplyr' commands. These
are recorded and executed per data chunk, so large files can be processed with
limited memory using the 'LaF' package.",2017-07-01,Edwin de Jonge,https://github.com/edwindj/chunked,TRUE,https://github.com/edwindj/chunked,11764,136,1527693315
cimir,"Connect to the California Irrigation Management
Information System (CIMIS) Web API. See the CIMIS main page
<https://cimis.water.ca.gov/> and web API documentation
<https://et.water.ca.gov> for more information.",2019-03-14,Michael Koohafkan,https://github.com/mkoohafkan/cimir,TRUE,https://github.com/mkoohafkan/cimir,1025,2,1553317433
CIplot,"Plot confidence interval from the objects of statistical tests such as
t.test(), var.test(), cor.test(), prop.test() and fisher.test() ('htest' class),
Tukey test [TukeyHSD()], Dunnett test [glht() in 'multcomp' package],
logistic regression [glm()], and Tukey or Games-Howell test [posthocTGH() in
'userfriendlyscience' package].
Users are able to set the styles of lines and points.
This package contains the function to calculate odds ratios and their confidence
intervals from the result of logistic regression.",2017-08-14,Toshiaki Ara,https://github.com/toshi-ara/CIplot,TRUE,https://github.com/toshi-ara/ciplot,4576,0,1536294994
circglmbayes,"Perform a Bayesian analysis of a circular outcome General Linear
Model (GLM), which allows regressing a circular outcome on linear and
categorical predictors. Posterior samples are obtained by means of an MCMC
algorithm written in 'C++' through 'Rcpp'. Estimation and credible intervals
are provided, as well as hypothesis testing through Bayes Factors.
See Mulder and Klugkist (2017) <doi:10.1016/j.jmp.2017.07.001>.",2018-03-09,Kees Mulder,https://github.com/keesmulder/circglmbayes,TRUE,https://github.com/keesmulder/circglmbayes,3075,1,1536826062
circlize,"Circular layout is an efficient way for the visualization of huge
amounts of information. Here this package provides an implementation
of circular layout generation in R as well as an enhancement of available
software. The flexibility of the package is based on the usage of low-level
graphics functions such that self-defined high-level graphics can be easily
implemented by users for specific purposes. Together with the seamless
connection between the powerful computational and visual environment in R,
it gives users more convenience and freedom to design figures for
better understanding complex patterns behind multiple dimensional data.",2019-04-03,Zuguang Gu,"https://github.com/jokergoo/circlize,
http://jokergoo.github.io/circlize_book/book/",TRUE,https://github.com/jokergoo/circlize,340596,350,1550580132
circumplex,"Tools for analyzing and visualizing circular data, including
scoring functions for relevant instruments and a generalization of the
bootstrapped structural summary method from Zimmermann & Wright (2017)
<doi:10.1177/1073191115621795> and functions for creating publication-ready
tables and figures from the results. Future versions will include tools for
circular fit and reliability analyses, as well as visualization enhancements.",2018-11-29,Jeffrey Girard (<https://orcid.org/0000-0002-7359-3746>),https://github.com/jmgirard/circumplex,TRUE,https://github.com/jmgirard/circumplex,6081,4,1543508401
cIRT,"Jointly model the accuracy of cognitive responses and item choices
within a bayesian hierarchical framework as described by Culpepper and
Balamuta (2015) <doi:10.1007/s11336-015-9484-7>. In addition, the package
contains the datasets used within the analysis of the paper.",2019-01-24,Steven Andrew Culpepper,https://github.com/tmsalab/cIRT,TRUE,https://github.com/tmsalab/cirt,11738,3,1548214573
CITAN,"Supports quantitative
research in scientometrics and bibliometrics. Provides
various tools for preprocessing bibliographic
data retrieved, e.g., from Elsevier's SciVerse Scopus,
computing bibliometric impact of individuals,
or modeling many phenomena encountered in the social sciences.",2015-12-13,Marek Gagolewski,NA,TRUE,https://github.com/rexamine/citan,18087,6,1552475543
ciTools,"Functions to append confidence intervals, prediction intervals,
and other quantities of interest to data frames. All appended quantities
are for the response variable, after conditioning on the model and covariates.
This package has a data frame first syntax that allows for easy piping.
Currently supported models include (log-) linear, (log-) linear mixed,
generalized linear models, generalized linear mixed models, and
accelerated failure time models.",2019-01-08,John Haman,https://github.com/jthaman/ciTools,TRUE,https://github.com/jthaman/citools,13818,92,1546977080
citr,"Functions and an 'RStudio' add-in that search a 'Bib(La)TeX'-file to create and
insert formatted Markdown citations into the current document.",2018-12-18,Frederik Aust (<https://orcid.org/0000-0003-4900-788X>),https://github.com/crsh/citr,TRUE,https://github.com/crsh/citr,18502,194,1554204949
civis,"A convenient interface for making
requests directly to the 'Civis data science API' <https://www.civisanalytics.com/platform/>.",2019-02-12,Patrick Miller,https://github.com/civisanalytics/civis-r,TRUE,https://github.com/civisanalytics/civis-r,69273,10,1552425925
ckanr,"Client for 'CKAN' 'API' (http://ckan.org/). Includes interface
to 'CKAN' 'APIs' for search, list, show for packages, organizations, and
resources. In addition, provides an interface to the 'datastore' 'API'.",2015-10-22,Scott Chamberlain,https://github.com/ropensci/ckanr,TRUE,https://github.com/ropensci/ckanr,17405,61,1541094121
classInt,Selected commonly used methods for choosing univariate class intervals for mapping or other graphics purposes.,2018-12-18,Roger Bivand (<https://orcid.org/0000-0003-2392-6140>),https://github.com/r-spatial/classInt/,TRUE,https://github.com/r-spatial/classint,1087517,14,1553540177
classyfireR,Access to the ClassyFire RESTful API <http://classyfire.wishartlab.com>. Retrieve existing entity classifications and submit new entities for classification. ,2019-02-25,Tom Wilson,https://github.com/wilsontom/classyfireR,TRUE,https://github.com/wilsontom/classyfirer,2979,1,1551813265
cld2,"Bindings to Google's C++ library Compact Language Detector 2
(see <https://github.com/cld2owners/cld2#readme> for more information). Probabilistically
detects over 80 languages in plain text or HTML. For mixed-language input it returns the
top three detected languages and their approximate proportion of the total classified
text bytes (e.g. 80% English and 20% French out of 1000 bytes). There is also a 'cld3'
package on CRAN which uses a neural network model instead.",2018-05-11,Jeroen Ooms (<https://orcid.org/0000-0002-4035-0289>),"https://github.com/ropensci/cld2 (devel)
https://github.com/cld2owners/cld2 (upstream)",TRUE,https://github.com/ropensci/cld2,9997,29,1534951037
cld3,"Google's Compact Language Detector 3 is a neural network model for language
identification and the successor of 'cld2' (available from CRAN). The algorithm is still
experimental and takes a novel approach to language detection with different properties
and outcomes. It can be useful to combine this with the Bayesian classifier results
from 'cld2'. See <https://github.com/google/cld3#readme> for more information.",2018-06-28,Jeroen Ooms (<https://orcid.org/0000-0002-4035-0289>),"https://github.com/ropensci/cld3 (devel)
https://github.com/google/cld3 (upstream)",TRUE,https://github.com/ropensci/cld3,7958,13,1551005858
cleandata,"Functions to work with data frames to prepare data for further analysis.
The functions for imputation, encoding, partitioning, and other manipulation can produce log files to keep track of process.",2018-12-01,Sherry Zhao,https://github.com/sherrisherry/cleandata,TRUE,https://github.com/sherrisherry/cleandata,3414,3,1543721988
cleanEHR,"An electronic health care record (EHR) data cleaning and processing
platform. It focus on heterogeneous high resolution longitudinal data. It works with
Critical Care Health Informatics Collaborative (CCHIC) dataset. It is
created to address various data reliability and accessibility problems of
EHRs as such. ",2017-12-16,Sinan Shi,"https://github.com/CC-HIC/cleanEHR, http://www.hic.nihr.ac.uk",TRUE,https://github.com/cc-hic/cleanehr,6256,34,1536153213
cleanNLP,"Provides a set of fast tools for converting a textual corpus into a set of normalized
tables. Users may make use of the 'udpipe' back end with no external dependencies, a Python back
end with 'spaCy' <https://spacy.io> or the Java back end 'CoreNLP'
<http://stanfordnlp.github.io/CoreNLP/>. Exposed annotation tasks include
tokenization, part of speech tagging, named entity recognition, entity linking, sentiment
analysis, dependency parsing, coreference resolution, and word embeddings. Summary
statistics regarding token unigram, part of speech tag, and dependency type frequencies
are also included to assist with analyses.",2018-11-18,Taylor B. Arnold,https://statsmaths.github.io/cleanNLP/,TRUE,https://github.com/statsmaths/cleannlp,15555,120,1551190235
clifro,"CliFlo is a web portal to the New Zealand National Climate
Database and provides public access (via subscription) to around 6,500
various climate stations (see <https://cliflo.niwa.co.nz/> for more
information). Collating and manipulating data from CliFlo
(hence clifro) and importing into R for further analysis, exploration and
visualisation is now straightforward and coherent. The user is required to
have an internet connection, and a current CliFlo subscription (free) if
data from stations, other than the public Reefton electronic weather
station, is sought.",2019-03-20,Blake Seers (<https://orcid.org/0000-0001-6841-4312>),https://github.com/ropensci/clifro,TRUE,https://github.com/ropensci/clifro,23304,17,1553054842
climdex.pcic,"PCIC's implementation of Climdex routines for computation of
extreme climate indices.",2019-01-16,"David Bronaugh <bronaugh@uvic.ca> for the Pacific Climate Impacts
Consortium",https://www.r-project.org,TRUE,https://github.com/pacificclimate/climdex.pcic,30114,5,1533068498
ClimDown,"A suite of routines for downscaling coarse scale global
climate model (GCM) output to a fine spatial resolution. Includes
Bias-Corrected Spatial Downscaling (BCDS), Constructed Analogues
(CA), Climate Imprint (CI), and Bias Correction/Constructed
Analogues with Quantile mapping reordering (BCCAQ). Developed by
the the Pacific Climate Impacts Consortium (PCIC), Victoria,
British Columbia, Canada.",2016-12-02,James Hiebert,https://www.r-project.org,TRUE,https://github.com/pacificclimate/climdown,6458,19,1553527981
climwin,"Contains functions to detect and visualise periods of climate
sensitivity (climate windows) for a given biological response.",2017-11-10,Liam D. Bailey and Martijn van de Pol,https://github.com/LiamDBailey/climwin,TRUE,https://github.com/liamdbailey/climwin,16968,4,1550659127
ClinReport,"It enables to create easily formatted statistical tables in 'Microsoft Word' documents in pretty formats according to 'clinical standards'. It can be used also outside the scope of clinical trials, for any statistical reporting in 'Word'. Descriptive tables for quantitative statistics (mean, median, max etc..) and/or qualitative statistics (frequencies and percentages) are available and formatted tables of Least Square Means of Linear Models, Linear Mixed Models and Generalized Linear Mixed Models coming from emmeans() function are also available. The package works with 'officer' and 'flextable' packages to export the outputs into 'Microsoft Word' documents. ",2019-04-02,Jean-Francois Collin,https://jfrancoiscollin.github.io/ClinReport,TRUE,https://github.com/jfrancoiscollin/clinreport,1203,1,1554214003
clipr,"Simple utility functions to read from and write to
the Windows, OS X, and X11 clipboards.",2019-01-11,Matthew Lincoln (<https://orcid.org/0000-0002-4387-3384>),https://github.com/mdlincoln/clipr,TRUE,https://github.com/mdlincoln/clipr,2890283,81,1547673185
cliqueMS,"Annotates data from liquid chromatography coupled to mass spectrometry (LC/MS) metabolomics experiments. Based on a network algorithm (O.Senan, A. Aguilar- Mogas, M. Navarro, O. Yanes, R.Guimerà and M. Sales-Pardo, Metabolomics Conference (2016), Dublin), 'CliqueMS' builds a weighted similarity network where nodes are features and edges are weighted according to the similarity of this features. Then it searches for the most plausible division of the similarity network into cliques (fully connected components). Finally it annotates metabolites within each clique, obtaining for each annotated metabolite the neutral mass and their features, corresponding to isotopes, ionization adducts and fragmentation adducts of that metabolite.",2019-01-30,Oriol Senan Campos,https://github.com/osenan/cliqueMS,TRUE,https://github.com/osenan/cliquems,4101,5,1548845313
clisymbols,"A small subset of Unicode symbols, that are useful
when building command line applications. They fall back to
alternatives on terminals that do not support Unicode.
Many symbols were taken from the 'figures' 'npm' package
(see <https://github.com/sindresorhus/figures>).",2017-05-21,Gábor Csárdi,https://github.com/gaborcsardi/clisymbols,TRUE,https://github.com/gaborcsardi/clisymbols,1063719,44,1553632947
CLME,"Estimation and inference for linear models where some or all of the
fixed-effects coefficients are subject to order restrictions. This package uses
the robust residual bootstrap methodology for inference, and can handle some
structure in the residual variance matrix.",2019-02-07,Casey M. Jelsema,NA,TRUE,https://github.com/jelsema/clme,18944,1,1549569345
clogitLasso,"Fit a sequence of conditional logistic regression models with lasso, for small to large sized samples. Avalos, M., Pouyes, H., Grandvalet, Y., Orriols, L., & Lagarde, E. (2015) <doi:10.1186/1471-2105-16-S6-S1>.",2018-06-27,Marta Avalos,NA,TRUE,https://github.com/mavalosf/clogitlasso,7062,1,1529076175
clubSandwich,"Provides several cluster-robust variance estimators (i.e.,
sandwich estimators) for ordinary and weighted least squares linear regression
models, including the bias-reduced linearization estimator introduced by Bell
and McCaffrey (2002)
<http://www.statcan.gc.ca/pub/12-001-x/2002002/article/9058-eng.pdf> and
developed further by Pustejovsky and Tipton (2017)
<DOI:10.1080/07350015.2016.1247004>. The package includes functions for estimating
the variance- covariance matrix and for testing single- and multiple-
contrast hypotheses based on Wald test statistics. Tests of single regression
coefficients use Satterthwaite or saddle-point corrections. Tests of multiple-
contrast hypotheses use an approximation to Hotelling's T-squared distribution.
Methods are provided for a variety of fitted models, including lm() and mlm
objects, glm(), ivreg (from package 'AER'), plm() (from package 'plm'), gls()
and lme() (from 'nlme'), robu() (from 'robumeta'), and rma.uni() and rma.mv()
(from 'metafor').",2019-01-24,James Pustejovsky,https://github.com/jepusto/clubSandwich,TRUE,https://github.com/jepusto/clubsandwich,49523,24,1551150968
clustcurv,"A method for determining groups in multiple survival
curves with an automatic selection of their number based on k-means or
k-medians algorithms. The selection of the optimal number is provided by
bootstrap methods.
Implemented methods are:
Grouping multiple survival curves described by Villanueva et al. (2018) <doi:10.1002/sim.8016>.",2019-03-25,Nora M. Villanueva (<https://orcid.org/0000-0001-8085-2745>),https://github.com/noramvillanueva/clustcurv,TRUE,https://github.com/noramvillanueva/clustcurv,449,0,1553280351
Cluster.OBeu,"Estimate and return the needed parameters for visualisations designed for 'OpenBudgets' <http://openbudgets.eu/> data. Calculate cluster analysis measures in Budget data of municipalities across Europe, according to the 'OpenBudgets' data model. It involves a set of techniques and algorithms used to find and divide the data into groups of similar observations. Also, can be used generally to extract visualisation parameters convert them to 'JSON' format and use them as input in a different graphical interface.",2019-01-20,Kleanthis Koupidis,https://github.com/okgreece/Cluster.OBeu,TRUE,https://github.com/okgreece/cluster.obeu,3976,1,1551974953
ClusterBootstrap,Provides functionality for the analysis of clustered data using the cluster bootstrap. ,2018-06-26,Mathijs Deen,https://github.com/mathijsdeen/ClusterBootstrap,TRUE,https://github.com/mathijsdeen/clusterbootstrap,6455,1,1530046055
clusteredinterference,"Estimating causal effects from observational studies assuming
clustered (or partial) interference. These inverse probability-weighted
estimators target new estimands arising from population-level treatment
policies. The estimands and estimators are introduced in Barkley et al.
(2017) <arXiv:1711.04834>.",2019-03-18,Brian G. Barkley (<https://orcid.org/0000-0003-1787-4735>),http://github.com/BarkleyBG/clusteredinterference,TRUE,https://github.com/barkleybg/clusteredinterference,3484,3,1552840270
clustermq,"Evaluate arbitrary function calls using workers on HPC schedulers
in single line of code. All processing is done on the network without
accessing the file system. Remote schedulers are supported via SSH.",2019-02-22,Michael Schubert <mschu.dev@gmail.com>,https://github.com/mschubert/clustermq,TRUE,https://github.com/mschubert/clustermq,19880,64,1550845276
clusternor,"The clustering 'NUMA' Optimized Routines package or 'clusternor' is a highly optimized package for performing clustering in parallel with accelerations specifically targeting multi-core Non-Uniform Memory Access ('NUMA') hardware architectures. Disa Mhembere, Da Zheng, Carey E. Priebe, Joshua T. Vogelstein, Randal Burns (2019) <arXiv:1902.09527>.",2019-03-29,Disa Mhembere,https://github.com/neurodata/knorR,TRUE,https://github.com/neurodata/knorr,1048,6,1550642303
ClusterR,"Gaussian mixture models, k-means, mini-batch-kmeans, k-medoids and affinity propagation clustering with the option to plot, validate, predict (new data) and estimate the optimal number of clusters. The package takes advantage of 'RcppArmadillo' to speed up the computationally intensive parts of the functions. For more information, see (i) ""Clustering in an Object-Oriented Environment"" by Anja Struyf, Mia Hubert, Peter Rousseeuw (1997), Journal of Statistical Software, <doi:10.18637/jss.v001.i04>; (ii) ""Web-scale k-means clustering"" by D. Sculley (2010), ACM Digital Library, <doi:10.1145/1772690.1772862>; (iii) ""Armadillo: a template-based C++ library for linear algebra"" by Sanderson et al (2016), The Journal of Open Source Software, <doi:10.21105/joss.00026>; (iv) ""Clustering by Passing Messages Between Data Points"" by Brendan J. Frey and Delbert Dueck, Science 16 Feb 2007: Vol. 315, Issue 5814, pp. 972-976, <doi:10.1126/science.1136800>.",2019-01-11,Lampros Mouselimis,https://github.com/mlampros/ClusterR,TRUE,https://github.com/mlampros/clusterr,44544,36,1553717329
clustRcompaR,"Provides an interface to perform cluster analysis on a corpus of
text. Interfaces to Quanteda to assemble text corpuses easily. Deviationalizes
text vectors prior to clustering using technique described by Sherin (Sherin,
B. [2013]. A computational study of commonsense science: An exploration in the
automated analysis of clinical interview data. Journal of the Learning Sciences,
22(4), 600-638. Chicago. <doi:10.1080/10508406.2013.836654>). Uses
cosine similarity as distance metric for two stage clustering process, involving
Ward's algorithm hierarchical agglomerative clustering, and k-means clustering.
Selects optimal number of clusters to maximize ""variance explained"" by clusters,
adjusted by the number of clusters. Provides plotted output of clustering
results as well as printed output. Assesses ""model fit"" of clustering solution
to a set of preexisting groups in dataset.",2018-01-28,Joshua Rosenberg,https://github.com/alishinski/clustRcompaR,TRUE,https://github.com/alishinski/clustrcompar,6145,2,1533951236
clustree,"Deciding what resolution to use can be a difficult question when
approaching a clustering analysis. One way to approach this problem is to
look at how samples move as the number of clusters increases. This package
allows you to produce clustering trees, a visualisation for interrogating
clusterings as resolution increases.",2019-02-24,Luke Zappia (<https://orcid.org/0000-0001-7744-8565>),https://github.com/lazappi/clustree,TRUE,https://github.com/lazappi/clustree,8285,68,1551001942
cmrutils,"A collection of useful helper routines developed by
students of the Center for Mathematical Research, Stankin,
Moscow.",2018-09-03,Andrey Paramonov,https://github.com/aparamon/cmrutils,TRUE,https://github.com/aparamon/cmrutils,26908,0,1534583164
cmvnorm,Various utilities for the complex multivariate Gaussian distribution.,2018-08-16,Robin K. S. Hankin,https://github.com/RobinHankin/cmvnorm.git,TRUE,https://github.com/robinhankin/cmvnorm,16293,1,1534374733
cNORM,"Conventional methods for producing standard scores in psychometrics or biometrics
are often plagued with ""jumps"" or ""gaps"" (i.e., discontinuities) in norm tables and low
confidence for assessing extreme scores. The continuous norming method introduced by A.
Lenhard et al. (2016), <doi:10.1177/1073191116656437>, generates continuous test norm
scores on the basis of the raw data from standardization samples, without requiring
assumptions about the distribution of the raw data: Norm scores are directly established
from raw data by modeling the latter ones as a function of both percentile scores and an
explanatory variable (e.g., age). The method minimizes bias arising from sampling and
measurement error, while handling marked deviations from normality, addressing bottom
or ceiling effects and capturing almost all of the variance in the original norm data
sample.",2019-03-15,Wolfgang Lenhard (<https://orcid.org/0000-0002-8184-6889>),"https://www.psychometrica.de/cNorm_en.html,
https://github.com/WLenhard/cNORM",TRUE,https://github.com/wlenhard/cnorm,2985,0,1554495141
CNVScope,"Provides the ability to create interaction maps, discover CNV map domains (edges), gene annotate interactions, and create interactive visualizations of these CNV interaction maps.",2018-10-20,James Dalgeish,https://github.com/jamesdalg/CNVScope/,TRUE,https://github.com/jamesdalg/cnvscope,1875,2,1551762083
coala,"Coalescent simulators can rapidly simulate biological sequences
evolving according to a given model of evolution.
You can use this package to specify such models, to conduct the simulations
and to calculate additional statistics from the results.
It relies on existing simulators for doing the simulation, and currently
supports the programs 'ms', 'msms' and 'scrm'. It also supports finite-sites
mutation models by combining the simulators with the program 'seq-gen'.",2017-10-30,Paul Staab,https://github.com/statgenlmu/coala,TRUE,https://github.com/statgenlmu/coala,14758,12,1551123361
coalitions,"An implementation of a MCMC method to calculate
probabilities for a coalition majority based on survey results,
see Bender and Bauer (2018) <doi:10.21105/joss.00606>.",2018-10-06,Andreas Bender (<https://orcid.org/0000-0001-5628-8611>),"https://github.com/adibender/coalitions/,
http://adibender.github.io/coalitions/",TRUE,https://github.com/adibender/coalitions,4327,7,1552057461
cobalt,"Generate balance tables and plots for covariates of groups preprocessed through matching, weighting or subclassification, for example, using propensity scores. Includes integration with 'MatchIt', 'twang', 'Matching', 'optmatch', 'CBPS', 'ebal', 'WeightIt', and 'designmatch' for assessing balance on the output of their preprocessing functions. Users can also specify data for balance assessment not generated through the above packages. Also included are methods for assessing balance in clustered or multiply imputed data sets or data sets with longitudinal treatments.",2019-01-16,Noah Greifer,https://github.com/ngreifer/cobalt,TRUE,https://github.com/ngreifer/cobalt,48230,13,1549956785
cocktailApp,"A 'shiny' app to discover cocktails. The
app allows one to search for cocktails by ingredient,
filter on rating, and number of ingredients. The
package also contains data with the ingredients of
nearly 16 thousand cocktails scraped from the web.",2018-08-19,Steven E. Pav (<https://orcid.org/0000-0002-4197-6195>),https://github.com/shabbychef/cocktailApp,TRUE,https://github.com/shabbychef/cocktailapp,2937,29,1534695460
cocorresp,"Fits predictive and symmetric co-correspondence analysis (CoCA) models to relate one data matrix
to another data matrix. More specifically, CoCA maximises the weighted covariance
between the weighted averaged species scores of one community and the weighted averaged species
scores of another community. CoCA attempts to find patterns that are common to both communities.",2016-02-29,"Original Matlab routines by C.J.F. ter Braak and A.P. Schaffers. R port by Gavin L. Simpson.
Function simpls based on simpls.fit (package pls) by Ron Wehrens and Bjorn-Helge Mevik.",https://github.com/gavinsimpson/cocorresp,TRUE,https://github.com/gavinsimpson/cocorresp,27340,2,1528685526
codebook,"Easily automate the following tasks to describe data frames:
Summarise the distributions, and labelled missings of variables graphically
and using descriptive statistics.
For surveys, compute and summarise reliabilities (internal consistencies,
retest, multilevel) for psychological scales.
Combine this information with metadata (such as item labels and labelled
values) that is derived from R attributes.
To do so, the package relies on 'rmarkdown' partials, so you can generate
HTML, PDF, and Word documents.
Codebooks are also available as tables (CSV, Excel, etc.) and in JSON-LD, so
that search engines can find your data and index the metadata.
The metadata are also available at your fingertips via RStudio Addins.",2019-02-21,Ruben Arslan,https://github.com/rubenarslan/codebook,TRUE,https://github.com/rubenarslan/codebook,9197,39,1553079096
CodeDepends,"Tools for analyzing R expressions
or blocks of code and determining the dependencies between them.
It focuses on R scripts, but can be used on the bodies of functions.
There are many facilities including the ability to summarize or get a high-level
view of code, determining dependencies between variables, code improvement
suggestions.",2018-07-17,Duncan Temple Lang,https://github.com/duncantl/CodeDepends,TRUE,https://github.com/duncantl/codedepends,15209,50,1539876449
codemetar,"The 'Codemeta' Project defines a 'JSON-LD' format for describing
software metadata, as detailed at <https://codemeta.github.io>. This package
provides utilities to generate, parse, and modify 'codemeta.json' files
automatically for R packages, as well as tools and examples for working with
'codemeta.json' 'JSON-LD' more generally.",2019-03-12,Carl Boettiger,"https://github.com/ropensci/codemetar,
https://ropensci.github.io/codemetar",TRUE,https://github.com/ropensci/codemetar,5606,28,1554292413
codified,"Augment clinical data with metadata to create
output used in conventional publications and reports.",2018-09-30,Will Beasley (<https://orcid.org/0000-0002-5613-5006>),"https://ouhscbbmc.github.io/codified/,
https://github.com/OuhscBbmc/codified,
https://github.com/higgi13425/nih_enrollment_table",TRUE,https://github.com/ouhscbbmc/codified,1877,2,1553317365
codyn,"Univariate and multivariate temporal and spatial diversity indices,
rank abundance curves, and community stability measures. The functions
implement measures that are either explicitly temporal and include the
option to calculate them over multiple replicates, or spatial and include
the option to calculate them over multiple time points. Functions fall into
five categories: static diversity indices, temporal diversity indices,
spatial diversity indices, rank abundance curves, and community stability
measures. The diversity indices are temporal and spatial analogs to
traditional diversity indices. Specifically, the package includes functions
to calculate community richness, evenness and diversity at a given point in
space and time. In addition, it contains functions to calculate species
turnover, mean rank shifts, and lags in community similarity between two
time points.",2019-03-08,Matthew B. Jones,https://github.com/NCEAS/codyn/,TRUE,https://github.com/nceas/codyn,12003,18,1553715634
cofeatureR,"Generate cofeature (feature by sample) matrices. The package
utilizes ggplot2::geom_tile() to generate the matrix allowing for easy
additions from the base matrix.",2018-06-24,Fong Chun Chan,https://github.com/tinyheero/cofeatureR,TRUE,https://github.com/tinyheero/cofeaturer,8972,0,1529850699
coga,"Evaluation for density and distribution function of convolution of gamma
distributions in R. Two related exact methods and one approximate method are
implemented with efficient algorithm and C++ code. A quick guide for choosing
correct method and usage of this package is given in package vignette.",2018-05-08,Chaoran Hu,https://github.com/ChaoranHu/coga,TRUE,https://github.com/chaoranhu/coga,8012,0,1552428361
coindeskr,Extract real-time Bitcoin price details by accessing 'CoinDesk' Bitcoin price Index API <https://www.coindesk.com/api/>. ,2018-01-05,AbdulMajedRaja RS,https://github.com/amrrs/coindeskr,TRUE,https://github.com/amrrs/coindeskr,5225,2,1525677814
coinmarketcapr,To extract and monitor price and market cap of 'Crypto currencies' from 'Coin Market Cap' <https://coinmarketcap.com/api/>. ,2017-09-26,AbdulMajedRaja RS,http://github.com/amrrs/coinmarketcapr,TRUE,https://github.com/amrrs/coinmarketcapr,6992,27,1525201900
collapsibleTree,"
Interactive Reingold-Tilford tree diagrams created using 'D3.js', where every node can be expanded and collapsed by clicking on it.
Tooltips and color gradients can be mapped to nodes using a numeric column in the source data frame.
See 'collapsibleTree' website for more information and examples.",2018-08-22,Adeel Khan,"https://github.com/AdeelK93/collapsibleTree,
https://AdeelK93.github.io/collapsibleTree/",TRUE,https://github.com/adeelk93/collapsibletree,15683,81,1542036544
collateral,"The purrr package allows you to capture the side effects (errors, warning, messages and other output) of functions using safely() and quietly(). Using collateral, you can quickly see which elements of a list (or list-column) returned results, which threw errors and which returned warnings or other output.",2018-11-19,James Goldie (<https://orcid.org/0000-0002-5024-6207>),"https://rensa.co/collateral/index.html,
https://github.com/rensa/collateral",TRUE,https://github.com/rensa/collateral,1452,21,1542779260
CollessLike,"Computation of Colless-Like, Sackin and cophenetic balance indices of a phylogenetic tree and study of the distribution of these balance indices under the alpha-gamma model. For more details see A. Mir, F. Rossello, L. Rotger (2013) <doi:10.1016/j.mbs.2012.10.005>, M. J. Sackin (1972) <doi:10.1093/sysbio/21.2.225>, D. H. Colless (1982) <doi:10.2307/2413420>.",2018-04-03,Arnau Mir,https://github.com/LuciaRotger/CollessLike,TRUE,https://github.com/luciarotger/collesslike,3053,1,1524076369
colorednoise,"Temporally autocorrelated populations are correlated in their vital rates (growth, death, etc.) from year to year. It is very common for populations, whether they be bacteria, plants, or humans, to be temporally autocorrelated. This poses a challenge for stochastic population modeling, because a temporally correlated population will behave differently from an uncorrelated one.
This package provides tools for simulating populations with white noise (no temporal autocorrelation), red noise (positive temporal autocorrelation), and blue noise (negative temporal autocorrelation). The algebraic formulation for autocorrelated noise comes from Ruokolainen et al. (2009) <doi:10.1016/j.tree.2009.04.009>. Models for unstructured populations and for structured populations (matrix models) are available.",2019-01-23,Julia Pilowsky (<https://orcid.org/0000-0002-6376-2585>),NA,TRUE,https://github.com/japilo/colorednoise,6337,0,1548243175
colorfindr,"Extracts colors from various image types, returns customized reports and plots treemaps
and 3D scatterplots of image compositions. Color palettes can also be created. ",2019-02-01,David Zumbach,NA,TRUE,https://github.com/zumbov2/colorfindr,3999,23,1547193210
colourpicker,"A colour picker that can be used as an input in Shiny apps
or Rmarkdown documents. The colour picker supports alpha opacity, custom
colour palettes, and many more options. A Plot Colour Helper tool is
available as an RStudio Addin, which helps you pick colours to use in your
plots. A more generic Colour Picker RStudio Addin is also provided to let
you select colours to use in your R code.",2017-09-27,Dean Attali,https://github.com/daattali/colourpicker,TRUE,https://github.com/daattali/colourpicker,306734,90,1554335475
colt,"
A collection of command-line color styles based on the 'crayon'
package. 'Colt' styles are defined in themes that can easily be switched, to
ensure command line output looks nice on dark as well as light consoles.",2017-10-10,Stefan Fleck,https://github.com/s-fleck/colt,TRUE,https://github.com/s-fleck/colt,4712,10,1540649054
commonmark,"The CommonMark specification defines a rationalized version of markdown
syntax. This package uses the 'cmark' reference implementation for converting
markdown text into various formats including html, latex and groff man. In
addition it exposes the markdown parse tree in xml format. Also includes opt-in
support for GFM extensions including tables, autolinks, and strikethrough text.",2018-12-01,Jeroen Ooms,"http://github.com/jeroen/commonmark (devel)
https://github.github.com/gfm/ (spec)",TRUE,https://github.com/jeroen/commonmark,658429,58,1543665294
commonsMath,Java JAR files for the Apache Commons Mathematics Library for use by users and other packages.,2018-10-26,David B. Dahl,https://github.com/dbdahl/commonsMath,TRUE,https://github.com/dbdahl/commonsmath,8286,2,1553291063
comorbidity,"Computing comorbidity scores such as the weighted Charlson score
(Charlson, 1987 <doi:10.1016/0021-9681(87)90171-8>) and the Elixhauser
comorbidity score (Elixhauser, 1998 <doi:10.1097/00005650-199801000-00004>)
using ICD-9-CM or ICD-10 codes (Quan, 2005 <doi:10.1097/01.mlr.0000182534.19832.83>).",2019-03-22,"Alessandro Gasparini
(<https://orcid.org/0000-0002-8319-7624>)",https://github.com/ellessenne/comorbidity,TRUE,https://github.com/ellessenne/comorbidity,7502,10,1553687560
CompareCausalNetworks,"Unified interface for the estimation of causal networks, including
the methods 'backShift' (from package 'backShift'), 'bivariateANM' (bivariate
additive noise model), 'bivariateCAM' (bivariate causal additive model),
'CAM' (causal additive model) (from package 'CAM'), 'hiddenICP' (invariant
causal prediction with hidden variables), 'ICP' (invariant causal prediction)
(from package 'InvariantCausalPrediction'), 'GES' (greedy equivalence
search), 'GIES' (greedy interventional equivalence search), 'LINGAM', 'PC' (PC
Algorithm), 'FCI' (fast causal inference),
'RFCI' (really fast causal inference) (all from package 'pcalg') and
regression.",2018-05-18,Christina Heinze-Deml <heinzedeml@stat.math.ethz.ch>,https://github.com/christinaheinze/CompareCausalNetworks,TRUE,https://github.com/christinaheinze/comparecausalnetworks,14524,9,1526647989
comparer,"Makes comparisons quickly for different functions or code
blocks performing the same task with the function mbc().
Can be used to compare model fits to the same data or
see which function runs faster.",2018-01-08,Collin Erickson,https://github.com/CollinErickson/comparer,TRUE,https://github.com/collinerickson/comparer,3922,2,1554260199
comperank,"Compute ranking and rating based on competition results. Methods of
different nature are implemented: with fixed Head-to-Head structure, with
variable Head-to-Head structure and with iterative nature. All algorithms
are taken from the book 'Who’s #1?: The science of rating and ranking' by
Amy N. Langville and Carl D. Meyer (2012, ISBN:978-0-691-15422-0).",2018-05-30,Evgeni Chasnovski,https://github.com/echasnovski/comperank,TRUE,https://github.com/echasnovski/comperank,2709,5,1547238497
comperes,"Tools for storing and managing competition results. Competition is
understood as a set of games in which players gain some abstract scores.
There are two ways for storing results: in long (one row per game-player)
and wide (one row per game with fixed amount of players) formats. This
package provides functions for creation and conversion between them. Also
there are functions for computing their summary and Head-to-Head values for
players. They leverage grammar of data manipulation from 'dplyr'.",2019-01-12,Evgeni Chasnovski,https://github.com/echasnovski/comperes,TRUE,https://github.com/echasnovski/comperes,4037,4,1547305515
COMPoissonReg,"Fit Conway-Maxwell Poisson (COM-Poisson or CMP) regression models
to count data (Sellers & Shmueli, 2010) <doi:10.1214/09-AOAS306>. The
package provides functions for model estimation, dispersion testing, and
diagnostics. Zero-inflated CMP regression (Sellers & Raim, 2016)
<doi:10.1016/j.csda.2016.01.007> is also supported.",2018-12-09,"Kimberly Sellers <kfs7@georgetown.edu>
Thomas Lotze <thomas.lotze@thomaslotze.com>
Andrew Raim <andrew.raim@gmail.com>",https://github.com/lotze/COMPoissonReg,TRUE,https://github.com/lotze/compoissonreg,22148,0,1547657283
comprehenr,"Provides 'Python'-style list comprehensions.
List comprehension expressions use usual loops (for(), while() and repeat()) and
usual if() as list producers. In many cases it gives more concise notation than
standard ""*apply + filter"" strategy.",2019-03-17,Gregory Demin,https://github.com/gdemin/comprehenr,TRUE,https://github.com/gdemin/comprehenr,841,7,1552857040
comtradr,"Interface with and extract data from the United Nations Comtrade
API <https://comtrade.un.org/data/>. Comtrade provides country level shipping
data for a variety of commodities, these functions allow for easy API query
and data returned as a tidy data frame.",2018-10-05,Chris Muir,https://github.com/ropensci/comtradr,TRUE,https://github.com/ropensci/comtradr,8776,18,1541973831
concurve,"Allows one to compute confidence (compatibility/consonance) intervals for various statistical tests along with their corresponding P-values and S-values. The intervals can be plotted to create consonance functions allowing one to see what effect sizes are compatible with the test model at various compatibility levels rather than being limited to one interval estimate such as 95%. These methods are discussed by Poole C. (1987) <doi:10.2105/AJPH.77.2.195>, Schweder T, Hjort NL. (2002) <doi:10.1111/1467-9469.00285>, Singh K, Xie M, Strawderman WE. (2007) <arXiv:0708.0976>, Rothman KJ, Greenland S, Lash TL. (2008, ISBN:9781451190052), Amrhein V, Trafimow D, Greenland S. (2019) <doi:10.1080/00031305.2018.1543137>, and Greenland S. (2019) <doi:10.1080/00031305.2018.1529625>.",2019-03-21,Zad Chow (<https://orcid.org/0000-0003-1545-8199>),"https://data.lesslikely.com/concurve/,
https://github.com/Zadchow/concurve, https://lesslikely.com/",TRUE,https://github.com/zadchow/concurve,883,6,1553155839
condformat,"Apply and visualize conditional formatting to data frames in R.
It renders a data frame with cells formatted according to
criteria defined by rules, using a tidy evaluation syntax. The table is
printed either opening a web browser or within the 'RStudio' viewer if
available. The conditional formatting rules allow to highlight cells
matching a condition or add a gradient background to a given column. This
package supports both 'HTML' and 'LaTeX' outputs in 'knitr' reports, and
exporting to an 'xlsx' file.",2018-10-29,Sergio Oller Moreno,http://github.com/zeehio/condformat,TRUE,https://github.com/zeehio/condformat,16803,13,1553759292
CondIndTests,"Code for a variety of nonlinear conditional independence tests:
Kernel conditional independence test (Zhang et al., UAI 2011, <arXiv:1202.3775>),
Residual Prediction test (based on Shah and Buehlmann, <arXiv:1511.03334>),
Invariant environment prediction,
Invariant target prediction,
Invariant residual distribution test,
Invariant conditional quantile prediction (all from Heinze-Deml et al., <arXiv:1706.08576>).",2018-05-07,Christina Heinze-Deml <heinzedeml@stat.math.ethz.ch>,https://github.com/christinaheinze/nonlinearICP-and-CondIndTests,TRUE,https://github.com/christinaheinze/nonlinearicp-and-condindtests,6296,5,1525356604
conditions,"Implements specialized conditions, i.e., typed errors,
warnings and messages. Offers a set of standardized conditions (value error,
deprecated warning, io message, ...) in the fashion of Python's built-in
exceptions.",2017-01-18,Michel Lang <michellang@gmail.com>,https://github.com/mllg/conditions,TRUE,https://github.com/mllg/conditions,5268,10,1551693759
condvis,"Exploring fitted models by interactively taking 2-D and 3-D
sections in data space.",2018-09-13,Mark OConnell,http://markajoc.github.io/condvis/,TRUE,https://github.com/markajoc/condvis,16061,17,1536793538
config,"Manage configuration values across multiple environments (e.g.
development, test, production). Read values using a function that determines
the current environment and returns the appropriate value.",2018-03-27,JJ Allaire,https://github.com/rstudio/config,TRUE,https://github.com/rstudio/config,1106605,111,1525205321
configr,"
Implements the JSON, INI, YAML and TOML parser for R setting and writing of configuration file. The functionality of this package is similar to that of package 'config'. ",2018-11-13,Jianfeng Li (<https://orcid.org/0000-0003-2349-208X>),https://github.com/Miachol/configr,TRUE,https://github.com/miachol/configr,31613,28,1542167564
configural,"R functions for criterion profile analysis, Davison and Davenport (2002) <doi:10.1037/1082-989X.7.4.468> and meta-analytic criterion profile analysis, Wiernik, Wilmot, Davison, and Ones (2019). Sensitivity analyses to aid in interpreting criterion profile analysis results are also included.",2019-02-19,Brenton M. Wiernik,NA,TRUE,https://github.com/bwiernik/configural,656,0,1549759622
confinterpret,"Produces descriptive interpretations of confidence intervals.
Includes (extensible) support for various test types, specified as sets
of interpretations dependent on where the lower and upper confidence limits
sit. Provides plotting functions for graphical display of interpretations.",2017-10-03,Jim Vine,https://github.com/jimvine/confinterpret,TRUE,https://github.com/jimvine/confinterpret,6468,0,1529679294
conflicted,"R's default conflict management system gives the most recently
loaded package precedence. This can make it hard to detect conflicts,
particularly when they arise because a package update creates ambiguity
that did not previously exist. 'conflicted' takes a different approach,
making every conflict an error and forcing you to choose which function
to use.",2019-03-29,Hadley Wickham,https://github.com/r-lib/conflicted,TRUE,https://github.com/r-lib/conflicted,9988,121,1553949252
CongreveLamsdell2016,"Includes the 100 datasets simulated by Congreve and Lamsdell (2016)
<doi:10.1111/pala.12236>, and analyses of the partition and quartet distance of
reconstructed trees from the generative tree, as analysed by Smith (2019)
<doi:10.1098/rsbl.2018.0632>.",2019-02-07,Martin R. Smith,https://github.com/ms609/CongreveLamsdell2016,TRUE,https://github.com/ms609/congrevelamsdell2016,1205,0,1549542271
ConR,"Multi-species estimation of geographical range parameters
for preliminary assessment of conservation status following Criterion B of the
International Union for Conservation of Nature (IUCN,
see <http://www.iucnredlist.org>).",2018-06-07,Gilles Dauby,https://github.com/gdauby/ConR,TRUE,https://github.com/gdauby/conr,8947,3,1553793371
constants,"CODATA internationally recommended values of the fundamental physical
constants, provided as symbols for direct use within the R language. Optionally,
the values with errors and/or the values with units are also provided if the
'errors' and/or the 'units' packages are installed. The Committee on Data
for Science and Technology (CODATA) is an interdisciplinary committee of the
International Council for Science which periodically provides the internationally
accepted set of values of the fundamental physical constants. This package
contains the ""2014 CODATA"" version, published on 25 June 2015:
Mohr, P. J., Newell, D. B. and Taylor, B. N. (2016)
<DOI:10.1103/RevModPhys.88.035009>, <DOI:10.1063/1.4954402>.",2018-01-08,Iñaki Ucar,https://github.com/r-quantities/constants,TRUE,https://github.com/r-quantities/constants,5071,10,1534169156
constellation,"Examine any number of time series data frames to identify
instances in which various criteria are met within specified time
frames. In clinical medicine, these types of events are often
called ""constellations of signs and symptoms"", because a single
condition depends on a series of events occurring within a certain
amount of time of each other. This package was written to work with
any number of time series data frames and is optimized for speed
to work well with data frames with millions of rows.",2018-03-27,Mark Sendak,https://github.com/marksendak/constellation,TRUE,https://github.com/marksendak/constellation,5007,2,1542166511
container,"Common container data structures deque, set and dict (resembling
'Python's dict type) with typical member functions to insert, delete and
access container elements. Provides iterators and reference semantics.",2018-12-01,Roman Pahl,https://github.com/rpahl/container,TRUE,https://github.com/rpahl/container,2603,2,1543771276
contextual,"Facilitates the simulation and evaluation of context-free
and contextual multi-Armed Bandit policies or algorithms to ease the
implementation, evaluation, and dissemination of both existing and
new bandit algorithms and policies.",2019-03-17,Robin van Emden (<https://orcid.org/0000-0001-5820-8638>),https://github.com/Nth-iteration-labs/contextual,TRUE,https://github.com/nth-iteration-labs/contextual,2110,20,1552834388
contfrac,Various utilities for evaluating continued fractions.,2018-05-17,Robin K. S. Hankin,https://github.com/RobinHankin/contfrac.git,TRUE,https://github.com/robinhankin/contfrac,160411,0,1548880958
ContourFunctions,"Provides functions for making contour plots.
The contour plot can be created from grid data, a function,
or a data set. If non-grid data is given, then a Gaussian
process is fit to the data and used to create the contour plot.",2017-05-04,Collin Erickson,https://github.com/CollinErickson/contour,TRUE,https://github.com/collinerickson/contour,5179,5,1529460919
control,"Solves control systems problems relating to time/frequency response, LTI systems design and analysis, transfer function manipulations, and system conversion.",2017-12-12,Ben C. Ubah,NA,TRUE,https://github.com/benubah/control,3756,10,1525694086
ConvergenceClubs,"Functions for clustering regions that form convergence clubs, according to the definition of Phillips and Sul (2009) <doi:10.1002/jae.1080>.",2018-12-14,Roberto Sichera,https://CRAN.R-project.org/package=ConvergenceClubs,TRUE,https://github.com/rhobis/convergenceclubs,5354,0,1548328072
convexjlr,"Provides a simple high-level wrapper for
'Julia' package 'Convex.jl' (see <https://github.com/JuliaOpt/Convex.jl> for
more information),
which makes it easy to describe and solve convex optimization problems in R.
The problems can be dealt with include:
linear programs,
second-order cone programs,
semidefinite programs,
exponential cone programs.",2018-12-16,Changcheng Li,https://github.com/Non-Contradiction/convexjlr,TRUE,https://github.com/non-contradiction/convexjlr,6902,9,1545176635
convey,"Variance estimation on indicators of income concentration and
poverty using complex sample survey designs. Wrapper around the
survey package.",2018-06-19,Djalma Pessoa,https://guilhermejacob.github.io/context/,TRUE,https://github.com/djalmapessoa/convey,13866,10,1526655399
coop,"Fast implementations of the co-operations: covariance,
correlation, and cosine similarity. The implementations are
fast and memory-efficient and their use is resolved
automatically based on the input data, handled by R's S3
methods. Full descriptions of the algorithms and benchmarks
are available in the package vignettes.",2017-11-14,Drew Schmidt,https://github.com/wrathematics/coop,TRUE,https://github.com/wrathematics/coop,12528,14,1554337144
CoordinateCleaner,"Automated flagging of common spatial and temporal errors in biological and paleontological collection data, for the use in conservation, ecology and paleontology. Includes automated tests to easily flag (and exclude) records assigned to country or province centroid, the open ocean, the headquarters of the Global Biodiversity Information Facility, urban areas or the location of biodiversity institutions (museums, zoos, botanical gardens, universities). Furthermore identifies per species outlier coordinates, zero coordinates, identical latitude/longitude and invalid coordinates. Also implements an algorithm to identify data sets with a significant proportion of rounded coordinates. Especially suited for large data sets.",2019-04-02,Alexander Zizka,https://ropensci.github.io/CoordinateCleaner/,TRUE,https://github.com/ropensci/coordinatecleaner,11104,22,1554189401
coppeCosenzaR,"The program implements the COPPE-Cosenza Fuzzy Hierarchy Model.
The model was based on the evaluation of local alternatives, representing
regional potentialities, so as to fulfill demands of economic projects.
After defining demand profiles in terms of their technological coefficients,
the degree of importance of factors is defined so as to represent
the productive activity. The method can detect a surplus of supply without
the restriction of the distance of classical algebra, defining a hierarchy
of location alternatives. In COPPE-Cosenza Model, the distance between
factors is measured in terms of the difference between grades of memberships
of the same factors belonging to two or more sets under comparison. The
required factors are classified under the following linguistic variables:
Critical (CR); Conditioning (C); Little Conditioning (LC); and Irrelevant
(I). And the alternatives can assume the following linguistic variables:
Excellent (Ex), Good (G), Regular (R), Weak (W), Empty (Em), Zero (Z) and
Inexistent (In). The model also provides flexibility, allowing different
aggregation rules to be performed and defined by the Decision Maker. Such
feature is considered in this package, allowing the user to define other
aggregation matrices, since it considers the same linguistic variables
mentioned. ",2017-10-28,Pier Taranti,https://github.com/ptaranti/coppeCosenzaR,TRUE,https://github.com/ptaranti/coppecosenzar,5853,0,1527205913
copulaedas,"Provides a platform where EDAs (estimation of
distribution algorithms) based on copulas can be implemented and
studied. The package offers complete implementations of various
EDAs based on copulas and vines, a group of well-known
optimization problems, and utility functions to study the
performance of the algorithms. Newly developed EDAs can be easily
integrated into the package by extending an S4 class with generic
functions for their main components.",2018-07-29,Yasser Gonzalez-Fernandez,https://github.com/yasserglez/copulaedas,TRUE,https://github.com/yasserglez/copulaedas,20878,1,1532841489
coRanking,"Calculates the co-ranking matrix to assess the
quality of a dimensionality reduction.",2018-10-01,Guido Kraemer,https://github.com/gdkrmr/coRanking,TRUE,https://github.com/gdkrmr/coranking,14296,4,1545039155
Corbi,"Provides a bundle of basic and fundamental bioinformatics tools,
such as network querying and alignment, subnetwork extraction and search,
network biomarker identification.",2019-03-04,Ling-Yun Wu,https://github.com/wulingyun/Corbi,TRUE,https://github.com/wulingyun/corbi,11030,2,1553042762
coreCT,"Computed tomography (CT) imaging is a powerful tool for understanding the composition of sediment cores. This package streamlines and accelerates the analysis of CT data generated in the context of environmental science. Included are tools for processing raw DICOM images to characterize sediment composition (sand, peat, etc.). Root analyses are also enabled, including measures of external surface area and volumes for user-defined root size classes. For a detailed description of the application of computed tomography imaging for sediment characterization, see: Davey, E., C. Wigand, R. Johnson, K. Sundberg, J. Morris, and C. Roman. (2011) <DOI: 10.1890/10-2037.1>.",2018-06-24,Troy D. Hill <Hill.Troy@gmail.com>,https://github.com/troyhill/coreCT,TRUE,https://github.com/troyhill/corect,4967,1,1546523244
cornet,Implements lasso and ridge regression for dichotomised outcomes (Rauschenberger et al. 2019). Such outcomes are not naturally but artificially binary. They indicate whether an underlying measurement is greater than a threshold.,2019-03-21,Armin Rauschenberger,https://github.com/rauschenberger/cornet,TRUE,https://github.com/rauschenberger/cornet,321,0,1552981509
coroICA,"Contains an implementation of a confounding robust independent component analysis (ICA) for noisy and grouped data. The main function coroICA() performs a blind source separation, by maximizing an independence across sources and allows to adjust for varying confounding based on user-specified groups. Additionally, the package contains the function uwedge() which can be used to approximately jointly diagonalize a list of matrices. For more details see the project website <https://sweichwald.de/coroICA/>.",2018-12-30,Niklas Pfister and Sebastian Weichwald,https://github.com/sweichwald/coroICA-R,TRUE,https://github.com/sweichwald/coroica-r,1013,1,1545124153
CoRpower,"Calculates power for assessment of intermediate biomarker responses as correlates of risk in the active treatment group in clinical efficacy trials, as described in Gilbert, Janes, and Huang, Power/Sample Size Calculations for Assessing Correlates of Risk in Clinical Efficacy Trials (2016, Statistics in Medicine). The methods differ from past approaches by accounting for the level of clinical treatment efficacy overall and in biomarker response subgroups, which enables the correlates of risk results to be interpreted in terms of potential correlates of efficacy/protection. The methods also account for inter-individual variability of the observed biomarker response that is not biologically relevant (e.g., due to technical measurement error of the laboratory assay used to measure the biomarker response), which is important because power to detect a specified correlate of risk effect size is heavily affected by the biomarker's measurement error. The methods can be used for a general binary clinical endpoint model with a univariate dichotomous, trichotomous, or continuous biomarker response measured in active treatment recipients at a fixed timepoint after randomization, with either case-cohort Bernoulli sampling or case-control without-replacement sampling of the biomarker (a baseline biomarker is handled as a trivial special case). In a specified two-group trial design, the computeN() function can initially be used for calculating additional requisite design parameters pertaining to the target population of active treatment recipients observed to be at risk at the biomarker sampling timepoint. Subsequently, the power calculation employs an inverse probability weighted logistic regression model fitted by the tps() function in the 'osDesign' package. 
Power results as well as the relationship between the correlate of risk effect size and treatment efficacy can be visualized using various plotting functions.",2018-10-06,Michal Juraska,https://github.com/mjuraska/CoRpower,TRUE,https://github.com/mjuraska/corpower,1466,0,1554332243
corpustools,"Provides text analysis in R, focusing on the use of a tokenized text format. In this format, the positions of tokens are maintained, and each token can be annotated (e.g., part-of-speech tags, dependency relations).
Prominent features include advanced Lucene-like querying for specific tokens or contexts (e.g., documents, sentences),
similarity statistics for words and documents, exporting to DTM for compatibility with many text analysis packages,
and the possibility to reconstruct original text from tokens to facilitate interpretation.",2018-04-20,Kasper Welbers and Wouter van Atteveldt,http://github.com/kasperwelbers/corpustools,TRUE,https://github.com/kasperwelbers/corpustools,7180,17,1543924294
corrgram,"Calculates correlation of variables and displays the results
graphically. Included panel functions can display points, shading, ellipses, and
correlation values with confidence intervals. See Friendly (2002) <doi:10.1198/000313002533>.",2018-07-09,Kevin Wright (<https://orcid.org/0000-0002-0617-8673>),https://github.com/kwstat/corrgram,TRUE,https://github.com/kwstat/corrgram,327933,10,1540829795
corrplot,"A graphical display of a correlation matrix or general matrix.
It also contains some algorithms to do matrix reordering. In addition,
corrplot is good at details, including choosing color, text labels,
color labels, layout, etc.",2017-10-16,Taiyun Wei,https://github.com/taiyun/corrplot,TRUE,https://github.com/taiyun/corrplot,1370281,170,1539741483
corrr,"A tool for exploring correlations.
It makes it possible to easily perform routine tasks when
exploring correlation matrices such as ignoring the diagonal,
focusing on the correlations of certain variables against others,
or rearranging and visualising the matrix in terms of the
strength of the correlations.",2019-03-06,Simon Jackson,https://github.com/drsimonj/corrr,TRUE,https://github.com/drsimonj/corrr,39135,264,1551889660
cosinor2,"Statistical procedures for calculating population–mean cosinor, non–stationary cosinor, estimation of best–fitting period, tests of population rhythm differences and more. See Cornélissen, G. (2014). <doi:10.1186/1742-4682-11-16>.",2018-10-15,Augustin Mutak <mutak94@gmail.com>,https://github.com/amutak/cosinor2,TRUE,https://github.com/amutak/cosinor2,5637,3,1539622583
costsensitive,"Reduction-based techniques for cost-sensitive multi-class classification, in which each observation has a different cost for classifying it into one class, and the goal is to predict the class with the minimum expected cost for each new observation.
Implements Weighted All-Pairs (Beygelzimer, A., Langford, J., & Zadrozny, B., 2008, <doi:10.1007/978-0-387-79361-0_1>), Weighted One-Vs-Rest (Beygelzimer, A., Dani, V., Hayes, T., Langford, J., & Zadrozny, B., 2005, <https://dl.acm.org/citation.cfm?id=1102358>) and Regression One-Vs-Rest.
Works with arbitrary classifiers taking observation weights, or with regressors. Also implements cost-proportionate rejection sampling for working with classifiers
that don't accept observation weights.",2019-03-03,David Cortes,https://github.com/david-cortes/costsensitive,TRUE,https://github.com/david-cortes/costsensitive,629,13,1551980520
countfitteR,"A large number of measurements generate count data. This is a statistical data type that only assumes non-negative integer values and is generated by counting. Typically, counting data can be found in biomedical applications, such as the analysis of DNA double-strand breaks. The number of DNA double-strand breaks can be counted in individual cells using various bioanalytical methods. For diagnostic applications, it is relevant to record the distribution of the number data in order to determine their biomedical significance (Roediger, S. et al., 2018. Journal of Laboratory and Precision Medicine. <doi:10.21037/jlpm.2018.04.10>). The software offers functions for a comprehensive automated evaluation of distribution models of count data. In addition to programmatic interaction, a graphical user interface (web server) is included, which enables fast and interactive data-scientific analyses. The user is supported in selecting the most suitable counting distribution for his own data set.",2019-02-03,Jaroslaw Chilimoniuk,https://github.com/jarochi/countfitteR,TRUE,https://github.com/jarochi/countfitter,842,2,1550349235
countrycode,"Standardize country names, convert them into one of
eleven coding schemes, convert between coding schemes, and
assign region descriptors.",2018-10-27,"Vincent Arel-Bundock
(<https://orcid.org/0000-0003-2042-7063>)",https://github.com/vincentarelbundock/countrycode,TRUE,https://github.com/vincentarelbundock/countrycode,126317,162,1552588248
covafillr,"Facilitates local polynomial regression for state dependent covariates in state-space models. The functionality can also be used from 'C++' based model builder tools such as 'Rcpp'/'inline', 'TMB', or 'JAGS'.",2018-09-13,"Christoffer Moesgaard Albertsen
(<https://orcid.org/0000-0003-0088-4363>)",https://github.com/calbertsen/covafillr,TRUE,https://github.com/calbertsen/covafillr,10393,0,1536842905
coveffectsplot,"Produce forest plots to visualize covariate effects using either
the command line or an interactive 'Shiny' application.",2019-02-25,Samer Mouksassi (<https://orcid.org/0000-0002-7152-6654>),https://github.com/smouksassi/interactiveforestplot,TRUE,https://github.com/smouksassi/interactiveforestplot,2640,5,1551956744
covequal,"Computes p-values using the largest root test using
an approximation to the null distribution by Johnstone (2008) <DOI:10.1214/08-AOS605>.",2017-10-14,Maxime Turgeon,http://github.com/turgeonmaxime/covequal,TRUE,https://github.com/turgeonmaxime/covequal,3602,0,1530984454
covr,"Track and report code coverage for your package and (optionally)
upload the results to a coverage service like 'Codecov' <http://codecov.io> or
'Coveralls' <http://coveralls.io>. Code coverage is a measure of the amount of
code being exercised by a set of tests. It is an indirect measure of test
quality and completeness. This package is compatible with any testing
methodology or framework and tracks coverage of both R code and compiled
C/C++/FORTRAN code.",2018-10-18,Jim Hester,https://github.com/r-lib/covr,TRUE,https://github.com/r-lib/covr,1337510,230,1549906479
covTestR,"Testing functions for Covariance Matrices. These tests include high-dimension homogeneity of covariance
matrix testing described by Schott (2007) <doi:10.1016/j.csda.2007.03.004> and high-dimensional one-sample tests of
covariance matrix structure described by Fisher, et al. (2010) <doi:10.1016/j.jmva.2010.07.004>. Covariance matrix
tests use C++ to speed performance and allow larger data sets.",2018-08-17,Ben Barnard,https://covtestr.bearstatistics.com,TRUE,https://github.com/benbarnard/covtestr,9636,0,1534533199
cowplot,"Some helpful extensions and modifications to the 'ggplot2'
package. In particular, this package makes it easy to combine multiple
'ggplot2' plots into one and label them with letters, e.g. A, B, C, etc.,
as is often required for scientific publications. The package also provides
a streamlined and clean theme that is used in the Wilke lab, hence the
package name, which stands for Claus O. Wilke's plot package.",2019-01-08,Claus O. Wilke,https://github.com/wilkelab/cowplot,TRUE,https://github.com/wilkelab/cowplot,1261957,379,1551654633
cowsay,"Allows printing of character strings as messages/warnings/etc.
with ASCII animals, including cats, cows, frogs, chickens, ghosts,
and more.",2018-09-18,Scott Chamberlain,https://github.com/sckott/cowsay,TRUE,https://github.com/sckott/cowsay,26298,190,1549047428
coxed,"Functions for generating, simulating, and visualizing expected
durations and marginal changes in duration from the Cox proportional hazards
model.",2018-08-23,Kropko,https://github.com/jkropko/coxed,TRUE,https://github.com/jkropko/coxed,3128,3,1553865002
coxrt,Fits Cox regression based on retrospectively ascertained times-to-event. The method uses Inverse-Probability-Weighting estimating equations. ,2019-01-05,Bella Vakulenko-Lagun,https://github.com/Bella2001/coxrt,TRUE,https://github.com/bella2001/coxrt,2538,0,1546716549
CPBayes,"A Bayesian meta-analysis method for studying cross-phenotype
genetic associations. It uses summary-level data across multiple phenotypes to
simultaneously measure the evidence of aggregate-level pleiotropic association
and estimate an optimal subset of traits associated with the risk locus. CPBayes
is based on a spike and slab prior.",2019-01-12,Arunabha Majumdar <statgen.arunabha@gmail.com>,https://github.com/ArunabhaCodes/CPBayes,TRUE,https://github.com/arunabhacodes/cpbayes,7422,1,1547320453
cpgen,"Frequently used methods in genomic applications with emphasis on parallel computing (OpenMP).
At its core, the package has a Gibbs Sampler that allows running univariate linear
mixed models that have both sparse and dense design matrices. The parallel sampling method
in case of dense design matrices (e.g. Genotypes) allows running Ridge Regression or BayesA for
a very large number of individuals. The Gibbs Sampler is capable of running Single Step Genomic Prediction models.
In addition, the package offers parallelized functions for common tasks like genome-wide
association studies and cross validation in a memory efficient way.",2015-09-15,Claas Heuer,https://github.com/cheuerde/cpgen,TRUE,https://github.com/cheuerde/cpgen,8497,3,1532465358
cplm,"Likelihood-based and Bayesian methods for various compound Poisson linear models based on Zhang, Yanwei (2013) <https://link.springer.com/article/10.1007/s11222-012-9343-7>.",2019-03-05,Yanwei (Wayne) Zhang,https://github.com/actuaryzhang/cplm,TRUE,https://github.com/actuaryzhang/cplm,54235,3,1528668141
cpr,"Implementation of the Control Polygon Reduction and Control Net
Reduction methods for finding parsimonious B-spline regression models.",2017-03-07,Peter DeWitt,https://github.com/dewittpe/cpr/,TRUE,https://github.com/dewittpe/cpr,5108,2,1535950930
cprr,"Calculate date of birth, age, and gender, and generate anonymous
sequence numbers from CPR numbers.
<https://en.wikipedia.org/wiki/Personal_identification_number_(Denmark)>.",2019-03-17,Jacob Anhoej,http://github.com/anhoej/cprr,TRUE,https://github.com/anhoej/cprr,4599,2,1552806166
cptcity,Incorporates colour gradients from the 'cpt-city' web archive available at <http://soliton.vm.bytemark.co.uk/pub/cpt-city/>. ,2019-03-07,"Sergio Ibarra-Espinosa
(<https://orcid.org/0000-0002-3162-1905>)",https://github.com/ibarraespinosa/cptcity,TRUE,https://github.com/ibarraespinosa/cptcity,4815,6,1551957598
cranlike,"A set of functions to manage 'CRAN'-like repositories
efficiently.",2018-11-26,Gábor Csárdi,https://github.com/r-hub/cranlike,TRUE,https://github.com/r-hub/cranlike,6783,20,1543230408
cranly,"Provides core visualisations and summaries for the CRAN package database. The package provides comprehensive methods for cleaning up and organising the information in the CRAN package database, for building package directives networks (depends, imports, suggests, enhances, linking to) and collaboration networks, producing package dependence trees, and for computing useful summaries and producing interactive visualisations from the resulting networks. The package also provides functions to coerce the networks to 'igraph' <https://CRAN.R-project.org/package=igraph> objects for further analyses and modelling.",2019-02-14,Ioannis Kosmidis (<https://orcid.org/0000-0003-1556-0302>),https://github.com/ikosmidis/cranly,TRUE,https://github.com/ikosmidis/cranly,4051,30,1550163820
CREAM,"Provides a new method for identification of clusters of genomic
regions within chromosomes. Primarily, it is used for calling clusters of
cis-regulatory elements (COREs). 'CREAM' uses genome-wide maps of genomic regions
in the tissue or cell type of interest, such as those generated from chromatin-based
assays including DNaseI, ATAC or ChIP-Seq. 'CREAM' considers proximity of the elements
within chromosomes of a given sample to identify COREs in the following steps:
1) It identifies window size or the maximum allowed distance between the elements
within each CORE, 2) It identifies number of elements which should be clustered
as a CORE, 3) It calls COREs, 4) It filters the COREs with lowest order which
does not pass the threshold considered in the approach.",2018-06-06,Benjamin Haibe-Kains,https://github.com/bhklab/CREAM,TRUE,https://github.com/bhklab/cream,5060,4,1544113475
credentials,"Setup and retrieve HTTPS and SSH credentials for use with 'git' and
other services. For HTTPS remotes the package interfaces the 'git-credential'
utility which 'git' uses to store HTTP usernames and passwords. For SSH
remotes we provide convenient functions to find or generate appropriate SSH
keys. The package both helps the user to setup a local git installation, and
also provides a back-end for git/ssh client libraries to authenticate with
existing user credentials.",2019-03-12,Jeroen Ooms (<https://orcid.org/0000-0002-4035-0289>),https://github.com/r-lib/credentials,TRUE,https://github.com/r-lib/credentials,1445,23,1552393786
cregg,"Simple tidying, analysis, and visualization of conjoint (factorial) experiments, including estimation and visualization of average marginal component effects ('AMCEs') and marginal means ('MMs') for weighted and un-weighted survey data, along with useful reference category diagnostics and statistical tests. Estimation of 'AMCEs' is based upon methods described by Hainmueller, Hopkins, and Yamamoto (2014) <doi:10.1093/pan/mpt024>.",2018-07-30,Thomas J. Leeper (<https://orcid.org/0000-0003-4097-6326>),https://github.com/leeper/cregg,TRUE,https://github.com/leeper/cregg,1973,17,1532690068
cRegulome,"Builds a 'SQLite' database file of pre-calculated transcription
factor/microRNA-gene correlations (co-expression) in cancer from the
Cistrome Cancer Liu et al. (2011) <doi:10.1186/gb-2011-12-8-r83> and
'miRCancerdb' databases (in press). Provides custom classes and functions
to query, tidy and plot the correlation data.",2019-01-03,Mahmoud Ahmed (<https://orcid.org/0000-0002-4377-6541>),https://github.com/ropensci/cRegulome,TRUE,https://github.com/ropensci/cregulome,5571,2,1554374195
CRF,"Implements modeling and computational tools for conditional
random fields (CRF) model as well as other probabilistic undirected
graphical models of discrete data with pairwise and unary potentials.",2019-03-04,Ling-Yun Wu,https://github.com/wulingyun/CRF,TRUE,https://github.com/wulingyun/crf,25349,10,1551675207
cricketr,"Tools for analyzing performances of cricketers based on stats in
ESPN Cricinfo Statsguru. The toolset can be used for analysis of Tests,ODIs
and Twenty20 matches of both batsmen and bowlers.",2019-03-07,Tinniam V Ganesh,https://github.com/tvganesh/cricketr,TRUE,https://github.com/tvganesh/cricketr,13230,37,1552008286
crimedata,"Gives convenient access to publicly available police-recorded open
crime data from large cities in the United States that are included in the
Crime Open Database <https://osf.io/zyaqn/>.",2019-03-21,Matthew Ashby (<https://orcid.org/0000-0003-4201-9239>),https://github.com/mpjashby/crimedata,TRUE,https://github.com/mpjashby/crimedata,2166,2,1553205230
crminer,"Text mining client for 'Crossref' (<https://crossref.org>). Includes
functions for getting links to full text of articles, fetching full
text articles from those links or Digital Object Identifiers ('DOIs'),
and text extraction from 'PDFs'.",2018-10-15,Scott Chamberlain (<https://orcid.org/0000-0003-1444-9135>),https://github.com/ropensci/crminer,TRUE,https://github.com/ropensci/crminer,12093,13,1552069779
crmPack,"Implements a wide range of model-based dose
escalation designs, ranging from classical and modern continual
reassessment methods (CRMs) based on dose-limiting toxicity endpoints to
dual-endpoint designs taking into account a biomarker/efficacy outcome. The
focus is on Bayesian inference, making it very easy to setup a new design
with its own JAGS code. However, it is also possible to implement 3+3
designs for comparison or models with non-Bayesian estimation. The whole
package is written in a modular form in the S4 class system, making it very
flexible for adaptation to new models, escalation or stopping rules.",2018-12-21,Giuseppe Palermo,https://github.com/roche/crmPack,TRUE,https://github.com/roche/crmpack,14281,1,1543913428
crochet,"Functions to help implement the extraction / subsetting / indexing
function [ and replacement function [<- of custom matrix-like types (based
on S3, S4, etc.), modeled as closely to the base matrix class as possible
(with tests to prove it).",2018-08-06,Alexander Grueneberg,https://github.com/agrueneberg/crochet,TRUE,https://github.com/agrueneberg/crochet,8197,4,1553272411
cropdatape,"Provides Peruvian agricultural production data from the Agriculture Ministry of Peru (MINAGRI). The first version includes
6 crops: rice, quinoa, potato, sweet potato, tomato and wheat; all of them across 24 departments. Initially, in excel files which has been transformed
and assembled using tidy data principles, i.e. each variable is in a column, each observation is a row and each value is in a cell.
The variables are sowing and harvest area per crop, yield, production and price per plot, every one year, from 2004 to 2014.",2017-03-02,Omar Benites-Alfaro,"https://github.com/omarbenites/cropdatape,
http://siea.minagri.gob.pe/siea/?q=publicaciones/anuarios-estadisticos",TRUE,https://github.com/omarbenites/cropdatape,4707,0,1543896752
CrossClustering,"Provide the CrossClustering algorithm (Tellaroli et al. (2016)
<doi:10.1371/journal.pone.0152333>), which is a partial clustering algorithm
that combines the Ward's minimum variance and Complete Linkage algorithms,
providing automatic estimation of a suitable number of clusters and
identification of outlier elements.",2018-07-30,Paola Tellaroli,https://CRAN.R-project.org/package=CrossClustering,TRUE,https://github.com/corradolanera/crossclustering,8341,0,1532961604
crossrun,"Joint distribution of number of crossings and the
longest run in a series of independent Bernoulli trials. The
computations uses an iterative procedure where computations
are based on results from shorter series. The procedure
conditions on the start value and partitions by further
conditioning on the position of the first crossing (or none).",2018-10-08,Tore Wentzel-Larsen,https://github.com/ToreWentzel-Larsen/crossrun,TRUE,https://github.com/torewentzel-larsen/crossrun,1519,0,1540531073
crosstalk,"Provides building blocks for allowing HTML widgets to communicate
with each other, with Shiny or without (i.e. static .html files). Currently
supports linked brushing and filtering.",2016-12-21,Joe Cheng,https://rstudio.github.io/crosstalk/,TRUE,https://github.com/rstudio/crosstalk,2260990,172,1544483811
crosswalkr,"A pair of functions for renaming and encoding data frames
using external crosswalk files. It is especially useful when
constructing master data sets from multiple smaller data
sets that do not name or encode variables consistently
across files. Based on similar commands in 'Stata'.",2019-03-04,Benjamin Skinner (<https://orcid.org/0000-0002-0337-7415>),https://github.com/btskinner/crosswalkr,TRUE,https://github.com/btskinner/crosswalkr,5736,3,1551900840
crplyr,"In order to facilitate analysis of datasets hosted on the Crunch
data platform <http://crunch.io/>, the 'crplyr' package implements 'dplyr'
methods on top of the Crunch backend. The usual methods 'select', 'filter',
'group_by', 'summarize', and 'collect' are implemented in such a way as to
perform as much computation on the server and pull as little data locally
as possible.",2019-04-03,Jonathan Keane,"https://crunch.io/r/crplyr/, https://github.com/Crunch-io/crplyr",TRUE,https://github.com/crunch-io/crplyr,6787,3,1554331544
crs,"Regression splines that handle a mix of continuous and categorical (discrete) data often encountered in applied settings. I would like to gratefully acknowledge support from the Natural Sciences and Engineering Research Council of Canada (NSERC, <http://www.nserc-crsng.gc.ca>), the Social Sciences and Humanities Research Council of Canada (SSHRC, <http://www.sshrc-crsh.gc.ca>), and the Shared Hierarchical Academic Research Computing Network (SHARCNET, <https://www.sharcnet.ca>).",2018-05-01,Jeffrey S. Racine,https://github.com/JeffreyRacine/R-Package-crs,TRUE,https://github.com/jeffreyracine/r-package-crs,63347,9,1527003216
crseEventStudy,"Based on Dutta et al. (2018) <doi:10.1016/j.jempfin.2018.02.004>, this package provides their standardized test for abnormal returns in long-horizon event studies. The methods used improve the major weaknesses of size, power, and robustness of long-run statistical tests described in Kothari/Warner (2007) <doi:10.1016/B978-0-444-53265-7.50015-9>. Abnormal returns are weighted by their statistical precision (i.e., standard deviation), resulting in abnormal standardized returns. This procedure efficiently captures the heteroskedasticity problem. Clustering techniques following Cameron et al. (2011) <10.1198/jbes.2010.07136> are adopted for computing cross-sectional correlation robust standard errors. The statistical tests in this package therefore accounts for potential biases arising from returns' cross-sectional correlation, autocorrelation, and volatility clustering without power loss.",2019-02-13,"Siegfried Köstlmeier
(<https://orcid.org/0000-0002-7221-6981>)",https://github.com/skoestlmeier/crseEventStudy,TRUE,https://github.com/skoestlmeier/crseeventstudy,1978,1,1550074729
crsra,"Tidies and performs preliminary analysis of 'Coursera' research
export data. These export data can be downloaded by anyone who has classes
on Coursera and wants to analyze the data. Coursera is one of the leading
providers of MOOCs and was launched in January 2012. With over 25 million
learners, Coursera is the most popular provider in the world being followed
by EdX, the MOOC provider that was a result of a collaboration between
Harvard University and MIT, with over 10 million users. Coursera has over
150 university partners from 29 countries and offers a total of 2000+
courses from computer science to philosophy. Besides, Coursera offers 180+
specializations, Coursera's credential system, and four fully online Masters
degrees. For more information about Coursera check Coursera's
About page on <https://blog.coursera.org/about/>.",2018-05-05,Aboozar Hadavand,NA,TRUE,https://github.com/jhudsl/crsra,2510,1,1550518493
crul,"A simple HTTP client, with tools for making HTTP requests,
and mocking HTTP requests. The package is built on R6, and takes
inspiration from Ruby's 'faraday' gem (<https://rubygems.org/gems/faraday>).
The package name is a play on curl, the widely used command line tool
for HTTP, and this package is built on top of the R package 'curl', an
interface to 'libcurl' (<https://curl.haxx.se/libcurl>).",2019-03-28,Scott Chamberlain (<https://orcid.org/0000-0003-1444-9135>),"https://github.com/ropensci/crul (devel)
https://ropensci.github.io/http-testing-book/ (user manual)",TRUE,https://github.com/ropensci/crul,202953,55,1553790082
crunch,"The Crunch.io service <http://crunch.io/> provides a cloud-based
data store and analytic engine, as well as an intuitive web interface.
Using this package, analysts can interact with and manipulate Crunch
datasets from within R. Importantly, this allows technical researchers to
collaborate naturally with team members, managers, and clients who prefer a
point-and-click interface.",2019-04-02,Jonathan Keane,"https://crunch.io/r/crunch/, https://github.com/Crunch-io/rcrunch",TRUE,https://github.com/crunch-io/rcrunch,46481,6,1554438488
crunchy,"To facilitate building custom dashboards on the Crunch data
platform <https://crunch.io/>, the 'crunchy' package provides tools for
working with 'shiny'. These tools include utilities to manage authentication
and authorization automatically and custom stylesheets to help match the
look and feel of the Crunch web application. The package also includes
several gadgets for use in 'RStudio'.",2019-04-03,Jonathan Keane,"https://crunch.io/r/crunchy/, https://github.com/Crunch-io/crunchy",TRUE,https://github.com/crunch-io/crunchy,6070,3,1554325221
crypto,"Retrieves crypto currency current and historical information as well as information on the exchanges they are listed on. For current and historical it will retrieve the daily open, high, low and close values for all crypto currencies. This retrieves the historical market data by web scraping tables provided by 'Cryptocurrency Market Capitalizations' <https://coinmarketcap.com>.",2019-01-13,Jesse Vent,"https://github.com/JesseVent/crypto,
https://CRAN.R-project.org/package=crypto",TRUE,https://github.com/jessevent/crypto,21044,72,1547365413
cstab,"Selection of the number of clusters in cluster analysis using
stability methods.",2018-06-19,Jonas M. B. Haslbeck,NA,TRUE,https://github.com/jmbh/cstab,8566,2,1529426170
csvread,"Functions for loading large (10M+ lines) CSV
and other delimited files, similar to read.csv, but typically faster and
using less memory than the standard R loader. While not entirely general,
it covers many common use cases when the types of columns in the CSV file
are known in advance. In addition, the package provides a class 'int64',
which represents 64-bit integers exactly when reading from a file. The
latter is useful when working with 64-bit integer identifiers exported from
databases. The CSV file loader supports common column types including
'integer', 'double', 'string', and 'int64', leaving further type
transformations to the user.",2018-12-05,Sergei Izrailev,http://github.com/jabiru/csvread,TRUE,https://github.com/jabiru/csvread,30305,0,1544473585
csvy,"Support for import from and export to the CSVY file format. CSVY is a file format that combines the simplicity of CSV (comma-separated values) with the metadata of other plain text and binary formats (JSON, XML, Stata, etc.) by placing a YAML header on top of a regular CSV.",2018-08-01,Thomas J. Leeper (<https://orcid.org/0000-0003-4097-6326>),https://github.com/leeper/csvy,TRUE,https://github.com/leeper/csvy,53237,29,1533079588
ctmm,"Functions for identifying, fitting, and applying continuous-space, continuous-time stochastic movement models to animal tracking data.
The package is described in Calabrese et al (2016) <doi:10.1111/2041-210X.12559> and its methods are based on those introduced in
Fleming & Calabrese et al (2014) <doi:10.1086/675504>,
Fleming et al (2014) <doi:10.1111/2041-210X.12176>,
Fleming et al (2015) <doi:10.1890/14-2010.1>,
Fleming et al (2016) <doi:10.1890/15-1607>,
Péron & Fleming et al (2016) <doi:10.1186/s40462-016-0084-7>,
Fleming & Calabrese (2016) <doi:10.1111/2041-210X.12673>,
Péron et al (2017) <doi:10.1002/ecm.1260>,
Fleming et al (2017) <doi:10.1016/j.ecoinf.2017.04.008>,
Fleming et al (2018) <doi:10.1002/eap.1704>,
and
Winner & Noonan et al (2018) <doi:10.1111/2041-210X.13027>.",2019-02-11,Christen H. Fleming,"https://github.com/ctmm-initiative/ctmm,
http://biology.umd.edu/movement.html",TRUE,https://github.com/ctmm-initiative/ctmm,30905,7,1554072730
ctrdata,"Provides functions for querying, retrieving and analysing protocol- and results-related information on clinical trials from two public registers, the European Union Clinical Trials Register (EUCTR, <https://www.clinicaltrialsregister.eu/>) and ClinicalTrials.gov (CTGOV, <https://clinicaltrials.gov/>). The information is transformed and then stored in a database (mongo). Functions are provided for accessing and analysing the locally stored information on the clinical trials, as well as for identifying duplicate records. The package is motivated by the need for aggregating and trend-analysing the design, conduct and outcomes across clinical trials.",2019-03-27,Ralf Herold (<https://orcid.org/0000-0002-8148-6748>),https://github.com/rfhb/ctrdata,TRUE,https://github.com/rfhb/ctrdata,732,6,1553792234
CTRE,"
Models extremes of 'bursty' time series via
Continuous Time Random Exceedances (CTRE).
See <arXiv:1802.05218>, K. Hees, S. Nayak, P.Straka, 2018.",2018-05-07,Peter Straka,https://unsw-math.github.io/CTRE/,TRUE,https://github.com/unsw-math/ctre,2910,0,1525923368
ctsem,"A hierarchical, multivariate, continuous (and discrete) time dynamic modelling
package for panel and time series data, using stochastic differential
equations. Contains a faster frequentist set of functions using OpenMx for
single subject and mixed-effects (random intercepts only) structural
equation models, or a hierarchical Bayesian implementation using Stan that
allows for random effects and non-linearity over all model parameters.
Allows for modelling of multiple noisy measurements of multiple stochastic
processes, time varying input / event covariates, and time invariant
covariates used to predict the parameters. Bayesian formulation not available on
32 bit Windows systems.",2019-02-03,Charles Driver,https://github.com/cdriveraus/ctsem,TRUE,https://github.com/