mi.R
set.seed(23)
# Create a positive-definite matrix.
# A Wishart random variable plus the identity matrix fulfils this; any positive-definite matrix is invertible.
m <- rWishart(1, 5, diag(5))[,,1] + diag(5)
# set the bottom two rows and right two columns to 1
# the top-left 3x3 block stays positive definite, but the complete matrix becomes singular, i.e. not invertible
m[4:5, ] <- 1
m[, 4:5] <- 1
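A quick check (not part of the original snippet): inverting the top-left 3x3 block works, while inverting the complete matrix fails because rows 4 and 5 are identical.
solve(m[1:3, 1:3])  # fine: principal submatrices of a positive-definite matrix are positive definite
tryCatch(solve(m), error = function(e) message("not invertible: ", conditionMessage(e)))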
gd_logreg.R
# logistic function
sig <- function(b, x) 1 / (1 + exp(-b * x))
# gradient of the negative log-likelihood w.r.t. the scalar coefficient b
grad <- function(y, b, x) -(y - sig(b, x)) %*% x
# simulate data so the snippet runs on its own (assumed; the preview does not define x and y)
set.seed(23)
x <- rnorm(100)
y <- rbinom(100, 1, sig(2, x))
bold <- b <- 1
repeat
{
  bold <- b
  b <- b - 0.01 * grad(y, b, x)[1]  # small fixed step size (assumed) to keep the updates stable
  if (abs(b - bold) < 0.000001) break
}
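As a sanity check (not part of the gist), the estimate should roughly match R's built-in logistic regression without an intercept:
coef(glm(y ~ x - 1, family = binomial))  # should be close to b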
diffusion.R
# create a random matrix with non-negative entries (can be anything non-negative)
affin <- matrix(runif(100), 10, 10)
# drop self-loops
diag(affin) <- 0
# create a column-stochastic transition matrix
trans <- sweep(affin, 2, colSums(affin), "/")
# create a matrix of initial distributions where every column is one observation
p0 <- matrix(runif(10 * 10), nrow = 10)
# column-normalize the initial distributions
p0 <- sweep(p0, 2, colSums(p0), "/")
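A minimal sketch of the diffusion itself (an assumed continuation, since the preview stops here): repeatedly push the distributions through the transition matrix until they stop changing.
p <- p0
repeat
{
  p.new <- trans %*% p
  if (max(abs(p.new - p)) < 1e-10) break
  p <- p.new
}
# every column of p now approximates the stationary distribution of the chain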
bernoulli_mle.R
library(microbenchmark)
# derivative of the negative Bernoulli log-likelihood in p
# (up to the positive factor 1 / (p * (1 - p)))
bernoulli.loglik.derivative <- function(p, dat)
{
  -(sum(dat) - length(dat) * p)
}
# note: this masks base::optim
optim <- function(dat)
{
  p.hat <- 1
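The preview cuts off here. For reference, a minimal sketch (with hypothetical data) showing that the root of the derivative above is the sample mean, i.e. the closed-form Bernoulli MLE:
set.seed(23)
dat <- rbinom(100, 1, 0.3)  # hypothetical data, only for illustration
mean(dat)  # closed-form MLE
uniroot(bernoulli.loglik.derivative, c(1e-6, 1 - 1e-6), dat = dat)$root  # same value numerically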
gmm_em.R
## Example code for clustering on a three-component mixture model using the EM algorithm.
### First we load some libraries and define some useful functions
library(mvtnorm)
library(MASS)
# Create a 'true' data set (an easy one)
.create.data <- function(n)
{
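The preview is truncated here. Below is a minimal sketch of the EM iteration for a three-component Gaussian mixture (an assumed implementation, not necessarily the gist's; it relies on the mvtnorm package loaded above):
em.gmm <- function(X, K = 3, iter = 50)
{
  n <- nrow(X); d <- ncol(X)
  # crude initialization: uniform weights, means at random data points, identity covariances
  pi.k <- rep(1 / K, K)
  mu <- X[sample(n, K), , drop = FALSE]
  sigma <- replicate(K, diag(d), simplify = FALSE)
  for (it in seq_len(iter))
  {
    # E-step: responsibilities of each component for each observation
    dens <- sapply(seq_len(K), function(k) pi.k[k] * dmvnorm(X, mu[k, ], sigma[[k]]))
    gamma <- dens / rowSums(dens)
    # M-step: update weights, means and covariances
    nk <- colSums(gamma)
    pi.k <- nk / n
    for (k in seq_len(K))
    {
      mu[k, ] <- colSums(gamma[, k] * X) / nk[k]
      Xc <- sweep(X, 2, mu[k, ])
      sigma[[k]] <- crossprod(Xc * gamma[, k], Xc) / nk[k]
    }
  }
  list(pi = pi.k, mu = mu, sigma = sigma, gamma = gamma)
}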
gradient_descent.jl
using Gadfly
using Distributions
# gradient of the squared-error loss w.r.t. the scalar coefficient b
function df(x, y, b)
    sum(-(y - x * b)' * x)
end
function gd()
    rnorm = Normal()
    x = rand(rnorm, 100)
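The Julia preview stops here. For completeness, the same gradient-descent loop written out end to end in R (a sketch with assumed data and step size, mirroring the loop in gd_logreg.R above):
set.seed(23)
x <- rnorm(100)
y <- 2 * x + rnorm(100, sd = 0.1)
df <- function(x, y, b) sum(-(y - x * b) * x)  # gradient of the squared-error loss
bold <- b <- 0
repeat
{
  bold <- b
  b <- b - 0.001 * df(x, y, b)  # assumed step size
  if (abs(b - bold) < 1e-8) break
}
b  # should be close to the true coefficient 2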