## Gradient descent logistic regression code file
#' Euclidean Norm of a Vector
#'
#' @param x numeric vector
norm_vec <- function(x) {
  return(sqrt(sum(x^2)))
}
#' Gradient Step
#'
#' @param gradf handle to function that returns gradient of objective function
#' @param x current parameter estimate
#' @param t step-size
gradient_step <- function(gradf, t, x) {
  ## take one descent step: move against the gradient, scaled by the step-size
  return(x - t * gradf(x))
}
#' Gradient Descent (Fixed Step-Size)
#'
#' @param fx handle to function that returns objective function values
#' @param gradf handle to function that returns gradient of objective function
#' @param x0 initial parameter estimate
#' @param t step-size
#' @param max_iter maximum number of iterations
#' @param tol convergence tolerance
gradient_descent_fixed <- function(fx, gradf, x0, t, max_iter=1e2, tol=1e-3) {
  xtrace <- list(x0)
  ytrace <- fx(x0)
  x <- x0
  for (i in 1:max_iter) {
    x_new <- gradient_step(gradf, t, x)
    xtrace <- c(xtrace, list(x_new))
    ytrace <- c(ytrace, fx(x_new))
    ## stop once the relative change in the iterate falls below tol
    if (norm_vec(x_new - x) / max(norm_vec(x), 1) < tol) {
      x <- x_new
      break
    }
    x <- x_new
  }
  return(list(x = x, fx = fx(x), grad_norm = norm_vec(gradf(x)),
              xtrace = xtrace, ytrace = ytrace))
}
#' Backtracking
#'
#' @param fx handle to function that returns objective function values
#' @param x current parameter estimate
#' @param t current step-size
#' @param df the value of the gradient of objective function evaluated at the current x
#' @param alpha the backtracking parameter
#' @param beta the decrementing multiplier
backtrack <- function(fx, t, x, df, alpha=0.5, beta=0.9) {
  ## shrink t until the Armijo sufficient-decrease condition is satisfied
  while (fx(x - t * df) > fx(x) - alpha * t * norm_vec(df)^2) {
    t <- beta * t
  }
  return(t)
}
#' Gradient Descent (Backtracking Step-Size)
#'
#' @param fx handle to function that returns objective function values
#' @param gradf handle to function that returns gradient of objective function
#' @param x0 initial parameter estimate
#' @param max_iter maximum number of iterations
#' @param tol convergence tolerance
gradient_descent_backtrack <- function(fx, gradf, x0, max_iter=1e2, tol=1e-3) {
  xtrace <- list(x0)
  ytrace <- fx(x0)
  t <- 1
  x <- x0
  for (i in 1:max_iter) {
    step <- backtrack(fx, t, x, gradf(x))
    x_new <- gradient_step(gradf, step, x)
    xtrace <- c(xtrace, list(x_new))
    ytrace <- c(ytrace, fx(x_new))
    ## stop once the relative change in the iterate falls below tol
    if (norm_vec(x_new - x) / max(norm_vec(x), 1) < tol) {
      x <- x_new
      break
    }
    x <- x_new
  }
  return(list(x = x, fx = fx(x), grad_norm = norm_vec(gradf(x)),
              xtrace = xtrace, ytrace = ytrace))
}
#' Gradient Descent
#'
#' @param fx handle to function that returns objective function values
#' @param gradf handle to function that returns gradient of objective function
#' @param x0 initial parameter estimate
#' @param t step-size (if NULL, backtracking line search is used)
#' @param max_iter maximum number of iterations
#' @param tol convergence tolerance
gradient_descent <- function(fx, gradf, x0, t=NULL, max_iter=1e2, tol=1e-3) {
  ## wrapper: use a fixed step-size if t is given, otherwise backtracking
  if (is.null(t)) {
    return(gradient_descent_backtrack(fx, gradf, x0, max_iter, tol))
  } else {
    return(gradient_descent_fixed(fx, gradf, x0, t, max_iter, tol))
  }
}
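## Quick smoke test of the wrapper (an addition, not in the original gist;
## the quadratic objective and starting point are illustrative choices):
## minimizing f(x) = x^2 should drive the iterate toward 0 under both rules.
fit_fixed <- gradient_descent(function(x) x^2, function(x) 2 * x, x0 = 5, t = 0.1)
fit_bt    <- gradient_descent(function(x) x^2, function(x) 2 * x, x0 = 5)
fit_fixed$x  # close to 0
fit_bt$x     # close to 0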
#' Objective Function for Logistic Regression
#'
#' @param y binary response
#' @param X design matrix
#' @param beta regression coefficient vector
#' @param lambda regularization parameter
fx_logistic <- function(y, X, beta, lambda=0) {
  eta <- X %*% beta  # linear predictor
  ## negative log-likelihood plus an (optional) ridge penalty
  return(sum(-y * eta + log(1 + exp(eta))) + (lambda / 2) * norm_vec(beta)^2)
}
#' Gradient for Logistic Regression
#'
#' @param y binary response
#' @param X design matrix
#' @param beta regression coefficient vector
#' @param lambda regularization parameter
gradf_logistic <- function(y, X, beta, lambda=0) {
  p <- plogis(X %*% beta)  # fitted probabilities
  ## gradient of the negative log-likelihood plus the ridge penalty
  return(t(X) %*% (p - y) + lambda * beta)
}
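## Sketch of a finite-difference check for gradf_logistic (an addition, not
## part of the original gist; grad_check and eps are assumed names). It can
## be run on the simulated data from step 7 below, e.g. grad_check(y, X, beta0).
grad_check <- function(y, X, beta, eps = 1e-6) {
  num <- sapply(seq_along(beta), function(j) {
    e <- rep(0, length(beta))
    e[j] <- eps
    ## central difference of the objective along coordinate j
    (fx_logistic(y, X, beta + e) - fx_logistic(y, X, beta - e)) / (2 * eps)
  })
  max(abs(num - gradf_logistic(y, X, beta)))  # should be near 0
}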
## step 7: simulate data for a logistic regression problem
set.seed(12345)
n <- 100
p <- 2
X <- matrix(rnorm(n*p),n,p)
beta0 <- matrix(rnorm(p),p,1)
y <- (runif(n) <= plogis(X%*%beta0)) + 0
## step 8
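## The gist ends at step 8, so the fit below is an assumed completion, not
## the author's code: estimate beta by gradient descent with backtracking
## (t = NULL), starting from the zero vector.
fx    <- function(beta) fx_logistic(y, X, beta)
gradf <- function(beta) gradf_logistic(y, X, beta)
fit <- gradient_descent(fx, gradf, x0 = matrix(0, p, 1))
fit$x          # estimated coefficients (compare with beta0)
fit$grad_norm  # gradient norm at the final iterate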