GDForOneDim
#Generate one dimensional data
X<-sample(x=1:100,size=20)
#Simulate labels
Y<-sample(x=c(-1,1),size=20,replace=TRUE)
#Here's what my data looks like
data<-as.matrix(cbind(X,Y))
#I am going to use squared loss i.e. Loss=(y - h(x))^2 where h(x)=WX
#You could also do a slight variant with a bias term in the hypothesis,
#i.e. h(x)=WX+b (see the sketch at the end of this file)
#Estimate the gradient as follows
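#Derivation: for a single example, d/dW (y - Wx)^2 = -2x(y - Wx); the
#batch gradient sums this quantity over all examples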
grad<-function(x,y,w)
{
gradientPt<- -2*x*(y-x*w)
gradient<-sum(gradientPt)
return(gradient)
}
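#Quick sanity check (my addition, not part of the original gist): at w=0
#the batch gradient reduces to -2*sum(x*y), so the two sides should agree
stopifnot(isTRUE(all.equal(grad(X,Y,0),-2*sum(X*Y))))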
#Perform the descent
grad.descent<-function(X,Y,W,maxitr)
{
#Set the number of examples
nEG<-nrow(as.matrix(X))
#Set the alpha parameter; the update below is linear in W, so it only
#converges when alpha < nEG/sum(X^2), which for X drawn from 1:100 is
#on the order of 1e-4 (larger values make W diverge)
alpha<-1e-4
#Run the descent for maxitr iterations
for (i in 1:maxitr)
{
W <- W-(alpha*grad(X,Y,W))/nEG
print(W)
}
return(W)
}
#Define the hypothesis
h<-function(X,W)
{return(X*W)}
#Find the loss
Loss<-matrix(data=0,nrow=20,ncol=1)
W<-grad.descent(X,Y,1,10)
for(j in 1:20)
{
Loss[j]<-(Y[j]-h(X[j],W))^2
}
#Find the mean of the squared loss
mean(Loss)
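#Sanity check (my addition, not part of the original gist): for squared
#loss with h(x)=WX and no bias, the minimizer has the closed form
#W* = sum(X*Y)/sum(X^2), which gradient descent should approach
Wstar<-sum(X*Y)/sum(X^2)
print(Wstar)
#The same value via R's built-in least squares fit with no intercept
print(coef(lm(Y~0+X)))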
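#Sketch of the bias-term variant mentioned above (my addition; the helper
#name grad.descent.bias is hypothetical). With h(x)=WX+b the squared loss
#has two partial derivatives, summed over examples:
#dLoss/dW = -2x(y-Wx-b) and dLoss/db = -2(y-Wx-b)
grad.descent.bias<-function(X,Y,W,b,alpha,maxitr)
{
nEG<-length(X)
for (i in 1:maxitr)
{
#Residuals under the current parameters; both updates use the same ones
res<-Y-(X*W+b)
W<-W-(alpha*sum(-2*X*res))/nEG
b<-b-(alpha*sum(-2*res))/nEG
}
return(c(W,b))
}
#Example usage: same small alpha as above, more iterations since the
#bias direction moves slowly at this step size
Wb<-grad.descent.bias(X,Y,1,0,1e-4,100)
print(Wb)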