Gunjan Chhablani (gchhablani)
@gchhablani
gchhablani / Autoencoder_PyTorch_Basic.py
Last active May 17, 2020 06:12
Autoencoder in PyTorch
import torch.nn as nn

class AE(nn.Module):
    def __init__(self):
        super(AE, self).__init__()
        # Encoder: 784 (a flattened 28x28 image) -> 50 -> 50
        self.encoder = nn.Sequential(nn.Linear(784,50), nn.ReLU(), nn.Linear(50,50), nn.ReLU())
        # Decoder back to 784; its first layer must accept the encoder's 50-dim code
        self.decoder = nn.Sequential(nn.Linear(50,50), nn.ReLU(), nn.Linear(50,50), nn.ReLU(), nn.Linear(50,784), nn.ReLU())

    def forward(self, inp):
        return self.decoder(self.encoder(inp))
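A minimal training sketch for the class above; the MNIST loader, optimizer, and hyperparameters here are assumptions for illustration, not part of the original gist:

import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms as T

# Hypothetical data pipeline: flattened MNIST digits in [0, 1]
loader = DataLoader(datasets.MNIST('.', download=True, transform=T.ToTensor()),
                    batch_size=128, shuffle=True)
model = AE()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.MSELoss()
for epoch in range(10):
    for x, _ in loader:
        x = x.view(x.size(0), -1)         # flatten 28x28 images to 784
        loss = criterion(model(x), x)     # reconstruction loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()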
import numpy as np
import matplotlib.pyplot as plt

# Set colors for the two classes (X1 and y are assumed defined earlier in the gist)
colors = {1: 'r', -1: 'b'}
fig = plt.figure()
fig.set_size_inches(10, 8)
ax = fig.add_subplot(1, 1, 1)
plt.scatter(X1[:,1], X1[:,2], marker='o', c=y)

# x2-coordinate of the line w.x + b = v at a given x1
# (v = 0 is the decision boundary, v = +1/-1 are the margins)
def hyperplane_value(x, w, b, v):
    return (-w[0]*x - b + v) / w[1]
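A hedged sketch of how hyperplane_value is typically used once w and b have been found by training (w, b, and the axis object ax are assumed from the surrounding code; the x-range is illustrative):

x_min, x_max = X1[:,1].min(), X1[:,1].max()
for v, style in [(-1, 'k'), (0, 'y--'), (1, 'k')]:    # margins and decision boundary
    ax.plot([x_min, x_max],
            [hyperplane_value(x_min, w, b, v),
             hyperplane_value(x_max, w, b, v)], style)
plt.show()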
# Making an SVM from scratch
# Length-to-weight map: ||w|| -> [w, b] for each feasible candidate
lwm = {}
# Sign transforms so every quadrant is tried for each magnitude of w
transforms = [[1,1], [-1,1], [-1,-1], [1,-1]]
# Step multiplier for scanning values of b in between weight steps
b_step_size = 2

# Split the data by class label
positive = []
negative = []
for i in range(len(y)):
    if y[i] == 0:
        negative.append(X[i])
    else:
        positive.append(X[i])
negative = np.array(negative)
positive = np.array(positive)
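For context, a rough sketch of the brute-force search these pieces feed into, following the common from-scratch SVM pattern (fit_svm, data_dict, and max_feature are illustrative names; data_dict would map {1: positive, -1: negative}):

def fit_svm(data_dict, max_feature, b_range_multiple=5):
    opt_dict = {}  # plays the same role as lwm above: ||w|| -> [w, b]
    step_sizes = [max_feature * s for s in (0.1, 0.01, 0.001)]
    latest_optimum = max_feature * 10
    for step in step_sizes:
        w = np.array([latest_optimum, latest_optimum])
        optimized = False
        while not optimized:
            for b in np.arange(-max_feature * b_range_multiple,
                               max_feature * b_range_multiple,
                               step * b_step_size):
                for t in transforms:          # try each sign combination of w
                    w_t = w * t
                    # keep (w_t, b) only if every point satisfies yi*(w.xi + b) >= 1
                    if all(yi * (np.dot(w_t, xi) + b) >= 1
                           for yi in data_dict for xi in data_dict[yi]):
                        opt_dict[np.linalg.norm(w_t)] = [w_t, b]
            if w[0] < 0:
                optimized = True
            else:
                w = w - step
        w, b = opt_dict[sorted(opt_dict)[0]]  # smallest ||w|| => widest margin
        latest_optimum = w[0] + step * 2      # restart near the winner, finer step
    return w, b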
# Perceptron prediction: threshold the weighted input sum at zero
def predict(Xi, weights):
    activation = 0
    for i in range(len(weights)):
        activation += weights[i] * Xi[i]
    return 1.0 if activation >= 0 else 0.0

# Training using a 0-1 loss and stochastic gradient descent
def train(data, labels, lr, epochs):
    weights = np.random.randn(data.shape[1])
    for epoch in range(epochs):
        error = 0.0
        # the gist preview cuts off here; the lines below follow the
        # standard perceptron update rule
        for i in range(len(data)):
            delta = labels[i] - predict(data[i], weights)
            error += delta ** 2
            weights += lr * delta * data[i]
    return weights
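An illustrative run on the positive/negative split built earlier. Labels follow the 0/1 convention predict uses, and X is assumed to carry a bias column of ones (as the X1[:,1]/X1[:,2] indexing above suggests); lr and epochs are arbitrary:

data = np.concatenate([positive, negative])
labels = np.concatenate([np.ones(len(positive)), np.zeros(len(negative))])
w = train(data, labels, lr=0.1, epochs=100)
accuracy = np.mean([predict(xi, w) == yi for xi, yi in zip(data, labels)])
print('training accuracy:', accuracy)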
@gchhablani
gchhablani / inventory2
Last active February 28, 2018 19:07
This gist has the inventory file, which uses groups and uniform variable names for different hosts. It also contains a playbook that prints out all the variables of the hosts.
[loadbalancer]
lb ansible_connection=local ansible_python_interpreter=/usr/bin/python2
[mainserver]
ms ansible_connection=local ansible_python_interpreter=/usr/bin/python2
[executionnodes]
en1 port=8081
en2 port=8082
en3 port=8083
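The playbook the description mentions is not shown in the preview; a minimal sketch of one that dumps every variable Ansible knows for each host might look like this (the filename is illustrative):

# print_vars.yml
- hosts: all
  tasks:
    - name: Print all variables for this host
      debug:
        var: hostvars[inventory_hostname]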