spiral data classification
# Written against the pre-0.4 PyTorch API, hence torch.autograd.Variable.
import io, sys, math, random
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn
from torch.autograd import Variable
from torch import optim

sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')  # force UTF-8 output

numSamples = 500        # total number of points across both spirals
batchSize = 20
layerSizes = [8, 8, 8]  # widths of the hidden layers
learning_rate = 0.05
np.random.seed(0)
def generateSpiralData(numSamples, noise):
    """Generate two interleaved spirals with numSamples // 2 points each."""
    points = []
    n = numSamples // 2
    def genSpiral(deltaT):
        for i in range(n):
            r = i / n * 5                            # radius grows linearly
            t = 1.75 * i / n * 2 * math.pi + deltaT  # 1.75 turns, phase-shifted by deltaT
            x = r * math.sin(t) + random.uniform(-1, 1) * noise
            y = r * math.cos(t) + random.uniform(-1, 1) * noise
            points.append([x, y])
    genSpiral(0)        # first spiral (label 0)
    genSpiral(math.pi)  # second spiral, rotated half a turn (label 1)
    return np.array(points)
points = generateSpiralData(numSamples, 0.0)
labels = np.hstack((np.zeros(numSamples // 2, dtype=int), np.ones(numSamples // 2, dtype=int)))
# Randomly take half of each spiral for training; the complement becomes the test set.
select = np.random.choice(numSamples // 2, numSamples // 4, replace=False)
select = np.hstack((select, np.random.choice(numSamples // 2, numSamples // 4, replace=False) + numSamples // 2))
select_comp = np.in1d(np.arange(numSamples), select, invert=True)
X_train = points[select]
y_train = labels[select]
X_test = points[select_comp]
y_test = labels[select_comp]
plt.scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1])
plt.scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1])
plt.scatter(X_test[y_test == 0, 0], X_test[y_test == 0, 1], marker='.')
plt.scatter(X_test[y_test == 1, 0], X_test[y_test == 1, 1], marker='.')
plt.show()
dtype = torch.FloatTensor

# Build the weights and biases by hand. He initialization (std = sqrt(2 / fan_in))
# matches the ReLU activations used in forward() below.
parameters = []
prevSize = 2
for layerSize in layerSizes:
    weight = Variable(torch.randn(prevSize, layerSize).type(dtype) * np.sqrt(2.0 / prevSize), requires_grad=True)
    bias = Variable(torch.zeros(layerSize).type(dtype), requires_grad=True)
    parameters.append([weight, bias])
    prevSize = layerSize
# Output layer: one logit per class.
weight = Variable(torch.randn(prevSize, 2).type(dtype) * np.sqrt(2.0 / prevSize), requires_grad=True)
bias = Variable(torch.zeros(2).type(dtype), requires_grad=True)
parameters.append([weight, bias])

# Flatten the [weight, bias] pairs into the single list Adagrad expects.
optimizer = optim.Adagrad([w for l in parameters for w in l], lr=learning_rate)
criterion = torch.nn.CrossEntropyLoss()
def forward(batch):
    temp = Variable(torch.FloatTensor(batch).type(dtype), requires_grad=False)
    for weight, bias in parameters:
        temp_ = torch.mm(temp, weight) + bias.unsqueeze(0).expand(temp.size(0), bias.size(0))
        temp = torch.nn.functional.relu(temp_)
    # Return the last pre-activation: CrossEntropyLoss expects raw logits,
    # so the final layer's ReLU output is discarded.
    return temp_
i = 0
while True:  # runs until interrupted
    # X_train holds numSamples // 2 points; sample a minibatch from it.
    batch_indices = np.random.choice(numSamples // 2, batchSize, replace=False)
    out = forward(X_train[batch_indices])
    target = Variable(torch.LongTensor(y_train[batch_indices]))
    loss = criterion(out, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if i % 1000 == 0:
        # Report accuracy on the full training and test sets.
        out = forward(X_train)
        target = Variable(torch.LongTensor(y_train))
        print('train:', i, out.max(1)[1].eq(target).sum().data.numpy()[0] / X_train.shape[0], flush=True, end='; ')
        out = forward(X_test)
        target = Variable(torch.LongTensor(y_test))
        print('test:', i, out.max(1)[1].eq(target).sum().data.numpy()[0] / X_test.shape[0], flush=True)
    i += 1
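
The script above targets the pre-0.4 Variable API. For comparison, here is a minimal sketch of the same architecture and training step in current PyTorch (torch >= 1.0 assumed); the layer sizes, optimizer, and learning rate mirror the script, but nn.Linear uses its own default initialization, so the runs are equivalent in shape rather than bit-for-bit.

import torch
import torch.nn as nn

# Same 2 -> 8 -> 8 -> 8 -> 2 MLP as the hand-built parameter list above.
model = nn.Sequential(
    nn.Linear(2, 8), nn.ReLU(),
    nn.Linear(8, 8), nn.ReLU(),
    nn.Linear(8, 8), nn.ReLU(),
    nn.Linear(8, 2),  # raw logits; CrossEntropyLoss applies log-softmax itself
)
optimizer = torch.optim.Adagrad(model.parameters(), lr=0.05)
criterion = nn.CrossEntropyLoss()

def train_step(X_batch, y_batch):
    # X_batch: (B, 2) float array; y_batch: (B,) int array of class labels.
    out = model(torch.as_tensor(X_batch, dtype=torch.float32))
    loss = criterion(out, torch.as_tensor(y_batch, dtype=torch.long))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()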