@matheushent · Created February 5, 2019 13:16
Iris classification: a single-hidden-layer MLP on the scikit-learn Iris dataset, written with low-level TensorFlow 1.x ops.
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
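# X: (150, 4) feature matrix; y: (150,) integer labels in {0, 1, 2}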
from sklearn.preprocessing import StandardScaler
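# Standardize each feature to zero mean and unit variance.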
scaler_x = StandardScaler()
X = scaler_x.fit_transform(X)
from sklearn.preprocessing import OneHotEncoder
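# One-hot encode the integer labels so they line up with the 3-way softmax output.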
onehot = OneHotEncoder()  # note: the old categorical_features argument has been removed from scikit-learn
y = y.reshape(-1, 1)
y = onehot.fit_transform(y).toarray()
from sklearn.model_selection import train_test_split
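# Hold out 30% of the samples as a test set.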
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
import tensorflow as tf
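# Written against the TensorFlow 1.x API (tf.placeholder / tf.Session);
# under TensorFlow 2.x, use tf.compat.v1 with eager execution disabled.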
import numpy as np
import math
in_neuron = X.shape[1]
hidden_neuron = math.ceil((X.shape[1] + y.shape[1])/2)
out_neuron = y.shape[1]
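# For Iris this gives 4 input neurons, ceil((4 + 3) / 2) = 4 hidden neurons, and 3 output neurons.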
weight = {'hidden': tf.Variable(tf.random_normal([in_neuron, hidden_neuron])),
          'out': tf.Variable(tf.random_normal([hidden_neuron, out_neuron]))}
bias = {'hidden': tf.Variable(tf.random_normal([hidden_neuron])),
        'out': tf.Variable(tf.random_normal([out_neuron]))}
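# Placeholders for mini-batches of features and one-hot labels.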
xph = tf.placeholder('float', [None, in_neuron])
yph = tf.placeholder('float', [None, out_neuron])
def mlp(x, weight, bias):
    # One hidden layer with ReLU; the output layer stays linear so it can be
    # fed to softmax_cross_entropy_with_logits_v2 as raw logits.
    hidden_layer = tf.add(tf.matmul(x, weight['hidden']), bias['hidden'])
    hidden_layer_activation = tf.nn.relu(hidden_layer)
    out_layer = tf.add(tf.matmul(hidden_layer_activation, weight['out']), bias['out'])
    return out_layer
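# Build the graph: logits from the MLP, softmax cross-entropy loss, and an Adam step.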
model = mlp(xph, weight, bias)
error = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=model, labels=yph))
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(error)
batch_size = 8
total_batch = int(len(X_train) / batch_size)
# The training set is not reshuffled between epochs, so the mini-batches
# only need to be built once.
X_batches = np.array_split(X_train, total_batch)
y_batches = np.array_split(y_train, total_batch)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(3000):
        mean_error = 0
        for i in range(total_batch):
            X_batch, y_batch = X_batches[i], y_batches[i]
            _, cost = sess.run([optimizer, error], feed_dict={xph: X_batch, yph: y_batch})
            mean_error += cost / total_batch
        if epoch % 500 == 0:
            print('Epoch: ' + str(epoch + 1) + ' error: ' + str(mean_error))
    # Pull the learned parameters out of the session as NumPy arrays.
    final_weight, final_bias = sess.run([weight, bias])
print("\n\n")
print(final_weight)
print("\n")
print(final_bias)
print("\n\n")
# Predictions: rebuild the network with the learned parameters and score the test set.
# final_weight and final_bias are plain NumPy arrays here, so tf.matmul/tf.add treat
# them as constants and no variable initialization is needed.
pred = mlp(xph, final_weight, final_bias)
with tf.Session() as sess:
    logits = sess.run(pred, feed_dict={xph: X_test})
    probabilities = sess.run(tf.nn.softmax(logits))
    predicted_classes = sess.run(tf.argmax(probabilities, 1))
print(predicted_classes)
print("\n\n")
y_test = np.argmax(y_test, axis=1)  # convert the one-hot labels back to class indices
print(y_test)
print("\n\n")
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, predicted_classes))
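# Optional: a confusion matrix gives a per-class view of the errors,
# using scikit-learn's confusion_matrix(y_true, y_pred).
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, predicted_classes))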