Create a gist now

Instantly share code, notes, and snippets.

Embed
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@p9anand

This comment has been minimized.

Show comment
Hide comment
@p9anand

p9anand Jul 4, 2016

I am getting the error
`TypeError: list object is not an iterator`
on both Python 2.7 and Python 3.
The code follows:

# %matplotlib inline  (IPython magic -- only valid inside a notebook)

import pymc3 as pm
import theano.tensor as T
import theano
import sklearn
import numpy as np
import matplotlib.pyplot as plt

import seaborn as sns

from sklearn import datasets
from sklearn.preprocessing import scale
# sklearn.cross_validation was deprecated and removed in scikit-learn 0.20;
# prefer model_selection and fall back only on very old installs.
try:
    from sklearn.model_selection import train_test_split
except ImportError:
    from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_moons, make_circles, make_classification

# Two interleaved half-moons: a simple non-linear binary classification task.
X, Y = make_moons(noise=0.2, random_state=0, n_samples=1000)
X = scale(X)  # standardize features to zero mean, unit variance
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)

# Turn inputs and outputs into Theano shared variables so that we can
# swap in the test set later without rebuilding the model graph.
ann_input = theano.shared(X_train)
ann_output = theano.shared(Y_train)

n_hidden = 5  # units per hidden layer

# Initialize random weights (used as test values / starting points below).
init_1 = np.random.randn(X.shape[1], n_hidden)
init_2 = np.random.randn(n_hidden, n_hidden)
init_out = np.random.randn(n_hidden)

with pm.Model() as neural_network:
    # Weights from input to hidden layer
    weights_in_1 = pm.Normal('w_in_1', 0, sd=1,
                             shape=(X.shape[1], n_hidden),
                             testval=init_1)

    # Weights from 1st to 2nd hidden layer
    weights_1_2 = pm.Normal('w_1_2', 0, sd=1,
                            shape=(n_hidden, n_hidden),
                            testval=init_2)

    # Weights from hidden layer to output
    weights_2_out = pm.Normal('w_2_out', 0, sd=1,
                              shape=(n_hidden,),
                              testval=init_out)

    # Build the network: two tanh hidden layers, sigmoid output probability.
    act_1 = T.tanh(T.dot(ann_input, weights_in_1))
    act_2 = T.tanh(T.dot(act_1, weights_1_2))
    act_out = T.nnet.sigmoid(T.dot(act_2, weights_2_out))

    # Bernoulli likelihood on the observed binary class labels.
    out = pm.Bernoulli('data',
                       act_out,
                       observed=ann_output)

with neural_network:
    # Metropolis mixes slowly for this many parameters but is simple;
    # discard the first half of the trace as burn-in.
    step = pm.Metropolis()
    trace = pm.sample(10000, step=step)[5000:]

# Replace shared variables with the testing set
# (note that using this trick we could be streaming ADVI for big data).
ann_input.set_value(X_test)
ann_output.set_value(Y_test)

# Create posterior predictive samples.
ppc = pm.sample_ppc(trace, model=neural_network, samples=500)

# Average over posterior draws; classify as 1 when P(y=1) > 0.5.
pred = ppc['data'].mean(axis=0) > 0.5

plt.scatter(X_test[pred == 0, 0], X_test[pred == 0, 1])
plt.scatter(X_test[pred == 1, 0], X_test[pred == 1, 1], color='r')
plt.title('Predicted labels in testing set')

print('Accuracy = {}%'.format((Y_test == pred).mean() * 100))

import theano.tensor as tt

# Set the shared variables back to the original training data to retrain.
ann_input.set_value(X_train)
ann_output.set_value(Y_train)

# Tensors and RVs that will use mini-batches.
# NOTE: pm.variational.advi_minibatch expects plain Python lists here;
# wrapping theano/pymc3 objects in np.array() is unnecessary and fragile.
minibatch_tensors = [ann_input, ann_output]
minibatch_RVs = [out]

# Generator that yields a random mini-batch on each call to next().
def create_minibatch(data, batch_size=50):
    """Yield random mini-batches of *data* forever.

    The original version used ``return`` inside the ``while True`` loop,
    which made the call return a single indexed array instead of a
    generator object.  ``advi_minibatch`` calls ``next()`` on each entry
    of ``minibatches``, hence the reported
    ``TypeError: list object is not an iterator``.  Using ``yield`` fixes
    this.

    Parameters
    ----------
    data : indexable (e.g. np.ndarray)
        The full dataset to sample from.
    batch_size : int, optional
        Number of rows per mini-batch (default 50, as in the original).
    """
    rng = np.random.RandomState(0)  # fixed seed: reproducible batch stream
    while True:
        # Sample `batch_size` row indices with replacement each iteration.
        ixs = rng.randint(len(data), size=batch_size)
        yield data[ixs]

# One generator per mini-batch tensor, in the same order as
# minibatch_tensors: inputs first, then outputs.
minibatches = [
    create_minibatch(X_train),
    create_minibatch(Y_train),
]

total_size = len(Y_train)

# %%time  (IPython cell magic -- only valid inside a notebook)

print('running mini batch....')
with neural_network:
    # Run mini-batch ADVI; total_size rescales the likelihood so each
    # mini-batch stands in for the full dataset.
    v_params = pm.variational.advi_minibatch(
        n=500, minibatch_tensors=minibatch_tensors,
        minibatch_RVs=minibatch_RVs, minibatches=minibatches,
        total_size=total_size, learning_rate=1e-2, epsilon=1.0
    )

print('final...stage')
with neural_network:
    # Draw samples from the fitted variational posterior.
    trace = pm.variational.sample_vp(v_params, draws=5000)

p9anand commented Jul 4, 2016

I am getting the error
`TypeError: list object is not an iterator`
on both Python 2.7 and Python 3.
The code follows:

# %matplotlib inline  (IPython magic -- only valid inside a notebook)

import pymc3 as pm
import theano.tensor as T
import theano
import sklearn
import numpy as np
import matplotlib.pyplot as plt

import seaborn as sns

from sklearn import datasets
from sklearn.preprocessing import scale
# sklearn.cross_validation was deprecated and removed in scikit-learn 0.20;
# prefer model_selection and fall back only on very old installs.
try:
    from sklearn.model_selection import train_test_split
except ImportError:
    from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_moons, make_circles, make_classification

# Two interleaved half-moons: a simple non-linear binary classification task.
X, Y = make_moons(noise=0.2, random_state=0, n_samples=1000)
X = scale(X)  # standardize features to zero mean, unit variance
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)

# Turn inputs and outputs into Theano shared variables so that we can
# swap in the test set later without rebuilding the model graph.
ann_input = theano.shared(X_train)
ann_output = theano.shared(Y_train)

n_hidden = 5  # units per hidden layer

# Initialize random weights (used as test values / starting points below).
init_1 = np.random.randn(X.shape[1], n_hidden)
init_2 = np.random.randn(n_hidden, n_hidden)
init_out = np.random.randn(n_hidden)

with pm.Model() as neural_network:
    # Weights from input to hidden layer
    weights_in_1 = pm.Normal('w_in_1', 0, sd=1,
                             shape=(X.shape[1], n_hidden),
                             testval=init_1)

    # Weights from 1st to 2nd hidden layer
    weights_1_2 = pm.Normal('w_1_2', 0, sd=1,
                            shape=(n_hidden, n_hidden),
                            testval=init_2)

    # Weights from hidden layer to output
    weights_2_out = pm.Normal('w_2_out', 0, sd=1,
                              shape=(n_hidden,),
                              testval=init_out)

    # Build the network: two tanh hidden layers, sigmoid output probability.
    act_1 = T.tanh(T.dot(ann_input, weights_in_1))
    act_2 = T.tanh(T.dot(act_1, weights_1_2))
    act_out = T.nnet.sigmoid(T.dot(act_2, weights_2_out))

    # Bernoulli likelihood on the observed binary class labels.
    out = pm.Bernoulli('data',
                       act_out,
                       observed=ann_output)

with neural_network:
    # Metropolis mixes slowly for this many parameters but is simple;
    # discard the first half of the trace as burn-in.
    step = pm.Metropolis()
    trace = pm.sample(10000, step=step)[5000:]

# Replace shared variables with the testing set
# (note that using this trick we could be streaming ADVI for big data).
ann_input.set_value(X_test)
ann_output.set_value(Y_test)

# Create posterior predictive samples.
ppc = pm.sample_ppc(trace, model=neural_network, samples=500)

# Average over posterior draws; classify as 1 when P(y=1) > 0.5.
pred = ppc['data'].mean(axis=0) > 0.5

plt.scatter(X_test[pred == 0, 0], X_test[pred == 0, 1])
plt.scatter(X_test[pred == 1, 0], X_test[pred == 1, 1], color='r')
plt.title('Predicted labels in testing set')

print('Accuracy = {}%'.format((Y_test == pred).mean() * 100))

import theano.tensor as tt

# Set the shared variables back to the original training data to retrain.
ann_input.set_value(X_train)
ann_output.set_value(Y_train)

# Tensors and RVs that will use mini-batches.
# NOTE: pm.variational.advi_minibatch expects plain Python lists here;
# wrapping theano/pymc3 objects in np.array() is unnecessary and fragile.
minibatch_tensors = [ann_input, ann_output]
minibatch_RVs = [out]

# Generator that yields a random mini-batch on each call to next().
def create_minibatch(data, batch_size=50):
    """Yield random mini-batches of *data* forever.

    The original version used ``return`` inside the ``while True`` loop,
    which made the call return a single indexed array instead of a
    generator object.  ``advi_minibatch`` calls ``next()`` on each entry
    of ``minibatches``, hence the reported
    ``TypeError: list object is not an iterator``.  Using ``yield`` fixes
    this.

    Parameters
    ----------
    data : indexable (e.g. np.ndarray)
        The full dataset to sample from.
    batch_size : int, optional
        Number of rows per mini-batch (default 50, as in the original).
    """
    rng = np.random.RandomState(0)  # fixed seed: reproducible batch stream
    while True:
        # Sample `batch_size` row indices with replacement each iteration.
        ixs = rng.randint(len(data), size=batch_size)
        yield data[ixs]

# One generator per mini-batch tensor, in the same order as
# minibatch_tensors: inputs first, then outputs.
minibatches = [
    create_minibatch(X_train),
    create_minibatch(Y_train),
]

total_size = len(Y_train)

# %%time  (IPython cell magic -- only valid inside a notebook)

print('running mini batch....')
with neural_network:
    # Run mini-batch ADVI; total_size rescales the likelihood so each
    # mini-batch stands in for the full dataset.
    v_params = pm.variational.advi_minibatch(
        n=500, minibatch_tensors=minibatch_tensors,
        minibatch_RVs=minibatch_RVs, minibatches=minibatches,
        total_size=total_size, learning_rate=1e-2, epsilon=1.0
    )

print('final...stage')
with neural_network:
    # Draw samples from the fitted variational posterior.
    trace = pm.variational.sample_vp(v_params, draws=5000)

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment