@flowpoint
Created September 15, 2017 20:40
dnc_rnn_tests
# coding: utf-8
# In[1]:
import dnc
import tensorflow as tf
import numpy as np
import random
# In[2]:
batch_size = 10
learning_rate = 0.1
num_steps = 5  # only referenced by the unused placeholders below
num_classes = 2
# In[3]:
num_batches = 10
xor = [[0, 0, 0],
       [0, 1, 1],
       [1, 0, 1],
       [1, 1, 0]]
# Draw num_batches * batch_size rows (with replacement) from the xor truth table.
xorset = random.choices(xor, k=num_batches * batch_size)
xorset = np.array(xorset, dtype='float32').reshape(num_batches, batch_size, 3)
dataset = xorset[:, :, 0:2]  # inputs: (num_batches, batch_size, 2)
#print("dataset=\n" + str(dataset))
labels = xorset[:, :, 2].astype('int32')  # targets: (num_batches, batch_size)
print("labels=\n" + str(labels))
# In[4]:
def get_batch():
    for i in range(len(dataset)):
        yield dataset[i], labels[i]
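# In[ ]:
# Example use of get_batch (added sketch): each step yields one (batch_size, 2)
# input block and the matching (batch_size,) label vector.
bx, by = next(get_batch())
print(bx.shape, by.shape)  # -> (10, 2) (10,)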
# In[5]:
access_config = {
    "memory_size": 100,        # number of memory slots
    "word_size": num_classes,  # width of each slot
    "num_reads": 1,            # read heads
    "num_writes": 1,           # write heads
}
controller_config = {
    "hidden_size": 2,  # LSTM controller units
}
dataset_target_size = 10  # size of the DNC's output (logit) layer
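# In[ ]:
# Rough footprint check (added sketch, assuming the deepmind/dnc convention that
# the access module keeps a memory_size x word_size matrix per batch element):
print("memory matrix: %d x %d floats per example"
      % (access_config["memory_size"], access_config["word_size"]))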
# In[6]:
tf.reset_default_graph()
"""
Placeholders
"""
x = tf.placeholder(tf.int32, [batch_size, num_steps], name='input_placeholder')
y = tf.placeholder(tf.int32, [batch_size, num_steps], name='labels_placeholder')
#init_state = tf.zeros([batch_size, state_size])
"""
Inputs
"""
#rnn_inputs = tf.one_hot(x, num_classes)
"""
RNN
"""
#cell = tf.contrib.rnn.BasicRNNCell(state_size)
#rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state)
dncs = dnc.DNC(access_config, controller_config, dataset_target_size)
outputs, last_states = tf.nn.dynamic_rnn(
    cell=dncs,
    inputs=tf.convert_to_tensor(dataset),  # (batch, time, features) = (10, 10, 2)
    initial_state=dncs.initial_state(batch_size, dtype=tf.float32))
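# In[ ]:
# Shape check (added sketch): dynamic_rnn treats axis 0 as batch and axis 1 as
# time, so the logits should come out as (batch_size, time, dataset_target_size).
print(outputs.get_shape())  # expected: (10, 10, 10)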
# In[7]:
"""
Predictions, loss, training step
"""
with tf.variable_scope('softmax'):
    # Unused below (the DNC already emits logits of size dataset_target_size);
    # W's shape is fixed here so the variable at least builds.
    W = tf.get_variable('W', [dataset_target_size, num_classes])
    b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
# In[8]:
# labels take values in {0, 1} while the logits have depth dataset_target_size,
# so the upper classes simply go unused.
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=outputs)
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)
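# In[ ]:
# Accuracy op (added sketch; `predictions` and `accuracy` are names introduced
# here, not part of the original gist): arg-max over the logit axis, compared
# against the integer labels.
predictions = tf.cast(tf.argmax(outputs, axis=2), tf.int32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, labels), tf.float32))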
# In[ ]:
opt = tf.train.AdagradOptimizer(learning_rate)  # unused; train_step above already builds its own optimizer
# In[13]:
# `tf.Session.run?` help lookup from the notebook, commented out so the script
# also runs outside IPython:
# get_ipython().magic('pinfo tf.Session.run')
# In[23]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in get_batch():  # data is baked into the graph; batches only set the step count
        _, loss_val = sess.run([train_step, total_loss])
        print("loss = " + str(loss_val))
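# In[ ]:
# Longer training run (added sketch): reuses the graph built above plus the
# hypothetical `accuracy` op from the earlier sketch to see whether the DNC
# actually fits the xor mapping.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(200):
        _, loss_val = sess.run([train_step, total_loss])
    print("final loss:", loss_val, "accuracy:", sess.run(accuracy))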