import theano.tensor as T
import numpy as np
import random
from fuel.streams import DataStream
from fuel.datasets import IterableDataset
from blocks.main_loop import MainLoop
from blocks.extensions import FinishAfter, Printing, Timing, ProgressBar
from blocks.algorithms import GradientDescent, Scale, Adam
from blocks.bricks import Linear, Logistic
from blocks.bricks.recurrent import LSTM
from blocks.bricks.cost import BinaryCrossEntropy
from blocks.initialization import Constant, IsotropicGaussian
from blocks.graph import ComputationGraph
from blocks.extensions.monitoring import DataStreamMonitoring, TrainingDataMonitoring
from blocks.monitoring import aggregation

# Task: predict the parity (number of ones mod 2) of a random-length binary sequence.
seq_length = 100  # maximum sequence length
batch_size = 20
lstm_dim = 1      # a single LSTM unit suffices for parity


class MyDataset(IterableDataset):
    def __init__(self, nb_examples):
        super(MyDataset, self).__init__(self.generate_data(nb_examples))

    def generate_data(self, nb_examples):
        # x_batch: (length, batch_size, 1) random bits; y_batch: their per-sequence parity.
        x = []
        y = []
        batches = nb_examples / batch_size  # Python 2 integer division
        for i in xrange(batches):
            x_batch = np.random.randint(0, 2, (random.randint(1, seq_length), batch_size, 1))
            y_batch = x_batch.sum(axis=(0,)) % 2
            x.append(x_batch.astype('float32'))
            y.append(y_batch.astype('float32'))
        return {'x': x, 'y': y}
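
# Worked example of one generated batch (hypothetical values): with a drawn
# length of 3 and batch_size of 2, x_batch could be
#   [[[1], [0]],
#    [[1], [1]],
#    [[1], [1]]]
# i.e. the two sequences 1,1,1 and 0,1,1, whose parities give y_batch = [[1], [0]].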

# 20,000 training examples (1,000 batches of 20) and 100 test examples.
train_dataset = MyDataset(20000)
test_dataset = MyDataset(100)
stream = DataStream(dataset=train_dataset)
stream_test = DataStream(dataset=test_dataset)

# Symbolic inputs: x is (time, batch, 1), y is (batch, 1).
x = T.tensor3('x')
y = T.matrix('y')

# Linear projection into the LSTM's four gates, the LSTM itself, and a
# logistic readout from the hidden state at the last time step.
x_to_h = Linear(name='x_to_h', input_dim=1,
                output_dim=lstm_dim * 4, weights_init=IsotropicGaussian(0.01),
                biases_init=Constant(0.0))
x_transform = x_to_h.apply(x)
lstm = LSTM(lstm_dim, weights_init=IsotropicGaussian(0.01), biases_init=Constant(0.0))
h, c = lstm.apply(x_transform)
h_to_o = Linear(name='h_to_o', input_dim=lstm_dim, output_dim=1,
                weights_init=IsotropicGaussian(0.01), biases_init=Constant(0.0))
y_hat = h_to_o.apply(h[-1])
y_hat = Logistic().apply(y_hat)

cost = BinaryCrossEntropy().apply(y, y_hat)
cost.name = 'cost'

lstm.initialize()
x_to_h.initialize()
h_to_o.initialize()

cg = ComputationGraph(cost)
algorithm = GradientDescent(cost=cost, parameters=cg.parameters,
                            step_rule=Adam())

# Monitor the cost on the test stream and, once per epoch, the training cost
# together with the mean gradient and step norms.
test_monitor = DataStreamMonitoring(variables=[cost], data_stream=stream_test, prefix="test")
train_monitor = TrainingDataMonitoring(variables=[cost,
                                                  aggregation.mean(algorithm.total_gradient_norm),
                                                  aggregation.mean(algorithm.total_step_norm)],
                                       prefix="train", after_epoch=True)

main_loop = MainLoop(data_stream=stream, algorithm=algorithm,
                     extensions=[Timing(), test_monitor, train_monitor,
                                 FinishAfter(after_n_epochs=100), Printing(),
                                 ProgressBar()])
main_loop.run()
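
A possible extension, not part of the run logged below: besides the cross-entropy, the fraction of misclassified sequences can be tracked by thresholding y_hat at 0.5 (T.round and T.neq are standard Theano ops) and passing the resulting variable to both monitoring extensions:

error_rate = T.neq(T.round(y_hat), y).mean()   # per-batch 0/1 error
error_rate.name = 'error_rate'
# e.g. variables=[cost, error_rate] in DataStreamMonitoring / TrainingDataMonitoring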
-------------------------------------------------------------------------------
BEFORE FIRST EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: True
epochs_done: 0
iterations_done: 0
received_first_batch: False
resumed_from: None
training_started: True
Log records from the iteration 0:
test_cost: 0.693146348
time_initialization: 15.959610939
Epoch 0, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 1
iterations_done: 1000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 1000:
test_cost: 0.693330287933
time_read_data_this_epoch: 0.0188336372375
time_read_data_total: 0.0188336372375
time_train_this_epoch: 4.23400616646
time_train_total: 4.23400616646
train_cost: 0.693234026432
train_total_gradient_norm: 0.0903544798493
train_total_step_norm: 0.0015922409948
Epoch 0, step 1000 |##################################################################################################| Elapsed Time: 0:00:04
Epoch 1, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 2
iterations_done: 2000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 2000:
test_cost: 0.693334937096
time_read_data_this_epoch: 0.0177981853485
time_read_data_total: 0.0366318225861
time_train_this_epoch: 4.26552772522
time_train_total: 8.49953389168
train_cost: 0.693228721619
train_total_gradient_norm: 0.0903861820698
train_total_step_norm: 0.00169123604428
Epoch 1, step 1000 |##################################################################################################| Elapsed Time: 0:00:04
Epoch 2, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 3
iterations_done: 3000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 3000:
test_cost: 0.693348705769
time_read_data_this_epoch: 0.0172114372253
time_read_data_total: 0.0538432598114
time_train_this_epoch: 4.29980802536
time_train_total: 12.799341917
train_cost: 0.693181455135
train_total_gradient_norm: 0.0905416682363
train_total_step_norm: 0.0032458701171
Epoch 2, step 1000 |##################################################################################################| Elapsed Time: 0:00:04
Epoch 3, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 4
iterations_done: 4000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 4000:
test_cost: 0.693351268768
time_read_data_this_epoch: 0.0183763504028
time_read_data_total: 0.0722196102142
time_train_this_epoch: 4.45077633858
time_train_total: 17.2501182556
train_cost: 0.692879796028
train_total_gradient_norm: 0.0914226621389
train_total_step_norm: 0.00260851043276
Epoch 3, step 1000 |##################################################################################################| Elapsed Time: 0:00:04
Epoch 4, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 5
iterations_done: 5000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 5000:
test_cost: 0.693159222603
time_read_data_this_epoch: 0.017315864563
time_read_data_total: 0.0895354747772
time_train_this_epoch: 4.38298487663
time_train_total: 21.6331031322
train_cost: 0.692384779453
train_total_gradient_norm: 0.0926703810692
train_total_step_norm: 0.00221344991587
Epoch 4, step 1000 |##################################################################################################| Elapsed Time: 0:00:04
Epoch 5, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 6
iterations_done: 6000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 6000:
test_cost: 0.69303637743
time_read_data_this_epoch: 0.0171670913696
time_read_data_total: 0.106702566147
time_train_this_epoch: 4.36592149734
time_train_total: 25.9990246296
train_cost: 0.69156306982
train_total_gradient_norm: 0.0943506658077
train_total_step_norm: 0.00212660012767
Epoch 5, step 1000 |##################################################################################################| Elapsed Time: 0:00:04
Epoch 6, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 7
iterations_done: 7000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 7000:
test_cost: 0.693288147449
time_read_data_this_epoch: 0.0162725448608
time_read_data_total: 0.122975111008
time_train_this_epoch: 4.29154896736
time_train_total: 30.290573597
train_cost: 0.690440654755
train_total_gradient_norm: 0.0966005474329
train_total_step_norm: 0.00209589977749
Epoch 6, step 1000 |##################################################################################################| Elapsed Time: 0:00:04
Epoch 7, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 8
iterations_done: 8000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 8000:
test_cost: 0.693119823933
time_read_data_this_epoch: 0.0168285369873
time_read_data_total: 0.139803647995
time_train_this_epoch: 4.32589888573
time_train_total: 34.6164724827
train_cost: 0.689218103886
train_total_gradient_norm: 0.0990314260125
train_total_step_norm: 0.00202929996885
Epoch 7, step 1000 |##################################################################################################| Elapsed Time: 0:00:04
Epoch 8, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 9
iterations_done: 9000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 9000:
test_cost: 0.692530453205
time_read_data_this_epoch: 0.0171823501587
time_read_data_total: 0.156985998154
time_train_this_epoch: 4.29153084755
time_train_total: 38.9080033302
train_cost: 0.688079953194
train_total_gradient_norm: 0.100569963455
train_total_step_norm: 0.00193881778978
Epoch 8, step 1000 |##################################################################################################| Elapsed Time: 0:00:04
Epoch 9, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 10
iterations_done: 10000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 10000:
test_cost: 0.691589057446
time_read_data_this_epoch: 0.0170702934265
time_read_data_total: 0.17405629158
time_train_this_epoch: 4.22808480263
time_train_total: 43.1360881329
train_cost: 0.686829447746
train_total_gradient_norm: 0.10218925029
train_total_step_norm: 0.00200984510593
Epoch 9, step 1000 |##################################################################################################| Elapsed Time: 0:00:04
Epoch 10, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 11
iterations_done: 11000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 11000:
test_cost: 0.688588798046
time_read_data_this_epoch: 0.0184333324432
time_read_data_total: 0.192489624023
time_train_this_epoch: 4.47197794914
time_train_total: 47.608066082
train_cost: 0.684867203236
train_total_gradient_norm: 0.105777770281
train_total_step_norm: 0.0021359664388
Epoch 10, step 1000 |#################################################################################################| Elapsed Time: 0:00:04
Epoch 11, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 12
iterations_done: 12000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 12000:
test_cost: 0.680603921413
time_read_data_this_epoch: 0.0173647403717
time_read_data_total: 0.209854364395
time_train_this_epoch: 4.23291349411
time_train_total: 51.8409795761
train_cost: 0.681823670864
train_total_gradient_norm: 0.113032065332
train_total_step_norm: 0.0022689816542
Epoch 11, step 1000 |#################################################################################################| Elapsed Time: 0:00:04
Epoch 12, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 13
iterations_done: 13000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 13000:
test_cost: 0.662819564342
time_read_data_this_epoch: 0.0189263820648
time_read_data_total: 0.22878074646
time_train_this_epoch: 4.34803962708
time_train_total: 56.1890192032
train_cost: 0.677453219891
train_total_gradient_norm: 0.131849586964
train_total_step_norm: 0.0025325815659
Epoch 12, step 1000 |#################################################################################################| Elapsed Time: 0:00:04
Epoch 13, step 1000 | # | Elapsed Time: 0:00:05INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 14
iterations_done: 14000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 14000:
test_cost: 0.136669248343
time_read_data_this_epoch: 0.0202579498291
time_read_data_total: 0.249038696289
time_train_this_epoch: 4.64186143875
time_train_total: 60.8308806419
train_cost: 0.36242878437
train_total_gradient_norm: 0.178038075566
train_total_step_norm: 0.00600557308644
Epoch 13, step 1000 |#################################################################################################| Elapsed Time: 0:00:05
Epoch 14, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 15
iterations_done: 15000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 15000:
test_cost: 0.0713550373912
time_read_data_this_epoch: 0.0192904472351
time_read_data_total: 0.268329143524
time_train_this_epoch: 4.60139369965
time_train_total: 65.4322743416
train_cost: 0.0917091667652
train_total_gradient_norm: 0.0432248823345
train_total_step_norm: 0.00220097112469
Epoch 14, step 1000 |#################################################################################################| Elapsed Time: 0:00:04
Epoch 15, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 16
iterations_done: 16000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 16000:
test_cost: 0.0433191619813
time_read_data_this_epoch: 0.0190880298615
time_read_data_total: 0.287417173386
time_train_this_epoch: 4.60874080658
time_train_total: 70.0410151482
train_cost: 0.0512836314738
train_total_gradient_norm: 0.0233451351523
train_total_step_norm: 0.0016579189105
Epoch 15, step 1000 |#################################################################################################| Elapsed Time: 0:00:05
Epoch 16, step 1000 | # | Elapsed Time: 0:00:05INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 17
iterations_done: 17000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 17000:
test_cost: 0.0276889596134
time_read_data_this_epoch: 0.019106388092
time_read_data_total: 0.306523561478
time_train_this_epoch: 4.66004276276
time_train_total: 74.7010579109
train_cost: 0.0318277515471
train_total_gradient_norm: 0.0143624749035
train_total_step_norm: 0.0014646257041
Epoch 16, step 1000 |#################################################################################################| Elapsed Time: 0:00:05
Epoch 17, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 18
iterations_done: 18000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 18000:
test_cost: 0.0181417986751
time_read_data_this_epoch: 0.017397403717
time_read_data_total: 0.323920965195
time_train_this_epoch: 4.41994524002
time_train_total: 79.1210031509
train_cost: 0.0205487217754
train_total_gradient_norm: 0.00924800150096
train_total_step_norm: 0.0013676994713
Epoch 17, step 1000 |#################################################################################################| Elapsed Time: 0:00:04
Epoch 18, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 19
iterations_done: 19000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 19000:
test_cost: 0.0120534449816
time_read_data_this_epoch: 0.0168647766113
time_read_data_total: 0.340785741806
time_train_this_epoch: 4.42370152473
time_train_total: 83.5447046757
train_cost: 0.013536346145
train_total_gradient_norm: 0.00608377438039
train_total_step_norm: 0.00131083920132
Epoch 18, step 1000 |#################################################################################################| Elapsed Time: 0:00:04
Epoch 19, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 20
iterations_done: 20000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 20000:
test_cost: 0.00807791110128
time_read_data_this_epoch: 0.0163683891296
time_read_data_total: 0.357154130936
time_train_this_epoch: 4.36531853676
time_train_total: 87.9100232124
train_cost: 0.00902231130749
train_total_gradient_norm: 0.00405099103227
train_total_step_norm: 0.00127452181187
Epoch 19, step 1000 |#################################################################################################| Elapsed Time: 0:00:04
Epoch 20, step 1000 | # | Elapsed Time: 0:00:04INFO:blocks.extensions.monitoring:Monitoring on auxiliary data started
INFO:blocks.extensions.monitoring:Monitoring on auxiliary data finished
-------------------------------------------------------------------------------
AFTER ANOTHER EPOCH
-------------------------------------------------------------------------------
Training status:
batch_interrupt_received: False
epoch_interrupt_received: False
epoch_started: False
epochs_done: 21
iterations_done: 21000
received_first_batch: True
resumed_from: None
training_started: True
Log records from the iteration 21000:
test_cost: 0.00544473249465
time_read_data_this_epoch: 0.0164356231689
time_read_data_total: 0.373589754105
time_train_this_epoch: 4.36331009865
time_train_total: 92.2733333111
train_cost: 0.00605866126716
train_total_gradient_norm: 0.00271813361906
train_total_step_norm: 0.00124992604833
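
Once training finishes (or is stopped early), the learned mapping can be checked on fresh data. A minimal sketch, assuming the script above has been run in the same session so that x, y_hat, MyDataset and np are in scope:

import theano

predict = theano.function([x], y_hat)       # compile the forward pass
data = MyDataset(100).generate_data(100)    # 5 fresh batches of 20 sequences
correct = sum((np.round(predict(xb)) == yb).all(axis=1).sum()
              for xb, yb in zip(data['x'], data['y']))
print('accuracy: %.3f' % (correct / 100.0))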