Memory and Learning of Sequential Patterns by Nonmonotone Neural Networks
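
(A Python implementation. The title matches Morita's 1996 paper of the same
name in Neural Networks, which this code appears to follow: the script trains
the network on a cyclic sequence of three 20x20 binary patterns, then recalls
the sequence from a corrupted cue.)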
#!/usr/bin/env python
# encoding: utf-8
import os
from logging import getLogger
import numpy as np
import copy

logger = getLogger(__name__)

class NonmonotoneNeuralNetwork(object):
    def __init__(self, size=400, time_constant=5.0, initial_beta=0.2):
        self.size = size
        self.weight = np.zeros([size, size])
        self.tau_activation = time_constant
        # Weights evolve on a much slower timescale than activations.
        self.tau_weight = 5000 * self.tau_activation
        # Initial gain on the external stimulus; annealed to zero in partial_fit.
        self.initial_beta = initial_beta

    def partial_fit(self, x, loop=20, alpha=2.0):
        # x: (steps, size) array; each row is the stimulus for one time step.
        beta = self.initial_beta
        for it in range(loop):
            logger.info("iter:%d/%d beta:%f" % (it, loop, beta))
            activation = x[0, :] * self.initial_beta
            binarized_output, nonmonotone_output = self.__update_output(activation)
            for pi in range(1, x.shape[0]):
                stimulus = x[pi, :]
                activation = self.__update_activation(activation, nonmonotone_output, beta, stimulus)
                binarized_output, nonmonotone_output = self.__update_output(activation)
                self.__update_weight(activation, nonmonotone_output, binarized_output, alpha, stimulus)
            # Gradually reduce the external-input gain so the recurrent
            # dynamics carry the sequence on their own by the last iteration.
            beta -= self.initial_beta / loop

    def predict(self, stimulus, loop=30, stop_threshold=0.001):
        activation = stimulus * self.initial_beta
        binarized_output, nonmonotone_output = self.__update_output(activation)
        last_activation = activation
        predictions = [copy.deepcopy(binarized_output)]
        for it in range(loop):
            logger.info("iter:%d/%d" % (it, loop))
            # Run the dynamics freely (no external stimulus).
            activation = self.__update_activation(activation, nonmonotone_output)
            binarized_output, nonmonotone_output = self.__update_output(activation)
            predictions.append(copy.deepcopy(binarized_output))
            # Stop early once the mean absolute change in activation is negligible.
            if np.sum(np.abs(activation - last_activation)) / float(self.size) < stop_threshold:
                break
            last_activation = activation
        return predictions

    # private

    def __update_output(self, activation):
        return self.__sign(activation), self.__output(activation)

    def __update_weight(self, activation, nonmonotone_output, binarized_output, alpha, stimulus):
        # Slow Hebbian-style update toward the outer product of the target
        # signal and the current nonmonotone output.
        Y = self.__output(activation)
        self.weight = ((self.tau_weight - 1) * self.weight + alpha *
                       np.tile(binarized_output * Y * stimulus, (self.size, 1)).T *
                       nonmonotone_output) / self.tau_weight
        # Zero the diagonal: neurons have no self-connections.
        mask = np.ones((self.size, self.size)) - np.eye(self.size)
        self.weight *= mask
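
    # A note on the update below (my reading of the code): it is an Euler step
    # of the leaky-integrator dynamics
    #     tau * du/dt = -u + W f(u) + beta * s,
    # computed as u <- ((tau - 1) * u + W f(u) + beta * s) / tau, where the
    # external stimulus s is only injected while training (beta > 0).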
    def __update_activation(self, activation, output, beta=0, stimulus=None):
        weighted_input = np.dot(self.weight, output)
        if beta > 0.0:
            activation = ((self.tau_activation - 1) * activation + weighted_input + beta * stimulus) / self.tau_activation
        else:
            activation = ((self.tau_activation - 1) * activation + weighted_input) / self.tau_activation
        return activation

    __C = -50.0
    __C_DASH = 10.0
    __H = 0.5
    __KAI = 1.0
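
    # My reading of __output below: with c = -__C = 50, c' = __C_DASH,
    # h = __H and kai = __KAI, it computes
    #     f(u) = (1 - exp(-c*u)) / (1 + exp(-c*u))
    #            * (1 - kai * exp(c'*(|u| - h))) / (1 + exp(c'*(|u| - h)))
    # i.e. a steep sigmoid in u whose output folds back and reverses sign
    # once |u| exceeds h -- the "nonmonotone" activation of the title.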
    def __output(self, x):
        stimulus_abs = np.abs(x)
        e_c_i = np.exp(self.__C * x)
        e_cd_i = np.exp(self.__C_DASH * (stimulus_abs - self.__H))
        return ((1.0 - e_c_i) / (1.0 + e_c_i)) * ((1.0 - self.__KAI * e_cd_i) / (1.0 + e_cd_i))

    def __sign(self, x):
        y = np.ones(x.shape[0])
        y[x < 0] = -1.0
        return y


def trajectory_patterns(que, target, batch_size=20):
    # Build a sequence of patterns that morphs que into target,
    # batch_size elements at a time.
    pattern = np.copy(que)
    patterns = [np.copy(que)]
    for s in range(0, len(pattern), batch_size):
        pattern[0:s + batch_size] = target[0:s + batch_size]
        patterns.append(np.copy(pattern))
    return np.vstack(patterns)
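
# e.g. with the 400-element patterns below and the default batch_size=20,
# trajectory_patterns(p1, p2) returns a (21, 400) array whose rows step
# from p1 to p2, 20 elements at a time.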


def plot_animation(patterns, interval=50):
    import matplotlib
    matplotlib.use('TkAgg')  # must be set before pyplot is imported
    from matplotlib import pyplot as plt
    from matplotlib import animation

    fig = plt.figure(1)
    ax = plt.subplot(111)
    im = ax.imshow(np.reshape(patterns[0], (20, 20)), cmap='Greys',
                   interpolation='nearest', animated=True)

    def updatefig(i):
        im.set_array(np.reshape(patterns[i], (20, 20)))
        return im,

    # Keep a reference to the animation so it is not garbage-collected.
    ani = animation.FuncAnimation(fig, updatefig, frames=len(patterns),
                                  interval=interval, blit=True, repeat=True)
    # ani.save("anime.mp4")
    plt.show()


def corrupted(x, rate=0.1):
    # Flip the sign of each element independently with probability rate.
    cx = np.copy(x)
    inv = np.random.binomial(n=1, p=rate, size=len(x))
    for i, v in enumerate(x):
        if inv[i]:
            cx[i] = -1 * v
    return cx
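
# e.g. corrupted(pattern1, rate=0.1) flips roughly 40 of the 400 elements.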


if __name__ == "__main__":
    from logging import Formatter, StreamHandler, DEBUG
    from logging.handlers import RotatingFileHandler
    import datetime

    log_level = DEBUG
    if not os.path.isdir('./logs'):
        os.makedirs('./logs')
    logger = getLogger(__name__)
    logger.setLevel(log_level)
    log_format = Formatter('%(asctime)s (%(process)d:%(filename)s:%(funcName)s) [%(levelname)s] %(message)s')
    # Log both to a timestamped file and to the console.
    handler = RotatingFileHandler('./logs/%s_%s.log' % (os.path.splitext(os.path.basename(__file__))[0],
                                  datetime.datetime.now().strftime('%Y%m%d_%H%M%S')), backupCount=5)
    handler.setFormatter(log_format)
    logger.addHandler(handler)
    handler = StreamHandler()
    handler.setFormatter(log_format)
    logger.addHandler(handler)

    pattern1 = [-1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1,
                 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1,
                 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1,
                 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1,
                -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1,
                -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1,
                 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1,
                -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1,
                -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1,
                -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1,
                -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1,
                -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1,
                 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
                -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1,
                -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1,
                 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
                 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1,
                 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1,
                -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1,
                -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1,
                -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1,
                 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                 1, 1, 1, -1]

    pattern2 = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1,
                -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1,
                 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1,
                 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1,
                 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1,
                -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1,
                -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1,
                -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
                -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1,
                 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1,
                -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1,
                -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1,
                -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1,
                -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
                 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1,
                 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1,
                -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
                 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                -1, -1, -1, -1]

    pattern3 = [ 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1,
                 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1,
                -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1,
                 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1,
                 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1,
                -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1,
                -1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1,
                -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1,
                 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1,
                -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1,
                 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1,
                 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1,
                -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1,
                -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1,
                 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, -1,
                -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1,
                 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1,
                 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1,
                -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1,
                -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1,
                -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1,
                -1, 1, 1, 1]

    pattern1 = np.array(pattern1).astype(float)
    pattern2 = np.array(pattern2).astype(float)
    pattern3 = np.array(pattern3).astype(float)

    # plot_animation(trajectory_patterns(pattern1, pattern2))

    nnn = NonmonotoneNeuralNetwork()
    # Train the cyclic sequence p1 -> p2 -> p3 -> p1 -> ... incrementally.
    nnn.partial_fit(trajectory_patterns(pattern1, pattern2))
    nnn.partial_fit(trajectory_patterns(pattern2, pattern3))
    nnn.partial_fit(trajectory_patterns(pattern3, pattern1))
    # Recall the memory from a corrupted p1.
    predictions = nnn.predict(corrupted(pattern1, rate=0.1), loop=20 * 6)
    plot_animation(predictions, interval=200)
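    # If training succeeded, the animation should first clean up the
    # corrupted p1 and then keep cycling p1 -> p2 -> p3 -> p1 on its own.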