##A collection of functions for the generation of
##neural networks and the setup of various experiments.
##by Alexandros Kourkoulas-Chondrorizos
##v0.1
from scipy import *
from scipy.signal import *
from matplotlib.pyplot import *
from matplotlib.mlab import *
import scipy.cluster.hierarchy as sch
from numpy.random import rand, randn, random_integers, randint, permutation
from scipy.sparse import csr_matrix
import numpy as np
##Visualising the structural and functional connectivity
##of a neural network.
##by Alexandros Kourkoulas-Chondrorizos
##v0.1
##This is a simple function that calculates the covariance
##matrix of a neural network based on its activity. It then
##reorders the covariance matrix to obtain a depiction of
##functional connectivity and based on that reordering also
##rearranges the connectivity matrix in order to obtain a
##clearer picture of its structural connectivity. Put simply,
##it places structurally and functionally connected neurons
##next to each other so that the groupings are easier to see.
##All it needs is the network's connectivity matrix and a
##sample of its activity in order to calculate the covariance
##matrix.
def mat_reorder(gen,T,L,gn,gl,iw):
    N,C,dn=gen2phen(L,gn,gl,gen) #convert genotype to phenotype
    S,K=netgen(N,C,dn) #generate a neural network architecture
    I,In=input_gen(T,K,Ia=20,ind=arange(int(N[1])),smin=500,norm=False) #generate input for the network
    firings=onet(I,T,0,S,K) #run the net and store the response
    D=cov(firings) #compute covariance matrix
    #compute dendrogram
    Y=sch.linkage(D,method='centroid')
    Z=sch.dendrogram(Y,orientation='right')
    index=Z['leaves']
    #reorder matrices
    D=D[index,:]
    D=D[:,index]
    S=S[index,:]
    S=S[:,index]
    #save reordered connectivity and covariance matrices
    matshow(S,origin='lower')
    xticks([]),yticks([])
    title('Reordered Connectivity Matrix')
    savefig('evolution/struc/%s.png' % (iw))
    matshow(D,origin='lower')
    xticks([]),yticks([])
    title('Reordered Covariance Matrix')
    savefig('evolution/func/%s.png' % (iw))
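##A minimal, assumed usage sketch for mat_reorder (not part of the
##original experiments): build a random genome and reorder the
##resulting network's matrices. The genome geometry (L=3 layers,
##4-bit genes, 7*L genes) and the tag 'demo' are illustrative
##assumptions; the evolution/struc and evolution/func directories
##must already exist for savefig to succeed.
def _demo_mat_reorder():
    L,gl=3,4 #4-bit genes keep the demo network small (assumed)
    gn=7*L #L layer-size genes plus two 3x3 matrices of genes
    gen=randint(0,2,gn*gl) #random binary genome
    mat_reorder(gen,2000,L,gn,gl,'demo')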
##A simple function for obtaining a spiking neural network
##from a binary genome and evaluating its information
##transmission capabilities with respect to stochasticity.
##by Alexandros Kourkoulas-Chondrorizos
##v0.1
def ind_net(L,gn,gl,gen,T,D,R):
    N,C,dn=gen2phen(L,gn,gl,gen) #convert genotype to phenotype
    S,K=netgen(N,C,dn) #generate a neural network architecture
    I,In=input_gen(T,K,Ia=20,ind=arange(int(N[1])),smin=500,norm=False) #generate input for the network
    cf_stor=zeros([R,len(D)]) #stored coding fractions
    for j in range(R):
        for id in range(len(D)):
            firings=onet(I,T,D[id],S,K) #run the net and store the response
            cf_stor[j,id]=eval_estim(In,sum(firings,0)) #store the coding fraction
    fit,cf=eval_net(cf_stor,D) #evaluate net/calculate fitness
    return fit,cf
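##A minimal, assumed usage sketch for ind_net: score one random
##individual over a small sweep of noise amplitudes. The genome
##geometry, noise range and number of runs are illustrative
##assumptions, not the values used in the original experiments.
def _demo_ind_net():
    L,gl=3,4 #4-bit genes keep the demo network small (assumed)
    gn=7*L
    gen=randint(0,2,gn*gl) #random binary genome
    D=arange(0.,5.,0.5) #noise amplitudes to sweep (assumed)
    fit,cf=ind_net(L,gn,gl,gen,2000,D,3) #T=2000 ms, R=3 runs (assumed)
    print 'fitness:',fit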
##A simple function for converting a binary genotype
##into the architecture characteristics of a spiking
##neural network.
##by Alexandros Kourkoulas-Chondrorizos
##v0.1
##It takes as input L, the number of layers in the network
##(for the particular case of layered networks), gn, the
##number of genes in the genome, gl, the length of each
##binary gene, and gen, the current individual's genotype.
##It returns the network's characteristics: N, the number
##of neurons per layer, C, the connection strength for
##each set of connections, and dn, the connectivity
##density for each set of connections.
def gen2phen(L,gn,gl,gen):
    phen=zeros([gn]) #phenotype
    for i in range(gn):
        ni=gen[(gl*i)+arange(gl)] #pick an individual gene
        nj=0
        for j in range(gl):
            nj=nj+ni[j]*(2**j) #convert binary gene to denary phenotype
        phen[i]=nj #store the denary phenotype
    N=phen[:L]+1 #neurons per layer (at least 1 per layer)
    C=reshape(phen[L:4*L],(3,3)) #connection set strength (assumes L=3)
    dn=reshape(phen[4*L:7*L],(3,3))/(2**gl) #connection set density in [0,1)
    return N,C,dn
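##A minimal sketch of gen2phen on a random genome. Note that the
##3x3 reshapes above fix the layer count at L=3; the 8-bit gene
##length is an illustrative assumption.
def _demo_gen2phen():
    L,gl=3,8
    gn=7*L #L layer-size genes + 9 strength genes + 9 density genes
    gen=randint(0,2,gn*gl)
    N,C,dn=gen2phen(L,gn,gl,gen)
    print 'neurons per layer:',N
    print 'connection strengths:\n',C
    print 'connection densities:\n',dn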
##A simple function for evaluating a spiking neural network's
##information transmission capabilities with respect to noise
##amplitude.
##by Alexandros Kourkoulas-Chondrorizos
##v0.3
##This function takes as input cf_stor, an R-by-len(D) array of
##stored coding fractions, one column per noise level. It removes
##NaNs, calculates the coding fraction at the optimum noise level,
##opt, finds the optimum noise level index, the trends of the pre-
##and post-optimum coding fraction trajectories and their slopes.
##It can then use any of those values to compute a fitness score,
##depending on some fitness function, and returns it.
def eval_net(cf_stor,D):
    cf_stor=nan_to_num(cf_stor) #replace nans with 0s
    cf_stor[(cf_stor<0).nonzero()]=0 #lower bound 0
    cf_stor[(cf_stor>1).nonzero()]=1 #upper bound 1
    cf=cf_stor.mean(0) #mean over runs
    opt=cf.max() #coding fraction at optimum noise level
    oi=cf.argmax() #optimum noise level index
    if oi==0:
        fit=-Inf #networks with a zero-noise optimum are penalised
    elif oi==(len(D)-1):
        omin=cf[:oi].min() #pre-optimum noise minimum
        #fitness due to transmission improvement and optimum
        fit=0.3*(opt-omin)+0.2*(opt)
    else:
        omin=cf[:oi].min() #pre-optimum noise minimum
        pot=cf[oi-1:]-detrend_linear(cf[oi-1:]) #cf trend post-optimum
        pm=slopes(D[oi-1:],pot).mean() #post-optimum trend slope
        #fitness due to transmission improvement, optimum and
        #post-optimum noise robustness
        fit=0.5*(opt-omin)+0.3*(opt)+0.2*(1-tanh(pm))
    return fit,cf
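##A minimal sketch of eval_net on a hand-made stochastic-resonance
##style curve: the coding fraction rises to an interior optimum and
##then decays. The curve shape, noise axis and number of runs are
##illustrative assumptions.
def _demo_eval_net():
    D=arange(0.,5.,0.5) #noise axis (assumed)
    cf_curve=exp(-(D-2.)**2) #coding fraction peaking at D=2 (assumed)
    cf_stor=cf_curve+0.01*randn(3,len(D)) #three noisy 'runs'
    fit,cf=eval_net(cf_stor,D)
    print 'optimum noise level:',D[cf.argmax()],'fitness:',fit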
##A simple function for stimulus estimation and evaluation
##by Alexandros Kourkoulas-Chondrorizos
##v0.1
##The function takes x and y as input where x and y are
##both signals (in this particular case an input signal and
##the response of a neural network) and returns cf, the
##coding fraction between the input signal x and its
##estimate Iest.
def eval_estim(x,y):
    Iest,stim,h=stim_rec(x,y,nfft=1024,tstep=1) #stimulus estimation
    mse=mean((Iest-stim)**2) #mean squared error
    cf=1-sqrt(mse)/std(stim) #coding fraction
    return cf
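##A minimal, assumed end-to-end sketch for eval_estim: drive one
##stochastic Izhikevich neuron (oneur, defined below) with a
##smoothed random stimulus and score the reconstruction. The
##simulation length, noise sigma and input settings are
##illustrative assumptions.
def _demo_eval_estim():
    T=5000 #5 s simulation (assumed)
    I,In=input_gen(T,2,20,array([0]),500,False) #stimulus for neuron 0
    firings=oneur(T,1.,I[0,:]) #single neuron, moderate noise (assumed)
    print 'coding fraction:',eval_estim(In,firings)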
##A simple function for simulating a single Izhikevich
##spiking neuron model.
##by Alexandros Kourkoulas-Chondrorizos
##v0.1
##This function simulates a single stochastic Izhikevich
##neuron. It takes T, D and I as input where T is the
##simulation length in ms (positive integer), D is the
##noise amplitude (sigma of Gaussian distribution) and I
##is the input signal. It returns firings which is the
##neural response (a binary sequence).
def oneur(T,D,I):
    a,b,c,d=0.02,0.2,-65,8 #neuron parameters
    v=-65 #initial value of v
    u=.2*v #initial value of u
    firings=zeros([T]) #neural response
    for t in range(T):
        sigma=D*randn() #Gaussian noise
        if v>=30: #reset the neuron after a spike
            v=c
            u=u+d
            firings[t]=1
        v=v+0.5*(0.04*(v**2)+5*v+140-u+I[t]+sigma) #membrane potential, two
        v=v+0.5*(0.04*(v**2)+5*v+140-u+I[t]+sigma) #half-steps for stability
        u=u+a*(b*v-u) #recovery variable
    return firings
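##A minimal, assumed usage sketch for oneur: one second of constant
##input current with weak noise. The input level of 10 and the
##noise sigma are illustrative assumptions.
def _demo_oneur():
    T=1000 #1 s in ms
    firings=oneur(T,0.5,10*ones(T)) #constant 10-unit drive (assumed)
    print 'firing rate (Hz):',1000.*sum(firings)/T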
##A simple function for stimulus estimation/reconstruction
##in a neural system using a Wiener-Kolmogorov filter
##by Alexandros Kourkoulas-Chondrorizos
##v0.3
##This function takes two 1-by-N arrays as input: the input
##signal presented to the neuron or neural net and the neural
##response. It also takes two integers, nfft and tstep, where
##nfft is the number of data points used in each block for the
##FFT (it must be even, and a power of 2 is most efficient) and
##tstep is the sampling time step in ms, from which the sampling
##frequency is derived. It creates a WK filter that is then
##convolved with the neural response in order to produce an
##estimate of the input that produced it. It returns the input
##signal estimate, the zero-centered input signal and the filter.
def stim_rec(I,firings,nfft,tstep):
    if I.ndim!=1 or firings.ndim!=1 or not(isinstance(nfft,int)) \
    or not(nfft%2==0) or not(isinstance(tstep,int)) or not(tstep>0):
        raise ValueError('I and firings should be 1-by-N arrays, nfft an even integer and tstep a positive integer')
    tstep_s=tstep*0.001 #convert to sec
    Fs=1/tstep_s #in Hz
    stim=I-I.mean() #subtract the mean stimulus
    spk=firings*Fs
    spk=spk-spk.mean() #subtract mean firing rate and convert to units of spikes/sec
    win=get_window('bartlett',nfft,False)
    #power spectral density of the neural activity
    Pxx,pxx_freqs=psd(spk,nfft,Fs,window=win,noverlap=nfft/2)
    #cross power spectral density of input and neural activity
    Pxy,pxy_freqs=csd(stim,spk,nfft,Fs,window=win,noverlap=nfft/2)
    Tfs=Pxy/Pxx #estimate the transfer function
    Tfl=zeros([nfft],dtype=complex) #full-length (two-sided) transfer function
    Tfl[:nfft/2],Tfl[nfft/2:]=Tfs[:nfft/2],flipud(Tfs[1:]).conj()
    T=real(ifft(Tfl))*nfft #inverse Fourier transform with negative phase
    h=zeros([nfft+1])
    h[:nfft/2],h[nfft/2:nfft+1]=T[(nfft/2):nfft],T[:nfft/2+1] #unwrap the filter
    Iest=convolve(h*tstep_s,spk,'same') #estimate the input
    return Iest, stim, h
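##A minimal, assumed sketch for stim_rec: build the WK filter for a
##single stochastic neuron and inspect it. All parameter values are
##illustrative assumptions.
def _demo_stim_rec():
    T=5000
    I,In=input_gen(T,2,20,array([0]),500,False)
    firings=oneur(T,1.,I[0,:])
    Iest,stim,h=stim_rec(In,firings,1024,1) #nfft=1024, 1 ms steps (assumed)
    print 'filter length:',len(h),'peak at tap:',argmax(abs(h))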
##A simple function for evaluating a spiking neural network
##by Alexandros Kourkoulas-Chondrorizos
##v0.1
##This is a network of Izhikevich neurons arranged with the
##architecture produced by netgen. It takes as input the
##architecture produced by netgen (S and K), the input signal
##produced by input_gen, the length of the simulation T and the
##noise level D. It returns a matrix of 0s and 1s, where 1
##signifies a spike and 0 the absence of neural activity.
def onet(I,T,D,S,K):
    #run network for training input
    a,b,c,d=0.02,0.2,-65,8 #neuron parameters
    v=-65*ones(K) #initial value of v
    u=.2*v #initial value of u
    firings=zeros([K,T]) #spike timings
    for t in range(T):
        sigma=D*randn(K) #Gaussian noise
        fired=(v>=30).nonzero()[0] #indices of spikes
        firings[fired,t]=1
        v[fired]=c #reset the neurons that fired
        u[fired]=u[fired]+d
        # #time-delayed activity
        # for i in range(md):
        #     Istp(:,md)=sum(synd(:,:,i)*firings(:,(t+md)-i),2)
        # Ist=sum(Istp,2)
        Ist=dot(S,firings[:,t]) #presynaptic activity
        v=v+0.5*(0.04*(v**2)+5*v+140-u+I[:,t]+Ist+sigma) #membrane potential, two
        v=v+0.5*(0.04*(v**2)+5*v+140-u+I[:,t]+Ist+sigma) #half-steps for stability
        u=u+a*(b*v-u) #recovery variable
    return firings
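##A minimal, assumed usage sketch for onet: a small hand-built
##two-layer feedforward net driven through its first layer. The
##layer sizes, weights, densities and noise level are illustrative
##assumptions.
def _demo_onet():
    T=1000
    N=array([10.,10.]) #two layers of 10 neurons (assumed)
    C=[[0.,5.],[0.,0.]] #layer 1 projects to layer 2 (assumed)
    dn=[[0.,.5],[0.,0.]] #50% feedforward connectivity (assumed)
    S,K=netgen(N,C,dn)
    I,In=input_gen(T,K,20,arange(10),500,False) #drive layer 1
    firings=onet(I,T,1.,S,K)
    print 'population rate (Hz):',1000.*sum(firings)/(K*T)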
##A simple input generation function
##by Alexandros Kourkoulas-Chondrorizos
##v0.4
##This function generates an analogue signal for the neural net.
##It draws values from a Gaussian distribution and then smooths
##them using a moving-window method. The input Ia is the input
##amplitude, T is the length of the simulation in ms, K is the
##number of neurons in the population, ind holds the indices of
##the neurons the input will be presented to, smin is the length
##of the smoothing window and norm is the option of whether to
##bring the entire input signal above zero or not. It outputs a
##matrix with the signal values in every row with an index in ind
##and zeros for every other neuron.
def input_gen(T,K,Ia,ind,smin,norm):
    if not(isinstance(T,int)) or T<1:
        raise ValueError('T should be a positive integer')
    if not(isinstance(K,int)) or K<2:
        raise ValueError('K should be an integer >1')
    if max(ind)>K-1 or min(ind)<0 or len(shape(ind))!=1:
        raise ValueError('ind should be a 1xN array of integers in the range 0 to K-1 inclusive')
    if not(isinstance(norm,bool)):
        raise ValueError('norm should be a boolean')
    Ir=Ia*randn(T+smin) #generate random signal with amplitude Ia
    Is=smooth(Ir,smin,'hanning') #smooth signal using window
    I=Is[smin/2:T+smin/2] #trim the smoothing margins
    if norm: #normalise (or not) the input signal
        In=I+abs(min(I))
    else:
        In=I
    It=zeros([K,T]) #input template for the network
    It[ind,:]=In #present input signal to subnet
    return It,In
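##A minimal, assumed usage sketch for input_gen: a 2 s smoothed
##stimulus delivered to the first 5 of 20 neurons, shifted above
##zero. All values are illustrative assumptions.
def _demo_input_gen():
    It,In=input_gen(2000,20,20,arange(5),500,True)
    print shape(It),'min of normalised signal:',In.min()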
##A generic network connectivity architecture
##by Alexandros Kourkoulas-Chondrorizos
##v0.2
##This is a simple function that generates a variety
##of network connectivities, and consequently architectures,
##anywhere from a three-layer feedforward network
##to a fully recurrent neural pool. It supports recurrent
##connections, lateral and self-connections, feedforward
##connections (obviously), any degree of sparsity (0 to
##100% connectivity) and both excitatory and inhibitory connections.
##This function takes three arguments: N, C and D. N is a
##1 by n list of integers larger than 1, where n is an
##integer from 1 to 3 and signifies the number of layers
##in the architecture of the network. Each value in N is the
##number of neurons in each layer. C and D are L by L lists
##where L is the number of layers in the architecture. C stores
##the synaptic strength of each set of connections and D stores
##the sparsity of each set of connections. This function builds
##subsets of connections with individual strengths and sparsities
##which connect any one layer to another. The subsets are then
##concatenated into a big matrix which describes the connectivity
##and the overall architecture of the network.
def netgen(N,C,D): #generic network architecture
    L=len(N) #number of layers
    K=int(sum(N)) #total number of neurons
    #check parameters
    if L<1 or L>3 or len(shape(N))!=1 or K<2:
        raise ValueError('N should be a 1 by n list of integers >1, where n is an integer from 1 to 3')
    if shape(C)!=(L,L) or shape(D)!=(L,L):
        raise ValueError('C and D should both be L by L lists, where L is the number of layers of the network')
    S=zeros((K,K)) #empty synaptic matrix
    for i in range(L):
        for j in range(L):
            #fill in the block connecting layer j to layer i
            S[int(sum(N[:i+1])-N[i]):int(sum(N[:i+1])),int(sum(N[:j+1])-N[j]):int(sum(N[:j+1]))]= \
                (C[i][j]*sprand(int(N[i]),int(N[j]),D[i][j]).todense())
    return S,K
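##A minimal, assumed usage sketch for netgen: a three-layer chain
##with sparse feedforward connections and weak feedback from the
##last layer to the first. All strengths and densities are
##illustrative assumptions.
def _demo_netgen():
    N=array([5.,5.,5.]) #three layers of 5 neurons (assumed)
    C=[[0.,4.,0.],[0.,0.,4.],[2.,0.,0.]] #strengths (assumed)
    dn=[[0.,.3,0.],[0.,0.,.3],[.1,0.,0.]] #densities (assumed)
    S,K=netgen(N,C,dn)
    print K,'neurons,',int((S!=0).sum()),'nonzero synapses'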
def smooth(x, window_len, window):
    """smooth the data using a window with requested size.
    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) at both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
    input:
        x: the input signal
        window_len: the dimension of the smoothing window
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            flat window will produce a moving average smoothing.
    output:
        the smoothed signal
    example:
        import numpy as np
        t = np.arange(-2, 2, 0.1)
        x = np.sin(t) + np.random.randn(len(t))*0.1
        y = smooth(x, 11, 'hanning')
    see also:
        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
        scipy.signal.lfilter
    TODO: the window parameter could be the window itself if an array instead of a string
    """
    if x.ndim != 1:
        raise ValueError, "smooth only accepts 1 dimension arrays."
    if x.size < window_len:
        raise ValueError, "Input vector needs to be bigger than window size."
    if window_len < 3:
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError, "Window should be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
    s = np.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]] #reflect the ends
    if window == 'flat': #moving average
        w = np.ones(window_len, 'd')
    else:
        w = getattr(np, window)(window_len)
    y = np.convolve(w/w.sum(), s, mode='same')
    return y[window_len-1:-window_len+1]
def _rand_sparse(m, n, density):
    #check parameters
    if density > 1.0 or density < 0.0:
        raise ValueError('density should be between 0 and 1')
    #Here I use the algorithm suggested by David to avoid ending
    #up with less than m*n*density nonzero elements (with the algorithm
    #provided by Nathan there is a nonzero probability of having duplicate
    #row/col pairs).
    nnz = max(min(int(m*n*density), m*n), 0)
    rand_seq = permutation(m*n)[:nnz] #draw nnz distinct flat indices
    row = rand_seq // n
    col = rand_seq % n
    data = ones(nnz, dtype='int8')
    #duplicate (i,j) entries will be summed together
    return csr_matrix((data, (row, col)), shape=(m, n))
def sprand(m, n, density):
    """Build a sparse uniformly distributed random matrix
    Parameters
    ----------
    m, n : dimensions of the result (rows, columns)
    density : fraction of nonzero entries.
    Example
    -------
    >>> print sprand(2, 3, 0.5).todense()
    matrix([[ 0.5724829 ,  0.        ,  0.92891214],
            [ 0.        ,  0.07712993,  0.        ]])
    """
    A = _rand_sparse(m, n, density)
    A.data = rand(A.nnz)
    return A
def sprandn(m, n, density):
    """Build a sparse normally distributed random matrix
    Parameters
    ----------
    m, n : dimensions of the result (rows, columns)
    density : fraction of nonzero entries.
    Example
    -------
    >>> print sprandn(2, 4, 0.5).todense()
    matrix([[-0.84041995,  0.        ,  0.        , -0.22398594],
            [-0.664707  ,  0.        ,  0.        , -0.06084135]])
    """
    A = _rand_sparse(m, n, density)
    A.data = randn(A.nnz)
    return A
if __name__ == '__main__':
    print sprand(2, 3, 0.5).todense()
    print sprandn(2, 5, 0.2).todense()