import numpy as np
import tensorflow as tf

def poinc_dist_tf(ptf):
    '''
    Pairwise Poincare distances between all embeddings, vectorised in TensorFlow.
    If expanded, the graph below is equal to the following NumPy code:
        expd = np.expand_dims(points, 2)
        tiled = np.tile(expd, points.shape[0])
        trans = np.transpose(points)
        num = np.sum(np.square(trans - tiled), axis=1)
        den1 = 1 - np.sum(np.square(points), 1)
    '''
    expd = tf.expand_dims(ptf, 2)                     # from (n_emb x emb_dim) to (n_emb x emb_dim x 1)
    tiled = tf.tile(expd, [1, 1, tf.shape(ptf)[0]])   # copying the same matrix n times
    trans = tf.transpose(ptf)
    num = tf.reduce_sum(tf.squared_difference(trans, tiled), 1)  # squared norm of every pairwise difference
    den1 = 1 - tf.reduce_sum(tf.square(ptf), 1)       # 1 - ||x||^2 for every embedding
    den1 = tf.expand_dims(den1, 1)
    den = tf.matmul(den1, tf.transpose(den1))         # outer product: (1 - ||x||^2)(1 - ||y||^2)
    tot = 1 + 2 * tf.div(num, den)
    return tf.acosh(tot)                              # Poincare distance matrix
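
A quick way to sanity-check the graph above (a minimal sketch, assuming TensorFlow 1.x graph mode; the placeholder shape and the random test points are illustrative, not part of the original gist):

import numpy as np
import tensorflow as tf

pts = np.random.uniform(-0.5, 0.5, size=(5, 2)).astype(np.float32)  # 5 points inside the unit disc
ptf = tf.placeholder(tf.float32, shape=(None, 2))
dists = poinc_dist_tf(ptf)

with tf.Session() as sess:
    print(sess.run(dists, feed_dict={ptf: pts}))  # 5x5 symmetric matrix, zeros on the diagonal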
for j in range(3):
    res[j] = v_1[j] * v_2[j]   # element-wise product with an explicit loop...

res = v_1 * v_2                # ...or the same thing as one vectorised NumPy expression
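
To see that the two forms agree, a self-contained check (the example vectors are made up):

import numpy as np

v_1 = np.array([1.0, 2.0, 3.0])
v_2 = np.array([4.0, 5.0, 6.0])

res = np.empty(3)
for j in range(3):
    res[j] = v_1[j] * v_2[j]        # one element at a time

assert np.allclose(res, v_1 * v_2)  # the vectorised form gives the same result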
def _dist_poinc(a, b):
    # Poincare distance between two single points a and b
    num = np.dot(a - b, a - b)      # squared Euclidean distance
    den1 = 1 - np.dot(a, a)         # 1 - ||a||^2
    den2 = 1 - np.dot(b, b)         # 1 - ||b||^2
    return np.arccosh(1 + 2 * num / (den1 * den2))

def dist_poinc(a, A):
    # distances from the point a to every row of A
    res = np.empty(A.shape[0])
    for i, el in enumerate(A):
        res[i] = _dist_poinc(a, el)
    return res
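
For example (the query point and the small array below are arbitrary test values inside the unit disc):

import numpy as np

a = np.array([0.1, 0.2])
A = np.array([[0.0, 0.0],
              [0.3, -0.1],
              [0.5, 0.5]])
print(dist_poinc(a, A))   # one Poincare distance per row of A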
def meanshift(data, sigma, steps):
    d1 = np.copy(data)                        # need to copy the data, we don't want to modify the originals
    for it in range(steps):                   # at each step
        for i, p in enumerate(d1):            # for each point
            dists = dist_poinc(p, d1)         # we calculate the distances from that point to all the others
            weights = gaussian(dists, sigma)  # then we weight those distances with our gaussian kernel
            d1[i] = (np.expand_dims(weights, 1) * d1).sum(0) / weights.sum()  # and substitute the point with the weighted mean
    return d1
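
The snippet above calls a gaussian kernel that is not included here; below is a minimal sketch of what it could look like, under the assumption that it simply turns distances into Gaussian weights (the exact form used in the original post may differ, and the data, sigma and steps in the usage line are arbitrary):

import numpy as np

def gaussian(d, sigma):
    # Gaussian weights from distances; any constant factor would cancel out,
    # since meanshift divides by weights.sum()
    return np.exp(-0.5 * (d / sigma) ** 2)

# hypothetical usage on made-up points inside the unit disc
data = np.random.uniform(-0.5, 0.5, size=(100, 2))
shifted = meanshift(data, sigma=0.3, steps=10)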
def num(points):
    expd = np.expand_dims(points, 2)          # need another dimension...
    tiled = np.tile(expd, points.shape[0])    # ...to tile up the vectors
    trans = np.transpose(points)              # also need the transposed points matrix to fit well with broadcasting
    diff = trans - tiled                      # doing the difference, exploiting NumPy broadcasting capabilities
    num = np.sum(np.square(diff), axis=1)     # and then obtain the squared norm of each difference
    return num

def den(points):
    sq_norm = 1 - np.sum(np.square(points), 1)  # subtracting the squared norm of each vector from 1
    expd = np.expand_dims(sq_norm, 1)           # this is needed to obtain a correctly transposed version of the vector
    den_all = expd * expd.T                     # multiply the column vector by its transpose
    return den_all
def poinc_dist_vec(points):
    numerator = num(points)      # pairwise squared Euclidean distances
    denominator = den(points)    # pairwise products (1 - ||x||^2)(1 - ||y||^2)
    return np.arccosh(1 + 2 * numerator / denominator)
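
As a quick consistency check (a minimal sketch with random test points; it relies on the dist_poinc helper defined earlier on this page):

import numpy as np

points = np.random.uniform(-0.5, 0.5, size=(4, 2))       # sample points inside the unit disc
D = poinc_dist_vec(points)                                # full 4x4 distance matrix in one shot
print(np.allclose(D[0], dist_poinc(points[0], points)))   # agrees with the per-point version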