This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Normalize the word embeddings to unit (L2) length in place, so cosine
# similarity reduces to a plain dot product.
# NOTE(review): init_sims() is deprecated in gensim 4.x (replaced by
# fill_norms / get_normed_vectors); kept here for the gensim version this
# snippet targets.
model.init_sims(replace=True)
def neighbors_fnct(node, n_neighbors, dilute_factor):
    """Return `n_neighbors` words similar to `node`, spread over the ranking.

    Queries the word2vec model for `n_neighbors * dilute_factor` nearest
    neighbors and keeps every `dilute_factor`-th one, so the picks are
    diluted across the similarity ranking rather than all being the very
    closest words.
    """
    candidates = [neighbor for neighbor, _ in model.similar_by_word(
        node, n_neighbors * dilute_factor)]
    # [::dilute_factor] keeps exactly n_neighbors entries. The original
    # slice [0:-1:dilute_factor] excluded the final candidate, which made
    # the function return only n_neighbors - 1 words when dilute_factor
    # was 1 (off-by-one).
    return candidates[::dilute_factor]
def euclidean_dist(n1, n2):
    """Return the Euclidean (L2) distance between the embeddings of words
    `n1` and `n2`, looked up in the module-level word2vec `model`."""
    return np.linalg.norm(model.get_vector(n1) - model.get_vector(n2))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from gensim.models import KeyedVectors

# Load pre-trained word2vec embeddings stored in the binary C format.
# limit=100000 keeps only the 100k most frequent words to bound memory use.
model = KeyedVectors.load_word2vec_format(
    fname=word2vec_file_path,  # assumes this path is defined earlier - TODO confirm
    binary=True,
    limit=100000,
)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Demo calls morphing between word pairs. Parenthesized single-argument
# print works identically under Python 2 (print statement) and Python 3
# (print function); the original bare `print x` form is Python-2-only.
print(morph('tooth', 'light'))
print(morph('John', 'perfect'))
print(morph('pillow', 'car'))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
NUM_CLASSES = 2
# Using 1 as the hidden dimensionality makes it easy to plot z, as we'll
# do later on.
HIDDEN_STATE_DIM = 1

# TF1-style graph inputs: one scalar visual feature and one scalar textual
# feature per example, plus the integer class target for each example.
visual = tf.placeholder(tf.float32, shape=[None])
textual = tf.placeholder(tf.float32, shape=[None])
target = tf.placeholder(tf.int32, shape=[None])

# Reshape the scalar visual input [batch] -> [batch, 1] and project it
# through a tanh dense layer to get the visual hidden state.
h_v = tf.layers.dense(tf.reshape(visual, [-1, 1]),
                      HIDDEN_STATE_DIM,
                      activation=tf.nn.tanh)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import tensorflow as tf | |
import numpy as np | |
import matplotlib.pyplot as plt | |
np.random.seed(41) | |
tf.set_random_seed(41) | |
%matplotlib inline |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Parameters of the synthetic two-modality dataset.
n = 400      # number of samples to generate
p_c = 0.5    # presumably P(class == 1) - TODO confirm against the sampling code
p_m = 0.5    # presumably probability a modality carries noise - TODO confirm
# Gaussian means for the visual modality: class 0, class 1, and noise.
mu_v_0 = 1.0
mu_v_1 = 8.0
mu_v_noise = 17.0
# Gaussian means for the textual modality: class 0, class 1, and noise.
mu_t_0 = 13.0
mu_t_1 = 19.0
mu_t_noise = 10.0
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
sess = tf.Session()

def train(train_op, loss):
    """Run 100 epochs of `train_op` and return the per-epoch loss values.

    Initializes all graph variables, then on every epoch feeds the
    module-level data (x_v, x_t, c) into the graph placeholders
    (visual, textual, target) and records the loss.
    """
    sess.run(tf.global_variables_initializer())
    losses = []
    # range works on both Python 2 and 3; the original xrange is Py2-only.
    for epoch in range(100):
        _, l = sess.run([train_op, loss], {visual: x_v,
                                           textual: x_t,
                                           target: c})
        losses.append(l)
    # The original built `losses` but never returned it; returning the list
    # exposes the training curve to callers (backward compatible - callers
    # that ignored the previous None return are unaffected).
    return losses
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
plt.figure(figsize=(10, 2))
# Per-epoch error: learned probability minus the true probability for each
# number. list(...) around .values() keeps this correct on Python 3, where
# dict.values() is a view that numpy would wrap as a 0-d object array
# (on Python 2 it is a no-op, so behavior is unchanged).
prob_errors = [np.array(learned_prob) - np.array(list(number_to_prob.values()))
               for learned_prob in learned_probs]
# Transpose so rows are numbers and columns are epochs; the blue/white/red
# colormap shows the sign and magnitude of the error.
plt.imshow(np.transpose(prob_errors),
           cmap='bwr',
           aspect='auto',
           vmin=-2,
           vmax=2)
plt.xlabel('epoch')
plt.ylabel('number')
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def discriminator(x):
    """Score input `x` with a single linear unit, returning an unbounded logit.

    reuse=tf.AUTO_REUSE shares the same weights across every call, so real
    and generated batches are scored by one and the same discriminator.
    """
    with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
        return tf.contrib.layers.fully_connected(x,
                                                 num_outputs=1,
                                                 activation_fn=None)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Build the GAN graph: score the real values and the generator's outputs
# with the (weight-shared) discriminator.
generated_outputs, generated_probs = generator()
discriminated_real = discriminator(value)
discriminated_generated = discriminator(generated_outputs)

# Discriminator objective: push logits for real samples toward label 1 and
# logits for generated samples toward label 0 (standard GAN sigmoid
# cross-entropy discriminator loss).
d_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=discriminated_real,
                                            labels=tf.ones_like(discriminated_real)))
d_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=discriminated_generated,
                                            labels=tf.zeros_like(discriminated_generated)))
OlderNewer