Skip to content

Instantly share code, notes, and snippets.

View yoel-zeldes's full-sized avatar
💭
Working on my blog at anotherdatum.com

Yoel Zeldes yoel-zeldes

💭
Working on my blog at anotherdatum.com
View GitHub Profile
# L2-normalize every word embedding in place (gensim KeyedVectors API), so
# similarity queries reduce to dot products and vectors all have length 1.
model.init_sims(replace=True) # normalize the word embeddings to have length 1
def neighbors_fnct(node, n_neighbors, dilute_factor):
    """Return a diluted list of nearest neighbors of *node*.

    Queries the word2vec model for n_neighbors * dilute_factor candidates,
    then keeps every dilute_factor-th word (most-similar first), excluding
    the final candidate via the 0:-1 slice bound.
    """
    candidates = model.similar_by_word(node, n_neighbors * dilute_factor)
    words = [word for word, _ in candidates]
    return words[0:-1:dilute_factor]
def euclidean_dist(n1, n2):
    """Euclidean distance between the embedding vectors of words *n1* and *n2*."""
    difference = model.get_vector(n1) - model.get_vector(n2)
    return np.linalg.norm(difference)
# Load pre-trained word2vec embeddings from a binary file; `limit` caps the
# vocabulary at the 100k most frequent words to bound memory usage.
# NOTE(review): word2vec_file_path must be defined before this runs — it is
# not visible in this chunk.
from gensim.models import KeyedVectors
model = KeyedVectors.load_word2vec_format(
fname=word2vec_file_path,
binary=True,
limit=100000
)
# Demonstrate word morphing on a few word pairs.
# Uses print() call syntax, which runs on both Python 2 and Python 3;
# the original `print expr` statement form is a SyntaxError on Python 3.
print(morph('tooth', 'light'))
print(morph('John', 'perfect'))
print(morph('pillow', 'car'))
NUM_CLASSES = 2  # binary classification target
HIDDEN_STATE_DIM = 1 # using 1 as dimensionality makes it easy to plot z, as we'll do later on
# Graph inputs (TF1 placeholders): one scalar visual feature and one scalar
# textual feature per example, plus the integer class label.
visual = tf.placeholder(tf.float32, shape=[None])
textual = tf.placeholder(tf.float32, shape=[None])
target = tf.placeholder(tf.int32, shape=[None])
# Project each scalar visual input into the hidden-state space with a
# tanh-activated dense layer; reshape makes it a [batch, 1] column first.
h_v = tf.layers.dense(tf.reshape(visual, [-1, 1]),
HIDDEN_STATE_DIM,
activation=tf.nn.tanh)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Seed both NumPy's and TensorFlow's RNGs for reproducible runs.
np.random.seed(41)
tf.set_random_seed(41)  # TF1-era API (tf.random.set_seed in TF2)
# IPython magic: render matplotlib figures inline (notebook-only syntax;
# not valid in a plain .py script).
%matplotlib inline
# Synthetic-data parameters.
# NOTE(review): names suggest n = number of samples, p_c = class prior,
# p_m = a modality-noise/missingness probability — confirm against the
# sampling code that consumes these (not visible in this chunk).
n = 400
p_c = 0.5
p_m = 0.5
# Gaussian means for the visual modality: one per class, plus a noise mode.
mu_v_0 = 1.0
mu_v_1 = 8.0
mu_v_noise = 17.0
# Gaussian means for the textual modality: one per class, plus a noise mode.
mu_t_0 = 13.0
mu_t_1 = 19.0
mu_t_noise = 10.0
# Single TF1 session shared by the training/evaluation code below.
sess = tf.Session()
def train(train_op, loss):
    """Run 100 epochs of optimization and collect the per-epoch loss.

    Feeds the module-level training data (x_v, x_t, c) into the graph
    placeholders (visual, textual, target) and runs *train_op* once per epoch
    in the shared session.

    Args:
        train_op: TensorFlow op performing one optimization step.
        loss: scalar loss tensor evaluated alongside the step.

    Returns:
        List of the loss value from every epoch. (The original collected
        `losses` and then discarded it; returning it lets callers plot the
        training curve. Callers ignoring the return value are unaffected.)
    """
    sess.run(tf.global_variables_initializer())
    losses = []
    # range works on both Python 2 and 3 (the original used Py2-only xrange)
    for epoch in range(100):
        _, l = sess.run([train_op, loss], {visual: x_v,
                                           textual: x_t,
                                           target: c})
        losses.append(l)
    return losses
# Visualize, per training epoch, how far each number's learned probability
# is from its true probability (blue = underestimate, red = overestimate).
plt.figure(figsize=(10, 2))
# NOTE(review): relies on Python 2 dict.values() returning a list aligned
# with learned_prob's ordering; on Python 3 wrap it in list(...) before
# np.array, and confirm key ordering matches.
prob_errors = [np.array(learned_prob) - np.array(number_to_prob.values())
for learned_prob in learned_probs]
# Transpose so epochs run along the x-axis and numbers along the y-axis.
plt.imshow(np.transpose(prob_errors),
cmap='bwr',
aspect='auto',
vmin=-2,
vmax=2)
plt.xlabel('epoch')
plt.ylabel('number')
def discriminator(x):
    """Map input *x* to a single unbounded logit (real-vs-generated score).

    The variable scope is reused across calls (tf.AUTO_REUSE), so the same
    discriminator weights score both real and generated batches.
    """
    with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
        logits = tf.contrib.layers.fully_connected(
            x, num_outputs=1, activation_fn=None)
        return logits
# Build the GAN losses: run the (weight-sharing) discriminator on both the
# real values and the generator's samples.
# NOTE(review): generator() and value are defined elsewhere in the file.
generated_outputs, generated_probs = generator()
discriminated_real = discriminator(value)
discriminated_generated = discriminator(generated_outputs)
# Discriminator objective, real half: push real logits toward label 1.
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=discriminated_real,
labels=tf.ones_like(discriminated_real)))
# Discriminator objective, fake half: push generated logits toward label 0.
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=discriminated_generated,
labels=tf.zeros_like(discriminated_generated)))