@hemmer
Created March 8, 2019 15:33
import os

import numpy as np
import tensorflow as tf

print("tensorflow version: ", tf.__version__)

# Restrict TensorFlow to the first GPU and let it grow memory on demand.
# The GPU config must be passed to enable_eager_execution, otherwise it is
# never applied.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.enable_eager_execution(config=config)
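# Sanity check (a sketch, not in the original gist): confirm that eager mode
# actually took effect before building the model.
print("executing eagerly:", tf.executing_eagerly())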
# Fully connected network: (x, y) coordinates in, (vx, vy, p) out.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(20, activation=tf.nn.relu, input_shape=(2,)),  # input shape required
    tf.keras.layers.Dense(20, activation=tf.nn.relu),
    tf.keras.layers.Dense(20, activation=tf.nn.relu),
    tf.keras.layers.Dense(20, activation=tf.nn.relu),
    tf.keras.layers.Dense(3)
])
# Smoke test: one forward pass on a random point.
test_input = tf.random_uniform([1, 2])
print(model(test_input))
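# Quick shape check (a sketch added here, not in the original gist): the
# network should map (batch, 2) coordinates to (batch, 3) predictions.
assert model(test_input).shape == (1, 3)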
def loss(model: tf.keras.Model, inputs, outputs):
    """Mean-squared error on the two velocity components.

    The pressure column of `outputs` is ignored here.
    """
    u_true, v_true = outputs[:, 0], outputs[:, 1]
    prediction = model(inputs)
    u_pred, v_pred = prediction[:, 0], prediction[:, 1]
    loss_value = tf.reduce_mean(tf.square(u_true - u_pred)) + \
                 tf.reduce_mean(tf.square(v_true - v_pred))
    return loss_value, u_pred, v_pred
def grad(model: tf.keras.Model, inputs, outputs):
    """Compute the loss and its gradients w.r.t. the trainable weights.

    :param inputs: (batch_size, 2) -> x, y
    :param outputs: (batch_size, 3) -> vx, vy, p
    :return: (loss_value, grads)
    """
    with tf.GradientTape() as tape:
        loss_value, u_pred, v_pred = loss(model, inputs, outputs)
        # AttributeError: 'DeferredTensor' object has no attribute '_id'
        # print(tape.gradient(u_pred, model.input))
    grads = tape.gradient(loss_value, model.trainable_variables)
    return loss_value, grads
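# Sketch (not in the original gist): the commented-out AttributeError above
# comes from differentiating against the symbolic `model.input` of a
# Sequential model. In eager mode the tape can instead watch the concrete
# input tensor and differentiate the prediction against it, which is what
# physics-informed losses need. `input_gradients` is a name introduced here.
def input_gradients(model: tf.keras.Model, inputs):
    """Return d(u_pred)/d(inputs) with shape (batch_size, 2)."""
    inputs = tf.convert_to_tensor(inputs)
    with tf.GradientTape() as tape:
        tape.watch(inputs)          # inputs is a tensor, not a variable
        u_pred = model(inputs)[:, 0]
    return tape.gradient(u_pred, inputs)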
# Meaningless random data, just to test the training loop: columns 0-1 are
# the (x, y) features, columns 2-4 the (vx, vy, p) labels.
data = np.random.rand(40, 5).astype(np.float32)
print(data.shape)

features = data[:, 0:2]  # x, y
labels = data[:, 2:]     # vx, vy, p

num_epochs = 10
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(10)
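# Sketch (not in the original gist): peek at a single batch to confirm the
# pipeline yields (10, 2) inputs and (10, 3) outputs before training.
for batch_inputs, batch_outputs in dataset.take(1):
    print("batch shapes:", batch_inputs.shape, batch_outputs.shape)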
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

for epoch in range(num_epochs):
    for inputs, outputs in dataset:
        loss_value, grads = grad(model, inputs=inputs, outputs=outputs)
        optimizer.apply_gradients(zip(grads, model.trainable_variables),
                                  global_step=tf.train.get_or_create_global_step())
    # loss_value here is the loss of the last batch in the epoch.
    print("epoch", epoch, loss_value)