Timing test: 10 simple minimization problems in TensorFlow 1.x, TensorFlow 2.x, and PyTorch. TF 1.x takes 0.22 s, PyTorch takes 1.47 s, and TF 2.x takes 1.62 s.
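Every variant below minimizes (0.7 - cos(theta))^2 for an angle theta in degrees (scaled by 1e10 in the PyTorch and TF 2.x variants), with theta reparameterized through a sigmoid, so the optimum is any theta with cos(theta) = 0.7. As a quick standard-library sanity check of the target angle:

import math

# cos(theta) = 0.7  =>  theta = +/- acos(0.7), roughly +/- 45.57 degrees
print(math.degrees(math.acos(0.7)))  # 45.5729...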
PyTorch version (one process per problem):

import torch
import numpy as np

def forward(x):
    # Squash the unconstrained parameter into an angle in [-360, 360] degrees.
    clippedTheta = torch.sigmoid(x) * 720 - 360
    return clippedTheta

def loss_fn(x):
    # Squared error between cos(x degrees) and the target 0.7,
    # scaled up so the gradients are large.
    error = 0.7 - torch.cos(x * np.pi / 180.0)
    return 1e10 * error * error

def train():
    x = torch.tensor(0.001, requires_grad=True)
    optimizer = torch.optim.Adam([x], lr=5e-2)
    print(forward(x))
    for i in range(100):
        optimizer.zero_grad()
        loss_fn(forward(x)).backward()
        optimizer.step()
    print(forward(x))

if __name__ == '__main__':
    import torch.multiprocessing as mp
    import time

    # Time one problem in the main process.
    start = time.time()
    train()
    print(time.time() - start)

    # Time all 10 problems, one process each.
    num_processes = 10
    processes = []
    start = time.time()
    for rank in range(num_processes):
        p = mp.Process(target=train)
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    print(time.time() - start)
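Since the 10 problems are identical and independent, a likely faster single-process alternative (a sketch of mine, not part of the original benchmark) is to batch all 10 unknowns into one tensor and let a single Adam instance update them together:

import torch
import numpy as np

# Hypothetical batched variant: one tensor element per problem.
x = torch.full((10,), 0.001, requires_grad=True)
optimizer = torch.optim.Adam([x], lr=5e-2)
for i in range(100):
    optimizer.zero_grad()
    theta = torch.sigmoid(x) * 720 - 360
    error = 0.7 - torch.cos(theta * np.pi / 180.0)
    # Summing independent per-element losses gives independent gradients.
    (1e10 * error * error).sum().backward()
    optimizer.step()
print(torch.sigmoid(x) * 720 - 360)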
TensorFlow 1.x version (all 10 problems built into one graph):

import tensorflow as tf
import numpy as np
import time

def forward(x):
    # Squash the unconstrained parameter into an angle in [-180, 180] degrees.
    clippedTheta = tf.sigmoid(x) * 360 - 180
    return clippedTheta

def loss_fn(x):
    # Squared error between cos(x degrees) and the target 0.7.
    error = 0.7 - tf.cos(x * np.pi / 180.0)
    return error * error

config = tf.ConfigProto(device_count={'GPU': 0})
sess = tf.Session(config=config)

# Build 10 independent copies of the problem in a single graph.
train_ops = []
costs = []
tf_vars = []
ys = []
for i in range(10):
    x = tf.Variable(0.001, dtype=tf.float64)
    y = forward(x)
    cost = loss_fn(y)
    ys.append(y)
    tf_vars.append(x)
    costs.append(cost)
    train_ops.append(tf.train.AdamOptimizer(5e-2).minimize(cost))

sess.run(tf.global_variables_initializer())

# Each iteration is one Session.run call that executes all 10 update ops.
optimize_ops = [train_ops, costs, tf_vars, ys]
start = time.time()
for i in range(100):
    _, bestCosts, bestXs, bestYs = sess.run(optimize_ops)
print(time.time() - start)
print(bestYs)
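Part of why the TF 1.x version wins is visible above: each iteration is a single Session.run call that executes all 10 Adam updates inside the graph. If the per-step costs and values are not needed as fetches, the updates can be fused further into one grouped op (a sketch, not in the original):

train_all = tf.group(*train_ops)  # one op that runs all 10 Adam updates
start = time.time()
for i in range(100):
    sess.run(train_all)
print(time.time() - start)
print(sess.run(ys))  # final angles of all 10 problems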
TensorFlow 2.x version (eager Python loop around tf.function ops):

import tensorflow as tf
import numpy as np
import time

@tf.function
def forward(x):
    # Squash the unconstrained parameter into an angle in [-360, 360] degrees.
    clippedTheta = tf.sigmoid(x) * 720 - 360
    return clippedTheta

@tf.function
def loss_fn(x):
    # Squared error between cos(x degrees) and the target 0.7,
    # scaled up so the gradients are large.
    error = 0.7 - tf.cos(x * np.pi / 180.0)
    return 1e10 * error * error

def trainLoop(opt, x):
    # Note: this Python loop runs eagerly; only forward and loss_fn are compiled.
    for j in range(100):
        with tf.GradientTape() as tape:
            y = forward(x)
            loss = loss_fn(y)
        input_vars = [x]
        gradients = tape.gradient(loss, input_vars)
        opt.apply_gradients(zip(gradients, input_vars))
    return x

def train():
    opt = tf.keras.optimizers.Adam(learning_rate=0.1)
    x = tf.Variable(0.001, dtype=tf.float64)
    return trainLoop(opt, x)

# Warm-up call so tracing is not included in the timing.
train()

start = time.time()
with tf.device('/job:localhost/replica:0/task:0/device:CPU:0'):
    for i in range(10):
        x = train()
print(time.time() - start)
print(forward(x))
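The TF 2.x slowdown is consistent with trainLoop running its Python loop eagerly, so every tape/gradient/update step pays per-op dispatch overhead. A sketch of the usual fix, reusing forward and loss_fn from the listing above, is to compile the whole training step:

opt = tf.keras.optimizers.Adam(learning_rate=0.1)
x = tf.Variable(0.001, dtype=tf.float64)

@tf.function
def train_step():
    # One compiled graph per call: tape, gradient, and Adam update together.
    with tf.GradientTape() as tape:
        loss = loss_fn(forward(x))
    opt.apply_gradients(zip(tape.gradient(loss, [x]), [x]))

for i in range(100):
    train_step()
print(forward(x))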