testtensor.md
```python
import tensorflow as tf

tf.reset_default_graph()

# Inputs and targets are fed at run time; shapes are 1-D batches.
x = tf.placeholder(shape=[None], name="x", dtype=tf.float32)
y = tf.placeholder(shape=[None], name="y", dtype=tf.float32)
label = tf.placeholder(shape=[None], name="label", dtype=tf.float32)

# One trainable weight, initialized far from the optimum (the labels below
# are built with a factor of 3, so the target value of boost is 3).
boost = tf.Variable(30, name="boost", dtype=tf.float32)
score = (x + y) * boost

with tf.Session() as sess:
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.3)
    loss_op = tf.losses.mean_squared_error(label, score)
    train_op = optimizer.minimize(loss_op)
    sess.run(tf.global_variables_initializer())
    # print(sess.run(score, feed_dict={x: [2, 3], y: [2, 3]}))
    for i in range(100):
        _, loss = sess.run((train_op, loss_op), feed_dict={
            x: [1, 2, 3, 4, 5, 6],
            y: [1, 2, 3, 4, 5, 6],
            label: [(1 + 1) * 3, (2 + 2) * 3, (3 + 3) * 3,
                    (4 + 4) * 3, (5 + 5) * 3, (6 + 6) * 3],
        })
        print(sess.run(boost))
        print('epoch {} loss {}'.format(i, loss))
```
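With learning_rate=0.3 this run diverges immediately. Because x == y in the feed, the loss is a one-dimensional quadratic in boost, so the first update is easy to check by hand. A minimal sketch (plain Python, no TensorFlow; the constants come from the feed above) reproduces the epoch-0 numbers printed below:

```python
# Framework-free check of the first gradient-descent step, assuming the feed
# above: x == y == [1..6], labels == 3 * (x + y), boost starts at 30.
xy = [2 * v for v in range(1, 7)]            # x + y = [2, 4, 6, 8, 10, 12]
boost, lr = 30.0, 0.3

mean_sq = sum(v * v for v in xy) / len(xy)   # mean((x+y)^2) = 364/6 ~ 60.67
loss = (boost - 3.0) ** 2 * mean_sq          # 44226.0, the printed epoch-0 loss
grad = 2.0 * (boost - 3.0) * mean_sq         # dLoss/dboost = 3276.0
boost -= lr * grad                           # 30 - 982.8 = -952.8, as printed

print(loss, grad, boost)
```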
                           
```
-952.80005
epoch 0 loss 44226.0
33838.324
epoch 1 loss 55422256.0
-1197767.5
epoch 2 loss 69452963840.0
42401084.0
epoch 3 loss 87035691925504.0
-1500998400.0
epoch 4 loss 1.0906966368976896e+17
53135344000.0
epoch 5 loss 1.3668175939636927e+20
-1880991500000.0
epoch 6 loss 1.712841256054879e+23
66587105000000.0
epoch 7 loss 2.1464651695586915e+26
-2357183800000000.0
epoch 8 loss 2.6898644821143735e+29
8.344431e+16
epoch 9 loss 3.370831421440324e+32
-2.9539284e+18
epoch 10 loss 4.224191302858904e+35
1.0456907e+20
epoch 11 loss inf
-3.7017453e+21
epoch 12 loss inf
1.3104181e+23
epoch 13 loss inf
-4.6388803e+24
epoch 14 loss inf
1.6421638e+26
epoch 15 loss inf
-5.81326e+27
epoch 16 loss inf
2.0578944e+29
epoch 17 loss inf
-7.284946e+30
epoch 18 loss inf
2.578871e+32
epoch 19 loss inf
-9.1292037e+33
epoch 20 loss inf
3.2317383e+35
epoch 21 loss inf
-1.1440355e+37
epoch 22 loss inf
inf
epoch 23 loss inf
nan
epoch 24 loss inf
nan
epoch 25 loss nan
[epochs 26-98 omitted: boost and loss remain nan]
nan
epoch 99 loss nan
```
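The sign flips and the roughly 35x growth per epoch match the closed form: with x == y, each gradient-descent step maps boost to 3 + (1 - 2*lr*mean((x+y)^2)) * (boost - 3), and 1 - 2*0.3*(364/6) = -35.4. Since that factor has magnitude greater than 1, the error is multiplied by -35.4 every epoch until float32 overflows to inf and then nan. A short sketch reproducing the printed boost sequence:

```python
# Closed-form view of the divergence above: with x == y, each gradient-descent
# step multiplies the error (boost - 3) by a constant factor.
mean_sq = 364 / 6                    # mean((x+y)^2) for the feed above
factor = 1.0 - 2 * 0.3 * mean_sq     # = -35.4; |factor| > 1 means divergence

b = 30.0
for epoch in range(3):
    b = 3.0 + factor * (b - 3.0)     # the same update gradient descent performs
    print(epoch, b)                  # -952.8, 33838.3, -1197767.3, ...
```

The same model with Adam and the identical learning rate behaves very differently: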
```python
with tf.Session() as sess:
    # Same graph and data, but Adam instead of plain gradient descent.
    optimizer = tf.train.AdamOptimizer(learning_rate=0.3)
    loss_op = tf.losses.mean_squared_error(label, score)
    train_op = optimizer.minimize(loss_op)
    # Initialize after minimize() so Adam's slot variables exist; this also
    # resets boost back to 30 for a fresh run.
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        _, loss = sess.run((train_op, loss_op), feed_dict={
            x: [1, 2, 3, 4, 5, 6],
            y: [1, 2, 3, 4, 5, 6],
            label: [(1 + 1) * 3, (2 + 2) * 3, (3 + 3) * 3,
                    (4 + 4) * 3, (5 + 5) * 3, (6 + 6) * 3],
        })
        print(sess.run(boost))
        print('epoch {} loss {}'.format(i, loss))
```
```
29.7
epoch 0 loss 44226.0
29.400093
epoch 1 loss 43248.6640625
29.100342
epoch 2 loss 42282.53515625
28.800808
epoch 3 loss 41327.82421875
28.501554
epoch 4 loss 40384.6875
28.202648
epoch 5 loss 39453.30859375
27.904152
epoch 6 loss 38533.85546875
27.60613
epoch 7 loss 37626.484375
27.30865
epoch 8 loss 36731.33984375
27.011772
epoch 9 loss 35848.56640625
26.715563
epoch 10 loss 34978.29296875
26.420088
epoch 11 loss 34120.62890625
26.125408
epoch 12 loss 33275.69921875
25.831587
epoch 13 loss 32443.59375
25.538687
epoch 14 loss 31624.400390625
25.246769
epoch 15 loss 30818.203125
24.955893
epoch 16 loss 30025.072265625
24.666117
epoch 17 loss 29245.046875
24.377499
epoch 18 loss 28478.181640625
24.090097
epoch 19 loss 27724.509765625
23.803967
epoch 20 loss 26984.060546875
23.519161
epoch 21 loss 26256.837890625
23.235733
epoch 22 loss 25542.849609375
22.953735
epoch 23 loss 24842.083984375
22.673216
epoch 24 loss 24154.529296875
22.394224
epoch 25 loss 23480.1484375
22.116806
epoch 26 loss 22818.912109375
21.841007
epoch 27 loss 22170.771484375
21.566872
epoch 28 loss 21535.669921875
21.294441
epoch 29 loss 20913.541015625
21.023758
epoch 30 loss 20304.318359375
20.75486
epoch 31 loss 19707.921875
20.487785
epoch 32 loss 19124.259765625
20.22257
epoch 33 loss 18553.240234375
19.95925
epoch 34 loss 17994.759765625
19.697857
epoch 35 loss 17448.712890625
19.438423
epoch 36 loss 16914.984375
19.180979
epoch 37 loss 16393.453125
18.925554
epoch 38 loss 15883.9951171875
18.672176
epoch 39 loss 15386.4794921875
18.420872
epoch 40 loss 14900.7705078125
18.171665
epoch 41 loss 14426.7314453125
17.924582
epoch 42 loss 13964.2177734375
17.679642
epoch 43 loss 13513.0830078125
17.436869
epoch 44 loss 13073.1728515625
17.196281
epoch 45 loss 12644.33984375
16.957901
epoch 46 loss 12226.4208984375
16.721745
epoch 47 loss 11819.2626953125
16.48783
epoch 48 loss 11422.7001953125
16.25617
epoch 49 loss 11036.5732421875
16.026783
epoch 50 loss 10660.7138671875
15.799681
epoch 51 loss 10294.955078125
15.574877
epoch 52 loss 9939.130859375
15.352383
epoch 53 loss 9593.0703125
15.13221
epoch 54 loss 9256.6025390625
14.914369
epoch 55 loss 8929.5576171875
14.698868
epoch 56 loss 8611.765625
14.485717
epoch 57 loss 8303.0537109375
14.274922
epoch 58 loss 8003.24853515625
14.066491
epoch 59 loss 7712.18212890625
13.860429
epoch 60 loss 7429.67822265625
13.656741
epoch 61 loss 7155.56787109375
13.455433
epoch 62 loss 6889.67822265625
13.256507
epoch 63 loss 6631.841796875
13.059966
epoch 64 loss 6381.88623046875
12.865812
epoch 65 loss 6139.64306640625
12.674047
epoch 66 loss 5904.94384765625
12.484673
epoch 67 loss 5677.623046875
12.297688
epoch 68 loss 5457.513671875
12.1130905
epoch 69 loss 5244.45068359375
11.9308815
epoch 70 loss 5038.27099609375
11.751059
epoch 71 loss 4838.8125
11.573618
epoch 72 loss 4645.91552734375
11.398558
epoch 73 loss 4459.42041015625
11.225873
epoch 74 loss 4279.169921875
11.05556
epoch 75 loss 4105.00927734375
10.887613
epoch 76 loss 3936.7841796875
10.722027
epoch 77 loss 3774.343017578125
10.558795
epoch 78 loss 3617.53515625
10.397911
epoch 79 loss 3466.212890625
10.239367
epoch 80 loss 3320.2314453125
10.083154
epoch 81 loss 3179.444580078125
9.929265
epoch 82 loss 3043.711669921875
9.777691
epoch 83 loss 2912.892578125
9.628421
epoch 84 loss 2786.850341796875
9.481445
epoch 85 loss 2665.4482421875
9.336754
epoch 86 loss 2548.55419921875
9.194335
epoch 87 loss 2436.036376953125
9.054177
epoch 88 loss 2327.766845703125
8.916267
epoch 89 loss 2223.619140625
8.780594
epoch 90 loss 2123.468017578125
8.647143
epoch 91 loss 2027.1927490234375
8.5159025
epoch 92 loss 1934.673828125
8.386856
epoch 93 loss 1845.7943115234375
8.259991
epoch 94 loss 1760.4384765625
8.135291
epoch 95 loss 1678.4951171875
8.012742
epoch 96 loss 1599.853515625
7.892328
epoch 97 loss 1524.40673828125
7.7740316
epoch 98 loss 1452.048828125
7.6578374
epoch 99 loss 1382.6768798828125
```
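Adam converges here because its updates are normalized by running gradient statistics: early steps have magnitude close to the raw learning_rate regardless of gradient scale, which is exactly the printed pattern (boost drops by about 0.3 per epoch: 30 -> 29.7 -> 29.400093 -> ...). Plain gradient descent would also converge if the learning rate were below this problem's stability bound of 1/mean((x+y)^2), roughly 0.0165. A sketch of the same closed-form recurrence with a smaller step size (lr = 0.01 is a hypothetical choice for illustration, not from the original run):

```python
# Gradient descent on the same problem with a learning rate below the
# stability bound 1 / mean((x+y)^2) ~ 0.0165.
mean_sq = 364 / 6
lr = 0.01                            # hypothetical; small enough to be stable
factor = 1.0 - 2 * lr * mean_sq      # ~ -0.213; |factor| < 1 means convergence

b = 30.0
for epoch in range(10):
    b = 3.0 + factor * (b - 3.0)
print(b)                             # ~ 3.0000053: near the optimum boost = 3
```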