import json
import math
from pathlib import Path
import utils  # project-local helper; provides printflush used below
import chess  # appears to be a project-local board module (Board.setFromFen, Board.inputsrow, INPUT_SIZE), not the python-chess package
################################################
# TensorFlow init
utils.printflush("neural initialization")
import tensorflow as tf
utils.printflush("\n------------\nneural started\n------------\n")
################################################
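# NOTE: this script uses the TensorFlow 1.x graph API (placeholders, Session, Saver);
# under TensorFlow 2.x the tf.compat.v1 equivalents would be needed.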
# single reusable board object, used below to turn FENs into network input rows
board = chess.Board()
################################################
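# Evaluation-network hyperparameters and checkpoint location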
INFINITE = 1e20
MAX_TRAIN_SIZE = 1000
STORE_ENGINE_WEIGHTS_DIR = "./engineweights"
STORE_ENGINE_WEIGHTS_PATH = STORE_ENGINE_WEIGHTS_DIR + "/engineweights.ckpt"
learning_rate = 0.0000001
num_input = chess.INPUT_SIZE
num_hidden_1 = chess.INPUT_SIZE
num_hidden_2 = chess.INPUT_SIZE
num_output = 1
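# Two fully connected hidden layers (sigmoid), each the same width as the
# board input vector, followed by a single linear output: the position score.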
tf.reset_default_graph()
X = tf.placeholder("float", [1, num_input])
Y = tf.placeholder("float", [1, num_output])
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, num_hidden_1]), name="h1"),
    'h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2]), name="h2"),
    'wout': tf.Variable(tf.random_normal([num_hidden_2, num_output]), name="wout")
}
biases = {
    'b1': tf.Variable(tf.random_normal([num_hidden_1]), name="b1"),
    'b2': tf.Variable(tf.random_normal([num_hidden_2]), name="b2"),
    'bout': tf.Variable(tf.random_normal([num_output]), name="bout")
}
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(X, weights['h1']), biases['b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))
engine = tf.add(tf.matmul(layer_2, weights['wout']), biases['bout'])
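# Squared error between the network's score and the target, minimised with
# plain gradient descent.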
squared_deltas = tf.square(engine - Y)
loss = tf.reduce_sum(squared_deltas)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
trainjob = optimizer.minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
saver = tf.train.Saver()
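# Checkpoint helpers: persist the weights to disk and restore them on startup.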
def save_weights():
    # make sure the checkpoint directory exists before saving
    Path(STORE_ENGINE_WEIGHTS_DIR).mkdir(parents=True, exist_ok=True)
    utils.printflush(saver.save(sess, STORE_ENGINE_WEIGHTS_PATH))
    utils.printflush("engine weights saved ok")

def load_weights():
    if Path(STORE_ENGINE_WEIGHTS_DIR).is_dir():
        saver.restore(sess, STORE_ENGINE_WEIGHTS_PATH)
        utils.printflush("engine weights loaded ok")
    else:
        utils.printflush("no stored engine weights")
load_weights()
def avg_loss(totalloss, n):
    # root-mean-square of the accumulated squared-error loss over n positions
    return math.sqrt(totalloss / n)

def calc_pos_value(fen):
    # evaluate a single position: returns the raw network output for this FEN
    board.setFromFen(fen)
    return sess.run(engine, {X: board.inputsrow})
def train(verbose=True):
    # one pass over evals.json: train on up to MAX_TRAIN_SIZE positions and
    # return the running RMS loss
    with open("evals.json") as f:
        data = json.load(f)
    totalloss = 0
    al = 0
    for i, fen in enumerate(data):
        if i < min(MAX_TRAIN_SIZE, len(data)):
            attrs = data[fen]
            board.setFromFen(fen)
            score = attrs["score"]
            sess.run(trainjob, {X: board.inputsrow, Y: [[score]]})
            actualloss = sess.run(loss, {X: board.inputsrow, Y: [[score]]})
            totalloss += actualloss
            n = i + 1
            al = avg_loss(totalloss, n)
            if verbose:
                utils.printflush("{0:5d}. avg loss: {1:20f}".format(n, al))
    return al
def epoch(n):
    # run n training passes over evals.json, then checkpoint the weights
    for _ in range(n):
        utils.printflush(train(False))
    save_weights()
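# Pick the candidate move whose resulting position gets the lowest engine
# score and emit it as a JSON "makesan" command.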
def play_move(data):
    selsan = ""
    bestvalue = INFINITE
    for item in data:
        fen = item["fen"]
        actualvalue = calc_pos_value(fen)
        if actualvalue < bestvalue:
            selsan = item["san"]
            bestvalue = actualvalue
    movejsonstr = json.dumps({"action": "makesan", "san": selsan})
    utils.printflush(movejsonstr)
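################################################
# Illustrative driver (not part of the original gist): assumes play_move()
# is given a list of {"san": ..., "fen": ...} candidates, matching the keys
# read above; the FEN strings here are placeholders, not real positions.
#
# if __name__ == "__main__":
#     epoch(10)                      # train for 10 passes and checkpoint
#     candidates = [
#         {"san": "e4", "fen": "<FEN after 1. e4>"},
#         {"san": "d4", "fen": "<FEN after 1. d4>"},
#     ]
#     play_move(candidates)          # prints {"action": "makesan", "san": ...}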