cifar10.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Builds the CIFAR-10 network. | |
Summary of available functions: | |
# Compute input images and labels for training. If you would like to run | |
# evaluations, use inputs() instead. | |
inputs, labels = distorted_inputs() | |
# Compute inference on the model inputs to make a prediction. | |
predictions = inference(inputs) | |
""" | |
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import re
import sys
import tarfile

from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

from tensorflow.models.image.cifar10 import cifar10_input

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_boolean('use_outerp', False,
                            """Use outer products to build convolutional filters.""")

# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
CIFAR10 = True  # If False, paths below point at the CIFAR-100 binary data.
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.

# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'

DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
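
# The four training constants above feed a standard exponential-decay
# schedule. A minimal sketch of how they typically combine, following the
# stock TensorFlow CIFAR-10 tutorial's train() (not included in this gist);
# `global_step` and `batch_size` are assumed to be supplied by the caller.
def _decayed_learning_rate(global_step, batch_size):
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / batch_size
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
  # Decay the learning rate by LEARNING_RATE_DECAY_FACTOR every
  # NUM_EPOCHS_PER_DECAY epochs, in discrete (staircase) steps.
  return tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step,
                                    decay_steps, LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
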

def _variable_on_cpu(name, shape, initializer):
  """Helper to create a Variable stored on CPU memory.

  If --use_outerp is set and the requested shape is 4-D (a convolutional
  filter), the variable is instead assembled from rank-1 outer products of
  learned vectors, one per (input channel, output channel) pair.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor
  """
  sys.stdout.flush()
  with tf.device('/cpu:0'):
    dtype = tf.float32
    if len(shape) != 4 or not FLAGS.use_outerp:
      # Default path: a single dense variable.
      var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
    else:
      allw = []
      for i in xrange(shape[-1]):    # output channels
        bw = []
        for j in xrange(shape[-2]):  # input channels
          # Two learned vectors of lengths shape[0] and shape[1] ...
          v1 = tf.get_variable(name + "v1_%d_%d" % (i, j), shape[0],
                               initializer=initializer, dtype=dtype)
          v2 = tf.get_variable(name + "v2_%d_%d" % (i, j), shape[1],
                               initializer=initializer, dtype=dtype)
          v1 = tf.reshape(v1, (shape[0], 1))
          v2 = tf.reshape(v2, (1, shape[1]))
          # ... whose outer product forms one shape[0] x shape[1] filter slice.
          v3 = tf.mul(v1, v2)
          bw.append(v3)
        aw = tf.pack(bw)
        allw.append(aw)
      var = tf.pack(allw)
      # Packed shape is [out, in, height, width]; conv2d expects
      # [height, width, in, out].
      var = tf.transpose(var, [2, 3, 1, 0])
      print("My conv is of shape", var.get_shape())
  sys.stdout.flush()
  return var
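
# Why bother with the factorization: each channel pair costs k_h + k_w
# parameters instead of a dense k_h * k_w grid, at the price of constraining
# every filter slice to rank 1. A hypothetical helper (not used elsewhere)
# just to make the arithmetic concrete:
def _outerp_param_counts(k_h, k_w, c_in, c_out):
  # Returns (dense parameter count, outer-product parameter count).
  return k_h * k_w * c_in * c_out, (k_h + k_w) * c_in * c_out

# E.g. for conv1 below: _outerp_param_counts(5, 5, 3, 64) == (4800, 1920).
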

def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
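
# The 'losses' collection gathers the per-variable L2 penalties added above.
# A minimal sketch of how a caller would fold them into a total training loss
# (this gist omits the tutorial's loss(); `cross_entropy_mean` is assumed):
#
#   tf.add_to_collection('losses', cross_entropy_mean)
#   total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
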

def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not FLAGS.data_dir:
    raise ValueError('Please supply a data_dir')
  if CIFAR10:
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
  else:
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-100-binary')
  images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                  batch_size=FLAGS.batch_size)
  return images, labels

def inference(images):
  """Build the CIFAR-10 model.

  Args:
    images: Images returned from distorted_inputs() or inputs().

  Returns:
    Logits.
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training
  # runs. If we only ran this model on a single GPU, we could simplify this
  # function by replacing all instances of tf.get_variable() with
  # tf.Variable().
  #
  # conv1
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('conv1_weights',
                                         shape=[5, 5, 3, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope.name)

  # pool1
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  # norm1
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')

  # conv2
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('conv2_weights',
                                         shape=[5, 5, 64, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope.name)

  # norm2
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')

  # local3
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])
    dim = reshape.get_shape()[1].value
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

  # local4
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)

  # linear layer (WX + b).
  # We don't apply softmax here because
  # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
  # and performs the softmax internally for efficiency.
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1 / 192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
print("Final pre softmax hain") | |
sys.stdout.flush() | |
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name) | |
return softmax_linear | |
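
# For reference, a sketch of the shape flow through inference(), assuming the
# tutorial's IMAGE_SIZE of 24 and batch size N (values not asserted here):
#   images         [N, 24, 24,  3]
#   conv1 + pool1  [N, 12, 12, 64]   (3x3 max pool, stride 2, SAME)
#   conv2 + pool2  [N,  6,  6, 64]
#   local3 input   [N, 2304]         (6 * 6 * 64 flattened)
#   local3/local4  [N, 384] -> [N, 192]
#   logits         [N, NUM_CLASSES]
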

def maybe_download_and_extract():
  """Download and extract the tarball from Alex's website."""
  dest_directory = FLAGS.data_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
          float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  tarfile.open(filepath, 'r:gz').extractall(dest_directory)

cifar10_train.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""A binary to train CIFAR-10 using a single GPU. | |
Accuracy: | |
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of | |
data) as judged by cifar10_eval.py. | |
Speed: With batch_size 128. | |
System | Step Time (sec/batch) | Accuracy | |
------------------------------------------------------------------ | |
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours) | |
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours) | |
Usage: | |
Please see the tutorial and website for how to download the CIFAR-10 | |
data set, compile the program and train the model. | |
http://tensorflow.org/tutorials/deep_cnn/ | |
""" | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from datetime import datetime
import os.path
import time
import sys

import numpy as np
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

import cifar10

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('train_dir', './cifar10_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")

def train():
  """Run CIFAR-10 forward passes for a number of steps."""
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)

    # Get images and labels for CIFAR-10.
    images, _ = cifar10.distorted_inputs()

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Note: unlike the stock tutorial, no loss or training op is built here;
    # the loop below runs only the forward pass to time inference.

    # Create a saver.
    saver = tf.train.Saver(tf.all_variables())
    # Build an initialization operation to run below.
    # init = tf.global_variables_initializer()
    init = tf.initialize_all_variables()

    # Start running operations on the Graph.
    sess = tf.Session(config=tf.ConfigProto(
        log_device_placement=FLAGS.log_device_placement))
    sess.run(init)

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)

    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      _ = sess.run([logits])
      duration = time.time() - start_time

      if step % 10 == 0:
        num_examples_per_step = FLAGS.batch_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)

        format_str = ('%s: step %d, (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print(format_str % (datetime.now(), step,
                            examples_per_sec, sec_per_batch))
        sys.stdout.flush()

      # Save the model checkpoint periodically.
      if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
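
# A minimal sketch of what the full tutorial wires in at this point (omitted
# from this gist): cifar10.loss() and cifar10.train() exist in the stock
# cifar10.py but not in the trimmed version above, so treat these calls as
# assumptions rather than working code against this gist:
#
#   images, labels = cifar10.distorted_inputs()
#   logits = cifar10.inference(images)
#   loss = cifar10.loss(logits, labels)
#   train_op = cifar10.train(loss, global_step)
#   ...
#   _, loss_value = sess.run([train_op, loss])
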
def main(argv=None):  # pylint: disable=unused-argument
  cifar10.maybe_download_and_extract()
  if tf.gfile.Exists(FLAGS.train_dir):
    tf.gfile.DeleteRecursively(FLAGS.train_dir)
  tf.gfile.MakeDirs(FLAGS.train_dir)
  train()


if __name__ == '__main__':
  tf.app.run()