import keras
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD

# Batch size = 32, Input Dimension = 500, Hidden Dimension = 50
# Create the model
model = Sequential()
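The preview cuts off after the empty model is created; a minimal sketch of how this two-layer network would typically be finished. The output dimension of 500 is taken from the TensorFlow snippet below, but the loss, learning rate, and training call are assumptions, not part of the original gist:

model.add(Dense(50, input_dim=500))   # hidden layer: 500 -> 50
model.add(Activation('relu'))
model.add(Dense(500))                 # output layer: 50 -> 500
# Compile with plain SGD and a mean-squared-error loss, then fit on random data
model.compile(loss='mean_squared_error', optimizer=SGD(lr=1e0))
x = np.random.randn(32, 500)
y = np.random.randn(32, 500)
model.fit(x, y, batch_size=32)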
import numpy as np
import tensorflow as tf

# Batch size = 32, Input Dimension = 500, Hidden Dimension = 50
# Define computational graph
# Create placeholders
x = tf.placeholder(tf.float32, shape=(32, 500))
y = tf.placeholder(tf.float32, shape=(32, 500))
w1 = tf.placeholder(tf.float32, shape=(500, 50))
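The preview stops at the first weight; a hedged sketch of how the rest of the TF 1.x graph is usually defined from these placeholders (a second weight, a forward pass, an L2 loss, and symbolic gradients; the specific ops are assumptions):

w2 = tf.placeholder(tf.float32, shape=(50, 500))
# Forward pass: ReLU hidden layer, then a linear output layer
h = tf.nn.relu(tf.matmul(x, w1))
y_pred = tf.matmul(h, w2)
diff = y_pred - y
loss = tf.reduce_mean(tf.reduce_sum(diff ** 2, axis=1))
# Ask TensorFlow to build gradient ops for the loss w.r.t. the weights
grad_w1, grad_w2 = tf.gradients(loss, [w1, w2])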
import theano
import theano.tensor as T

# Batch size = 32, Input Dimension = 500, Hidden Dimension = 50, Number of Classes = 5
# Define symbolic variables
x = T.matrix('x')
y = T.vector('y', dtype='int64')
w1 = T.matrix('w1')
w2 = T.matrix('w2')
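The gist is truncated after the symbolic variables; a sketch of the forward pass and compiled function that would typically follow (ReLU hidden layer, softmax over the 5 classes; the exact ops are assumptions based on the comment above):

# Forward pass: scores, probabilities, and mean cross-entropy loss
a = x.dot(w1)                     # (32, 50)
h = T.nnet.relu(a)
scores = h.dot(w2)                # (32, 5)
probs = T.nnet.softmax(scores)
loss = T.nnet.categorical_crossentropy(probs, y).mean()
# Compile a callable mapping inputs and weights to the loss value
f = theano.function(inputs=[x, y, w1, w2], outputs=loss)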
import torch

# Batch Size = 32, Input Dimension = 500, Hidden Dimension = 50, Output Dimension = 5
dtype = torch.FloatTensor
# Create random tensors for data and weights
x = torch.randn(32, 500).type(dtype)
y = torch.randn(32, 5).type(dtype)
w1 = torch.randn(500, 50).type(dtype)
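The preview ends at the first weight; a sketch of the second weight plus the classic manual forward/backward loop in the old Tensor API that this snippet uses (the learning rate and number of steps are assumptions):

w2 = torch.randn(50, 5).type(dtype)

learning_rate = 1e-6
for t in range(500):
    # Forward pass: ReLU hidden layer, then linear output scores
    h = x.mm(w1)
    h_relu = h.clamp(min=0)
    y_pred = h_relu.mm(w2)
    loss = (y_pred - y).pow(2).sum()
    # Backward pass computed by hand, followed by a gradient-descent update
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.t().mm(grad_y_pred)
    grad_h = grad_y_pred.mm(w2.t()).clone()
    grad_h[h < 0] = 0
    grad_w1 = x.t().mm(grad_h)
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2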
require 'torch'
require 'nn'
require 'optim'

-- Build a model as a sequence of layers, and a loss function
local model = nn.Sequential()
model:add(nn.Linear(500, 50))
model:add(nn.ReLU())
model:add(nn.Linear(50, 5))
local loss_fn = nn.CrossEntropyCriterion()
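For comparison, a sketch (not part of the original gist) of the same model-plus-criterion setup in PyTorch's nn package, which mirrors the Lua Torch layer stack above almost one-to-one:

import torch.nn as nn

model = nn.Sequential(
    nn.Linear(500, 50),   # 500 -> 50, as in the Lua version above
    nn.ReLU(),
    nn.Linear(50, 5),     # 50 -> 5 class scores
)
loss_fn = nn.CrossEntropyLoss()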
# Setup
from __future__ import print_function, division
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

# Load Dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
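The preview stops after loading the data; a small sketch of how the loaded dataset is typically inspected and sampled (the shapes follow from one_hot=False, where labels are integer class ids; the plotting code is an assumption):

# Training split of this loader: 55000 flattened 28*28 images
print(mnist.train.images.shape)   # (55000, 784)
print(mnist.train.labels.shape)   # (55000,)

# Sample a mini-batch and display one digit with its label
images, labels = mnist.train.next_batch(64)
plt.imshow(images[0].reshape(28, 28), cmap='gray')
plt.title('label: %d' % labels[0])
plt.show()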
def load_vgg(sess, vgg_path):
    # load the model and weights
    model = tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)

    # Get Tensors to be returned from graph
    graph = tf.get_default_graph()
    image_input = graph.get_tensor_by_name('image_input:0')
    keep_prob = graph.get_tensor_by_name('keep_prob:0')
    layer3 = graph.get_tensor_by_name('layer3_out:0')
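    # The preview is truncated here; a plausible completion, assuming the
    # remaining tensors follow the same naming pattern as above:
    layer4 = graph.get_tensor_by_name('layer4_out:0')
    layer7 = graph.get_tensor_by_name('layer7_out:0')
    return image_input, keep_prob, layer3, layer4, layer7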
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    # Use shorter variable names for simplicity
    layer3, layer4, layer7 = vgg_layer3_out, vgg_layer4_out, vgg_layer7_out

    # Apply 1x1 convolution in place of a fully connected layer
    fcn8 = tf.layers.conv2d(layer7, filters=num_classes, kernel_size=1, name="fcn8")

    # Upsample fcn8 so its spatial size matches layer 4, allowing a skip connection with the 4th layer
    fcn9 = tf.layers.conv2d_transpose(fcn8, filters=layer4.get_shape().as_list()[-1],
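                                      # assumed continuation; the gist preview is truncated above
                                      kernel_size=4, strides=(2, 2), padding='same', name="fcn9")
    # Add the skip connection from VGG layer 4 (a sketch of the usual FCN-8
    # decoder pattern; the exact upsampling parameters are assumptions)
    fcn9_skip_connected = tf.add(fcn9, layer4, name="fcn9_plus_vgg_layer4")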
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    # Reshape 4D tensors to 2D: each row represents a pixel, each column a class
    logits = tf.reshape(nn_last_layer, (-1, num_classes), name="fcn_logits")
    correct_label_reshaped = tf.reshape(correct_label, (-1, num_classes))

    # Calculate distance from actual labels using cross entropy
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label_reshaped[:])

    # Take mean for total loss
    loss_op = tf.reduce_mean(cross_entropy, name="fcn_loss")
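    # The preview is truncated here; a plausible completion, assuming the usual
    # next step of minimizing the loss with an Adam optimizer:
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss_op, name="fcn_train_op")
    return logits, train_op, loss_op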
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
             cross_entropy_loss, input_image,
             correct_label, keep_prob, learning_rate):
    keep_prob_value = 0.5
    learning_rate_value = 0.001
    for epoch in range(epochs):
        # Iterate over the batches produced by get_batches_fn
        total_loss = 0
        for X_batch, gt_batch in get_batches_fn(batch_size):
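            # The preview is truncated here; a plausible completion of the
            # training step, feeding the placeholders passed into this function:
            _, loss = sess.run([train_op, cross_entropy_loss],
                               feed_dict={input_image: X_batch,
                                          correct_label: gt_batch,
                                          keep_prob: keep_prob_value,
                                          learning_rate: learning_rate_value})
            total_loss += loss
        print("Epoch {}: total loss = {:.3f}".format(epoch + 1, total_loss))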