Block | Elements | Kernel Size | Filter depth | Output depth | Stride | Misc. Info |
---|---|---|---|---|---|---|
 | Convolution | [5, 5] | 3 | 64 | [1, 1] | |
conv1 | ReLU | - | - | - | - | |
 | Max-pool | [3, 3] | - | - | [2, 2] | |
 | Convolution | [5, 5] | 64 | 64 | [1, 1] | |
conv2 | ReLU | - | - | - | - | |
 | Max-pool | [3, 3] | - | - | [2, 2] | |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads_and_vars = average_gradients(t_grads)
# Optionally perform gradient clipping on the joint (global) norm of all
# gradients, controlled by config.max_norm_gradient (<= 0 disables it).
if config.max_norm_gradient > 0:
    grads, variables = zip(*grads_and_vars)
    grads_clipped, _ = tf.clip_by_global_norm(grads, clip_norm=config.max_norm_gradient)
    # BUG FIX: materialize the pairs as a list. In Python 3, zip() returns a
    # one-shot iterator that would be silently exhausted after a single pass
    # (e.g. by apply_gradients plus any later inspection/logging).
    grads_and_vars = list(zip(grads_clipped, variables))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Standard library
import time

# Third-party
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import LSTM, Dense, Embedding
from keras.models import Sequential
from keras.preprocessing import sequence

# Local
from Blockchain_test import return_data
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Standard library
import time

# Third-party
import matplotlib.pyplot as plt
import numpy as np
from keras.layers import LSTM, Dense, Embedding
from keras.models import Sequential
from keras.preprocessing import sequence

# Local
from Blockchain_test import return_data
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# String queue holding filenames to read; capacity 100000, scalar elements.
filename_queue = tf.FIFOQueue(100000, [tf.string], shapes=[[]])
# ...
# WholeFileReader returns (filename, raw file contents) per dequeued name.
reader = tf.WholeFileReader()
# NOTE(review): the original read from `self._filename_queue`, but the queue
# above is bound to the local name `filename_queue` — presumably the snippet
# was lifted from a class where the queue is stored on self. Confirm which
# binding is intended in the surrounding context.
image_filename, image_raw = reader.read(self._filename_queue)
# Decode the raw JPEG bytes into an RGB (3-channel) uint8 image tensor.
image = tf.image.decode_jpeg(image_raw, channels=3)
# Image preprocessing
image_preproc = ...
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
require 'image' | |
require 'lfs' | |
require 'cunn' | |
require 'nngraph' | |
function segment(model, flow_mag_ang_file, minmax_file, output_file) | |
local file = io.open(minmax_file) | |
local minmaxes = {} | |
local ind = 1; |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def load_and_enqueue(input_dir, sess, coord, enqueue_op, queue_inputs, queue_targets, num_examples, examples_per_file=100, rewrite_targets=True): | |
# Check if we have a sufficient number of HDF5 files to load all the samples | |
filenames_queue = glob.glob(os.path.join(input_dir, "train/*.h5")) | |
filenames_queue.sort() | |
assert len(filenames_queue) > 0 | |
examples_available = len(filenames_queue)*examples_per_file | |
num_examples = min(examples_available, num_examples) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Initialize placeholders for feeding in to the queue | |
self.queue_inputs = tf.placeholder(tf.float32, shape=[None, self.config.seq_length, self.config.image_size, self.config.image_size], name="queue_inputs") | |
self.queue_targets = tf.placeholder(tf.uint8, shape=[None, self.config.seq_length], name="queue_targets") | |
min_after_dequeue = 10000 | |
capacity = min_after_dequeue + 3 * self.config.batch_size | |
q = tf.FIFOQueue( |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def decode(serialized_example, sess): | |
''' | |
Given a serialized example in which the frames are stored as | |
compressed JPG images 'frames/0001', 'frames/0002' etc., this | |
function samples SEQ_NUM_FRAMES from the frame list, decodes them from | |
JPG into a tensor and packs them to obtain a tensor of shape (N,H,W,3). | |
Returns the the tuple (frames, class_label (tf.int64) | |
:param serialized_example: serialized example from tf.data.TFRecordDataset | |
:return: tuple: (frames (tf.uint8), class_label (tf.int64) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Initialize the camera: create the camera datablock and an object wrapping
# it, link it into the scene, and make it both active and selected.
camera = bpy.data.cameras.new('Camera')
camera_obj = bpy.data.objects.new('Camera', camera)
scene.objects.link(camera_obj)
scene.objects.active = camera_obj
# Register it as the scene's rendering camera.
scene.camera = camera_obj
camera_obj.select = True
# Start at the world origin; callers reposition it afterwards as needed.
camera_obj.location = (0, 0, 0)