| Block | Elements | Kernel Size | Filter depth | Output depth | Stride | Misc. Info |
|---|---|---|---|---|---|---|
| conv1 | Convolution | [5, 5] | 3 | 64 | [1, 1] | |
| | ReLU | - | - | - | - | |
| | Max-pool | [3, 3] | - | - | [2, 2] | |
| conv2 | Convolution | [5, 5] | 64 | 64 | [1, 1] | |
| | ReLU | - | - | - | - | |
| | Max-pool | [3, 3] | - | - | [2, 2] | |
View ffmpeg_utils.py
# MIT License
#
# Copyright (c) 2017 Tom Runia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
View blender_screen_coordinates.py
# Initialize the camera
camera = bpy.data.cameras.new('Camera')
camera_obj = bpy.data.objects.new('Camera', camera)
scene.objects.link(camera_obj)
scene.objects.active = camera_obj
scene.camera = camera_obj
camera_obj.select = True
camera_obj.location = (0, 0, 0)
View video_input_pipeline.py
def decode(serialized_example, sess):
    '''
    Given a serialized example in which the frames are stored as
    compressed JPG images 'frames/0001', 'frames/0002' etc., this
    function samples SEQ_NUM_FRAMES from the frame list, decodes them from
    JPG into a tensor and packs them to obtain a tensor of shape (N,H,W,3).
    Returns the tuple (frames (tf.uint8), class_label (tf.int64)).
    :param serialized_example: serialized example from tf.data.TFRecordDataset
    :return: tuple: (frames (tf.uint8), class_label (tf.int64))
View uva_dlc_cnn_architecture.md
View train_network.py
# Initialize placeholders for feeding in to the queue
self.queue_inputs = tf.placeholder(tf.float32, shape=[None, self.config.seq_length, self.config.image_size, self.config.image_size], name="queue_inputs")
self.queue_targets = tf.placeholder(tf.uint8, shape=[None, self.config.seq_length], name="queue_targets")
min_after_dequeue = 10000
capacity = min_after_dequeue + 3 * self.config.batch_size
q = tf.FIFOQueue( |
View tf_load_and_enqueue.py
def load_and_enqueue(input_dir, sess, coord, enqueue_op, queue_inputs, queue_targets, num_examples, examples_per_file=100, rewrite_targets=True):
    # Check if we have a sufficient number of HDF5 files to load all the samples
    filenames_queue = glob.glob(os.path.join(input_dir, "train/*.h5"))
    filenames_queue.sort()
    assert len(filenames_queue) > 0
    examples_available = len(filenames_queue)*examples_per_file
    num_examples = min(examples_available, num_examples)
View segment_custom_angle.lua
require 'image'
require 'lfs'
require 'cunn'
require 'nngraph'
function segment(model, flow_mag_ang_file, minmax_file, output_file)
    local file = io.open(minmax_file)
    local minmaxes = {}
    local ind = 1;
View filenames_from_queue.py
filename_queue = tf.FIFOQueue(100000, [tf.string], shapes=[[]])
# ...
reader = tf.WholeFileReader()
image_filename, image_raw = reader.read(self._filename_queue)
image = tf.image.decode_jpeg(image_raw, channels=3)
# Image preprocessing
image_preproc = ...
View train_lstm.py
import time
import numpy as np
import matplotlib.pyplot as plt
from Blockchain_test import return_data
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
View train_lstm.py
import time
import numpy as np
import matplotlib.pyplot as plt
from Blockchain_test import return_data
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
Newer | Older