Skip to content

Instantly share code, notes, and snippets.

@vashineyu
Created February 14, 2019 02:02
Show Gist options
  • Save vashineyu/9daf20fdf5b9b812af8758aad28f4eac to your computer and use it in GitHub Desktop.
Save vashineyu/9daf20fdf5b9b812af8758aad28f4eac to your computer and use it in GitHub Desktop.
# model.py
import tensorflow as tf
import tensorflow.keras.layers as layers
import tensorflow.keras.models as models
import tensorflow.nn as F
class Conv_bn_relu(models.Model):
    """Conv2D -> BatchNorm -> ReLU block.

    Args:
        filters (int): number of convolution filters.
        kernel_size (int): spatial size of the convolution kernel.
        strides (int): convolution stride.
        data_format (str): "channels_last" or "channels_first".
        use_bias (bool): whether the convolution adds a bias term.

    Returns:
        A callable Keras model applying conv -> batch-norm -> relu.
    """

    def __init__(self, filters, kernel_size=3, strides=1, data_format="channels_last",
                 use_bias=True, **kwargs):
        super(Conv_bn_relu, self).__init__(**kwargs)
        # BUG FIX: original compared strings with `is`, which tests object
        # identity and only works by accident of CPython string interning.
        axis = -1 if data_format == "channels_last" else 1
        # BUG FIX: forward data_format so the conv pads/convolves on the same
        # axes that BatchNormalization normalizes over.
        self.conv = Conv2DFixedPadding(filters=filters, kernel_size=kernel_size,
                                       strides=strides, use_bias=use_bias,
                                       data_format=data_format)
        self.normalize = layers.BatchNormalization(axis=axis)

    def call(self, x, training=True):
        x = self.conv(x)
        # training flag matters for batch-norm (batch stats vs. moving stats).
        x = self.normalize(x, training=training)
        return F.relu(x)
class StackCNN(models.Model):
    """Sequential CNN: (Conv-BN-ReLU [+ MaxPool]) x N -> Flatten -> Dense -> softmax.

    Args:
        neurons_of_layers (list of int): filters for each conv block.
        output_units (int): number of output units of the final Dense layer.

    Returns:
        A callable Keras model producing class probabilities (softmax).
    """

    def __init__(self, neurons_of_layers, output_units, **kwargs):
        super(StackCNN, self).__init__(**kwargs)
        # BUG FIX: `self.layers` collides with the read-only `layers`
        # property of tf.keras.Model; use a private attribute instead.
        # (Keras still tracks sublayers assigned inside a list attribute.)
        # Also removed leftover debug print statements ("HI" / "YA").
        self.body = []
        for i, neurons in enumerate(neurons_of_layers):
            self.body.append(Conv_bn_relu(filters=neurons, kernel_size=3, strides=1))
            # BUG FIX: enumerate yields i in [0, len-1], so the original
            # `i != len(neurons_of_layers)` was always True; the intent is to
            # skip pooling after the final conv block.
            if i != len(neurons_of_layers) - 1:
                self.body.append(layers.MaxPooling2D(pool_size=2, strides=1))
        self.body.append(layers.Flatten())
        self.body.append(layers.Dense(units=output_units))

    def call(self, x, training=True):
        for layer in self.body:
            x = layer(x, training=training)
        return F.softmax(x)
## Fixed Functions ##
def fixed_padding(inputs, kernel_size, data_format):
    """Zero-pad the spatial dimensions independently of the input size.

    Adapted from:
    https://github.com/tensorflow/models/blob/master/official/resnet/resnet_model.py

    Args:
        inputs: A tensor of size [batch, channels, height_in, width_in] or
            [batch, height_in, width_in, channels] depending on data_format.
        kernel_size: The kernel to be used in the conv2d or max_pool2d
            operation; should be a positive integer.
        data_format: The input format ('channels_last' or 'channels_first').

    Returns:
        A tensor with the same format as the input, unchanged when
        kernel_size == 1 and symmetrically padded when kernel_size > 1.
    """
    total = kernel_size - 1
    before = total // 2
    after = total - before
    # The two spatial axes get identical (before, after) padding; the batch
    # and channel axes get none. Only their position depends on data_format.
    if data_format == 'channels_first':
        paddings = [[0, 0], [0, 0], [before, after], [before, after]]
    else:
        paddings = [[0, 0], [before, after], [before, after], [0, 0]]
    return tf.pad(tensor=inputs, paddings=paddings)
class Conv2DFixedPadding(models.Model):
    """Strided 2-D convolution with explicit, input-size-independent padding.

    The amount of padding is determined solely by `kernel_size`, not by the
    input dimensions (unlike plain `tf.layers.conv2d`). Based on:
    https://github.com/tensorflow/models/blob/master/official/resnet/resnet_model.py
    """

    def __init__(self, filters, kernel_size=3, strides=1, data_format="channels_last",
                 use_bias=True, **kwargs):
        super(Conv2DFixedPadding, self).__init__(**kwargs)
        self.kernel_size = kernel_size
        self.data_format = data_format
        self.strides = strides
        # With stride > 1 we pad explicitly in call(), so the conv itself runs
        # VALID; at stride 1, built-in SAME padding already suffices.
        pad_mode = 'SAME' if strides == 1 else 'VALID'
        self.conv = layers.Conv2D(filters=filters,
                                  kernel_size=kernel_size,
                                  strides=strides,
                                  padding=pad_mode,
                                  use_bias=use_bias,
                                  data_format=data_format)

    def call(self, x):
        if self.strides <= 1:
            return self.conv(x)
        padded = fixed_padding(x, self.kernel_size, self.data_format)
        return self.conv(padded)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment