@scottire
Last active June 15, 2021 11:48
PyTorch ConvTranspose1d written in numpy
import numpy as np
import torch
import torch.nn as nn
import panel as pn
import holoviews as hv
from ipywidgets import widgets, interact
from IPython.display import display
hv.extension('bokeh')
# Demo configuration shared by both implementations below.
m_idim = 6        # input channels
m_odim = 4        # output channels
kernel_size = 7   # size of the pre-allocated kernel bank; the demo slices it down
use_bias = True
weights = np.random.randint(5, size=(m_idim, kernel_size, m_odim))
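
# Note (added): the weight bank above is laid out as (in_channels, kernel_size,
# out_channels), while nn.ConvTranspose1d expects (in_channels, out_channels,
# kernel_size); hence the .transpose(0, 2, 1) calls wherever the weights are
# handed to PyTorch or reordered for the numpy scatter loop below.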
def conv_transposed_1d_numpy(arr, kernel_size, stride, padding=1, output_padding=1,
                             dilation=1, use_bias=True, weights_np=None, bias_np=None):
    """Transposed 1D convolution in plain numpy, mirroring nn.ConvTranspose1d.

    Note: the scatter loop below assumes dilation == 1.
    """
    # PyTorch requires output_padding to be smaller than stride or dilation.
    assert output_padding < dilation or output_padding < stride
    in_channels = m_idim
    out_channels = m_odim
    # Reorder weights from (in, k, out) to (in, out, k) so each kernels[i] matches
    # an (out_channels, kernel_size) slice of the output.
    kernels = weights_np.transpose(0, 2, 1)
    if use_bias:
        bias = bias_np
    in_seq_len = arr.shape[1]
    # Standard ConvTranspose1d output-length formula.
    out_seq_len = (in_seq_len - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1
    print(out_seq_len)
    # Build the un-cropped output, then trim `padding` from both ends at the end.
    output_array = np.zeros((out_channels, out_seq_len + 2 * padding))
    for j in range(in_seq_len):
        # Each input position scatters a full kernel, starting at j * stride.
        out_position = j * stride
        for i in range(in_channels):
            output_array[:, out_position:out_position + kernel_size] += arr[i, j] * kernels[i]
    if use_bias:
        for i in range(out_channels):
            output_array[i] += bias[i]
    if padding:
        return output_array[:, padding:-padding]
    return output_array
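
# A minimal sanity check (added, not part of the original gist): runs one fixed
# configuration through both nn.ConvTranspose1d and the numpy implementation
# above and asserts the outputs agree. It reuses the globals m_idim, m_odim and
# weights defined earlier; the helper name _check_against_torch is our own.
def _check_against_torch(seq_len=5, k=3, stride=2, padding=1, output_padding=0):
    weights_np = weights[:, :k, :].astype(np.float64)
    bias_np = np.arange(m_odim, dtype=np.float64)
    in_np = np.tile(np.arange(1.0, seq_len + 1), (m_idim, 1))
    conv_t = nn.ConvTranspose1d(m_idim, m_odim, kernel_size=k, stride=stride,
                                padding=padding, output_padding=output_padding, bias=True)
    conv_t.weight.data = torch.from_numpy(weights_np.transpose(0, 2, 1)).double()
    conv_t.bias.data = torch.from_numpy(bias_np).double()
    torch_out = conv_t(torch.from_numpy(in_np).unsqueeze(0)).squeeze(0).detach().numpy()
    np_out = conv_transposed_1d_numpy(in_np, k, stride, padding=padding,
                                      output_padding=output_padding, use_bias=True,
                                      weights_np=weights_np, bias_np=bias_np)
    assert np.allclose(torch_out, np_out), "numpy and PyTorch outputs differ"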
@interact(padding=widgets.IntSlider(min=0, max=10, value=1),
          output_padding=widgets.IntSlider(min=0, max=10, value=0),
          # kernel_size is capped at 7, the size the weight bank was allocated with.
          kernel_size=widgets.IntSlider(min=1, max=kernel_size, value=3),
          stride=widgets.IntSlider(min=1, max=10, value=2),
          seq_len=widgets.IntSlider(min=1, max=20, value=5))
def out(padding, output_padding, kernel_size, stride, seq_len):
    dilation = 1
    amount_of_zero_pad = dilation * (kernel_size - 1) - padding
    display(f"Amount of zero pad {amount_of_zero_pad}")
    conv_t = nn.ConvTranspose1d(m_idim, m_odim, kernel_size=kernel_size, stride=stride,
                                padding=padding, output_padding=output_padding, bias=use_bias)
    # Slice the fixed weight bank down to the chosen kernel size and hand the same
    # weights/bias to both PyTorch and the numpy implementation.
    weights_np = np.array(weights)
    bias_np = np.array(list(range(m_odim)))
    conv_t.weight.data = torch.from_numpy(weights_np[:, :kernel_size, :].transpose(0, 2, 1)).double()
    conv_t.bias.data = torch.from_numpy(bias_np).double()
    in_np = np.array([list(range(1, seq_len + 1))] * m_idim)
    in_tensor = torch.from_numpy(in_np).unsqueeze(0).double()
    in_seq_len = in_tensor.shape[-1]
    tensor_out = conv_t(in_tensor).squeeze(0)
    tensor_out = tensor_out.detach().numpy()
    # Predicted length from the ConvTranspose1d formula vs. PyTorch's actual output.
    out_seq_len = (in_seq_len - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1
    display('predicted: ', out_seq_len)
    display('actual:', tensor_out.shape[-1])
    image = hv.Image(tensor_out, vdims=hv.Dimension('z', range=(0, 100)),
                     bounds=(0, 0, tensor_out.shape[-1], m_odim)).opts(
        cmap='PiYG', xlabel="Sequence Dimension", ylabel='Feature Dimension')
    out_np = conv_transposed_1d_numpy(in_np, kernel_size, stride, padding=padding,
                                      output_padding=output_padding, use_bias=use_bias,
                                      weights_np=weights_np[:, :kernel_size, :], bias_np=bias_np)
    image_np = hv.Image(out_np, vdims=hv.Dimension('z', range=(0, 100)),
                        bounds=(0, 0, out_np.shape[-1], m_odim)).opts(
        cmap='PiYG', xlabel="Sequence Dimension", ylabel='Feature Dimension')
    return pn.Column(image * hv.Labels(image).opts(width=800),
                     image_np * hv.Labels(image_np).opts(width=800))
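
# Worked example (added): with the default slider settings seq_len=5, stride=2,
# padding=1, kernel_size=3, output_padding=0 and dilation=1, the length formula
# gives
#   (5 - 1) * 2 - 2 * 1 + 1 * (3 - 1) + 0 + 1 = 9,
# which should match the "actual" length the demo reports for those settings.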