
fchollet / keras_intermediate.py
Created May 28, 2015 17:34
Defining a Theano function to output intermediate transformations in a Keras model
import theano
from keras.models import Sequential
from keras.layers.core import Dense, Activation
X_train, y_train = ... # load some training data
X_batch = ... # a batch of test data
# this is your initial model
model = Sequential()
model.add(Dense(20, 64))  # old Keras 0.x API: Dense(input_dim, output_dim)
model.add(Activation('tanh'))
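The preview cuts off before the step the title describes. A minimal sketch of that step, assuming the old Keras 0.x layer API (layer.input, layer.get_output()); the layer index is illustrative:

# compile a Theano function mapping the model input to the output of an
# intermediate layer (index 1 here is illustrative)
get_layer_output = theano.function(
    [model.layers[0].input],
    model.layers[1].get_output(train=False),
    allow_input_downcast=True)
intermediate_activations = get_layer_output(X_batch)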
udibr / gruln.py
Last active November 7, 2020 02:34
Keras GRU with Layer Normalization
import numpy as np
from keras.layers import GRU, initializations
from keras import backend as K
from collections import OrderedDict
class GRULN(GRU):
'''Gated Recurrent Unit with Layer Normalization
Current implementation only works with consume_less = 'gpu', which is
already set.
# Arguments
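The preview stops inside the docstring. The operation the class adds to a stock GRU is layer normalization of the recurrent pre-activations; a minimal sketch with the Keras backend (the gain/bias parameter names are illustrative):

def layer_norm(x, gain, bias, epsilon=1e-5):
    # normalize each sample across its feature axis, then rescale and shift
    mean = K.mean(x, axis=-1, keepdims=True)
    std = K.sqrt(K.var(x, axis=-1, keepdims=True) + epsilon)
    return gain * (x - mean) / std + bias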
dswah / layers_tied.py
Last active September 17, 2021 22:45
Tied Convolutional Weights with Keras for CNN Auto-encoders
from keras import backend as K
from keras import activations, initializations, regularizers, constraints
from keras.engine import Layer, InputSpec
from keras.utils.np_utils import conv_output_length
from keras.layers import Convolution1D, Convolution2D
import tensorflow as tf
class Convolution1D_tied(Layer):
'''Convolution operator for filtering neighborhoods of one-dimensional inputs.
When using this layer as the first layer in a model,
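The preview stops mid-docstring. The layer's point is that the decoder of the auto-encoder reuses the encoder's kernel rather than learning its own; a minimal TensorFlow sketch of that tying (not the gist's code; encoder_kernel is illustrative):

import tensorflow as tf

def tied_conv1d_decode(x, encoder_kernel):
    # encoder_kernel: (filter_length, in_channels, nb_filter), borrowed from
    # the encoder layer; flip the taps and swap the channel axes so decoding
    # uses exactly the same weights as encoding
    kernel_t = tf.reverse(encoder_kernel, axis=[0])
    kernel_t = tf.transpose(kernel_t, perm=[0, 2, 1])
    return tf.nn.conv1d(x, kernel_t, stride=1, padding='SAME')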
wassname / keras_attention_wrapper.py
Created November 1, 2016 08:06
A keras attention layer that wraps RNN layers.
"""
A keras attention layer that wraps RNN layers.
Based on TensorFlow's [attention_decoder](https://github.com/tensorflow/tensorflow/blob/c8a45a8e236776bed1d14fd71f3b6755bd63cc58/tensorflow/python/ops/seq2seq.py#L506)
and [Grammar as a Foreign Language](https://arxiv.org/abs/1412.7449).
date: 20161101
author: wassname
url: https://gist.github.com/wassname/5292f95000e409e239b9dc973295327a
"""
ziadoz / install.sh
Last active April 20, 2024 10:18
Install Chrome, ChromeDriver and Selenium on Ubuntu 16.04
#!/usr/bin/env bash
# https://developers.supportbee.com/blog/setting-up-cucumber-to-run-with-Chrome-on-Linux/
# https://gist.github.com/curtismcmullan/7be1a8c1c841a9d8db2c
# https://stackoverflow.com/questions/10792403/how-do-i-get-chrome-working-with-selenium-using-php-webdriver
# https://stackoverflow.com/questions/26133486/how-to-specify-binary-path-for-remote-chromedriver-in-codeception
# https://stackoverflow.com/questions/40262682/how-to-run-selenium-3-x-with-chrome-driver-through-terminal
# https://askubuntu.com/questions/760085/how-do-you-install-google-chrome-on-ubuntu-16-04
# Versions
CHROME_DRIVER_VERSION=`curl -sS https://chromedriver.storage.googleapis.com/LATEST_RELEASE`

# the preview is truncated here; sketch of the usual continuation:
# install Google Chrome from Google's apt repository
wget -q -O - https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" | sudo tee /etc/apt/sources.list.d/google-chrome.list
sudo apt-get update
sudo apt-get -y install google-chrome-stable unzip
# install ChromeDriver into /usr/local/bin
wget -N https://chromedriver.storage.googleapis.com/${CHROME_DRIVER_VERSION}/chromedriver_linux64.zip
unzip chromedriver_linux64.zip && chmod +x chromedriver
sudo mv chromedriver /usr/local/bin/
cbaziotis / Attention.py
Last active March 28, 2023 11:50
Keras Layer that implements an Attention mechanism for temporal data. Supports Masking. Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
from keras import backend as K, initializers, regularizers, constraints
from keras.engine.topology import Layer
def dot_product(x, kernel):
    """Dot-product wrapper compatible with both Theano and TensorFlow.

    Args:
        x: input tensor
        kernel: weight vector
    """
    if K.backend() == 'tensorflow':
        # TF cannot dot a 3-D tensor with a 1-D kernel directly
        return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
    return K.dot(x, kernel)
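Below the helper, the layer implements the Raffel et al. feed-forward attention named in the description; a minimal sketch of that computation using dot_product (W and b are the layer's learned weights; names illustrative):

def feedforward_attention(x, W, b):
    # x: (batch, timesteps, features)
    e = K.tanh(dot_product(x, W) + b)            # one score per timestep
    a = K.softmax(e)                             # normalize over timesteps
    return K.sum(x * K.expand_dims(a), axis=1)   # weighted sum of timesteps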
jihunchoi / masked_cross_entropy.py
Last active January 22, 2024 19:20
PyTorch workaround for masking cross entropy loss
import torch
from torch.autograd import Variable  # pre-0.4 PyTorch style, matching the gist


def _sequence_mask(sequence_length, max_len=None):
    if max_len is None:
        max_len = sequence_length.data.max()
    batch_size = sequence_length.size(0)
    seq_range = torch.arange(0, max_len).long()  # torch.range is deprecated
    seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
    seq_range_expand = Variable(seq_range_expand)
    if sequence_length.is_cuda:
        seq_range_expand = seq_range_expand.cuda()
    seq_length_expand = (sequence_length.unsqueeze(1)
                         .expand_as(seq_range_expand))
    return seq_range_expand < seq_length_expand
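The mask is then used to average the loss over real (non-padded) timesteps only; a minimal sketch in the same pre-0.4 style, where logits has shape (batch, max_len, num_classes):

from torch.nn import functional as F

def masked_cross_entropy(logits, target, length):
    log_probs = F.log_softmax(logits.view(-1, logits.size(-1)))
    losses = -torch.gather(log_probs, dim=1, index=target.view(-1, 1))
    losses = losses.view(*target.size())                  # (batch, max_len)
    mask = _sequence_mask(length, max_len=target.size(1)).float()
    return (losses * mask).sum() / mask.sum()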
mjdietzx / waya-dl-setup.sh
Last active March 13, 2024 15:08
Install CUDA Toolkit v8.0 and cuDNN v6.0 on Ubuntu 16.04
#!/bin/bash
# install CUDA Toolkit v8.0
# instructions from https://developer.nvidia.com/cuda-downloads (linux -> x86_64 -> Ubuntu -> 16.04 -> deb (network))
CUDA_REPO_PKG="cuda-repo-ubuntu1604_8.0.61-1_amd64.deb"
wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/${CUDA_REPO_PKG}
sudo dpkg -i ${CUDA_REPO_PKG}
sudo apt-get update
sudo apt-get -y install cuda

# install cuDNN v6.0 (preview truncated here; sketch -- this URL may require an NVIDIA developer login)
CUDNN_TAR_FILE="cudnn-8.0-linux-x64-v6.0.tgz"
wget http://developer.download.nvidia.com/compute/redist/cudnn/v6.0/${CUDNN_TAR_FILE}
tar -xzvf ${CUDNN_TAR_FILE}
sudo cp -P cuda/include/cudnn.h /usr/local/cuda-8.0/include
sudo cp -P cuda/lib64/libcudnn* /usr/local/cuda-8.0/lib64/
sudo chmod a+r /usr/local/cuda-8.0/lib64/libcudnn*
spro / pytorch-simple-rnn.py
Last active April 25, 2022 10:50
PyTorch RNN training example
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torch import optim
import numpy as np
import math, random
# Generating a noisy multi-sin wave (the preview is truncated here; a sketch)
def sine(X, signal_freq=60.0):
    return (np.sin(2 * np.pi * X / signal_freq) +
            np.sin(4 * np.pi * X / signal_freq)) / 2.0

def noisy(Y, noise_range=(-0.05, 0.05)):
    return Y + np.random.uniform(noise_range[0], noise_range[1], size=Y.shape)
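A minimal sketch (not the gist's exact code) of the training loop the title describes: fit an RNN to predict the next sample of the noisy wave, in the same pre-0.4 Variable style:

class SimpleRNN(nn.Module):
    def __init__(self, hidden_size=50):
        super(SimpleRNN, self).__init__()
        self.rnn = nn.RNN(1, hidden_size, batch_first=True)
        self.out = nn.Linear(hidden_size, 1)

    def forward(self, x, hidden=None):
        out, hidden = self.rnn(x, hidden)
        return self.out(out), hidden

wave = noisy(sine(np.arange(200)))
inputs = Variable(torch.from_numpy(wave[:-1]).float().view(1, -1, 1))
targets = Variable(torch.from_numpy(wave[1:]).float().view(1, -1, 1))

model = SimpleRNN()
optimizer = optim.Adam(model.parameters(), lr=0.01)
criterion = nn.MSELoss()
for epoch in range(100):
    optimizer.zero_grad()
    outputs, hidden = model(inputs)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()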
"""Downsized version of Xception, without residual connections.
"""
from __future__ import print_function
from __future__ import absolute_import
from keras.models import Model
from keras.layers import Dense
from keras.layers import Input
from keras.layers import BatchNormalization
from keras.layers import Activation
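The preview ends inside the import block. The file goes on to stack depthwise-separable convolutions; a minimal sketch of one such block (Keras 2 layer names; no residual shortcut, per the docstring):

from keras.layers import SeparableConv2D

def separable_block(x, filters):
    # separable conv -> batch norm -> ReLU
    x = SeparableConv2D(filters, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x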