
Hello :)
AjayTalati

  • London, UK
@AjayTalati
AjayTalati / 2048.c
Last active August 29, 2015 14:14 — forked from justecorruptio/2048.c
M[16],X=16,W,k;main(){T(system("stty cbreak")
);puts(W&1?"WIN":"LOSE");}K[]={2,3,1};s(f,d,i
,j,l,P){for(i=4;i--;)for(j=k=l=0;k<4;)j<4?P=M
[w(d,i,j++)],W|=P>>11,l*P&&(f?M[w(d,i,k)]=l<<
(l==P):0,k++),l=l?P?l-P?P:0:l:P:(f?M[w(d,i,k)
]=l:0,++k,W|=2*!l,l=0);}w(d,i,j){return d?w(d
-1,j,3-i):4*i+j;}T(i){for(i=X+rand()%X;M[i%X]
*i;i--);i?M[i%X]=2<<rand()%2:0;for(W=i=0;i<4;
)s(0,i++);for(i=X,puts("\e[2J\e[H");i--;i%4||
puts(""))printf(M[i]?"%4d|":" |",M[i]);W-2
@AjayTalati
AjayTalati / 2048.cpp
Last active August 29, 2015 14:14 — forked from chandruscm/2048.cpp
#include<iostream>
#include<ctime>
#include<unistd.h>
#include<cstdlib>
#include<cstdio>
#include<cmath>
using namespace std;
int press_enter;
@AjayTalati
AjayTalati / adam.py
Last active August 29, 2015 14:16 — forked from Newmu/adam.py
"""
The MIT License (MIT)
Copyright (c) 2015 Alec Radford
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
@AjayTalati
AjayTalati / dnn.py
Last active August 29, 2015 14:16 — forked from syhw/dnn.py
"""
A deep neural network, with or without dropout, in one file.
License: Do What The Fuck You Want to Public License http://www.wtfpl.net/
"""
import numpy, theano, sys, math
from theano import tensor as T
from theano import shared
from theano.tensor.shared_randomstreams import RandomStreams
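A minimal sketch of how dropout is typically implemented with the RandomStreams import shown above (the helper name and default rate are illustrative, not the gist's own code):

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=42)

def dropout(h, p_drop=0.5):
    # sample a binary keep-mask and rescale so the expected activation is unchanged
    mask = srng.binomial(n=1, p=1 - p_drop, size=h.shape,
                         dtype=theano.config.floatX)
    return h * mask / (1 - p_drop)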
# Load the MNIST digit recognition dataset into R
# http://yann.lecun.com/exdb/mnist/
# assume you have all 4 files and gunzip'd them
# creates train$n, train$x, train$y and test$n, test$x, test$y
# e.g. train$x is a 60000 x 784 matrix, each row is one digit (28x28)
# call: show_digit(train$x[5,]) to see a digit.
# brendan o'connor - gist.github.com/39760 - anyall.org
load_mnist <- function() {
  load_image_file <- function(filename) {
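The comments describe a 60000 x 784 matrix with one 28x28 digit per row. For comparison, a minimal Python sketch of reading the same IDX-format image file (the file name and helper name are illustrative, not part of the gist):

import struct
import numpy as np

def load_mnist_images(filename="train-images-idx3-ubyte"):
    # IDX image format: 4 big-endian uint32s (magic, count, rows, cols), then raw pixels
    with open(filename, "rb") as f:
        magic, n, rows, cols = struct.unpack(">IIII", f.read(16))
        pixels = np.frombuffer(f.read(), dtype=np.uint8)
    # one digit per row, matching the 60000 x 784 layout described above
    return pixels.reshape(n, rows * cols)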
--[[
Efficient LSTM in Torch using nngraph library. This code was optimized
by Justin Johnson (@jcjohnson) based on the trick of batching up the
LSTM GEMMs, as also seen in my efficient Python LSTM gist.
--]]
function LSTM.fast_lstm(input_size, rnn_size)
  local x = nn.Identity()()
  local prev_c = nn.Identity()()
  local prev_h = nn.Identity()()
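The comment block above credits the speedup to batching the LSTM GEMMs: all four gate pre-activations come out of a single matrix multiply and are then sliced apart. A rough NumPy sketch of that idea for one timestep and a single example (the parameter names W and b are placeholders, not the gist's):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x, prev_h, prev_c, W, b):
    # one GEMM yields every gate pre-activation; W is (input_size + rnn_size, 4 * rnn_size)
    gates = np.concatenate([x, prev_h]) @ W + b
    i, f, o, g = np.split(gates, 4)          # input, forget, output gates and candidate
    i, f, o, g = sigmoid(i), sigmoid(f), sigmoid(o), np.tanh(g)
    next_c = f * prev_c + i * g
    next_h = o * np.tanh(next_c)
    return next_h, next_c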
@AjayTalati
AjayTalati / LSTM.lua
Last active August 29, 2015 14:20 — forked from skaae/LSTM.lua
--[[
LSTM cell. Modified from
https://github.com/oxford-cs-ml-2015/practical6/blob/master/LSTM.lua
--]]
local LSTM = {}
-- Creates one timestep of one LSTM
function LSTM.lstm(opt)
  local x = nn.Identity()()
# Alec Radford, Indico, Kyle Kastner
# License: MIT
"""
Convolutional VAE in a single file.
Bringing in code from IndicoDataSolutions and Alec Radford (NewMu)
Additionally converted to use the default conv2d interface instead of explicit cuDNN.
"""
import theano
import theano.tensor as T
from theano.compat.python2x import OrderedDict
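One step any VAE implementation needs is the reparameterization trick, sketched here with the same Theano imports (the function and variable names are illustrative, not taken from the gist):

import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=123)

def sample_z(mu, log_sigma):
    # z = mu + sigma * eps with eps ~ N(0, 1), keeping sampling differentiable in mu and log_sigma
    eps = srng.normal(size=mu.shape)
    return mu + T.exp(log_sigma) * eps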
import re
bvh_file = "Example1.bvh"
def identifier(scanner, token): return "IDENT", token
def operator(scanner, token): return "OPERATOR", token
def digit(scanner, token): return "DIGIT", token
def open_brace(scanner, token): return "OPEN_BRACE", token
def close_brace(scanner, token): return "CLOSE_BRACE", token
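These handlers follow the (scanner, token) callback form used by Python's re.Scanner. A sketch of wiring them into a tokenizer for the BVH file above (the regular expressions are assumptions, not the gist's actual patterns, and the operator handler is left out because its pattern isn't shown):

reader = re.Scanner([
    (r"[a-zA-Z_][a-zA-Z0-9_]*", identifier),     # e.g. HIERARCHY, ROOT, JOINT, OFFSET
    (r"-?\d+\.?\d*(?:[eE][-+]?\d+)?", digit),    # integers and floats
    (r"\{", open_brace),
    (r"\}", close_brace),
    (r"\s+", None),                              # skip whitespace
])

with open(bvh_file) as f:
    tokens, remainder = reader.scan(f.read())    # remainder holds any unmatched tail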
import bpy
# get the material
mat = bpy.data.materials['Material']
# get the nodes
nodes = mat.node_tree.nodes
# clear all nodes to start clean
for node in nodes:
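A sketch of how a node-tree reset like this typically continues: clear the tree, then rebuild a minimal shader graph (the Diffuse BSDF and the output wiring are assumptions, not necessarily what the gist builds):

import bpy

mat = bpy.data.materials['Material']
mat.use_nodes = True
nodes = mat.node_tree.nodes
links = mat.node_tree.links

# remove every existing node (iterate over a copy so removal is safe)
for node in list(nodes):
    nodes.remove(node)

# rebuild a minimal graph: shader node wired into the material output
diffuse = nodes.new(type='ShaderNodeBsdfDiffuse')
output = nodes.new(type='ShaderNodeOutputMaterial')
links.new(diffuse.outputs['BSDF'], output.inputs['Surface'])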