Start a new tmux session:
tmux
Start a new session with a name:
tmux new -s myname
import numpy as np | |
import pymc | |
import pdb | |
def unconditionalProbability(Ptrans): | |
"""Compute the unconditional probability for the states of a | |
Markov chain.""" | |
m = Ptrans.shape[0] |
#include <iostream> | |
#include <string> | |
#include <vector> | |
#include <queue> | |
#include <stack> | |
#include <algorithm> | |
using namespace std; | |
class Node{ |
/* | |
 * Note: This template uses some C++11 features, so you must compile it with the C++11 flag.
 * Example: $ g++ -std=c++11 c++Template.cpp
* | |
* Author : Akshay Pratap Singh | |
* Handle: code_crack_01 | |
* | |
*/ | |
/******** All Required Header Files ********/ |
""" | |
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy) | |
BSD License | |
""" | |
import numpy as np | |
# data I/O | |
# data I/O
# Read the training corpus as a single plain-text string; the context
# manager guarantees the file handle is closed even if read() raises
# (the original open(...).read() leaked the handle).
with open('input.txt', 'r') as f:
    data = f.read()  # should be simple plain text file
chars = list(set(data))  # unique characters form the vocabulary
data_size, vocab_size = len(data), len(chars)
Picking the right architecture = picking the right battles + managing trade-offs.
In this article, I will share some of my experience installing the NVIDIA driver and CUDA on Linux. I mainly use Ubuntu as an example; notes for CentOS/Fedora are also provided where I can.
#!/usr/bin/env python | |
"""Hidden Markov Models | |
Abstract base class for HMMs and an implementation of an HMM | |
with discrete states and gaussian emissions. | |
""" | |
import tensorflow as tf | |
import edward as ed | |
from edward.models import Bernoulli, Categorical, Normal |
# Instructions for 4.14 and cuda 9.1 | |
# If upgrading from 4.13 and cuda 9.0 | |
$ sudo apt-get purge --auto-remove libcud* | |
$ sudo apt-get purge --auto-remove cuda* | |
$ sudo apt-get purge --auto-remove nvidia* | |
# also remove the leftover container directory at /usr/local/cuda-9.0/
# Important libs required with 4.14.x with Cuda 9.X | |
$ sudo apt install libelf1 libelf-dev |
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence

# Demo: pack a padded batch of 10 sequences (max length 20, feature dim 30).
x = Variable(torch.randn(10, 20, 30)).cuda()

# pack_padded_sequence requires every length to be >= 1 and the batch to be
# sorted by length in decreasing order. The original range(10) contained an
# invalid length of 0 and would raise at runtime; range(1, 11) reversed
# gives [10, 9, ..., 1], all within the padded length of 20.
lens = list(range(1, 11))
x = pack_padded_sequence(x, lens[::-1], batch_first=True)