
# -*- coding:utf-8 -*-
import codecs
import json
import os
import sys
def walk_json(path):
    # Minimal completion of the truncated preview: yield parsed JSON from every file under path.
    for root, _, files in os.walk(path):
        for name in files:
            with codecs.open(os.path.join(root, name), 'r', 'utf-8') as f:
                yield json.load(f)
if __name__ == '__main__':
    for obj in walk_json(sys.argv[1]):
        print(obj)
hiropppe / cython_dot_sample_for_nogil.pyx
Last active June 5, 2017 07:31
Simple cython dot product sample for nogil
# cython: boundscheck = False
# cython: wraparound = False
# cython: cdivision = True
import numpy as np
cimport numpy as np
from scipy.linalg cimport cython_blas
from cython.parallel import prange
from libc.stdio cimport printf
from libc.stdlib cimport abort, malloc, free
cimport openmp
def get_int_flag():
    # The preview cuts off here; close out the function so the module compiles.
    cdef int flag = 0
    return flag
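# The remainder of the gist is not shown in this preview. Below is a minimal
# sketch of the kind of nogil dot product the title refers to, using the
# imports above; the function name and layout are illustrative, not the gist's own.
def row_dot(double[:, ::1] a, double[:, ::1] b):
    # Dot product of corresponding rows of two C-contiguous float64 arrays,
    # computed in parallel without the GIL via BLAS ddot.
    cdef int n = <int> a.shape[1]
    cdef int inc = 1
    cdef Py_ssize_t i
    out_np = np.empty(a.shape[0], dtype=np.float64)
    cdef double[::1] out = out_np
    for i in prange(a.shape[0], nogil=True):
        out[i] = cython_blas.ddot(&n, &a[i, 0], &inc, &b[i, 0], &inc)
    return out_np
    # Note: prange only runs in parallel when the module is compiled with OpenMP
    # flags (e.g. -fopenmp).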
hiropppe / synchronous_distributed_mnist_training.py
Created February 16, 2017 04:31
Distributed TensorFlow 0.12.0 example using data parallelism and shared model parameters. This is roughly a copy of ischlag's example (https://github.com/ischlag/distributed-tensorflow-example).
'''
Synchronous Distributed TensorFlow 0.12.0 example using data parallelism and shared model parameters.
Trains a simple sigmoid neural network on MNIST for 20 epochs on three machines using one parameter server.
Replace the hardcoded host URLs below with your own hosts.
Run like this:
pc-01$ python synchronous_distributed_mnist_training.py --job_name="ps" --task_index=0
pc-02$ python synchronous_distributed_mnist_training.py --job_name="worker" --task_index=0
pc-03$ python synchronous_distributed_mnist_training.py --job_name="worker" --task_index=1
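'''
# The rest of the script is not shown in this listing. A minimal sketch of the
# TF 0.12 cluster/server setup the docstring describes; the host:port values
# below are placeholders, not the gist's actual hosts.
import tensorflow as tf

tf.app.flags.DEFINE_string("job_name", "", "'ps' or 'worker'")
tf.app.flags.DEFINE_integer("task_index", 0, "index of the task within its job")
FLAGS = tf.app.flags.FLAGS

cluster = tf.train.ClusterSpec({
    "ps": ["pc-01:2222"],
    "worker": ["pc-02:2222", "pc-03:2222"],
})
server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)

if FLAGS.job_name == "ps":
    server.join()  # the parameter server only hosts the shared variables
elif FLAGS.job_name == "worker":
    # Pin variables to the ps task and ops to this worker.
    with tf.device(tf.train.replica_device_setter(
            worker_device="/job:worker/task:%d" % FLAGS.task_index,
            cluster=cluster)):
        pass  # the sigmoid MNIST model and the (synchronous) training op go here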
hiropppe / async_distributed_mnist_training.py
Last active February 16, 2017 04:30
Distributed TensorFlow 0.12.0 example using data parallelism and shared model parameters. This is roughly a copy of ischlag's example (https://github.com/ischlag/distributed-tensorflow-example).
'''
Asynchronous Distributed TensorFlow 0.12.0 example using data parallelism and shared model parameters.
Trains a simple sigmoid neural network on MNIST for 20 epochs on three machines using one parameter server.
Replace the hardcoded host URLs below with your own hosts.
Run like this:
pc-01$ python asynchronous_distributed_mnist_training.py --job_name="ps" --task_index=0
pc-02$ python asynchronous_distributed_mnist_training.py --job_name="worker" --task_index=0
pc-03$ python asynchronous_distributed_mnist_training.py --job_name="worker" --task_index=1
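'''
# The model and training loop are likewise not shown. A minimal sketch of the
# asynchronous worker side, assuming the same FLAGS / cluster / server setup as
# in the synchronous sketch above; `x`, `y_` and `loss` stand in for the gist's
# actual input placeholders and sigmoid-network loss.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
global_step = tf.Variable(0, name="global_step", trainable=False)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss, global_step=global_step)

# Asynchronous data parallelism: every worker pushes its own gradient updates
# to the shared parameters as soon as they are computed, with no cross-worker sync.
sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0), global_step=global_step)
with sv.prepare_or_wait_for_session(server.target) as sess:
    for step in range(20 * mnist.train.num_examples // 100):
        batch_x, batch_y = mnist.train.next_batch(100)
        sess.run([train_op, global_step], feed_dict={x: batch_x, y_: batch_y})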
import tables as tb
from scipy.sparse import lil_matrix

# Open (or create) an HDF5 file and one group per CSR component, so every
# stored matrix keeps its data / indices / indptr arrays grouped by kind.
h5 = tb.open_file('sparse.h5', 'a')
data_group = h5.create_group(h5.root, 'data_group')
indices_group = h5.create_group(h5.root, 'indices_group')
indptr_group = h5.create_group(h5.root, 'indptr_group')
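# The listing stops here. A minimal sketch of writing one matrix into those
# groups; the node name 'm0' and the example values are illustrative only.
m = lil_matrix((3, 4))
m[0, 1] = 1.0
m[2, 3] = 2.0
csr = m.tocsr()

h5.create_array(data_group, 'm0', csr.data)
h5.create_array(indices_group, 'm0', csr.indices)
h5.create_array(indptr_group, 'm0', csr.indptr)
h5.flush()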
$ mkdir -p ~/.vim/bundle
$ git clone https://github.com/Shougo/neobundle.vim ~/.vim/bundle/neobundle.vim
"------------------------------------
" neocomplete.vim
"------------------------------------
"Note: This option must set it in .vimrc(_vimrc). NOT IN .gvimrc(_gvimrc)!
" Disable AutoComplPop.
let g:acp_enableAtStartup = 0