N-McA
import collections
import dataclasses
import itertools
import typing as tp
from functools import partial
import numpy as np
from scipy.optimize import minimize
DEFAULT_CI_SIZE = 0.95
@N-McA
N-McA / mcmc_ranking.py
Created October 24, 2021 10:47
Main Code for "Features in Trueskill"
import numpy as np
import theano.tensor as T
import pymc3 as pm
from utils import add_params_property
import ranking
from ranking import tennis_data, MPTrueSkill1V1NoDrawRanker
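The preview cuts off at the imports. As a rough, hypothetical sketch of the kind of 1v1 no-draw skill model those imports point at (the Normal skill prior, the logistic win link, and every name below are assumptions, not taken from the gist; the real model lives in the ranking module):

import numpy as np
import pymc3 as pm

def fit_1v1_skills(winner_idx, loser_idx, n_players, draws=1000):
    # One latent skill per player; each match is a Bernoulli outcome on the
    # skill difference. A logistic link stands in here for TrueSkill's probit.
    with pm.Model():
        skills = pm.Normal('skills', mu=0.0, sd=1.0, shape=n_players)
        p_win = pm.math.sigmoid(skills[winner_idx] - skills[loser_idx])
        pm.Bernoulli('wins', p=p_win, observed=np.ones(len(winner_idx)))
        return pm.sample(draws, tune=1000)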
@N-McA
N-McA / install-hooks.py
Created February 1, 2019 15:55
Install git hooks
#!/usr/bin/env python3
import subprocess
import shlex
import os
from pathlib import Path
def chdir_to_script_location():
    abspath = os.path.abspath(__file__)
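The script preview is truncated after the first line of chdir_to_script_location. A minimal, hypothetical sketch of what an installer along these lines might do, assuming hooks live in a hooks/ directory next to the script (that layout and the helper names are assumptions):

#!/usr/bin/env python3
import os
import shutil
import stat
from pathlib import Path

def chdir_to_script_location():
    # Standard idiom: cd to this file's directory so relative paths
    # work regardless of where the script is invoked from.
    abspath = os.path.abspath(__file__)
    os.chdir(os.path.dirname(abspath))

def install_hooks(source='hooks', target='.git/hooks'):
    for hook in Path(source).iterdir():
        if not hook.is_file():
            continue
        dest = Path(target) / hook.name
        shutil.copy(hook, dest)
        # Git silently ignores hooks that are not executable.
        dest.chmod(dest.stat().st_mode | stat.S_IEXEC)

if __name__ == '__main__':
    chdir_to_script_location()
    install_hooks()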
function isString(s) {
  return (typeof s === 'string' || s instanceof String)
}

export function toBaseUnit(value, decimals, BN) {
  if (!isString(value)) {
    throw new Error('Pass strings to prevent floating point precision issues.')
  }
  const ten = new BN(10);
  const base = ten.pow(new BN(decimals));
@N-McA
N-McA / keras_spatial_bias.py
Last active November 13, 2019 19:15
Concatenates the (x, y) coordinates, normalised to 0-1, onto each spatial location in the image, allowing a network to learn a spatial bias. Explored in at least one paper, "An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution", https://arxiv.org/abs/1807.03247
import keras.backend as kb
from keras.layers import Layer
def _kb_linspace(num):
    num = kb.cast(num, kb.floatx())
    return kb.arange(0, num, dtype=kb.floatx()) / (num - 1)

def _kb_grid_coords(width, height):
    w, h = width, height
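The preview stops inside _kb_grid_coords. A hypothetical sketch of how the coordinate grid might be finished and concatenated onto a channels-last feature map, in line with the gist description (the layer name and everything past the shown helpers are assumptions):

import keras.backend as kb
from keras.layers import Layer

def _kb_linspace(num):
    num = kb.cast(num, kb.floatx())
    return kb.arange(0, num, dtype=kb.floatx()) / (num - 1)

def _kb_grid_coords(width, height):
    w, h = width, height
    # x varies along the width axis, y along the height, both in [0, 1].
    x = kb.tile(kb.expand_dims(_kb_linspace(w), 0), (h, 1))   # (h, w)
    y = kb.tile(kb.expand_dims(_kb_linspace(h), -1), (1, w))  # (h, w)
    return kb.stack([x, y], axis=-1)                          # (h, w, 2)

class ConcatSpatialCoords(Layer):
    # Appends two coordinate channels to each spatial location.
    def call(self, x):
        shape = kb.shape(x)
        coords = _kb_grid_coords(width=shape[2], height=shape[1])
        coords = kb.tile(kb.expand_dims(coords, 0), (shape[0], 1, 1, 1))
        return kb.concatenate([x, coords], axis=-1)

    def compute_output_shape(self, input_shape):
        return (*input_shape[:-1], input_shape[-1] + 2)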
def tf_pca(x):
    '''
    Compute PCA on the bottom two dimensions of x,
    e.g. assuming dims = [..., observations, features]
    '''
    # Center
    x -= tf.reduce_mean(x, -2, keepdims=True)
    # Currently, the GPU implementation of SVD is awful.
    # It is slower than moving data back to CPU to SVD there
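The body is cut off right after the comment about the slow GPU SVD. A hypothetical completion consistent with that comment, pinning the SVD to the CPU and projecting onto the principal directions (the device pinning and the return value are assumptions):

import tensorflow as tf

def tf_pca_sketch(x):
    # Center along the observations axis.
    x -= tf.reduce_mean(x, -2, keepdims=True)
    # Do the SVD on the CPU; the GPU kernel is slow for this.
    with tf.device('/cpu:0'):
        s, u, v = tf.linalg.svd(x, full_matrices=False)
    # Columns of v are the principal directions; project onto them.
    return tf.matmul(x, v)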
@N-McA
N-McA / multi_jpg.py
Last active April 27, 2018 21:22
Trade memory for time when holding big stack of jpgs
'''
Compatible with Keras, faster than reading from files (no stats).
It's only designed to work if all your images are vaguely similar sizes
when encoded as JPGs, so if you have:
  white noise or other hard-to-encode stuff
  radically varying image sizes
  ...
  (probably other failure modes)
then this is foolhardy.
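The docstring describes the trade: keep images JPEG-encoded in RAM and decode only on access, rather than holding raw arrays or hitting the filesystem. A hypothetical sketch of such a container (the class and method names are assumptions, not the gist's):

import io
import numpy as np
from PIL import Image

class JpegArrayStore:
    '''Holds JPEG-encoded bytes in RAM and decodes to arrays on access.'''

    def __init__(self, images, quality=90):
        self._encoded = []
        for img in images:
            buf = io.BytesIO()
            Image.fromarray(img).save(buf, format='JPEG', quality=quality)
            self._encoded.append(buf.getvalue())

    def __len__(self):
        return len(self._encoded)

    def __getitem__(self, i):
        # Decoding a JPEG in memory avoids per-file stat/read overhead,
        # and the encoded bytes are far smaller than the raw arrays.
        return np.asarray(Image.open(io.BytesIO(self._encoded[i])))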
header = r'''
\begin{tikzpicture}[node distance = 2mm, auto]
%% Auto Generated
'''
raw_b = r'''
\node [block, below= of glove] (conv1) {
\begin{tabular}{cc}
Conv1D & Input: $n$x100 \\
64x5 Dilation 1 & Output: $n$x64 \\