William de Vazelhes (wdevazelhes)

wdevazelhes / kernel_attention.py
Last active June 9, 2019 18:43
Trying to approximate softmax self-attention with explicit feature maps (kernel approximations).
import numpy as np
from sklearn.utils.extmath import softmax
from sklearn.kernel_approximation import RBFSampler
from sklearn_extra.kernel_approximation import Fastfood
seed = 42
rng = np.random.RandomState(seed)
D = 20
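
The preview above only shows the imports. As a minimal sketch of the idea the description hints at (not the gist's actual code), the snippet below approximates the softmax attention matrix softmax(QK^T) with an explicit random feature map: RBFSampler gives features phi with phi(q).phi(k) ~ exp(-||q - k||^2 / 2), and rescaling by exp(||q||^2 / 2) exp(||k||^2 / 2) turns that into an approximation of exp(q.k). The shapes and constants here are illustrative assumptions.

import numpy as np
from sklearn.kernel_approximation import RBFSampler

rng = np.random.RandomState(42)
n, d = 8, 20                        # sequence length, embedding dimension (assumed)
Q = rng.randn(n, d) / np.sqrt(d)    # queries
K = rng.randn(n, d) / np.sqrt(d)    # keys

# Exact softmax attention weights.
scores = np.exp(Q @ K.T)
exact = scores / scores.sum(axis=1, keepdims=True)

# Random-feature approximation: with gamma=0.5, RBFSampler approximates
# exp(-||q - k||^2 / 2), and
# exp(q . k) = exp(-||q - k||^2 / 2) * exp(||q||^2 / 2) * exp(||k||^2 / 2).
phi = RBFSampler(gamma=0.5, n_components=5000, random_state=42).fit(Q)
Zq = phi.transform(Q) * np.exp((Q ** 2).sum(axis=1, keepdims=True) / 2)
Zk = phi.transform(K) * np.exp((K ** 2).sum(axis=1, keepdims=True) / 2)
approx_scores = Zq @ Zk.T           # ~ exp(Q K^T), up to Monte Carlo error
approx = approx_scores / approx_scores.sum(axis=1, keepdims=True)

# The gap shrinks as n_components grows.
print(np.abs(exact - approx).max())
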
wdevazelhes / sdml_expr_numerical_uncertainty.py
Last active January 16, 2019 09:08
This gist shows why two mathematically equivalent ways of computing the SDML loss can give slightly different values because of floating-point error.
import numpy as np
from scipy.sparse.csgraph import laplacian
from sklearn.utils import check_random_state
from scipy.sparse import coo_matrix
from numpy.testing import assert_allclose
RNG = check_random_state(0)
def test_loss_sdml_uncertainties():
    n_samples = 10
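
A minimal sketch, under assumed conventions, of the kind of discrepancy the description refers to: for symmetric pair weights W, the weighted sum of squared Mahalanobis distances equals 2 * trace(X^T L X M), where L is the graph Laplacian of W, but the two expressions accumulate rounding error differently and so agree only up to floating-point precision. The weights and matrices below are illustrative, not the gist's actual data.

import numpy as np
from scipy.sparse.csgraph import laplacian
from sklearn.utils import check_random_state

rng = check_random_state(0)
n_samples, n_features = 10, 5
X = rng.randn(n_samples, n_features)
W = rng.rand(n_samples, n_samples)
W = (W + W.T) / 2                   # symmetric, illustrative pair weights
np.fill_diagonal(W, 0)
A = rng.randn(n_features, n_features)
M = A @ A.T                         # a PSD Mahalanobis matrix

# Way 1: explicit double sum over pairs.
diffs = X[:, None, :] - X[None, :, :]
loss_pairs = np.einsum('ijk,kl,ijl,ij->', diffs, M, diffs, W)

# Way 2: graph Laplacian identity, 2 * trace(X^T L X M).
L = laplacian(W, normed=False)
loss_laplacian = 2 * np.trace(X.T @ L @ X @ M)

print(loss_pairs, loss_laplacian)
print(abs(loss_pairs - loss_laplacian))   # typically tiny but not exactly zero
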
wdevazelhes / compare_gradients_mlkr.py
Last active August 16, 2018 10:06
Code for comparing two implementations of the gradient for MLKR
from metric_learn import MLKR
from sklearn.utils import check_random_state
import numpy as np
from losses import _loss_non_optimized, _loss_optimized
from collections import defaultdict
from sklearn.datasets import make_regression
for n_features in [5, 100]:
    print('n_features={}'.format(n_features))
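
The gist's `losses` module (providing `_loss_non_optimized` and `_loss_optimized`) is not shown in this preview. As a hedged stand-in for the same checking pattern, the sketch below compares a naive loop-based gradient with a vectorized one for a simple weighted sum-of-squared-distances objective (not MLKR's actual leave-one-out loss), asserting that the two implementations agree on random inputs and reporting their run times.

import time
import numpy as np
from numpy.testing import assert_allclose
from sklearn.utils import check_random_state

def grad_loops(X, W):
    """Naive gradient of sum_ij W_ij (x_i - x_j)^T M (x_i - x_j) w.r.t. M."""
    n, d = X.shape
    G = np.zeros((d, d))
    for i in range(n):
        for j in range(n):
            diff = X[i] - X[j]
            G += W[i, j] * np.outer(diff, diff)
    return G

def grad_vectorized(X, W):
    """Same gradient via the graph Laplacian identity: 2 * X^T L X."""
    D = np.diag(W.sum(axis=1))
    L = D - W
    return 2 * X.T @ L @ X

rng = check_random_state(0)
for n_features in [5, 100]:
    X = rng.randn(50, n_features)
    W = rng.rand(50, 50)
    W = (W + W.T) / 2                   # symmetric pair weights
    t0 = time.time(); g1 = grad_loops(X, W); t1 = time.time()
    g2 = grad_vectorized(X, W); t2 = time.time()
    assert_allclose(g1, g2, rtol=1e-7, atol=1e-10)
    print('n_features={}: loops {:.4f}s, vectorized {:.4f}s'.format(
        n_features, t1 - t0, t2 - t1))
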
import numpy as np
from scipy.sparse.csgraph import laplacian
from sklearn.utils import check_random_state
from scipy.sparse import coo_matrix
from numpy.testing import assert_array_almost_equal
RNG = check_random_state(0)
def test_loss_sdml():
    n_samples = 10