Selected code snippets from emilemathieu's GitHub gists.
import numpy as np

class Kernel(object):
    """Collection of usual kernels."""
    @staticmethod
    def linear():
        def f(x, y):
            return np.inner(x, y)
        return f

class binary_classification(object):
    def __init__(self, kernel, C=1.0, max_iter=1000, tol=0.001):
        self.kernel = kernel      # K(x_i, x_j) = <phi(x_i), phi(x_j)>
        self.C = C                # penalty coefficient
        self.max_iter = max_iter  # maximum number of iterations for the solver
        self.tol = tol            # tolerance for the solver

    def fit(self, X, y):
        # Solve the dual problem and keep its solution for prediction
        lagrange_multipliers, intercept = self._compute_weights(X, y)
        self._lagrange_multipliers = lagrange_multipliers
        self._intercept = intercept

    def _compute_intercept(self, alpha, yg):
        # KKT: for free support vectors (0 < alpha_i < C), y_i * g_i equals the intercept
        indices = (alpha < self.C) * (alpha > 0)
        return np.mean(yg[indices])

    def _compute_weights(self, X, y):
        iteration = 0
        n_samples = X.shape[0]
        alpha = np.zeros(n_samples)  # Initialise coefficients to 0
        g = np.ones(n_samples)       # Initialise gradients to 1 (dual gradient at alpha = 0)
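
The preview truncates _compute_weights right after the initialisation. Below is a minimal sketch of a maximum-violating-pair SMO loop that fits this skeleton; the Gram matrix K, the masks up and down, and the step lam are names introduced here for illustration, not taken from the original gist.

        # Hedged sketch of the missing solver loop, not the author's code
        K = np.array([[self.kernel(xi, xj) for xj in X] for xi in X])  # Gram matrix
        while iteration < self.max_iter:
            yg = g * y
            # Indices whose alpha may still increase (up) or decrease (down) along y
            up = ((y == 1) & (alpha < self.C)) | ((y == -1) & (alpha > 0))
            down = ((y == 1) & (alpha > 0)) | ((y == -1) & (alpha < self.C))
            i = int(np.argmax(np.where(up, yg, -np.inf)))
            j = int(np.argmin(np.where(down, yg, np.inf)))
            if yg[i] - yg[j] < self.tol:
                break  # KKT conditions hold up to the tolerance
            # Largest feasible step along the (i, j) direction, capped by the
            # unconstrained optimum (yg_i - yg_j) / curvature
            lambda_max = min(self.C - alpha[i] if y[i] == 1 else alpha[i],
                             alpha[j] if y[j] == 1 else self.C - alpha[j])
            curvature = max(K[i, i] + K[j, j] - 2 * K[i, j], 1e-12)
            lam = min(lambda_max, (yg[i] - yg[j]) / curvature)
            alpha[i] += y[i] * lam
            alpha[j] -= y[j] * lam
            g += lam * y * (K[:, j] - K[:, i])
            iteration += 1
        return alpha, self._compute_intercept(alpha, g * y)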

class Sequential(Module):
    """Neural network built as a sequence of layers applied in order."""
    def __init__(self, *modules):
        self._modules = list(modules)

    def forward(self, X):
        # Feed the input through each layer in turn
        for module in self._modules:
            X = module.forward(X)
        return X
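
Only forward survives in the preview. Assuming each module also exposes a backward(grad) method (an assumption: the Module base class below only documents forward), the matching backpropagation pass would run the layers in reverse:

    def backward(self, grad):
        # Hypothetical: push the upstream gradient back through the layers in reverse
        for module in reversed(self._modules):
            grad = module.backward(grad)
        return grad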

class Module(object):
    """Base class for neural network layers."""
    def forward(self, X):
        """Apply the layer function to the input data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, depth_in, height_in, width_in]

        Returns
        -------
        array-like
            Output of the layer applied to X.
        """
        raise NotImplementedError()
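
For illustration, a minimal concrete subclass under this contract (not part of the original gists) could be:

class ReLU(Module):
    """Illustrative layer: element-wise rectifier."""
    def forward(self, X):
        return np.maximum(X, 0)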

class Optimizer(object):
    def __init__(self):
        self.state = {}  # per-parameter state, e.g. momentum buffers

    def __call__(self, layer_id, weight_type, value, grad):
        raise NotImplementedError()

class SGD(Optimizer):
    def __init__(self, lr=0.1, momentum=0):
        super().__init__()
        self.lr = lr
        self.momentum = momentum
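
The update rule itself is cut off. A sketch of the __call__ this API implies, returning the new parameter value and keeping one momentum buffer per (layer_id, weight_type) key, might be:

    def __call__(self, layer_id, weight_type, value, grad):
        # Hypothetical momentum update; one velocity buffer per layer/parameter pair
        key = (layer_id, weight_type)
        velocity = self.momentum * self.state.get(key, np.zeros_like(grad)) - self.lr * grad
        self.state[key] = velocity
        return value + velocity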

class Linear(Module):
    """Applies a linear transformation to the incoming data: y = Ax + b

    Parameters
    ----------
    in_features : int
        size of each input sample
    out_features : int
        size of each output sample

    Variables
    ----------
    _weight : array-like, shape = [out_features, in_features]
        the learnable weights A
    _bias : array-like, shape = [out_features]
        the learnable bias b
    """
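
The class body is not shown. Given the _weight, _bias, _grad_weight and _grad_bias names used in the update fragment at the bottom of this page, a minimal sketch (the initialisation scheme is a guess) could be:

    def __init__(self, in_features, out_features):
        self._weight = 0.01 * np.random.randn(out_features, in_features)
        self._bias = np.zeros(out_features)
        self._grad_weight = np.zeros_like(self._weight)
        self._grad_bias = np.zeros_like(self._bias)

    def forward(self, X):
        # y = Ax + b applied row-wise to a batch X of shape [n_samples, in_features]
        return X @ self._weight.T + self._bias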

class CrossEntropyLoss(object):
    def __call__(self, Y, labels):
        # Mean negative log-likelihood of the true class under softmax(Y)
        loss = 0
        for i, y in enumerate(Y):
            loss += -y[labels[i]] + np.log(np.sum(np.exp(y)))
        return loss / len(labels)

    def grad(self, Y, labels):
        # d(loss)/dY: softmax of each row minus the one-hot target, averaged over the batch
        output_grad = np.empty_like(Y)
        for i, y in enumerate(Y):
            output_grad[i] = np.exp(y) / np.sum(np.exp(y))
            output_grad[i, labels[i]] -= 1
        return output_grad / len(labels)
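
A quick numerical check of the loss and its gradient (values are illustrative only):

Y = np.array([[2.0, 0.5, -1.0], [0.1, 0.2, 3.0]])  # logits: two samples, three classes
labels = np.array([0, 2])
criterion = CrossEntropyLoss()
print(criterion(Y, labels))       # mean cross-entropy, a small positive scalar
print(criterion.grad(Y, labels))  # each row sums to 0: softmax mass minus the one-hot target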

import torch.nn as nn

class MyNet(nn.Module):
    def __init__(self):
        super().__init__()  # required before registering submodules on an nn.Module
        self.features = nn.Sequential(
            nn.Conv2d(1, 10, kernel_size=5),
            nn.MaxPool2d(2, 2),
            nn.ReLU(),
            nn.Conv2d(10, 20, kernel_size=5),
            nn.MaxPool2d(2, 2),
            nn.ReLU()
        )
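
The preview ends before forward. Assuming MNIST-sized 1x28x28 inputs, so the feature maps flatten to 20 * 4 * 4 = 320 values per sample, a plausible continuation is:

    def forward(self, x):
        x = self.features(x)          # [N, 20, 4, 4] for 28x28 inputs
        return x.view(x.size(0), -1)  # flatten to [N, 320]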

# Fragment from a module's parameter-update step: the shared optimizer is called
# once per parameter, keyed by the layer's id and the parameter's name
layer._weight = optimizer(id(layer), 'weight', layer._weight, layer._grad_weight)
layer._bias = optimizer(id(layer), 'bias', layer._bias, layer._grad_bias)
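
Putting the pieces together, a hypothetical single step with the classes above (the backward plumbing is assumed, not shown in the gists):

X_batch = np.random.randn(32, 784)           # e.g. a batch of flattened images
y_batch = np.random.randint(0, 10, size=32)  # integer class labels

net = Sequential(Linear(784, 10))
criterion = CrossEntropyLoss()
optimizer = SGD(lr=0.1, momentum=0.9)

logits = net.forward(X_batch)
loss = criterion(logits, y_batch)
grad = criterion.grad(logits, y_batch)  # would then flow back via a backward pass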