Kernel mapped generalized Mahalanobis depth (kmGMHD) in Python.
# -*- coding: utf-8 -*-
"""Kernel mapped generalized Mahalanobis depth (kmGMHD)."""
# Author: Akira Tamamori <tamamori5917@gmail.com>
# License: BSD 2 clause

from typing import NamedTuple, Optional, Union

import numpy as np
from sklearn.decomposition import KernelPCA
from sklearn.utils import check_array, check_random_state
class KpcaConfig(NamedTuple):
    """Configuration for the underlying sklearn KernelPCA."""

    n_components: Optional[int] = None
    kernel: str = "rbf"
    gamma: Optional[float] = None
    degree: int = 3
    coef0: float = 1.0
    kernel_params: Optional[dict] = None
    alpha: float = 1.0
    eigen_solver: str = "auto"
    tol: float = 0.0
    max_iter: Optional[int] = None
    remove_zero_eig: bool = False
    copy_X: bool = True
    n_jobs: Optional[int] = None
    sampling: bool = False
    subset_size: Union[float, int] = 20
    random_state: Union[int, np.random.RandomState, None] = None
class GMHD:
    """Kernel mapped generalized Mahalanobis depth (kmGMHD).

    Kernel PCA maps the data into the reproducing kernel Hilbert space
    (RKHS) uniquely determined by the kernel, and the Mahalanobis depth
    is then computed in that space.

    Yonggang Hu, Yong Wang, Yi Wu, Qiang Li and Chenping Hou,
    "Generalized Mahalanobis depth in the reproducing kernel Hilbert space,"
    Statistical Papers, volume 52, pages 511-522 (2011).
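
    Concretely, with kernel PCA projections ``z_j(x)`` and eigenvalues
    ``lambda_j``, the depth computed by this implementation is

        D(x) = 1 / (1 + sum_j z_j(x)**2 / lambda_j),

    so central points receive values close to 1 and outlying points
    values close to 0.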

    Parameters
    ----------
    n_components : int, optional (default=None)
        Number of components. If None, all non-zero components are kept.
    kernel : string {'linear', 'poly', 'rbf', 'sigmoid',
                     'cosine', 'precomputed'}, optional (default='rbf')
        Kernel used for PCA.
    gamma : float or {'scale', 'auto'}, optional (default=None)
        Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by
        other kernels. If ``gamma`` is ``None``, it is set to
        ``1 / n_features``; ``'scale'`` uses ``1 / (n_features * X.var())``
        and ``'auto'`` uses ``1 / n_features``.
    degree : int, optional (default=3)
        Degree for poly kernels. Ignored by other kernels.
    coef0 : float, optional (default=1)
        Independent term in poly and sigmoid kernels.
        Ignored by other kernels.
    kernel_params : dict, optional (default=None)
        Parameters (keyword arguments) and values for a kernel passed
        as a callable object. Ignored by other kernels.
    alpha : float, optional (default=1.0)
        Hyperparameter of the ridge regression that learns the
        inverse transform (when inverse_transform=True).
    eigen_solver : string {'auto', 'dense', 'arpack', 'randomized'}, \
            optional (default='auto')
        Eigensolver to use. If `n_components` is much smaller than the
        number of training samples, randomized (or arpack, to a smaller
        extent) may be more efficient than the dense eigensolver.
        Randomized SVD is performed according to the method of Halko et al.

        auto :
            the solver is selected by a default policy based on n_samples
            (the number of training samples) and `n_components`:
            if the number of components to extract is less than 10
            (strict) and the number of samples is more than 200 (strict),
            the 'arpack' method is enabled. Otherwise the exact full
            eigenvalue decomposition is computed and optionally truncated
            afterwards ('dense' method).
        dense :
            run exact full eigenvalue decomposition calling the standard
            LAPACK solver via `scipy.linalg.eigh`, and select the
            components by postprocessing.
        arpack :
            run SVD truncated to n_components calling the ARPACK solver
            via `scipy.sparse.linalg.eigsh`. It requires strictly
            0 < n_components < n_samples.
        randomized :
            run randomized SVD. The implementation selects eigenvalues
            based on their modulus; this method can therefore give
            unexpected results if the kernel is not positive
            semi-definite.
    tol : float, optional (default=0)
        Convergence tolerance for arpack.
        If 0, the optimal value is chosen by arpack.
    max_iter : int, optional (default=None)
        Maximum number of iterations for arpack.
        If None, the optimal value is chosen by arpack.
    remove_zero_eig : bool, optional (default=False)
        If True, all components with zero eigenvalues are removed, so
        that the number of components in the output may be < n_components
        (and sometimes even zero due to numerical instability).
        When n_components is None, this parameter is ignored and
        components with zero eigenvalues are removed regardless.
    copy_X : bool, optional (default=True)
        If True, input X is copied and stored by the model in the
        `X_fit_` attribute. If no further changes will be done to X,
        setting `copy_X=False` saves memory by storing a reference.
    n_jobs : int, optional (default=None)
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors.
    sampling : bool, optional (default=False)
        If True, a subset of the dataset is sampled once before fitting,
        in order to reduce time complexity while keeping detection
        performance.
    subset_size : float in (0., 1.] or int in (0, n_samples], optional
            (default=20)
        Size of the subset drawn when sampling is True; a float is
        interpreted as a fraction of n_samples.
    random_state : int, RandomState instance or None, optional
            (default=None)
        If int, random_state is the seed used by the random number
        generator; if a RandomState instance, it is used as the random
        number generator; if None, the random number generator is the
        RandomState instance used by np.random.
    """
    def __init__(
        self,
        n_components=None,
        kernel="rbf",
        gamma=None,
        degree=3,
        coef0=1,
        kernel_params=None,
        alpha=1.0,
        eigen_solver="auto",
        tol=0,
        max_iter=None,
        remove_zero_eig=False,
        copy_X=True,
        n_jobs=None,
        sampling=False,
        subset_size=20,
        random_state=None,
    ):
        self.config = KpcaConfig(
            n_components,
            kernel,
            gamma,
            degree,
            coef0,
            kernel_params,
            alpha,
            eigen_solver,
            tol,
            max_iter,
            remove_zero_eig,
            copy_X,
            n_jobs,
            sampling,
            subset_size,
            check_random_state(random_state),
        )
        self.kpca = None
        self._gamma = None
        self.depth_values = None
    def _check_subset_size(self, array):
        """Validate subset_size and return it as a sample count."""
        n_samples, _ = array.shape
        if isinstance(self.config.subset_size, int):
            if 0 < self.config.subset_size <= n_samples:
                return self.config.subset_size
            raise ValueError(
                f"subset_size={self.config.subset_size} "
                f"must be between 0 and n_samples={n_samples}."
            )
        if isinstance(self.config.subset_size, float):
            if 0.0 < self.config.subset_size <= 1.0:
                return int(self.config.subset_size * n_samples)
            raise ValueError(
                f"subset_size={self.config.subset_size} "
                "must be between 0.0 and 1.0."
            )
        raise TypeError(
            "subset_size must be an int or a float, "
            f"got {type(self.config.subset_size).__name__}."
        )
    def fit(self, X):
        """Fit detector.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # validate input X
        X = check_array(X, copy=self.config.copy_X)

        # optionally subsample once to reduce time complexity
        if self.config.sampling:
            subset_size = self._check_subset_size(X)
            random_indices = self.config.random_state.choice(
                X.shape[0],
                size=subset_size,
                replace=False,
            )
            X = X[random_indices, :]

        # resolve n_components as the sklearn KernelPCA object would
        if self.config.n_components is None:
            n_components = X.shape[0]  # use all dimensions
        else:
            if self.config.n_components < 1:
                raise ValueError(
                    f"`n_components` should be >= 1, got: {self.config.n_components}"
                )
            n_components = min(X.shape[0], self.config.n_components)

        # resolve gamma; 'scale' and 'auto' follow the sklearn SVM convention
        if isinstance(self.config.gamma, str):
            if self.config.gamma == "scale":
                x_var = X.var()
                self._gamma = 1.0 / (X.shape[1] * x_var) if x_var != 0 else 1.0
            elif self.config.gamma == "auto":
                self._gamma = 1.0 / X.shape[1]
            else:
                raise ValueError(
                    "When 'gamma' is a string, it should be either 'scale' or "
                    f"'auto'. Got '{self.config.gamma}' instead."
                )
        else:
            self._gamma = self.config.gamma

        self.kpca = KernelPCA(
            n_components=n_components,
            kernel=self.config.kernel,
            gamma=self._gamma,
            degree=self.config.degree,
            coef0=self.config.coef0,
            kernel_params=self.config.kernel_params,
            alpha=self.config.alpha,
            fit_inverse_transform=False,
            eigen_solver=self.config.eigen_solver,
            tol=self.config.tol,
            max_iter=self.config.max_iter,
            remove_zero_eig=self.config.remove_zero_eig,
            copy_X=self.config.copy_X,
            n_jobs=self.config.n_jobs,
        )

        # project onto the kernel principal components: [n_samples, n_components]
        x_transformed = self.kpca.fit_transform(X)

        # whiten by the non-zero eigenvalues; the squared norm of the
        # whitened projection is the generalized Mahalanobis distance
        non_zeros = np.flatnonzero(self.kpca.eigenvalues_)
        x_transformed = x_transformed[:, non_zeros]
        x_transformed = x_transformed / np.sqrt(self.kpca.eigenvalues_[non_zeros])

        # depth D(x) = 1 / (1 + ||whitened projection||^2)
        self.depth_values = 1 / (1 + np.sum(x_transformed**2, axis=1))
        return self
    def depth_function(self, X):
        """Compute raw depth values of X using the fitted object.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples. Sparse matrices are accepted only
            if they are supported by the base estimator.

        Returns
        -------
        depth_values : numpy array of shape (n_samples,)
            The depth values of the input samples.
        """
        X = check_array(X)

        # project new samples and whiten with the eigenvalues learned in fit
        x_transformed = self.kpca.transform(X)
        non_zeros = np.flatnonzero(self.kpca.eigenvalues_)
        x_transformed = x_transformed[:, non_zeros]
        x_transformed = x_transformed / np.sqrt(self.kpca.eigenvalues_[non_zeros])

        # depth D(x) = 1 / (1 + ||whitened projection||^2)
        depth_values = 1 / (1 + np.sum(x_transformed**2, axis=1))
        return depth_values
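
if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module): fit the
    # depth on a Gaussian blob plus a few off-distribution points and
    # inspect the resulting depth values. The synthetic data and the
    # parameter choices (gamma=0.5, the shift of 6.0) are illustrative
    # assumptions, not values from the paper.
    rng = np.random.RandomState(0)
    inliers = rng.randn(200, 2)
    outliers = rng.randn(5, 2) * 0.5 + 6.0  # shifted far from the blob
    data = np.vstack([inliers, outliers])

    detector = GMHD(kernel="rbf", gamma=0.5, random_state=0).fit(data)
    depths = detector.depth_function(data)

    # central points should be deeper than the shifted outliers
    print("mean inlier depth :", depths[:200].mean())
    print("mean outlier depth:", depths[200:].mean())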