Outlier Detector based on Kernel Random Projection Depth (KRPD). https://arxiv.org/abs/2306.07056
# -*- coding: utf-8 -*-
"""Outlier Detector based on Kernel Random Projection Depth (KRPD).
Copyright (C) 2023 by Akira TAMAMORI
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from pyod.models.base import BaseDetector
from scipy import stats
from sklearn.decomposition import KernelPCA
from sklearn.preprocessing import normalize
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted


class KRPD(BaseDetector):
"""KRPD class for outlier detection.
PCA is performed on the feature space uniquely determined by the kernel,
and the negative projection depth in the RKHS is used as anomaly score.
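
    Concretely (matching the computation in ``fit``), the outlyingness of a
    sample x with feature map phi(x) is

        O(x) = max_u |<u, phi(x)> - med_u| / MAD_u,

    where u ranges over random unit directions in the whitened KPCA space
    and med_u / MAD_u are the median and median absolute deviation of the
    training data projected onto u. The anomaly score is the negative
    projection depth, -1 / (1 + O(x)).
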
    Parameters
    ----------
    contamination : float in (0., 0.5), optional (default=0.1)
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set. Used when fitting to define the
        threshold on the decision function.

    n_projections : int, optional (default=1000)
        The number of random projection axes.

    n_components : int, optional (default=None)
        Number of components. If None, all non-zero components are kept.

    kernel : string {'linear', 'poly', 'rbf', 'sigmoid',
            'cosine', 'precomputed'}, optional (default='rbf')
        Kernel used for PCA.

    gamma : float, optional (default=None)
        Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by
        other kernels. If ``gamma`` is ``None``, then it is set to
        ``1/n_features``.

    degree : int, optional (default=3)
        Degree for poly kernels. Ignored by other kernels.

    coef0 : float, optional (default=1)
        Independent term in poly and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : dict, optional (default=None)
        Parameters (keyword arguments) and values for a kernel passed as a
        callable object. Ignored by other kernels.

    alpha : float, optional (default=1.0)
        Hyperparameter of the ridge regression that learns the
        inverse transform (when inverse_transform=True).

    eigen_solver : string, {'auto', 'dense', 'arpack', 'randomized'}, \
            default='auto'
        Select the eigensolver to use. If `n_components` is much
        less than the number of training samples, randomized (or arpack to
        a smaller extent) may be more efficient than the dense eigensolver.
        Randomized SVD is performed according to the method of Halko et al.

        auto :
            the solver is selected by a default policy based on n_samples
            (the number of training samples) and `n_components`:
            if the number of components to extract is less than 10 (strict)
            and the number of samples is more than 200 (strict), the
            'arpack' method is enabled. Otherwise the exact full eigenvalue
            decomposition is computed and optionally truncated afterwards
            ('dense' method).

        dense :
            run exact full eigenvalue decomposition calling the standard
            LAPACK solver via `scipy.linalg.eigh`, and select the
            components by postprocessing.

        arpack :
            run SVD truncated to n_components calling the ARPACK solver via
            `scipy.sparse.linalg.eigsh`. It requires strictly
            0 < n_components < n_samples.

        randomized :
            run randomized SVD. The implementation selects eigenvalues
            based on their modulus; therefore using this method can lead to
            unexpected results if the kernel is not positive semi-definite.

    tol : float, optional (default=0)
        Convergence tolerance for arpack.
        If 0, the optimal value will be chosen by arpack.

    max_iter : int, optional (default=None)
        Maximum number of iterations for arpack.
        If None, the optimal value will be chosen by arpack.

    remove_zero_eig : bool, optional (default=False)
        If True, then all components with zero eigenvalues are removed, so
        that the number of components in the output may be < n_components
        (and sometimes even zero due to numerical instability).
        When n_components is None, this parameter is ignored and components
        with zero eigenvalues are removed regardless.

    copy_X : bool, optional (default=True)
        If True, input X is copied and stored by the model in the `X_fit_`
        attribute. If no further changes will be done to X, setting
        `copy_X=False` saves memory by storing a reference.

    n_jobs : int, optional (default=None)
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors.

    sampling : bool, optional (default=False)
        If True, a random subset of the dataset is drawn once at fit time,
        in order to reduce time complexity while keeping detection
        performance.

    subset_size : float in (0., 1.0) or int (0, n_samples), optional
            (default=20)
        If sampling is True, the size of the subset, either as an absolute
        number of samples (int) or as a fraction of the dataset (float).

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number
        generator; if RandomState instance, random_state is the random
        number generator; if None, the random number generator is the
        RandomState instance used by np.random.

    Attributes
    ----------
    decision_scores_ : numpy array of shape (n_samples,)
        The outlier scores of the training data.
        The higher, the more abnormal. Outliers tend to have higher
        scores. This value is available once the detector is fitted.

    threshold_ : float
        The threshold is based on ``contamination``. It is the
        ``n_samples * contamination`` most abnormal samples in
        ``decision_scores_``. The threshold is calculated for generating
        binary outlier labels.

    labels_ : int, either 0 or 1
        The binary labels of the training data. 0 stands for inliers
        and 1 for outliers/anomalies. It is generated by applying
        ``threshold_`` on ``decision_scores_``.
    """

    def __init__(
        self,
        contamination=0.1,
        n_projections=1000,
        n_components=None,
        kernel="rbf",
        gamma=None,
        degree=3,
        coef0=1,
        kernel_params=None,
        alpha=1.0,
        eigen_solver="auto",
        tol=0,
        max_iter=None,
        remove_zero_eig=False,
        copy_X=True,
        n_jobs=None,
        sampling=False,
        subset_size=20,
        random_state=None,
    ):
        super().__init__(contamination=contamination)
        self.n_projections = n_projections
        self.n_components = n_components
        self.kernel = kernel
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
        self.alpha = alpha
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.remove_zero_eig = remove_zero_eig
        self.copy_X = copy_X
        self.n_jobs = n_jobs
        self.sampling = sampling
        self.subset_size = subset_size
        self.random_state = check_random_state(random_state)
        self.decision_scores_ = None

    def _check_subset_size(self, array):
        """Check subset size."""
        n_samples, _ = array.shape
        if isinstance(self.subset_size, int):
            if 0 < self.subset_size <= n_samples:
                return self.subset_size
            raise ValueError(
                f"subset_size={self.subset_size} "
                f"must be between 0 and n_samples={n_samples}."
            )
        if isinstance(self.subset_size, float):
            if 0.0 < self.subset_size <= 1.0:
                return int(self.subset_size * n_samples)
            raise ValueError(
                f"subset_size={self.subset_size} must be between 0.0 and 1.0."
            )
        raise TypeError(
            f"subset_size must be an int or a float, got {type(self.subset_size)}."
        )

    def fit(self, X, y=None):
        """Fit detector. y is ignored in unsupervised methods.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # validate inputs X and y (optional)
        X = check_array(X, copy=self.copy_X)
        self._set_n_classes(y)

        # perform subsampling to reduce time complexity
        if self.sampling:
            subset_size = self._check_subset_size(X)
            random_indices = self.random_state.choice(
                X.shape[0],
                size=subset_size,
                replace=False,
            )
            X = X[random_indices, :]

        # validate n_components and gamma (logic adapted from sklearn's
        # KernelPCA)
        if self.n_components is None:
            n_components = X.shape[0]  # use all dimensions
        else:
            if self.n_components < 1:
                raise ValueError(
                    f"`n_components` should be >= 1, got: {self.n_components}"
                )
            n_components = min(X.shape[0], self.n_components)
        if isinstance(self.gamma, str):
            if self.gamma == "scale":
                # var = E[X^2] - E[X]^2 if sparse
                X_var = X.var()
                self._gamma = 1.0 / (X.shape[1] * X_var) if X_var != 0 else 1.0
            elif self.gamma == "auto":
                self._gamma = 1.0 / X.shape[1]
            else:
                raise ValueError(
                    "When 'gamma' is a string, it should be either 'scale' or "
                    f"'auto'. Got '{self.gamma}' instead."
                )
        else:
            self._gamma = self.gamma

        self.kpca = KernelPCA(
            n_components=n_components,
            kernel=self.kernel,
            gamma=self._gamma,
            degree=self.degree,
            coef0=self.coef0,
            kernel_params=self.kernel_params,
            alpha=self.alpha,
            fit_inverse_transform=False,
            eigen_solver=self.eigen_solver,
            tol=self.tol,
            max_iter=self.max_iter,
            remove_zero_eig=self.remove_zero_eig,
            copy_X=self.copy_X,
            n_jobs=self.n_jobs,
        )

        # Project with a scalar product between K and the scaled eigenvectors
        x_transformed = self.kpca.fit_transform(X)  # [n_samples, n_components]
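        # Drop zero-eigenvalue components and divide by sqrt(eigenvalue) so
        # that every retained component has the same scale (a whitening step).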
        non_zeros = np.flatnonzero(self.kpca.eigenvalues_)
        x_transformed = x_transformed[:, non_zeros]
        x_transformed = x_transformed / np.sqrt(self.kpca.eigenvalues_[non_zeros])
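        # Draw random projection axes and normalize each column to unit
        # length, so that each axis is a direction on the unit sphere.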
        self.proj_axes = self.random_state.randn(
            x_transformed.shape[1], self.n_projections
        )
        self.proj_axes = normalize(self.proj_axes, axis=0)
        proj_data = np.dot(x_transformed, self.proj_axes)  # [n_samples, n_proj]
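        # Robust location and scale of each 1-D projection: the median and
        # the median absolute deviation (MAD) of the projected training data.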
        self.median = np.median(proj_data, axis=0)
        self.median_dev = stats.median_abs_deviation(proj_data, axis=0)
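        # Outlyingness O(x) = max over directions of |projection - median|
        # / MAD; the anomaly score is the negative depth -1 / (1 + O(x)).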
        self.decision_scores_ = np.max(
            np.abs(proj_data - self.median) / self.median_dev, axis=1
        )
        self.decision_scores_ = -1 / (1 + self.decision_scores_)
        self._process_decision_scores()
        return self

    def decision_function(self, X):
        """Predict raw anomaly score of X using the fitted detector.

        The anomaly score of an input sample is computed based on different
        detector algorithms. For consistency, outliers are assigned with
        larger anomaly scores.

        Parameters
        ----------
        X : numpy array of shape (n_samples, n_features)
            The input samples. Sparse matrices are accepted only
            if they are supported by the base estimator.

        Returns
        -------
        anomaly_scores : numpy array of shape (n_samples,)
            The anomaly score of the input samples.
        """
        check_is_fitted(self, ["decision_scores_", "threshold_", "labels_"])
        X = check_array(X)
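        # Map the test data with the fitted kernel PCA and apply the same
        # whitening and random projection axes that were learned in fit().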
        x_transformed = self.kpca.transform(X)
        non_zeros = np.flatnonzero(self.kpca.eigenvalues_)
        x_transformed = x_transformed[:, non_zeros]
        x_transformed = x_transformed / np.sqrt(self.kpca.eigenvalues_[non_zeros])
        proj_data = np.dot(x_transformed, self.proj_axes)  # (n_samples_test, n_proj)
        anomaly_scores = np.max(
            np.abs(proj_data - self.median) / self.median_dev, axis=1
        )
        anomaly_scores = -1 / (1 + anomaly_scores)
        return anomaly_scores
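

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original gist):
    # fit KRPD on Gaussian training data and score a test set whose last
    # five points are shifted far from the bulk, so they should receive
    # higher (less negative) anomaly scores than the inliers.
    rng = np.random.RandomState(42)
    x_train = rng.randn(200, 2)
    x_test = np.vstack([rng.randn(95, 2), rng.randn(5, 2) + 6.0])

    detector = KRPD(contamination=0.1, n_projections=1000, random_state=42)
    detector.fit(x_train)
    scores = detector.decision_function(x_test)  # higher = more abnormal
    print("mean score (inliers) :", scores[:95].mean())
    print("mean score (outliers):", scores[95:].mean())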