Birch algorithm based on cosine similarity
# -*- coding: utf-8 -*-
# @Author: Sadamori Kojaku
# @Date:   2022-08-28 14:29:28
# @Last Modified by:   Sadamori Kojaku
# @Last Modified time: 2022-11-11 21:25:00
# This is a modification of the Birch algorithm in the scikit-learn package.
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
#          Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#          Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import warnings
import numbers

import numpy as np
from scipy import sparse
from math import sqrt
from sklearn.metrics import pairwise_distances_argmin
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.base import (
    TransformerMixin,
    ClusterMixin,
    BaseEstimator,
)
from sklearn.utils.extmath import row_norms
from sklearn.utils import check_scalar, deprecated
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import ConvergenceWarning
from sklearn.cluster import AgglomerativeClustering
from sklearn._config import config_context


def CosineBirch(emb, n_clusters, metric="cosine", rho=0.5):
    """Birch algorithm based on cosine similarity.

    This is a modification of the Birch algorithm in the scikit-learn package,
    where I replaced the Euclidean distance function with cosine distance.
    This Birch variant is suitable when the similarity is based on the dot
    product, cosine similarity, or angular distance.

    :param emb: Array of embeddings. emb[i, :] is the embedding of point i.
    :type emb: 2D numpy.ndarray (n_nodes, dim)
    :param n_clusters: Number of clusters. If None, the number of clusters is
        inferred from the data.
    :type n_clusters: None or int
    :param metric: metric, defaults to "cosine"
    :type metric: str, optional
    :param rho: similarity threshold for merging subclusters, defaults to 0.5
    :type rho: float, optional
    :return: Array of cluster labels. The i-th element is the label of point i.
    :rtype: numpy.ndarray (n_nodes,)
    """
    X = np.einsum("ij,i->ij", emb, 1 / np.maximum(np.linalg.norm(emb, axis=1), 1e-24))
    # R = np.array(np.mean(X, axis=0)).reshape(-1)
    # rho = np.sum(R * R)
    # rho = (1 + rho) / 2  # heuristics
    try:
        if isinstance(n_clusters, int):
            model = Birch(
                threshold=rho,
                n_clusters=AgglomerativeClustering(
                    affinity=metric,
                    n_clusters=n_clusters,
                    linkage="average",
                ),
            )
        elif n_clusters is None:
            model = Birch(
                threshold=rho,
                n_clusters=AgglomerativeClustering(
                    affinity=metric,
                    distance_threshold=1 - rho,
                    n_clusters=None,
                    linkage="average",
                ),
            )
        else:
            model = Birch(threshold=rho, n_clusters=n_clusters)
    except ValueError:
        try:
            model = Birch(threshold=rho, n_clusters=None)
        except ValueError:
            return np.zeros(X.shape[0])
    return model.fit_predict(X)
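

# Example usage (an illustrative sketch; the data and cluster count below are
# made-up assumptions, not part of the original gist):
#
#     emb = np.random.randn(100, 32)           # 100 embedding vectors
#     labels = CosineBirch(emb, n_clusters=5)  # -> shape (100,) integer labels
#
# Passing n_clusters=None instead lets AgglomerativeClustering infer the
# number of clusters from the distance threshold 1 - rho.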
def _iterate_sparse_X(X):
    """This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
    """
    n_samples = X.shape[0]
    X_indices = X.indices
    X_data = X.data
    X_indptr = X.indptr

    for i in range(n_samples):
        row = np.zeros(X.shape[1])
        startptr, endptr = X_indptr[i], X_indptr[i + 1]
        nonzero_indices = X_indices[startptr:endptr]
        row[nonzero_indices] = X_data[startptr:endptr]
        yield row


def _split_node(node, threshold, branching_factor):
    """The node has to be split if there is no place for a new subcluster
    in the node.
    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to the nearest distance between the subclusters to the
       pair of distant subclusters.
    4. The two nodes are set as children to the two subclusters.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold=threshold,
        branching_factor=branching_factor,
        is_leaf=node.is_leaf,
        n_features=node.n_features,
    )
    new_node2 = _CFNode(
        threshold=threshold,
        branching_factor=branching_factor,
        is_leaf=node.is_leaf,
        n_features=node.n_features,
    )
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2

    if node.is_leaf:
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2

    # dist = euclidean_distances(
    #     node.centroids_, Y_norm_squared=node.squared_norm_, squared=True
    # )
    # dist = -node.linear_sum_ @ node.linear_sum_.T
    v = np.diag(node.n_samples_) @ node.centroids_
    dist = -v @ v.T + threshold * np.outer(node.n_samples_, node.n_samples_)
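    # Note (added commentary): ``v`` recovers the linear sums of the
    # subclusters (centroid times sample count), so ``-v @ v.T`` is the
    # negated pairwise dot product of linear sums, and the ``threshold`` term
    # rescales it by the pair sizes. Larger entries therefore mean *less*
    # similar pairs, which is why argmax below picks the two subclusters that
    # seed the split.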
    n_clusters = dist.shape[0]

    farthest_idx = np.unravel_index(dist.argmax(), (n_clusters, n_clusters))
    node1_dist, node2_dist = dist[(farthest_idx,)]

    node1_closer = node1_dist < node2_dist
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2


class _CFNode:
    """Each node in a CFTree is called a CFNode.

    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.

    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.

    branching_factor : int
        Maximum number of CF subclusters in each node.

    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.

    n_features : int
        The number of features.

    Attributes
    ----------
    subclusters_ : list
        List of subclusters for a particular CFNode.

    prev_leaf_ : _CFNode
        Useful only if is_leaf is True.

    next_leaf_ : _CFNode
        Useful only if is_leaf is True. Used to retrieve
        the final subclusters.

    init_centroids_ : ndarray of shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than ``centroids_``
        since the centroids are just a view of the ``init_centroids_``.

    init_n_samples_ : ndarray of shape (branching_factor + 1,)
        Manipulate ``init_n_samples_`` throughout, similar to
        ``init_centroids_``.

    centroids_ : ndarray of shape (branching_factor + 1, n_features)
        View of ``init_centroids_``.

    n_samples_ : ndarray of shape (branching_factor + 1,)
        View of ``init_n_samples_``.
    """
    def __init__(self, *, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features

        # The list of subclusters, centroids and sample counts
        # to manipulate throughout.
        self.subclusters_ = []
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_n_samples_ = np.zeros(branching_factor + 1)
        self.prev_leaf_ = None
        self.next_leaf_ = None

    def append_subcluster(self, subcluster):
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_n_samples_[n_samples] = subcluster.n_samples_

        # Keep ``centroids_`` and ``n_samples_`` as views. In this way,
        # updating ``init_centroids_`` and ``init_n_samples_`` is sufficient.
        self.centroids_ = self.init_centroids_[: n_samples + 1, :]
        self.n_samples_ = self.init_n_samples_[: n_samples + 1]

    def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_n_samples_[ind] = new_subcluster1.n_samples_
        self.append_subcluster(new_subcluster2)

    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.

        Returns True if the node has to be split, False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False

        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        dist_matrix = (
            np.dot(self.centroids_, subcluster.linear_sum_)
            - self.threshold * self.n_samples_ * subcluster.n_samples_
        )
        # dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        # dist_matrix *= -2.0
        # dist_matrix += self.squared_norm_
        # closest_index = np.argmin(dist_matrix)
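        # Note (added commentary): ``dist_matrix`` here is a similarity
        # score, not a distance: the dot product between each candidate's
        # centroid and the new subcluster's linear sum, penalized by the
        # threshold-scaled pair size. Hence the closest subcluster is the
        # argmax, not the argmin used by the original Euclidean Birch.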
        closest_index = np.argmax(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]

        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster)
            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = self.subclusters_[
                    closest_index
                ].centroid_
                # Keep the cached sample count in sync with the centroid.
                self.init_n_samples_[closest_index] = self.subclusters_[
                    closest_index
                ].n_samples_
                return False

            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor
                )
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2
                )

                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False

        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = closest_subcluster.centroid_
                # Keep the cached sample count in sync with the centroid.
                self.init_n_samples_[closest_index] = closest_subcluster.n_samples_
                return False

            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False

            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True


class _CFSubcluster:
    """Each subcluster in a CFNode is called a CFSubcluster.

    A CFSubcluster can have a CFNode as its child.

    Parameters
    ----------
    linear_sum : ndarray of shape (n_features,), default=None
        Sample. This is kept optional to allow initialization of empty
        subclusters.

    Attributes
    ----------
    n_samples_ : int
        Number of samples that belong to each subcluster.

    linear_sum_ : ndarray
        Linear sum of all the samples in a subcluster. Prevents holding
        all sample data in memory.

    centroid_ : ndarray of shape (n_features,)
        Centroid of the subcluster. Prevents recomputing of centroids when
        ``CFNode.centroids_`` is called.

    child_ : _CFNode
        Child node of the subcluster. Once a given _CFNode is set as the child
        of the _CFSubcluster, it is set to ``self.child_``.
    """
    def __init__(self, *, linear_sum=None):
        if linear_sum is None:
            self.n_samples_ = 0
            self.centroid_ = self.linear_sum_ = 0
        else:
            self.n_samples_ = 1
            self.centroid_ = self.linear_sum_ = linear_sum
        self.child_ = None

    def update(self, subcluster):
        self.n_samples_ += subcluster.n_samples_
        self.linear_sum_ += subcluster.linear_sum_
        self.centroid_ = self.linear_sum_ / np.maximum(1, self.n_samples_)

    def merge_subcluster(self, nominee_cluster, threshold):
        """Check if a cluster is worthy enough to be merged. If
        yes then merge.
        """
        new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
        new_n = self.n_samples_ + nominee_cluster.n_samples_
        new_centroid = (1 / new_n) * new_ls

        # The squared radius of the cluster is defined:
        #   r^2 = sum_i ||x_i - c||^2 / n
        # with x_i the n points assigned to the cluster and c its centroid:
        #   c = sum_i x_i / n
        # This can be expanded to:
        #   r^2 = sum_i ||x_i||^2 / n - 2 <sum_i x_i / n, c> + n ||c||^2 / n
        # and therefore simplifies to:
        #   r^2 = sum_i ||x_i||^2 / n - ||c||^2
        # sq_radius = new_ss / new_n - new_sq_norm

        # Merge gain
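        # Note (added commentary): for two subclusters A and B with linear
        # sums LS_A, LS_B and sizes n_A, n_B,
        #   <LS_A, LS_B> / (n_A * n_B) = mean_{i in A, j in B} <x_i, x_j>,
        # i.e. dQ below is the average pairwise dot product between members
        # of the two subclusters (the average cosine similarity, since the
        # input rows are normalized to unit length). The pair is merged when
        # this average similarity reaches the threshold.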
        dQ = np.sum(nominee_cluster.linear_sum_ * self.linear_sum_) / (
            self.n_samples_ * nominee_cluster.n_samples_
        )

        if dQ >= threshold:
            # if sq_radius <= threshold ** 2:
            (
                self.n_samples_,
                self.linear_sum_,
                self.centroid_,
            ) = (new_n, new_ls, new_centroid)
            return True
        return False


class Birch(ClusterMixin, TransformerMixin, BaseEstimator):
    """Implements the BIRCH clustering algorithm.

    It is a memory-efficient, online-learning algorithm provided as an
    alternative to :class:`MiniBatchKMeans`. It constructs a tree
    data structure with the cluster centroids being read off the leaf.
    These can be either the final cluster centroids or can be provided as input
    to another clustering algorithm such as :class:`AgglomerativeClustering`.

    Read more in the :ref:`User Guide <birch>`.

    .. versionadded:: 0.16

    Parameters
    ----------
    threshold : float, default=0.5
        Similarity threshold for merging subclusters. In this
        cosine-similarity variant, a new sample is merged into the closest
        subcluster when the merge gain (the average pairwise dot product
        between the two subclusters) is at least the threshold; otherwise a
        new subcluster is started. Setting this value very high promotes
        splitting and vice-versa.

    branching_factor : int, default=50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
        that node is split into two nodes with the subclusters redistributed
        in each. The parent subcluster of that node is removed and two new
        subclusters are added as parents of the 2 split nodes.

    n_clusters : int, instance of sklearn.cluster model, default=3
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples.

        - `None` : the final clustering step is not performed and the
          subclusters are returned as they are.

        - :mod:`sklearn.cluster` Estimator : If a model is provided, the model
          is fit treating the subclusters as new samples and the initial data
          is mapped to the label of the closest subcluster.

        - `int` : the model fit is :class:`AgglomerativeClustering` with
          `n_clusters` set to be equal to the int.

    compute_labels : bool, default=True
        Whether or not to compute labels for each fit.

    copy : bool, default=True
        Whether or not to make a copy of the given data. If set to False,
        the initial data will be overwritten.

    Attributes
    ----------
    root_ : _CFNode
        Root of the CFTree.

    dummy_leaf_ : _CFNode
        Start pointer to all the leaves.

    subcluster_centers_ : ndarray
        Centroids of all subclusters read directly from the leaves.

    subcluster_labels_ : ndarray
        Labels assigned to the centroids of the subclusters after
        they are clustered globally.

    labels_ : ndarray of shape (n_samples,)
        Array of labels assigned to the input data.
        If partial_fit is used instead of fit, they are assigned to the
        last batch of data.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    MiniBatchKMeans : Alternative implementation that does incremental updates
        of the centers' positions using mini-batches.

    Notes
    -----
    The tree data structure consists of nodes with each node consisting of
    a number of subclusters. The maximum number of subclusters in a node
    is determined by the branching factor. Each subcluster maintains a
    linear sum and the number of samples in that subcluster.
    In addition, each subcluster can also have a node as its child, if the
    subcluster is not a member of a leaf node.

    For a new point entering the root, it is merged with the subcluster closest
    to it and the linear sum and the number of samples of that
    subcluster are updated. This is done recursively till the properties of
    the leaf node are updated.

    References
    ----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
      BIRCH: An efficient data clustering method for large databases.
      https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf

    * Roberto Perdisci
      JBirch - Java implementation of BIRCH clustering algorithm
      https://code.google.com/archive/p/jbirch

    Examples
    --------
    >>> from sklearn.cluster import Birch
    >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
    >>> brc = Birch(n_clusters=None)
    >>> brc.fit(X)
    Birch(n_clusters=None)
    >>> brc.predict(X)
    array([0, 0, 0, 1, 1, 1])
    """
    def __init__(
        self,
        *,
        threshold=0.5,
        branching_factor=50,
        n_clusters=3,
        compute_labels=True,
        copy=True,
    ):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.n_clusters = n_clusters
        self.compute_labels = compute_labels
        self.copy = copy

    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "`fit_` is deprecated in 1.0 and will be removed in 1.2."
    )
    @property
    def fit_(self):
        return self._deprecated_fit

    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "`partial_fit_` is deprecated in 1.0 and will be removed in 1.2."
    )
    @property
    def partial_fit_(self):
        return self._deprecated_partial_fit

    def fit(self, X, y=None):
        """
        Build a CF Tree for the input data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self
            Fitted estimator.
        """
        # Validating the scalar parameters.
        check_scalar(
            self.threshold,
            "threshold",
            target_type=numbers.Real,
            min_val=0.0,
            include_boundaries="neither",
        )
        check_scalar(
            self.branching_factor,
            "branching_factor",
            target_type=numbers.Integral,
            min_val=1,
            include_boundaries="neither",
        )
        if isinstance(self.n_clusters, numbers.Number):
            check_scalar(
                self.n_clusters,
                "n_clusters",
                target_type=numbers.Integral,
                min_val=1,
            )

        # TODO: Remove deprecated flags in 1.2
        self._deprecated_fit, self._deprecated_partial_fit = True, False
        return self._fit(X, partial=False)

    def _fit(self, X, partial):
        has_root = getattr(self, "root_", None)
        first_call = not (partial and has_root)

        X = self._validate_data(
            X, accept_sparse="csr", copy=self.copy, reset=first_call
        )
        threshold = self.threshold
        branching_factor = self.branching_factor

        n_samples, n_features = X.shape

        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        if first_call:
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(
                threshold=threshold,
                branching_factor=branching_factor,
                is_leaf=True,
                n_features=n_features,
            )

            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(
                threshold=threshold,
                branching_factor=branching_factor,
                is_leaf=True,
                n_features=n_features,
            )
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_

        # Cannot vectorize. Enough to convince ourselves to use Cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X

        for sample in iter_func(X):
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)

            if split:
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor
                )
                del self.root_
                self.root_ = _CFNode(
                    threshold=threshold,
                    branching_factor=branching_factor,
                    is_leaf=False,
                    n_features=n_features,
                )
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)

        centroids = np.concatenate([leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids
        self._n_features_out = self.subcluster_centers_.shape[0]

        self._global_clustering(X)
        return self

    def _get_leaves(self):
        """
        Retrieve the leaves of the CF Node.

        Returns
        -------
        leaves : list of shape (n_leaves,)
            List of the leaf nodes.
        """
        leaf_ptr = self.dummy_leaf_.next_leaf_
        leaves = []
        while leaf_ptr is not None:
            leaves.append(leaf_ptr)
            leaf_ptr = leaf_ptr.next_leaf_
        return leaves

    def partial_fit(self, X=None, y=None):
        """
        Online learning. Prevents rebuilding of CFTree from scratch.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), \
                default=None
            Input data. If X is not provided, only the global clustering
            step is done.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self
            Fitted estimator.
        """
        # TODO: Remove deprecated flags in 1.2
        self._deprecated_partial_fit, self._deprecated_fit = True, False
        if X is None:
            # Perform just the final global clustering step.
            self._global_clustering()
            return self
        else:
            return self._fit(X, partial=True)

    def _check_fit(self, X):
        check_is_fitted(self)

        if (
            hasattr(self, "subcluster_centers_")
            and X.shape[1] != self.subcluster_centers_.shape[1]
        ):
            raise ValueError(
                "Training data and predicted data do not have the same "
                "number of features."
            )

    def predict(self, X):
        """
        Predict data using the ``centroids_`` of subclusters.

        Avoid computation of the row norms of X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data.

        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Labelled data.
        """
        check_is_fitted(self)
        X = self._validate_data(X, accept_sparse="csr", reset=False)
        return self._predict(X)

    def _predict(self, X):
        """Predict data using the ``centroids_`` of subclusters."""
        with config_context(assume_finite=True):
            # argmin = pairwise_distances_argmin(
            #     X, self.subcluster_centers_, metric_kwargs=kwargs
            # )
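            # Note (added commentary): instead of the Euclidean
            # nearest-centroid lookup above, each sample is assigned to the
            # subcluster whose centroid has the largest dot product with it
            # (equivalently, the highest cosine similarity for unit-norm
            # rows).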
            closest = np.argmax(X @ self.subcluster_centers_.T, axis=1).reshape(-1)
        return self.subcluster_labels_[closest]

    def transform(self, X):
        """
        Transform X into subcluster centroids dimension.

        In this cosine-similarity variant, each dimension represents the dot
        product between the sample and a subcluster centroid (rather than the
        Euclidean distance used by the original Birch).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data.

        Returns
        -------
        X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
            Transformed data.
        """
        check_is_fitted(self)
        self._validate_data(X, accept_sparse="csr", reset=False)
        with config_context(assume_finite=True):
            return X @ self.subcluster_centers_.T
            # return euclidean_distances(X, self.subcluster_centers_)

    def _global_clustering(self, X=None):
        """
        Global clustering for the subclusters obtained after fitting.
        """
        clusterer = self.n_clusters
        centroids = self.subcluster_centers_
        compute_labels = (X is not None) and self.compute_labels

        # Preprocessing for the global clustering.
        not_enough_centroids = False
        if isinstance(clusterer, numbers.Integral):
            clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
            # There is no need to perform the global clustering step.
            if len(centroids) < self.n_clusters:
                not_enough_centroids = True
        elif clusterer is not None and not hasattr(clusterer, "fit_predict"):
            raise TypeError(
                "n_clusters should be an instance of ClusterMixin or an int"
            )

        # To use in predict to avoid recalculation.
        self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)

        if clusterer is None or not_enough_centroids:
            self.subcluster_labels_ = np.arange(len(centroids))
            if not_enough_centroids:
                warnings.warn(
                    "Number of subclusters found (%d) by BIRCH is less "
                    "than (%d). Increase the threshold so that fewer "
                    "subclusters are merged." % (len(centroids), self.n_clusters),
                    ConvergenceWarning,
                )
        else:
            # The global clustering step that clusters the subclusters of
            # the leaves. It assumes the centroids of the subclusters as
            # samples and finds the final centroids.
            self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)

        if compute_labels:
            self.labels_ = self._predict(X)
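

# ---------------------------------------------------------------------------
# Minimal smoke test (an added sketch, not part of the original gist): the
# data, cluster count, and random seed below are illustrative assumptions.
# Two bundles of points around opposite directions on the unit sphere should
# come back as two clusters. This assumes a scikit-learn version in which
# AgglomerativeClustering still accepts the ``affinity`` argument, as the
# code above does.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    center = np.zeros(16)
    center[0] = 1.0
    a = center + 0.1 * rng.randn(50, 16)   # points near +e_0
    b = -center + 0.1 * rng.randn(50, 16)  # points near -e_0
    emb = np.vstack([a, b])
    labels = CosineBirch(emb, n_clusters=2)
    print("labels:", labels)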