Skip to content

Instantly share code, notes, and snippets.

Avatar
👀
Look out, working on exciting things :)

Khyati Mahendru KhyatiMahendru

👀
Look out, working on exciting things :)
View GitHub Profile
View for-jason-kabi.ipynb
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
View spectral_clustering.py
# Spectral-clustering demo: imports and synthetic two-ring dataset.
from sklearn.datasets import make_circles
from sklearn.neighbors import kneighbors_graph
from sklearn.cluster import SpectralClustering
import numpy as np
import matplotlib.pyplot as plt

# Two concentric rings, 500 points total; the inner ring sits at 0.2x the
# outer radius, and Gaussian noise (std 0.1) perturbs every coordinate.
X, labels = make_circles(n_samples=500, noise=0.1, factor=0.2)
View ImageCompression.py
# Image source: https://cdn.pixabay.com/photo/2017/03/27/16/50/beach-2179624_960_720.jpg
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2

# Load the photo as a single-channel array; the second argument 0 is
# OpenCV's grayscale-read flag (cv2.IMREAD_GRAYSCALE).
img = cv2.imread('beach-2179624_960_720.jpg', 0)
# next step: compute the SVD of the grayscale image
View rsvd.py
# Rank-2 randomized SVD of a small symmetric 3x3 matrix.
import numpy as np
from sklearn.utils.extmath import randomized_svd

A = np.array([[-1, 2, 0], [2, 0, -2], [0, -2, 1]])

# Approximate only the top-2 singular triplets instead of a full decomposition.
u, s, vt = randomized_svd(A, n_components=2)

print("Left Singular Vectors:")
print(u)
print("Singular Values:")
View tsvd_sklearn.py
# Truncated SVD (rank 2) of a 3x3 matrix using scikit-learn.
import numpy as np
from sklearn.decomposition import TruncatedSVD

A = np.array([[-1, 2, 0], [2, 0, -2], [0, -2, 1]])
print("Original Matrix:")
print(A)

# Project A onto its two leading singular directions.
svd = TruncatedSVD(n_components=2)
A_transf = svd.fit_transform(A)
View svd_numpy.py
# Full singular value decomposition with NumPy: A = U @ diag(S) @ VT.
import numpy as np
from numpy.linalg import svd

# Input matrix as a 2-D NumPy array.
A = np.array([[4, 0], [3, -5]])

# U holds the left singular vectors (as columns), S the singular values in
# descending order, and VT the right singular vectors (as rows).
U, S, VT = svd(A)

print("Left Singular Vectors:")
print(U)
View LatentSemanticAnalysis.py
# Latent Semantic Analysis: build a document-term matrix, then compress it
# with truncated SVD. TfidfVectorizer can replace CountVectorizer unchanged.
from sklearn.feature_extraction.text import CountVectorizer

cvec = CountVectorizer()
# NOTE(review): assumes `data` is a DataFrame-like object with a 'text'
# column, defined earlier in the notebook — confirm upstream.
docTermMat = cvec.fit_transform(data['text'].values)

# Keep 20 latent topics; n_iter=500 randomized-solver iterations.
from sklearn.decomposition import TruncatedSVD

lsa = TruncatedSVD(n_components=20, n_iter=500)
lsa.fit(docTermMat)
View svd.py
# Dimensionality reduction via truncated SVD (unlike PCA it does not center
# the data, so it also works on sparse matrices).
from sklearn.decomposition import TruncatedSVD

# Reduce the data to 2 components.
# FIX: the original passed `n_features=2`, but TruncatedSVD has no such
# parameter — the correct keyword is `n_components`.
# FIX: the original used `//` for comments, which is a SyntaxError in
# Python; the comment marker is `#`.
svd = TruncatedSVD(n_components=2)

# `data` is assumed to be defined upstream (array-like or sparse matrix).
data_transformed = svd.fit_transform(data)
View imageprocessing.py
# Imports for an image-processing walkthrough.
import numpy as np
import matplotlib.pyplot as plt
import cv2
from skimage.color import rgb2gray
from scipy import ndimage

# Load the sample image; OpenCV returns channels in BGR order.
img = cv2.imread('1.jpeg')
View pca.py
# Project the data onto its 2 leading principal components.
from sklearn.decomposition import PCA

# FIX: the original used `//` as a comment marker, which is invalid Python
# at statement level (SyntaxError); the comment marker is `#`.
pca = PCA(n_components=2)

# `data` is assumed to be defined upstream (n_samples x n_features array).
data_transformed = pca.fit_transform(data)