Skip to content

Instantly share code, notes, and snippets.

View KhyatiMahendru's full-sized avatar
👀
Look out, working on exciting things :)

Khyati Mahendru KhyatiMahendru

👀
Look out, working on exciting things :)
View GitHub Profile
from sklearn.cluster import KMeans
# Elbow-method helper: computes the within-cluster sum of squares (WSS)
# for every k in 1..kmax so a caller can locate the "elbow" visually.
# NOTE(review): this excerpt is truncated — the per-point SSE accumulation
# and the return statement continue beyond the visible lines. Indentation
# was also flattened during extraction.
def calculate_WSS(points, kmax):
sse = []  # WSS score for each candidate k, in increasing-k order
for k in range(1, kmax+1):
# fit a fresh model for each candidate cluster count
kmeans = KMeans(n_clusters = k).fit(points)
centroids = kmeans.cluster_centers_  # one centroid per cluster
pred_clusters = kmeans.predict(points)  # cluster index assigned to each point
curr_sse = 0  # running SSE accumulator for this value of k
@KhyatiMahendru
KhyatiMahendru / creditcardfrauddetection.ipynb
Last active February 21, 2022 16:37
CreditCardFraudDetection.ipynb
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
# One gradient step for a two-feature logistic model (weights m1, m2,
# bias b) trained with binary cross-entropy.
# NOTE(review): excerpt truncated — the m2/b derivative accumulation and
# the actual parameter updates continue past the visible lines;
# indentation was flattened during extraction.
def update_weights_BCE(m1, m2, b, X1, X2, Y, learning_rate):
m1_deriv = 0
m2_deriv = 0
b_deriv = 0
N = len(X1)
for i in range(N):
# BUG: double reciprocal — 1 / (1 / (1 + e^-z)) simplifies to 1 + e^-z,
# NOT the sigmoid 1 / (1 + e^-z); the outer "1 /" cancels the inner one.
s = 1 / (1 / (1 + math.exp(-m1*X1[i] - m2*X2[i] - b)))
# Calculate partial derivatives
# NOTE(review): the leading minus suggests the (unseen) update step adds
# learning_rate * deriv rather than subtracting — confirm in full source.
m1_deriv += -X1[i] * (s - Y[i])
# importing requirements
from keras.layers import Dense
from keras.models import Sequential
# NOTE(review): modern Keras exposes this optimizer as `Adam`; the
# lowercase `adam` name only resolves in older Keras releases — confirm
# the pinned Keras version before reusing this snippet.
from keras.optimizers import adam
# alpha = 0.001 as given in the lr parameter in adam() optimizer
# build the model
model_alpha1 = Sequential()
# first layer: 2 input features feeding 50 ReLU units
model_alpha1.add(Dense(50, input_dim=2, activation='relu'))
# NOTE(review): excerpt truncated — the output layer, compile() and fit()
# calls continue beyond the visible lines.
@KhyatiMahendru
KhyatiMahendru / for-jason-kabi.ipynb
Last active August 12, 2020 16:15
For Jason Kabi.ipynb
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
# Choose k for KMeans by sweeping the mean silhouette coefficient.
from sklearn.metrics import silhouette_score

sil = []   # mean silhouette score for each candidate k (k = 2..kmax)
kmax = 10

# A silhouette compares each point's own cluster against the nearest
# *other* cluster, so it is undefined for a single cluster and the
# sweep must start at k = 2.
for k in range(2, kmax + 1):
    kmeans = KMeans(n_clusters=k).fit(x)
    labels = kmeans.labels_
    sil.append(silhouette_score(x, labels, metric='euclidean'))
# import required functions and libraries
from sklearn.datasets import make_circles
from sklearn.neighbors import kneighbors_graph
from sklearn.cluster import SpectralClustering
import numpy as np
import matplotlib.pyplot as plt
# generate your data: 500 2-D points on two concentric circles
# (inner radius = 0.2 of the outer, Gaussian noise sd 0.1);
# `labels` carries the ground-truth circle membership of each point
X, labels = make_circles(n_samples=500, noise=0.1, factor=.2)
# One gradient-descent step for simple linear regression (y ≈ m*x + b)
# under the Huber loss with threshold `delta`.
# NOTE(review): excerpt truncated — the linear-region (else) branch body
# and the final parameter updates continue past the visible lines;
# indentation was flattened during extraction.
def update_weights_Huber(m, b, X, Y, delta, learning_rate):
m_deriv = 0
b_deriv = 0
N = len(X)
for i in range(N):
# derivative of quadratic for small values and of linear for large values
if abs(Y[i] - m*X[i] - b) <= delta:
# quadratic region: gradient of 0.5 * residual**2 w.r.t. m and b
m_deriv += -X[i] * (Y[i] - (m*X[i] + b))
b_deriv += - (Y[i] - (m*X[i] + b))
else:
# get the image from "https://cdn.pixabay.com/photo/2017/03/27/16/50/beach-2179624_960_720.jpg"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
# read image in grayscale (flag 0 == cv2.IMREAD_GRAYSCALE); per the
# OpenCV API, `img` is a 2-D array, or None if the file cannot be read
img = cv2.imread('beach-2179624_960_720.jpg', 0)
# obtain svd
import numpy as np
from sklearn.utils.extmath import randomized_svd
# small 3x3 symmetric test matrix
A = np.array([[-1, 2, 0], [2, 0, -2], [0, -2, 1]])
# truncated (rank-2) SVD via randomized projection: u is 3x2, s holds the
# top-2 singular values, vt is 2x3
u, s, vt = randomized_svd(A, n_components = 2)
print("Left Singular Vectors:")
print(u)
# NOTE(review): excerpt truncated — the print of `s` (and likely `vt`)
# continues beyond the visible lines.
print("Singular Values:")