Karan Jakhar karanjakhar
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
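A minimal sketch of how these hyperparameters might be wired into MNIST data loaders with torchvision; the './data' directory and the normalization constants are assumptions, not part of the original gist.

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# sketch: build train/test loaders (data path and mean/std values are assumed)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
train_loader = DataLoader(
    datasets.MNIST('./data', train=True, download=True, transform=transform),
    batch_size=batch_size_train, shuffle=True)
test_loader = DataLoader(
    datasets.MNIST('./data', train=False, download=True, transform=transform),
    batch_size=batch_size_test, shuffle=False)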
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# save input image dimensions
img_rows, img_cols = 28, 28
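A hedged sketch of a typical continuation: reshape and normalize the images, one-hot encode the labels, then train a small CNN. The architecture and training settings below are assumptions rather than the gist's own.

# reshape to (samples, rows, cols, channels) and scale pixel values to [0, 1]
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1).astype('float32') / 255
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1).astype('float32') / 255
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# small CNN (layer sizes and epochs are assumptions)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(img_rows, img_cols, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))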
#importing required libraries
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import train_test_split
#loading data into dataframe
df = pd.read_csv('https://query.data.world/s/67p5gkjye5vocfiqm2cuxnrkx4ijim')
#printing first five rows
df.head()
#getting basic details about the data
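A sketch of how this LightGBM snippet could continue; df.info() stands in for the "basic details" step, and the label column name 'target' is a placeholder assumption because the dataset's schema is not shown here.

df.info()  # column types and missing-value counts

# 'target' is a placeholder for the actual label column
X = df.drop('target', axis=1)
y = df['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

model = lgb.LGBMClassifier()
model.fit(X_train, y_train)
print(model.score(X_test, y_test))  # mean accuracy on the held-out split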
#importing required libraries
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
#loading data into dataframe
df = pd.read_csv('https://query.data.world/s/67p5gkjye5vocfiqm2cuxnrkx4ijim')
#printing first five rows
df.head()
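A hedged sketch of a typical random forest continuation; 'target' is again a placeholder for the unknown label column.

# 'target' is a placeholder; adjust to the dataset's actual label column
X_train, X_test, y_train, y_test = train_test_split(
    df.drop('target', axis=1), df['target'], test_size=0.2, random_state=42)

rf = RandomForestClassifier(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
print(rf.score(X_test, y_test))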
#importing required libraries
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
#loading data into dataframe
df = pd.read_csv('https://query.data.world/s/67p5gkjye5vocfiqm2cuxnrkx4ijim')
#printing first five rows
df.head()
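A similar hedged sketch for the decision tree; the label column name and the max_depth setting are assumptions.

# 'target' is a placeholder; adjust to the dataset's actual label column
X_train, X_test, y_train, y_test = train_test_split(
    df.drop('target', axis=1), df['target'], test_size=0.2, random_state=42)

dt = DecisionTreeClassifier(max_depth=5, random_state=42)
dt.fit(X_train, y_train)
print(dt.score(X_test, y_test))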
@karanjakhar
karanjakhar / full implementation.py
Last active July 2, 2019 04:20
Testing the result of different classifiers
#importing required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
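One possible way to compare these classifiers on a shared train/test split and plot the accuracies; reusing the dataset URL, the 'target' column name, and the plotting choices are assumptions, not the gist's actual code.

from sklearn.model_selection import train_test_split

df = pd.read_csv('https://query.data.world/s/67p5gkjye5vocfiqm2cuxnrkx4ijim')
# 'target' is a placeholder for the actual label column
X_train, X_test, y_train, y_test = train_test_split(
    df.drop('target', axis=1), df['target'], test_size=0.2, random_state=42)

models = {
    'LogisticRegression': LogisticRegression(max_iter=1000),
    'DecisionTree': DecisionTreeClassifier(),
    'ExtraTree': ExtraTreeClassifier(),
    'RandomForest': RandomForestClassifier(),
    'SVC': SVC(),
    'GaussianNB': GaussianNB(),
    'KNeighbors': KNeighborsClassifier(),
}
# fit each model and record its held-out accuracy
scores = {name: m.fit(X_train, y_train).score(X_test, y_test) for name, m in models.items()}

plt.bar(list(scores.keys()), list(scores.values()))
plt.xticks(rotation=45)
plt.ylabel('accuracy')
plt.show()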
@karanjakhar
karanjakhar / kmeans.py
Created July 1, 2019 16:34
k-means with dummy data.
#importing required libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
#creating data
x1 = np.concatenate((np.random.normal(10,2,(100,1)),np.random.normal(20,5,(100,1))))
x2 = np.concatenate((np.random.normal(10,2,(100,1)), np.random.normal(30,3,(100,1))))
#visualizing the data
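A hedged sketch of the remainder: stack the two features, visualize the raw points, then fit KMeans with two clusters (the cluster count is inferred from how the dummy data is generated).

X = np.hstack((x1, x2))          # shape (200, 2): two features per point
plt.scatter(X[:, 0], X[:, 1])    # raw data before clustering
plt.show()

kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
plt.scatter(X[:, 0], X[:, 1], c=kmeans.labels_)                      # points coloured by cluster
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
            marker='x', s=200)                                       # cluster centres
plt.show()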
#importing required libraries
from sklearn.naive_bayes import GaussianNB
import pandas as pd
from sklearn.model_selection import train_test_split
#loading data into dataframe
df = pd.read_csv('https://query.data.world/s/67p5gkjye5vocfiqm2cuxnrkx4ijim')
#printing first five rows
df.head()
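A hedged Gaussian naive Bayes continuation, again with 'target' as a placeholder label column.

# 'target' is a placeholder; adjust to the dataset's actual label column
X_train, X_test, y_train, y_test = train_test_split(
    df.drop('target', axis=1), df['target'], test_size=0.2, random_state=42)

nb = GaussianNB()
nb.fit(X_train, y_train)
print(nb.score(X_test, y_test))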
#importing required libraries
from sklearn.neighbors import KNeighborsRegressor
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
#loading data for regression
r_df = pd.read_csv('boston_train.csv')
#printing first five rows
r_df.head()
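A sketch of a likely continuation for the regression gist; the target column name 'medv' is an assumption about the Boston housing CSV and may need adjusting.

# 'medv' (median house value) is assumed to be the regression target
X_train, X_test, y_train, y_test = train_test_split(
    r_df.drop('medv', axis=1), r_df['medv'], test_size=0.2, random_state=42)

knn_reg = KNeighborsRegressor(n_neighbors=5)
knn_reg.fit(X_train, y_train)
print(mean_squared_error(y_test, knn_reg.predict(X_test)))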
#importing required libraries
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
from sklearn.model_selection import train_test_split
#loading data into dataframe
df = pd.read_csv('https://query.data.world/s/67p5gkjye5vocfiqm2cuxnrkx4ijim')
#printing first five rows
df.head()
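A hedged k-nearest-neighbours classification continuation; the 'target' column name and n_neighbors=5 are assumptions.

# 'target' is a placeholder; adjust to the dataset's actual label column
X_train, X_test, y_train, y_test = train_test_split(
    df.drop('target', axis=1), df['target'], test_size=0.2, random_state=42)

knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
print(knn.score(X_test, y_test))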