saimadhu (saimadhu-polamuri) · For the love of data · GitHub gists
from scipy.stats import kstest
import numpy as np
# Generate a random sample from a normal distribution
sample = np.random.normal(loc=0, scale=1, size=100)
# Perform one-sample KS test against a normal distribution
statistic, pvalue = kstest(sample, 'norm')
print('Test statistic:', statistic)
print('P-value:', pvalue)
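# By default kstest(sample, 'norm') compares the sample against a standard
# normal N(0, 1); to test against a normal with fitted parameters you would
# pass args=(sample.mean(), sample.std()). A hypothetical follow-up to
# interpret the result at the usual 5% significance level:
alpha = 0.05
if pvalue < alpha:
    print('Reject H0: the sample does not follow a standard normal distribution.')
else:
    print('Fail to reject H0: the sample is consistent with a standard normal distribution.')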
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
# Creating a simple dataset
data = {'Temperature': [22, 26, 29, 33, 35, 38, 42, 25, 37, 31],
        'IceCreamSales': [120, 150, 170, 200, 220, 260, 310, 140, 240, 180]}
df = pd.DataFrame(data)
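# The gist preview ends here. A minimal sketch of what the remaining imports
# suggest (train/test split, LinearRegression fit, MAE, and a quick plot);
# the split ratio and plot styling are assumptions, not the original code:
X = df[['Temperature']]
y = df['IceCreamSales']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print('MAE:', mean_absolute_error(y_test, y_pred))
# Visualise the fitted line against the raw data
plt.scatter(df['Temperature'], df['IceCreamSales'], label='Data')
temps = df[['Temperature']].sort_values('Temperature')
plt.plot(temps, model.predict(temps), color='red', label='Fitted line')
plt.xlabel('Temperature')
plt.ylabel('Ice cream sales')
plt.legend()
plt.show()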
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.datasets import load_iris
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor
# Load the iris dataset
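# The preview cuts off at the load step. One plausible continuation, given the
# regressor imports above (purely an assumption about the original gist), is to
# load iris as a DataFrame and compare a linear and a KNN regressor on two features:
iris = load_iris(as_frame=True)
iris_df = iris.frame
X = iris_df[['petal length (cm)']]
y = iris_df['petal width (cm)']
lin = LinearRegression().fit(X, y)
knn = KNeighborsRegressor(n_neighbors=5).fit(X, y)
print('Linear R^2:', lin.score(X, y))
print('KNN R^2   :', knn.score(X, y))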
# Import necessary libraries
import pandas as pd
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler
# Load the dataset
cal_housing = fetch_california_housing(as_frame=True)
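# The preview stops after loading the data. A minimal sketch of what the
# remaining imports suggest: standardise the features and project them onto a
# few SVD components (the component count here is an arbitrary choice):
X = cal_housing.data
X_scaled = StandardScaler().fit_transform(X)
svd_housing = TruncatedSVD(n_components=4, random_state=42)
X_reduced = svd_housing.fit_transform(X_scaled)
print('Reduced shape:', X_reduced.shape)
print('Explained variance ratio:', svd_housing.explained_variance_ratio_)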
from sklearn.decomposition import TruncatedSVD
from sklearn.datasets import load_digits
# Load the digits dataset
digits = load_digits()
# Create a TruncatedSVD object with n_components=10
svd = TruncatedSVD(n_components=10)
# Fit the TruncatedSVD model to the data
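# Completing the truncated step: fit the model and transform the 64-pixel
# digit features down to 10 components (explained variance shown for reference):
X_digits_reduced = svd.fit_transform(digits.data)
print('Original shape:', digits.data.shape)       # (1797, 64)
print('Reduced shape :', X_digits_reduced.shape)  # (1797, 10)
print('Explained variance ratio sum:', svd.explained_variance_ratio_.sum())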
# NOTE: scikit-learn does not ship a RotationForestClassifier, so the original
# import (`from sklearn.ensemble import RotationForestClassifier`) fails.
# A rotation-forest-style ensemble is sketched below from sklearn primitives.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Generate a sample dataset
X, y = make_classification(n_samples=1000, n_features=20, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Initialize and train the Rotation Forest classifier
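# The gist presumably used a third-party Rotation Forest implementation. As a
# rough, simplified stand-in (one PCA rotation per tree instead of the
# per-feature-subset rotations of the full algorithm), the idea can be
# sketched with scikit-learn primitives:
import numpy as np
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(42)
rotations, trees = [], []
for _ in range(10):                                      # 10 rotated trees
    boot = rng.integers(0, len(X_train), len(X_train))   # bootstrap sample
    pca = PCA(random_state=42).fit(X_train[boot])        # learn a rotation
    tree = DecisionTreeClassifier(random_state=42).fit(pca.transform(X_train[boot]), y_train[boot])
    rotations.append(pca)
    trees.append(tree)

# Majority vote over the rotated trees
votes = np.array([t.predict(p.transform(X_test)) for t, p in zip(trees, rotations)])
y_pred = np.apply_along_axis(lambda col: np.bincount(col).argmax(), axis=0, arr=votes)
print('Accuracy:', accuracy_score(y_test, y_pred))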
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier, BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Assuming X and y are your data
# X, y = load_your_data()
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
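# The preview ends at the split. Continuing under the gist's own assumption
# that X and y are already loaded: bag several gradient-boosting models and
# score the ensemble (the keyword is `base_estimator` in scikit-learn < 1.2):
bagged_gb = BaggingClassifier(
    estimator=GradientBoostingClassifier(random_state=42),
    n_estimators=10,
    random_state=42,
)
bagged_gb.fit(X_train, y_train)
y_pred = bagged_gb.predict(X_test)
print('Accuracy:', accuracy_score(y_test, y_pred))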
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# Assuming X and y are your data
# X, y = load_your_data()
# Split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
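# Completing the truncated preview: fit a decision tree and report test accuracy
# (still assuming X and y were loaded earlier, as the gist states):
clf = DecisionTreeClassifier(random_state=42)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Accuracy:', accuracy_score(y_test, y_pred))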
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Generate a sample dataset
X, y = make_classification(n_samples=1000, n_features=20, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Initialize and train the Feature Bagging classifier
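# "Feature bagging" here presumably refers to the per-split feature subsampling
# a random forest already performs. A sketch using the imported
# RandomForestClassifier with an explicit max_features setting (the exact
# hyperparameters are assumptions):
rf = RandomForestClassifier(n_estimators=100, max_features='sqrt', random_state=42)
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
print('Accuracy:', accuracy_score(y_test, y_pred))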
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Generate a sample dataset
X, y = make_classification(n_samples=1000, n_features=20, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Initialize and train the Random Subspaces classifier
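# The classic Random Subspace method trains each base learner on all samples
# but a random subset of the features. The preview only imports
# RandomForestClassifier, so this BaggingClassifier-based sketch is an
# assumption about how the gist continued:
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

subspace_clf = BaggingClassifier(
    estimator=DecisionTreeClassifier(random_state=42),
    n_estimators=50,
    max_features=0.5,     # each tree sees a random half of the features
    bootstrap=False,      # use all samples; only the feature subsets vary
    random_state=42,
)
subspace_clf.fit(X_train, y_train)
y_pred = subspace_clf.predict(X_test)
print('Accuracy:', accuracy_score(y_test, y_pred))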