Keenan Burke-Pitts (Kiwibp)
# assumes scikit-learn; gboost and the train/test split are defined below
from sklearn import metrics

# train the model on the training set
gboost.fit(X_train, y_train)
# make class predictions for the testing set
y_pred_class = gboost.predict(X_test)
# IMPORTANT: first argument is true values, second argument is predicted values
print(metrics.confusion_matrix(y_test, y_pred_class))
binary = np.array([[125, 14],
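A related pattern worth noting: scikit-learn's confusion matrix can be unpacked into its four counts directly, which avoids hard-coding arrays like the one above. A minimal sketch, using the variable names from the snippet:

# unpack true negatives, false positives, false negatives, true positives
tn, fp, fn, tp = metrics.confusion_matrix(y_test, y_pred_class).ravel()
print(f'accuracy: {(tn + tp) / (tn + fp + fn + tp):.3f}')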
# assumes scikit-learn; instantiate one of each candidate classifier
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier

logreg = LogisticRegression()
logreg_cv = LogisticRegressionCV()
rf = RandomForestClassifier()
gboost = GradientBoostingClassifier()
svm = SVC(probability=True)
knn = KNeighborsClassifier()
dt = DecisionTreeClassifier()
models = [logreg, logreg_cv, rf, gboost, svm, knn, dt]
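A list like this is typically iterated over for a quick baseline comparison. A minimal sketch, assuming the X_train and y_train from the split above and 5-fold cross-validation:

from sklearn.model_selection import cross_val_score

# score every candidate model with 5-fold cross-validation on the training set
for model in models:
    scores = cross_val_score(model, X_train, y_train, cv=5, scoring='accuracy')
    print(f'{model.__class__.__name__}: {scores.mean():.3f} (+/- {scores.std():.3f})')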
# Tree-based estimators can compute feature importances, which in turn can be used to discard irrelevant features.
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier(n_estimators=50, max_features='sqrt')
clf = clf.fit(train, targets)
# Let's have a look at the importance of each feature.
features = pd.DataFrame()
features['feature'] = train.columns
features['importance'] = clf.feature_importances_
# Sort features by importance, most important first.
features.sort_values(by='importance', ascending=False, inplace=True)
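Acting on the comment about discarding irrelevant features, one option is scikit-learn's SelectFromModel, which drops columns whose importance falls below a threshold. A sketch, assuming the fitted clf and the train frame above:

from sklearn.feature_selection import SelectFromModel

# keep only features whose importance exceeds the mean importance (the default threshold)
selector = SelectFromModel(clf, prefit=True)
train_reduced = selector.transform(train)
print(train.shape, '->', train_reduced.shape)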
@Kiwibp
Kiwibp / wordcloud.py
Created June 11, 2018 16:20
Craigslist Webscraping Project
# assumes the wordcloud and matplotlib packages, and the postings frame from the scrape
from wordcloud import WordCloud
import matplotlib.pyplot as plt

# join posting names into one string (str() on a Series would include index numbers)
wordcloud = WordCloud(background_color='white', mode="RGB", width=2000, height=1000).generate(' '.join(postings['name'].astype(str)))
plt.title("Craigslist Used Items Word Cloud")
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
@Kiwibp
Kiwibp / popular-locations-subset.py
Created June 11, 2018 16:18
Craigslist Webscraping Project
@Kiwibp
Kiwibp / motor-vehicles-subset.py
Created June 11, 2018 16:15
Craigslist Webscraping Project
# vehicles are skewing the boxplot too much; all rows priced at or above $1.8k appear to be motor vehicles
motor_vehicles = postings.loc[postings.price >= 1800.0, :]
motor_vehicles.plot.bar('name', 'price', figsize=(9, 9))
plt.ylabel("Price")
plt.xlabel("Vehicle")
plt.show()
@Kiwibp
Kiwibp / scrapy-items-and-spider-scripts.py
Last active June 11, 2018 16:16
Craigslist Spider for Webscraping Project
import scrapy

class CraigslistWebscrapingItem(scrapy.Item):
    name = scrapy.Field()
    price = scrapy.Field()
    location = scrapy.Field()
    date = scrapy.Field()
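The gist title mentions a spider script alongside the item definition, but only the item appears in this capture. A minimal spider that fills the item might look like the sketch below; the start URL and CSS selectors are illustrative assumptions, not the author's actual code:

class CraigslistSpider(scrapy.Spider):
    name = 'craigslist'
    # placeholder URL; the real gist's target page is not shown in this capture
    start_urls = ['https://city.craigslist.org/d/for-sale/search/sss']

    def parse(self, response):
        # CSS selectors are illustrative and would need to match the live page markup
        for posting in response.css('li.result-row'):
            item = CraigslistWebscrapingItem()
            item['name'] = posting.css('a.result-title::text').get()
            item['price'] = posting.css('span.result-price::text').get()
            item['location'] = posting.css('span.result-hood::text').get()
            item['date'] = posting.css('time.result-date::attr(datetime)').get()
            yield item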
# assumes the monkeylearn client library; insert your own API key below
from monkeylearn import MonkeyLearn

# execute Summary Extractor model
ml = MonkeyLearn('insert api key here')
data = list(nlp_df_sample.iloc[:, 7])
model_id = 'ex_94WD2XxD'
summary_model_results = ml.extractors.extract(model_id, data, production_model=True)
print(summary_model_results.body)

# execute Price Extractor model
data = list(nlp_df_sample.iloc[:, 7])
model_id = 'ex_wNDME4vE'
price_model_results = ml.extractors.extract(model_id, data, production_model=True)
print(price_model_results.body)
# keep only locations that appear in at least 10 postings
locations_ten_or_more = all_items_df.groupby(['Location']).filter(lambda g: len(g) >= 10) \
    .loc[:, ['Location', 'Description', 'Price', 'Title', 'Url']]

# count how many locations have 10 or more items
len_of_locs = len(locations_ten_or_more.groupby("Location").size())
print(f'There are {len_of_locs} cities with 10 items or more.')
print('\n')

# check which locations have the most items in this subset
print('Locations with the most items in this subset:')
print(locations_ten_or_more['Location'].value_counts().head())
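A quick way to see the distribution behind that printout is a bar chart of the most common locations. A sketch, assuming matplotlib is imported as plt as in the other gists:

# plot the ten locations with the most postings in the subset
locations_ten_or_more['Location'].value_counts().head(10).plot.bar(figsize=(9, 6))
plt.ylabel("Number of postings")
plt.xlabel("Location")
plt.show()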
# Facebook Marketplace scraper
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from pymongo import MongoClient
class App:
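    # NOTE: the original class body is cut off in this capture; the methods below are a
    # hypothetical skeleton inferred from the imports above, not the author's actual code.
    def __init__(self):
        # connect to a local MongoDB instance; database and collection names are placeholders
        self.client = MongoClient('localhost', 27017)
        self.collection = self.client['marketplace']['postings']
        # launch a browser session and a shared explicit wait for page elements
        self.driver = webdriver.Chrome()
        self.wait = WebDriverWait(self.driver, 10)

    def close(self):
        # release the browser and the database connection
        self.driver.quit()
        self.client.close()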