import urllib.request
import bs4 as bs
import pandas as pd

guards_advanced = urllib.request.urlopen("https://rotogrinders.com/pages/nba-advanced-player-stats-guards-181885").read()
guards_advanced_soup = bs.BeautifulSoup(guards_advanced, 'lxml')
#leaving out a number of lines necessary to extract the data; see the github repo for the full code if you'd like.
guards_advanced_col_names = col_names.split()
print(guards_advanced_col_names)

#could also use the pandas read_html method
guards_advanced_dfs = pd.read_html("https://rotogrinders.com/pages/nba-advanced-player-stats-guards-181885")
guards_advanced_stats_df = guards_advanced_dfs[2]
guards_advanced_stats_df.tail()
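For context, the elided extraction step boils down to building a whitespace-separated col_names string from the parsed page. A hypothetical sketch of that step is below; the tag choices are assumptions rather than the repo's actual code.

# Hypothetical: grab the header text from the first table on the page.
table = guards_advanced_soup.find('table')
col_names = ' '.join(th.get_text(strip=True) for th in table.find_all('th'))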
#pipeline adjustment to export data to MongoDB
from pymongo import MongoClient
from scrapy.conf import settings

class MongoDBPipeline(object):

    def __init__(self):
        connection = MongoClient(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT'])
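The gist cuts off inside __init__. A typical completion of this standard Scrapy-to-MongoDB pattern is sketched below; the MONGODB_DB and MONGODB_COLLECTION setting names and the process_item body are assumptions, not necessarily the repo's exact code.

        # Assumed setting names; adjust to whatever settings.py defines.
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        # Insert each scraped item as a document in the collection.
        self.collection.insert_one(dict(item))
        return item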
#keep only locations that have 10 or more items
locations_ten_or_more = all_items_df.groupby(['Location']).filter(lambda g: len(g) >= 10) \
                                    .loc[:, ['Location', 'Description', 'Price', 'Title', 'Url']]

#checking the number of locations with 10 or more items
len_of_locs = len(locations_ten_or_more.groupby("Location").size())
print(f'There are {len_of_locs} cities with 10 items or more.')
print('\n')

#checking the locations with the most items in this subset
print('Locations with the most items in this subset:')
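The snippet ends at that print statement; the count itself presumably follows along the lines of this sketch (the use of value_counts here is an assumption about how it was computed):

print(locations_ten_or_more['Location'].value_counts().head(10))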
from monkeylearn import MonkeyLearn

#execute Summary Extractor model
ml = MonkeyLearn('insert api key here')
data = list(nlp_df_sample.iloc[:, 7])
model_id = 'ex_94WD2XxD'
summary_model_results = ml.extractors.extract(model_id, data, production_model=True)
print(summary_model_results.body)

#execute Price Extractor model
data = list(nlp_df_sample.iloc[:, 7])
model_id = 'ex_wNDME4vE'
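The gist stops right after setting the second model_id; presumably the Price Extractor is then run the same way as the Summary Extractor above. A sketch of that call (the price_model_results name is mine):

price_model_results = ml.extractors.extract(model_id, data, production_model=True)
print(price_model_results.body)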
import matplotlib.pyplot as plt

# vehicles are skewing the boxplot too much; all rows at or above $1.8k appear to be motor vehicles.
motor_vehicles = postings.loc[postings.price >= 1800.0, :]
motor_vehicles.plot.bar('name', 'price', figsize=(9, 9))
plt.ylabel("Price")
plt.xlabel("Vehicle")
plt.show();
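A later snippet works with a non_mv frame; it is presumably just the complement of the vehicle filter above, roughly:

# Assumed counterpart to the filter above: postings priced under $1,800.
non_mv = postings.loc[postings.price < 1800.0, :]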
import scrapy

class CraigslistWebscrapingItem(scrapy.Item):
    name = scrapy.Field()
    price = scrapy.Field()
    location = scrapy.Field()
    date = scrapy.Field()
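For context, a spider callback fills these fields in before yielding each item; a hypothetical fragment of such a parse method is below, with placeholder CSS selectors rather than the repo's actual ones.

    # Hypothetical fragment of a spider's parse method; the selectors are placeholders.
    def parse(self, response):
        for posting in response.css('li.result-row'):
            item = CraigslistWebscrapingItem()
            item['name'] = posting.css('a.result-title::text').get()
            item['price'] = posting.css('span.result-price::text').get()
            item['location'] = posting.css('span.result-hood::text').get()
            item['date'] = posting.css('time.result-date::attr(datetime)').get()
            yield item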
import seaborn as sns
import matplotlib.pyplot as plt

#Removing all locations with two or fewer items.
counts = non_mv.location.value_counts()
loc_gt2 = counts[counts > 2]
popular_locations = non_mv[non_mv.location.isin(loc_gt2.keys())]

plt.figure(figsize=(10, 5))
sns.violinplot(x="location", y="price", data=popular_locations, scale="width", inner="stick")
plt.show();
from wordcloud import WordCloud
import matplotlib.pyplot as plt

# combine all item names into a single string for the word cloud
wordcloud = WordCloud(background_color='white', mode="RGB", width=2000, height=1000).generate(' '.join(postings['name'].astype(str)))
plt.title("Craigslist Used Items Word Cloud")
plt.imshow(wordcloud)
plt.axis("off")
plt.show();
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

# Tree-based estimators can be used to compute feature importances, which in turn can be used to discard irrelevant features.
clf = RandomForestClassifier(n_estimators=50, max_features='sqrt')
clf = clf.fit(train, targets)

# Let's have a look at the importance of each feature.
features = pd.DataFrame()
features['feature'] = train.columns
features['importance'] = clf.feature_importances_

# Sorting values by feature importance.
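The gist ends on that comment; the sort itself presumably looks something like this sketch:

features.sort_values(by='importance', ascending=False, inplace=True)
features.head(10)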
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier

logreg = LogisticRegression()
logreg_cv = LogisticRegressionCV()
rf = RandomForestClassifier()
gboost = GradientBoostingClassifier()
svm = SVC(probability=True)
knn = KNeighborsClassifier()
dt = DecisionTreeClassifier()

models = [logreg, logreg_cv, rf, gboost, svm, knn, dt]
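Presumably each model in the list is then scored in a loop; a minimal sketch of that step, assuming 5-fold cross_val_score on the same train and targets frames used above:

from sklearn.model_selection import cross_val_score

# Score each candidate model with 5-fold cross-validation (illustrative only).
for model in models:
    scores = cross_val_score(model, train, targets, cv=5, scoring='accuracy')
    print(f'{model.__class__.__name__}: {scores.mean():.3f} (+/- {scores.std():.3f})')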