This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import numpy as np | |
class MS(object):
    """Minesweeper game state.

    Holds the mine layout (``hidden_board``), a boolean mask of cells the
    player has revealed (``player_board``), and the running score.
    """

    def __init__(self, board_len=5, mines=10, *args, **kwargs):
        # Cooperative init so MS can sit in a multiple-inheritance chain
        # (e.g. mixed with a GUI base class).
        super().__init__(*args, **kwargs)
        self.board_len = board_len
        # Mine layout; create_hidden_board is defined elsewhere in the project.
        self.hidden_board = self.create_hidden_board(board_len, mines)
        # Revealed-cell mask: board_len x board_len, everything hidden at start.
        self.player_board = np.full((board_len, board_len), False)
        self.score = 0
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
##### Xgboost Cross Validation using Subtest Accuracy/AMS #####
# Exhaustive grid search over learning rate, tree depth, boosting rounds and
# classification threshold; one result row per combination is expected to be
# accumulated into result_df inside the (truncated) loop body.
# NOTE(review): data.frame(eta = NULL, ...) creates an empty 0-column frame;
# presumably rows are rbind-ed onto it further down — not visible here.
result_df = data.frame(eta = NULL, max_depth= NULL, nrounds = NULL, threshold = NULL, accuracy = NULL, ams_sum = NULL)
counter = 1
# record the elapsed time
ptm <- proc.time()
# Learning rate: from 0.02 up to 0.4 in steps of 0.08
for (eta in seq(0.02, 0.4, 0.08)){
  # Tree depth: 5 through 10
  for (depth in seq(5, 10, 1)){
    # Boosting rounds: 25 up to 90 in steps of 10
    for (nrounds in seq(25, 90, 10)){
      # Decision threshold: 0.1 to 0.3 in steps of 0.05
      for (threshold in seq(0.1, 0.3, 0.05)){
        # Progress indicator; fragment is truncated after this line.
        print (counter)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# subset my own training and testing
# Deterministic (seeded) 70/30 split of dfTrain, stratified on Label.
set.seed(0)
# sample.split() is from the caTools package: returns a logical vector
# flagging the ~70% of rows assigned to training, stratified by Label.
idx = sample.split(dfTrain$Label, SplitRatio = .7)
# Convert the logical mask into integer row indices.
idx = which(idx == TRUE)
subTrain = dfTrain[idx,]
# Drop columns 1, 32, 33 to form the feature matrix — presumably the
# id/weight/label bookkeeping columns; confirm against dfTrain's schema.
mytrain = subTrain[, -c(1,32,33)]
# Remaining 30% of rows become the held-out test set.
subTest = dfTrain[-idx,]
mytest = subTest[, -c(1,32,33)]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# function to find optimal hidden nodes and threshold
# cv_nnet_models: cross-validates neural networks over hidden-node counts
# from `start` to `end` (inclusive), scoring on the AMS metric against
# `validation`. The fragment is truncated before the scoring/return logic,
# so the full return contract is not visible here.
cv_nnet_models = function(train, validation, start, end) {
  print(paste('Cross validating neural network on ams score with nodes from',start,'to',end))
  # create data frame to return best model for each hidden node level
  # NOTE(review): data.frame(... = NULL) is an empty frame; rows are
  # presumably appended in the truncated remainder of this function.
  neural_df = data.frame(hidden_nodes= NULL, threshold=NULL, ams_sum = NULL)
  # iterate with increasing number of hidden nodes
  for (hidden_nodes in seq(start,end,1)){
    print(paste('Training with', hidden_nodes,'hidden nodes'))
    # train model
    # train_neural() is defined elsewhere in the project.
    model = train_neural(train, hidden_nodes)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# if making request for billboard playlist
# NOTE(review): this is the interior of a request handler; `request`
# (Flask-style) comes from the enclosing scope, which is not visible here.
if request.args.get('day', None, type=str) != None:
    # Imports deferred so selenium only loads when this branch is taken.
    from selenium import webdriver
    from selenium.webdriver.common.keys import Keys
    # NOTE(review): executable_path= and find_element_by_name() are
    # Selenium 3 APIs, removed in Selenium 4 — confirm the pinned version.
    driver = webdriver.Chrome(executable_path="./chromedriver")
    # Scrape the UMD Music daily chart page.
    driver.get("http://www.umdmusic.com/default.asp?Chart=D")
    # Fill in the chart-date form: day field first...
    elem = driver.find_element_by_name('ChDay')
    elem.clear()
    elem.send_keys(request.args.get('day', 0, type=str))
    # ...then the month field; the fragment is truncated after this line.
    elem = driver.find_element_by_name('ChMonth')
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# API: get recommended tracks data based on seed and target values
# Builds the Spotify /recommendations URL from the 5 seed artists and the
# target_* attribute values (already stringified upstream), capped at 20.
recommendations_api_endpoint = SPOTIFY_API_URL + "/recommendations?seed_artists=" + ",".join(seed_artists) + "&target_energy=" + target_energy + "&target_liveness=" + target_liveness + "&target_tempo=" + target_tempo + "&target_speechiness=" + target_speechiness + "&target_acousticness=" + target_acousticness + "&target_instrumentalness=" + target_instrumentalness + "&target_danceability=" + target_danceability + "&target_loudness=" + target_loudness + "&limit=20"
# GLOBAL['authorization_header'] carries the OAuth bearer token (set elsewhere).
recommendations_response = requests.get(recommendations_api_endpoint, headers=GLOBAL['authorization_header'])
recommendation_data['data'] = json.loads(recommendations_response.text)['tracks']
# set recommended track title and ids
for track in recommendation_data['data']:
    # Dont add duplicate recommendations or songs already in your playlist
    if track['id'] not in recommendation_data['ids'] and track['id'] not in seed_data['song_ids']:
        # NOTE(review): the source is truncated mid-call here — presumably
        # recommendation_data['titles'].append(<track title>).
        recommendation_data['titles'].ap
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# get seed_distances from phantom_average_track
# Row 0 is the synthetic "average" track; the remaining rows are the
# playlist tracks' attribute vectors.
# BUGFIX: in Python 3 dict.values() is a view, not a list, so wrapping it
# bare produced a ragged/object matrix; list(...) makes row 0 a real vector.
# NOTE(review): assumes dict insertion order matches the column order of
# playlist_tracks_attributes (true on 3.7+ if both are built from the same
# attribute list) — confirm upstream construction.
seed_distances = [list(phantom_average_track.values())] + playlist_tracks_attributes
dist = DistanceMetric.get_metric('euclidean')
# pairwise(...)[0]: distances from the average track to every row
# (including itself at position 0, which is always 0).
distances = dist.pairwise(seed_distances)[0]
# Drop the leading self-distance so indexes line up with the playlist tracks.
seed_data['distances'] = distances[1:]
# get attributes of the 5 closest tracks to the phantom_average_track
seed_indexes = seed_data['distances'].argsort()[:5]
seed_songs = [seed_data['song_ids'][i] for i in seed_indexes]
seed_artists = [seed_data['artist_ids'][i] for i in seed_indexes]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Build the "phantom" average track: each target attribute is the mean of
# that attribute across every track in the user's library.
target_attributes = ['energy','liveness','tempo','speechiness','acousticness','instrumentalness','danceability','loudness']
track_count = len(library_tracks)
phantom_average_track = {
    attr: sum(t[attr] for t in library_tracks) / track_count
    for attr in target_attributes
}
NewerOlder