Skip to content

Instantly share code, notes, and snippets.

View mesutcanalkan's full-sized avatar

mesutcanalkan

  • QbizUK
  • London
View GitHub Profile
@mesutcanalkan
mesutcanalkan / Ninja Function to load PV Data.py
Last active March 7, 2021 00:55
Ninja Function for API Call
def loading_generation_profile_data_from_ninja(PARAMS, WEATHER_YEAR, CAPACITY_CAL):
    """for a selected parameter dictionary, weather year, and capacity volume, an hourly capacity factor data frame is calculated.
    in: parameters
    out: dataframe
    """
    # Start at the first token in the pool; the function presumably rotates
    # through tokens to spread calls across Renewables.ninja rate limits —
    # TODO(review): confirm against the rest of the function.
    TOKEN_ORDER = 0
    # NOTE(review): LIST_OF_TOKENS must be defined at module level before this
    # function is called — it is not visible in this view.
    TOKEN_LIST = LIST_OF_TOKENS
    # NOTE(review): gist preview truncated here — the API call and dataframe
    # assembly that follow are not visible.
@mesutcanalkan
mesutcanalkan / Collect and Save Ninja PV Data CSVs.py
Created March 7, 2021 00:58
Collect and Save Ninja PV Data CSVs.py
# Parameter sweep for the Renewables.ninja PV API: every tracking mode,
# tilt angle, and azimuth angle combination to be requested.
TRACKING_OPTIONS_LIST = list(range(3))
TILT_DEGREE_OPTIONS_LIST = list(range(0, 91, 10))
AZIMUTH_DEGREE_OPTIONS_LIST = list(range(0, 181, 10))
# Coordinates of the Chelsea and Westminster Hospital
LATITUDE = 51.48437
LONGITUDE = -0.18183
YEAR = 2017
@mesutcanalkan
mesutcanalkan / Read and Compare Ninja CSVs.py
Created March 7, 2021 01:06
Read and Compare Ninja CSVs
# Index the downloaded Renewables.ninja PV CSVs by parsing the parameter
# values encoded in each file name, then attach each file's mean hourly
# capacity factor ('CF').
# BUG FIX: the original called glob.glob() twice — glob order is OS-dependent
# and not guaranteed to repeat between calls, so the CF column could be paired
# with the wrong file's metadata row. Glob once and sort for a stable pairing.
CSV_PATHS = sorted(glob.glob('Data/' + 'ninja_pv_coordinate_*.csv'))
df_CSV_Files = pd.DataFrame(
    [path.split('_') for path in CSV_PATHS],
    columns=['NINJA', 'RENEWABLE', 'TYPE', 'YEAR', 'LATITUDE', 'LONGITUDE',
             'SYSTEM_LOSS', 'TRACKING', 'TILT_DEGREE', 'AZIMUTH_DEGREE',
             'EXTENSION']).astype(str)
df_CSV_Files['CF'] = pd.Series([pd.read_csv(path)['CF'].mean() for path in CSV_PATHS])
@mesutcanalkan
mesutcanalkan / libraries_nba.py
Created November 9, 2021 22:31
libraries needed for nba web scraping
# !pip install webdriver-manager
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import re
@mesutcanalkan
mesutcanalkan / BRING_SEASONAL_PLAYER_STATS.py
Last active November 9, 2021 22:41
Nba.com main function
def BRING_SEASONAL_PLAYER_STATS(URL_LINK):
    """Open an nba.com stats page in Chrome via Selenium and wait for its dynamic content to render.

    URL_LINK: URL of the stats page to scrape.
    """
    # ChromeDriverManager().install() downloads a matching chromedriver and
    # returns its path, so no driver needs to be pre-installed.
    driver = webdriver.Chrome(ChromeDriverManager().install())
    # Open the URL on a google chrome window
    driver.get(URL_LINK)
    # As this is a dynamic html web-page, wait for 3 seconds for everything to be loaded
    time.sleep(3)
    # NOTE(review): gist preview truncated here — the page-parsing and return
    # logic is not visible in this view.
@mesutcanalkan
mesutcanalkan / google_search_player_id.py
Last active November 9, 2021 22:51
google search for nba players
# Google-search each player's name to locate their ESPN gamelog page.
for INDEX, ROW in df_Players.iterrows():
    # In order not to get the HTTP 429 Too Many Requests error, we're sleeping the script for some time.
    time.sleep(5)
    try:
        # Build a query like ".../search?q=First+Last+nba+espn+gamelog".
        site= 'https://www.google.com/search?q={}+nba+espn+gamelog'.format(df_Players.loc[INDEX,
        'PLAYER'].replace(' ', '+'))
        # NOTE(review): gist preview truncated here — the request and the
        # matching `except` clause are not visible in this view.
@mesutcanalkan
mesutcanalkan / yearly_player_gamelog.py
Last active November 9, 2021 23:24
Player Gamelog
def get_yearly_player_gamelog(PLAYER_URL, YEAR, PLAYER_NAME):
    """Fetch one season's gamelog tables for a player from their ESPN page.

    PLAYER_URL: base gamelog URL; YEAR is appended to it.
    PLAYER_NAME: player's display name (used further down, not visible here).
    """
    try:
        # pd.read_html returns every <table> on the page as a DataFrame.
        HTML_RESULTS = pd.read_html('{}{}'.format(PLAYER_URL, YEAR))
        LEN_HTML_RESULTS = len(HTML_RESULTS)
        EMPTY_LIST = []
        # NOTE(review): gist preview truncated here — the table-selection logic
        # and the matching `except` clause are not visible in this view.
@mesutcanalkan
mesutcanalkan / Seasonal Gamelog.py
Last active November 9, 2021 23:31
2000-01 Season for 2000 Drafted Players
# Select the players drafted in 2000 and keep only the columns needed to
# build their 2000-01 season gamelogs.
df_Players_Drafted_2000 = df_Players[
df_Players['DRAFT YEAR']=='2000'
][
['PLAYER', 'TEAM', 'AGE', 'HEIGHT', 'WEIGHT', 'COLLEGE COUNTRY','DRAFT YEAR', 'ESPN_GAMELOG_ID']
].reset_index(drop=True)
# Per-player gamelog frames are accumulated here for a single concat later.
SEASON_2000_2001_CAREER_LIST_TO_CONCAT = []
for INDEX, ROW in df_Players_Drafted_2000.iterrows():
    # NOTE(review): gist preview truncated here — the loop body is not
    # visible in this view.
# Environment setup: optimization (PuLP), plotting, OpenCV image handling,
# and Tesseract OCR bindings.
# !pip install pulp
import pulp as plp
# %matplotlib inline
from matplotlib import pyplot as plt
import cv2
import numpy as np
from pytesseract import image_to_string
import pytesseract
# Point pytesseract at the local Tesseract executable.
# NOTE(review): an executable path normally has no trailing slash — verify
# that '/usr/local/bin/tesseract/' resolves on the target machine.
pytesseract.pytesseract.tesseract_cmd = r'/usr/local/bin/tesseract/'
# !pip install streamlit==0.72.0
-- Build a week calendar from 2019-12-30 stepping 7 days at a time, stopping
-- before 2020-10-19, using a recursive CTE with SQLite's date() modifier
-- syntax; label each week 'YYYY-WW' and number the weeks sequentially.
WITH date_table AS ( WITH recursive date_in_range(day) AS ( VALUES('2019-12-30')
UNION ALL
SELECT date(day, '+7 day')
FROM date_in_range
WHERE date(day, '+7 day') < '2020-10-19' )
SELECT strftime('%Y-%W', day) AS year_week,
row_number() OVER(ORDER BY day) AS yw_id
FROM date_in_range)
-- NOTE(review): query truncated in this view — the definition of
-- table_for_flags and the remainder of the outer SELECT are not visible.
SELECT table_for_flags.user_id ,
table_for_flags.year_week ,