fnneves / 1.py
medium
import numpy as np

np.random.seed(42)
num_ports = 6000
all_weights = np.zeros((num_ports, len(stocks.columns)))  # stocks: price DataFrame defined earlier
ret_arr = np.zeros(num_ports)
vol_arr = np.zeros(num_ports)
sharpe_arr = np.zeros(num_ports)
for x in range(num_ports):
    # random weights, one per asset, rescaled to sum to 1
    weights = np.random.random(len(stocks.columns))
    weights /= np.sum(weights)
    all_weights[x, :] = weights
    # annualized return, volatility and Sharpe ratio (log_ret: daily log returns, defined earlier)
    ret_arr[x] = np.sum(log_ret.mean() * weights) * 252
    vol_arr[x] = np.sqrt(np.dot(weights.T, np.dot(log_ret.cov() * 252, weights)))
    sharpe_arr[x] = ret_arr[x] / vol_arr[x]
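2.py below plots max_sr_vol and max_sr_ret, which the gist never defines; a minimal sketch, assuming they are read straight off the simulation arrays above:

max_sr_idx = sharpe_arr.argmax()  # index of the best simulated portfolio
max_sr_ret = ret_arr[max_sr_idx]
max_sr_vol = vol_arr[max_sr_idx]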
fnneves / 2.py
markowitz
import matplotlib.pyplot as plt

plt.figure(figsize=(12, 8))
plt.scatter(vol_arr, ret_arr, c=sharpe_arr, cmap='viridis')
plt.colorbar(label='Sharpe Ratio')
plt.xlabel('Volatility')
plt.ylabel('Return')
plt.scatter(max_sr_vol, max_sr_ret, c='red', s=50)  # red dot marks the max-Sharpe portfolio
plt.show()
fnneves / 3.py
Markowitz
def get_ret_vol_sr(weights):
    # annualized return, volatility and Sharpe ratio for a weight vector
    weights = np.array(weights)
    ret = np.sum(log_ret.mean() * weights) * 252
    vol = np.sqrt(np.dot(weights.T, np.dot(log_ret.cov() * 252, weights)))
    sr = ret / vol
    return np.array([ret, vol, sr])

def neg_sharpe(weights):
    # index 2 of get_ret_vol_sr is the Sharpe ratio; negate it so that
    # minimizing this function maximizes the Sharpe ratio
    return get_ret_vol_sr(weights)[2] * -1
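4.py below relies on check_sum, minimize_volatility, init_guess and bounds, none of which appear in the gist. A minimal sketch of that glue, assuming four assets and scipy's SLSQP solver (the names follow the snippets; the bodies are assumptions):

from scipy.optimize import minimize

def check_sum(weights):
    # equality constraint: returns 0 exactly when the weights sum to 1
    return np.sum(weights) - 1

def minimize_volatility(weights):
    # index 1 of get_ret_vol_sr is volatility
    return get_ret_vol_sr(weights)[1]

bounds = tuple((0, 1) for _ in range(4))  # long-only, one bound per asset
init_guess = [0.25, 0.25, 0.25, 0.25]     # equal-weight starting point

# maximize the Sharpe ratio by minimizing its negative
opt_results = minimize(neg_sharpe, init_guess, method='SLSQP',
                       bounds=bounds, constraints=({'type': 'eq', 'fun': check_sum},))
print(get_ret_vol_sr(opt_results.x))  # [return, volatility, sharpe]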
fnneves / 4.py
frontier_x = []
# frontier_y is assumed to hold the target returns for the frontier,
# e.g. np.linspace(0, 0.3, 100)
for possible_return in frontier_y:
    cons = ({'type': 'eq', 'fun': check_sum},
            {'type': 'eq', 'fun': lambda w: get_ret_vol_sr(w)[0] - possible_return})
    result = minimize(minimize_volatility, init_guess, method='SLSQP',
                      bounds=bounds, constraints=cons)
    frontier_x.append(result['fun'])  # minimum volatility for this target return

plt.figure(figsize=(12, 8))
plt.scatter(vol_arr, ret_arr, c=sharpe_arr, cmap='viridis')
plt.colorbar(label='Sharpe Ratio')
plt.xlabel('Volatility')
plt.ylabel('Return')
plt.plot(frontier_x, frontier_y, 'r--', linewidth=3)  # efficient frontier
plt.savefig('cover.png')
plt.show()
%%time
from requests import get
from bs4 import BeautifulSoup

# headers is assumed to be defined earlier, e.g. a dict carrying a browser User-Agent
n_pages = 0
for page in range(0, 900):
    n_pages += 1
    sapo_url = 'https://casa.sapo.pt/Venda/Apartamentos/?sa=11&lp=10000&or=10' + '&pn=' + str(page)
    r = get(sapo_url, headers=headers)
    page_html = BeautifulSoup(r.text, 'html.parser')
    house_containers = page_html.find_all('div', class_="searchResultProperty")
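The DataFrame below expects lists (titles, prices, areas, zone, created, condition, descriptions, urls, images) filled inside that page loop. A hypothetical sketch of the unpacking; the selectors are placeholders, since the real tags and classes depend on the page markup at scraping time:

# inside the page loop, after house_containers is set
for container in house_containers:
    # placeholder selectors: inspect the live HTML and adjust
    titles.append(container.find('span', class_='propertyTitle').text.strip())
    zone.append(container.find('p', class_='propertyLocation').text.strip())
    prices.append(container.find('span', class_='propertyPrice').text.strip())
    urls.append('https://casa.sapo.pt' + container.find('a').get('href'))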
import pandas as pd

cols = ['Title', 'Zone', 'Price', 'Size (m²)', 'Status', 'Description', 'Date', 'URL', 'Image']
lisboa = pd.DataFrame({'Title': titles,
                       'Price': prices,
                       'Size (m²)': areas,
                       'Zone': zone,
                       'Date': created,
                       'Status': condition,
                       'Description': descriptions,
                       'URL': urls,
                       'Image': images})[cols]  # reorder columns to match cols
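A typical follow-up (not shown in the gist) is persisting the scrape to disk:

lisboa.to_csv('lisboa_raw.csv', index=False, encoding='utf-8')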
hashtag_list = ['travelblog', 'travelblogger', 'traveler']
# prev_user_list = []  # on the first run there is no log yet: use this line and comment out the two below
prev_user_list = pd.read_csv('20181203-224633_users_followed_list.csv', delimiter=',').iloc[:, 1:2]  # log of users already followed
prev_user_list = list(prev_user_list['0'])
new_followed = []
tag = -1
followed = 0
likes = 0
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep, strftime
from random import randint
import pandas as pd

chromedriver_path = 'C:/Users/User/Downloads/chromedriver_win32/chromedriver.exe'  # change this to your own chromedriver path!
driver = webdriver.Chrome(executable_path=chromedriver_path)  # named driver so it does not shadow the webdriver module
sleep(2)
driver.get('https://www.instagram.com/accounts/login/?source=auth_switcher')
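The gist stops before the login itself; a minimal sketch, assuming the form inputs are still named 'username' and 'password' (Selenium 3 style calls, matching executable_path above; the credentials are placeholders):

sleep(3)  # let the login form render
username_input = driver.find_element_by_name('username')
password_input = driver.find_element_by_name('password')
username_input.send_keys('your_username')
password_input.send_keys('your_password')
password_input.send_keys(Keys.ENTER)
sleep(5)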
from time import sleep, strftime
from random import randint
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import smtplib
from email.mime.multipart import MIMEMultipart
# Change this to your own chromedriver path!
chromedriver_path = 'C:/{YOUR PATH HERE}/chromedriver_win32/chromedriver.exe'
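The smtplib and MIMEMultipart imports above suggest the bot mails itself a report; a hypothetical sketch against Gmail's SMTP server (the account, app password and recipient are placeholders):

from email.mime.text import MIMEText

msg = MIMEMultipart()
msg['Subject'] = 'Instagram bot report ' + strftime('%Y-%m-%d %H:%M')
msg['From'] = 'your_email@gmail.com'
msg['To'] = 'your_email@gmail.com'
msg.attach(MIMEText('Followed {} users, liked {} posts.'.format(followed, likes)))

server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('your_email@gmail.com', 'your_app_password')  # use an app password, not the account password
server.send_message(msg)
server.quit()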