Build a quantitative investment system with the following modules:
- quantitative stock screening
- backtesting
- strategy development
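
The screeners below write each day's candidates into MySQL tables, and the intraday monitors read those tables back through the shared common module. A minimal sketch of that flow, assuming common.py (further down) is importable, a db8 credentials file exists, and a screener table such as flat_nb has already been populated:

from common import get_conn, get_max_date

con = get_conn("db8")
cur = con.cursor()
# Monitors pick up the latest screening run by date, mirroring
# obtain_list_of_db_tickers in the buy scripts below.
cur.execute("select distinct code from flat_nb where date = '%s'" % get_max_date())
print([row[0] for row in cur.fetchall()])
cur.close()
con.close()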
*/.ipynb_checkpoints/*
.ipynb_checkpoints/*
db
db8
sendsms.py
__pycache__/*
nohup.out
venv
*.swp
buy
sell
# Default ignored files
/workspace.xml
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/logicbelief-master-puwork.iml" filepath="$PROJECT_DIR$/.idea/logicbelief-master-puwork.iml" />
    </modules>
  </component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>
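# Screener: restrict stock_basic to the watch-list industries, require
# quarterly EBIT >= 100M, then print the 10 candidates with the lowest
# accounts receivable / accounts payable ratio for the period.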
from common import *

candidates = set()
data = pro.query('stock_basic', exchange='', list_status='L', fields='ts_code, industry')
with open("watch_industries.txt") as f:
    content = f.readlines()
industries = set()
for x in content:
    industries.add(x.strip())
for index, basic in data.iterrows():
    if basic['industry'] in industries:
        candidates.add(basic['ts_code'])

period = '20200331'
income = pro.income_vip(period=period)
net_profit_dict = {}
for index, cash in income.iterrows():
    code = cash['ts_code']
    if code not in candidates:
        continue
    if cash['ebit'] is None:
        continue
    ebit = float(cash['ebit'])
    if math.isnan(ebit) or ebit < 100000000:
        continue
    net_profit_dict[code] = ebit

data = pro.balancesheet_vip(period=period)
ratio_dict = {}
for index, balance in data.iterrows():
    code = balance['ts_code']
    if code in net_profit_dict:
        accounts_receiv = float(balance['accounts_receiv'])
        if math.isnan(accounts_receiv):
            accounts_receiv = 0
        acct_payable = float(balance['acct_payable'])
        if math.isnan(acct_payable):
            continue
        ratio_dict[code] = accounts_receiv / acct_payable

sorted_ratios = sorted(ratio_dict.items(), key=lambda d: d[1])
n = 0
for (k, v) in sorted_ratios:
    n += 1
    if n > 10:
        break
    print(k)
    print(v)
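# Intraday buy monitor (A-shares): polls minute data from easyquotation
# ("timekline"), buys candidates breaking above the last close * 1.015 on a
# volume surge, optionally pushes a WeChat signal, and logs into <model>_position.
# Invoked as: <script> <candidate_table> <model> <thread_count> [true|false]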
import tushare as ts
import warnings
import threading
import time
import sys

print(ts.__version__)
warnings.simplefilter(action='ignore', category=FutureWarning)
import easyquotation

quotation = easyquotation.use("timekline")
from send_wx import *
from common import *
from collections import defaultdict

con = get_conn("db8")
max_date = get_max_date()
print(max_date)

def obtain_list_of_db_tickers(table):
    cur = con.cursor()
    query_str = "select distinct code from %s where date = '%s'" % (table, max_date)
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [d[0] for d in data]

class BuyThread(threading.Thread):
    def __init__(self, index, thread_count, model, close_dicts, volume_dicts, is_signal):
        threading.Thread.__init__(self)
        self.index = index
        self.thread_count = thread_count
        self.position_table = "%s_position" % (model)
        self.model = model
        self.close_dicts = close_dicts
        self.volume_dicts = volume_dicts
        self.is_signal = is_signal

    def run(self):
        print("Starting thread %d \n" % self.index)
        buy(self.index, self.thread_count, self.model, self.position_table, self.close_dicts, self.volume_dicts, self.is_signal)

def buy(idx, thread_count, model, position_table, close_dicts, volume_dicts, is_signal):
    count_dicts = defaultdict(int)
    buy_dicts = defaultdict(int)
    buy_queries = []
    bought = set()
    while not is_after_hour():
        if not is_in_trade_time():
            time.sleep(60)
            continue
        i = -1
        for stock in stocks:
            i += 1
            if (i % thread_count) != idx:
                continue
            if stock in bought:
                continue
            code = stock[:-3]
            sc = stock[-2:].lower()
            try:
                key = "%s%s.js" % (sc, code)
                obj = quotation.real([code])[key]['time_data']
                total_len = len(obj)
                # Only scan the minutes that arrived since the last poll.
                for cur_len in range(count_dicts[stock] - 1, total_len):
                    if cur_len < 3:
                        continue
                    cur_time = obj[cur_len][0]
                    if buy_dicts[stock] == int(cur_time):
                        continue
                    cur_price = float(obj[cur_len][1])
                    cur_volume = float(obj[cur_len][2])
                    last_volume = float(obj[cur_len - 1][2])
                    sec_volume = cur_volume - last_volume
                    avg_vol = cur_volume / (1 + cur_len)
                    last_avg_vol = last_volume / cur_len
                    pre_3_price = float(obj[cur_len - 3][1])
                    if cur_price < close_dicts[stock]:
                        continue
                    price_chg_now = cur_price / pre_3_price
                    if price_chg_now < 1.011:
                        continue
                    day_volume_ratio = avg_vol * 240 / volume_dicts[stock]
                    sec_volume_ratio = sec_volume / last_avg_vol
                    print("%d, %s %d %s price_up_chg: %f ,day_volume_ratio: %f ,sec_volume_ratio: %f "
                          % (idx, code, cur_len, cur_time, price_chg_now, day_volume_ratio, sec_volume_ratio))
                    if price_chg_now < 1.04 and \
                            day_volume_ratio < 6 and \
                            sec_volume_ratio < 5 and \
                            (day_volume_ratio < 2 or sec_volume_ratio < 1.8):
                        continue
                    if stock not in bought:
                        buy_num = calc_buy_num(cur_price)
                        content = format('%s model %s buy %s %d shares, price: %.2f' % (model, cur_time, stock, buy_num, cur_price))
                        if is_signal:
                            send_to_wx(content)
                        print(content)
                        buy_queries.append(build_table_buy_query(position_table, stock, cur_price, buy_num,
                                                                 day_volume_ratio, sec_volume_ratio, price_chg_now))
                        bought.add(stock)
                        buy_dicts[stock] = int(cur_time)
                count_dicts[stock] = total_len
            except Exception as e:
                print(e)
                print('%s error' % stock)
        time.sleep(10)
    if len(buy_queries) > 0:
        time.sleep(90 + idx * 5)
        tcon = get_conn("db8")
        execute_queries(tcon, buy_queries)

if __name__ == '__main__':
    if not is_today_trading():
        sys.exit("not trading day")
    candidate_table = sys.argv[1]
    model = sys.argv[2]
    thread_count = int(sys.argv[3])
    close_dicts = defaultdict(float)
    volume_dicts = defaultdict(float)
    is_signal = True
    if len(sys.argv) >= 5:
        is_signal = ('true' == sys.argv[4].lower())
    stocks = set()
    for table in [candidate_table]:
        for stock in obtain_list_of_db_tickers(table):
            stocks.add(stock)
    print("total stocks len: %d" % len(stocks))
    if len(stocks) == 0:
        sys.exit("Empty in buy list")
    for stock in stocks:
        df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date='20200616', end_date='20300318')
        if df is None or len(df) < 1:
            continue
        close_dicts[stock] = float(df.head(1)['close'].max()) * 1.015
        volume_dicts[stock] = float(df.head(1)['vol'].max())
    print(close_dicts)
    print(volume_dicts)
    threads = []
    for idx in range(0, thread_count):
        thread = BuyThread(idx, thread_count, model, close_dicts, volume_dicts, is_signal)
        thread.start()
        threads.append(thread)
    for t in threads:
        t.join()
    con.close()
    print("Done")
import tushare as ts
import warnings
import threading
import time

print(ts.__version__)
warnings.simplefilter(action='ignore', category=FutureWarning)
import easyquotation

quotation = easyquotation.use("hkquote")
data = quotation.real(['00700'])
print(data)
import warnings
import threading
import os.path
import pandas as pd
import time

warnings.simplefilter(action='ignore', category=FutureWarning)
from send_wx import *
from common import *
from iex import *

con = get_conn()
max_date = get_max_date()
print(max_date)

def obtain_list_of_db_tickers(table):
    cur = con.cursor()
    query_str = "select distinct code from %s where date = '%s'" % (table, max_date)
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [d[0] for d in data]

from collections import defaultdict

close_dicts = defaultdict(float)
volume_dicts = defaultdict(float)
stocks = set()
for table in ['flat_us', 'reverse_us', 'vcpim_us']:
    for stock in obtain_list_of_db_tickers(table):
        stocks.add(stock)
print("total stocks len: %d" % len(stocks))

SOURCE_DIR = '/home/ruoang/ustocks/'
for stock in stocks:
    path = format("%s/%s.csv" % (SOURCE_DIR, stock))
    if not os.path.isfile(path):
        continue
    try:
        df = pd.read_csv(path)
        if df is None or len(df) < 1:
            continue
        close_dicts[stock] = float(df.tail(1)['close'].max())
        volume_dicts[stock] = float(df.tail(1)['volume'].max())
    except Exception as e:
        print(e)
        print('%s error' % stock)
print(close_dicts)
print(volume_dicts)

THREAD_COUNT = 1

class BuyThread(threading.Thread):
    def __init__(self, index):
        threading.Thread.__init__(self)
        self.index = index

    def run(self):
        print("Starting thread %d \n" % self.index)
        buy(self.index)

def buy(idx):
    buy_dicts = defaultdict(int)
    buy_queries = []
    while is_us_trading_time():
        i = -1
        for stock in stocks:
            i += 1
            if (i % THREAD_COUNT) != idx:
                continue
            try:
                obj = get_intraday_prices(stock, 4)
                total_len = len(obj)
                if total_len < 4:
                    continue
                if obj[3]['close'] is None or obj[0]['close'] is None:
                    continue
                cur_time = obj[3]['minute']
                cur_price = float(obj[3]['close'])
                pre_3_price = float(obj[0]['close'])
                if buy_dicts[stock] == cur_time:
                    continue
                print("%d, %s %s price_up_chg: %f" % (idx, stock, cur_time, cur_price / pre_3_price))
                if cur_price < close_dicts[stock]:
                    continue
                if cur_price / pre_3_price < 1.01:
                    continue
                buy_num = calc_buy_us_num(cur_price)
                content = format('%s buy %s %d shares, price: %.2f' % (cur_time, stock, buy_num, cur_price))
                send_to_wx(content)
                buy_queries.append(build_us_buy_query(stock, cur_price, buy_num, cur_price / pre_3_price))
                buy_dicts[stock] = cur_time
            except Exception as e:
                print(e)
        time.sleep(45)
    print("Time is Out")
    if len(buy_queries) > 0:
        time.sleep(90 + idx * 5)
        tcon = get_conn()
        execute_queries(tcon, buy_queries)

threads = []
for idx in range(0, THREAD_COUNT):
    thread = BuyThread(idx)
    thread.start()
    threads.append(thread)
for t in threads:
    t.join()
con.close()
print("Done")
from common import *
import warnings

warnings.simplefilter(action='ignore')
stocks = pro.stock_basic(exchange_id='', fields='ts_code, name, list_status')
to_delete_queries = []
for row in stocks.itertuples():
    # row[1] = ts_code, row[2] = name, row[3] = list_status
    if 'ST' in row[2] or 'L' != row[3]:
        update_str = "DELETE from stock_basic where stock = '%s'" % (row[1])
        to_delete_queries.append(update_str)
con = get_conn("db8")
execute_queries(con, to_delete_queries)
con = get_conn("db")
execute_queries(con, to_delete_queries)
print("Done")
from datetime import datetime, timedelta
import tushare as ts
import warnings
import math

warnings.simplefilter(action='ignore', category=FutureWarning)
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04')
pro = ts.pro_api()
import MySQLdb as mdb

def is_in_trade_time():
    now = datetime.now()
    return is_trade_time(now)

def is_trade_time(now):
    # A-share session: 09:30-11:30 and 13:00-15:00.
    if now.hour < 9:
        return False
    if now.hour >= 15:
        return False
    if now.hour == 9 and now.minute < 30:
        return False
    if now.hour == 11 and now.minute > 30:
        return False
    if now.hour == 12:
        return False
    return True

def is_after_hour():
    now = datetime.now()
    return now.hour >= 15

def is_us_trading_time():
    now = datetime.now()
    return is_now_us_trading_time(now)

def is_now_us_trading_time(now):
    # US regular session expressed in Beijing time: 21:30 through 04:00.
    return (now.hour == 21 and now.minute >= 30) or now.hour >= 22 or now.hour < 4

def get_max_date():
    today = datetime.today()
    return get_last_date(today)

def get_last_trade_date():
    d = datetime.today()
    today = d.strftime("%Y%m%d")
    last = (d - timedelta(days=15)).strftime("%Y%m%d")
    df = pro.query('trade_cal', start_date=last, end_date=today).query("is_open==1")
    if d.weekday() < 5:
        return df.tail(2)['cal_date'].min()
    return df.tail(1)['cal_date'].min()

def get_last_date(d):
    today = d.strftime("%Y%m%d")
    last = (d - timedelta(days=15)).strftime("%Y%m%d")
    df = pro.query('trade_cal', start_date=last, end_date=today).query("is_open==1")
    min_date = df.tail(2)['cal_date'].min()
    return datetime.strptime(min_date, "%Y%m%d").strftime("%Y-%m-%d")

def is_today_trading():
    today = datetime.today().strftime("%Y%m%d")
    df = pro.query('trade_cal', start_date=today, end_date=today).query("is_open==1")
    return len(df) > 0

def execute_query(con, query):
    execute_queries(con, [query])

def execute_queries(con, queries):
    try:
        cur = con.cursor()
        for query in queries:
            print(query)
            cur.execute(query)
        cur.close()
        con.commit()
        con.close()
    except Exception as e:
        print(e)

def calc_buy_num(price, money=500):
    # A-share round lots are 100 shares; with the default money=500 this
    # targets roughly money * 100 yuan per position.
    num = math.floor(money / price) * 100
    return num

def calc_buy_us_num(price, money=10000):
    num = math.floor(money / price)
    return num

def build_vcp_buy_query(stock, price, num, day_volume_ratio, sec_volume_ratio, up_pct):
    return "insert ignore vcp_position set code = '%s', buy_date=CURDATE(), price = '%s', volume = '%s', dvr = '%f', svr ='%f', up_pct = '%f' " % (stock, price, num, day_volume_ratio, sec_volume_ratio, up_pct)

def build_buy_query(stock, price, num, day_volume_ratio, sec_volume_ratio, up_pct):
    return "insert ignore position set code = '%s', buy_date=CURDATE(), price = '%s', volume = '%s', dvr = '%f', svr ='%f', up_pct = '%f' " % (stock, price, num, day_volume_ratio, sec_volume_ratio, up_pct)

def build_concept_buy_query(stock, price, num, day_volume_ratio, sec_volume_ratio, up_pct):
    return "insert ignore concept_position set code = '%s', buy_date=CURDATE(), price = '%s', volume = '%s', dvr = '%f', svr ='%f', up_pct = '%f' " % (stock, price, num, day_volume_ratio, sec_volume_ratio, up_pct)

def build_table_buy_query(table, stock, price, num, day_volume_ratio, sec_volume_ratio, up_pct):
    return "insert ignore %s set code = '%s', buy_date=CURDATE(), price = '%s', volume = '%s', dvr = '%f', svr ='%f', up_pct = '%f' " % (table, stock, price, num, day_volume_ratio, sec_volume_ratio, up_pct)

def build_us_buy_query(stock, price, num, up_pct):
    return "insert ignore us_position set code = '%s', buy_date=CURDATE(), price = '%s', volume = '%s', up_pct = '%f' " % (stock, price, num, up_pct)

def build_sell_query(position_table, now, volume, max_stop, stock, buy_date):
    return "update %s set is_sold=1, sell_date=CURDATE(), sell_price = '%s', sell_volume = '%s', max_profit=%.2f where code='%s' and buy_date='%s' " % (
        position_table, now, volume, max_stop * 100, stock, buy_date)

def obtain_stock_us(con):
    cur = con.cursor()
    query_str = "select stock from stock_us limit 0, 600"
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [d[0] for d in data]

def obtain_stock_basic(con):
    cur = con.cursor()
    query_str = "select stock, name from stock_basic where total_mv > 500000"
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [(d[0], d[1]) for d in data]

def obtain_stocks_above_mv(con, mv=500000):
    cur = con.cursor()
    query_str = "select stock, name from stock_basic where total_mv > %d" % mv
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [(d[0], d[1]) for d in data]

class Stock:
    def __init__(self, stock, days):
        self.stock = stock
        self.days = days
        self.days_no = 0

    def __str__(self):
        return "stock: %s ,days: %s, days_no: %d" % (self.stock, self.days, self.days_no)

def get_conn(name="db"):
    # Connection settings live in a four-line file: host, user, password, db name.
    with open(name) as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    db_host = content[0]
    db_user = content[1]
    db_pass = content[2]
    db_name = content[3]
    con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True)
    return con

def format_code(code):
    code = code.strip()
    if code.startswith("6"):
        code += ".SH"
    else:
        code += ".SZ"
    return code

if __name__ == '__main__':
    print(is_in_trade_time())
    print(is_trade_time(datetime.strptime("09:14:32", "%H:%M:%S")))
    print(is_trade_time(datetime.strptime("09:15:32", "%H:%M:%S")))
    print(is_trade_time(datetime.strptime("09:30:01", "%H:%M:%S")))
    print(is_trade_time(datetime.strptime("11:30:01", "%H:%M:%S")))
    print(is_trade_time(datetime.strptime("11:31:01", "%H:%M:%S")))
    print(is_trade_time(datetime.strptime("12:31:01", "%H:%M:%S")))
    print(is_trade_time(datetime.strptime("13:00:01", "%H:%M:%S")))
    print(is_trade_time(datetime.strptime("14:59:01", "%H:%M:%S")))
    print(is_trade_time(datetime.strptime("15:00:01", "%H:%M:%S")))
    print(is_after_hour())
    print(get_max_date())
    print(get_last_date(datetime.strptime("2020-03-23", "%Y-%m-%d")))
    print(get_last_date(datetime.strptime("2020-03-24", "%Y-%m-%d")))
    print(get_last_date(datetime.strptime("2020-03-22", "%Y-%m-%d")))
    print(calc_buy_num(3.2))
    print(calc_buy_num(5))
    print(calc_buy_num(48))
    print(build_buy_query("300000.SZ", 3.41, 5000, 1.5, 1.8, 1.235))
    print(format("take profit, current profit: %.2f ,max profit: %.2f" % (4.555, 5.2222)))
    print(build_sell_query("hot", 10.2, 111000, 0.156, "000001.SZ", "2020-05-22"))
    print(is_us_trading_time())
    print(is_now_us_trading_time(datetime.strptime("23:32:01", "%H:%M:%S")))
    print(is_now_us_trading_time(datetime.strptime("00:31:01", "%H:%M:%S")))
    print(is_now_us_trading_time(datetime.strptime("01:31:01", "%H:%M:%S")))
    print(is_now_us_trading_time(datetime.strptime("02:00:01", "%H:%M:%S")))
    print(is_now_us_trading_time(datetime.strptime("03:59:01", "%H:%M:%S")))
    print(is_now_us_trading_time(datetime.strptime("04:00:01", "%H:%M:%S")))
    print(is_today_trading())
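# Intraday buy monitor for the top_concept candidates: same minute-data flow
# as the generic monitor above, but it only takes 1.4%-2.7% three-minute moves
# and writes into concept_position.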
import tushare as ts
import warnings
import threading
import time

print(ts.__version__)
warnings.simplefilter(action='ignore', category=FutureWarning)
import easyquotation

quotation = easyquotation.use("timekline")
from common import *

con = get_conn("db8")
max_date = get_max_date()
print(max_date)

def obtain_list_of_db_tickers(table):
    cur = con.cursor()
    query_str = "select distinct code from %s where date = '%s'" % (table, max_date)
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [d[0] for d in data]

from collections import defaultdict

close_dicts = defaultdict(float)
volume_dicts = defaultdict(float)
stocks = set()
for table in ['top_concept']:
    for stock in obtain_list_of_db_tickers(table):
        stocks.add(stock)
print("total stocks len: %d" % len(stocks))
for stock in stocks:
    df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date='20200316', end_date='20300318')
    if df is None or len(df) < 1:
        continue
    close_dicts[stock] = float(df.head(1)['close'].max()) * 1.015
    volume_dicts[stock] = float(df.head(1)['vol'].max())
print(close_dicts)
print(volume_dicts)

THREAD_COUNT = 2

class BuyThread(threading.Thread):
    def __init__(self, index):
        threading.Thread.__init__(self)
        self.index = index

    def run(self):
        print("Starting thread %d \n" % self.index)
        buy(self.index)

def buy(idx):
    count_dicts = defaultdict(int)
    buy_dicts = defaultdict(int)
    buy_queries = []
    bought = set()
    while not is_after_hour():
        if not is_in_trade_time():
            time.sleep(60)
            continue
        i = -1
        for stock in stocks:
            i += 1
            if (i % THREAD_COUNT) != idx:
                continue
            if stock in bought:
                continue
            code = stock[:-3]
            sc = stock[-2:].lower()
            try:
                key = "%s%s.js" % (sc, code)
                obj = quotation.real([code])[key]['time_data']
                total_len = len(obj)
                for cur_len in range(count_dicts[stock] - 1, total_len):
                    if cur_len < 3:
                        continue
                    cur_time = obj[cur_len][0]
                    if buy_dicts[stock] == int(cur_time):
                        continue
                    cur_price = float(obj[cur_len][1])
                    cur_volume = float(obj[cur_len][2])
                    last_volume = float(obj[cur_len - 1][2])
                    sec_volume = cur_volume - last_volume
                    avg_vol = cur_volume / (1 + cur_len)
                    last_avg_vol = last_volume / cur_len
                    pre_3_price = float(obj[cur_len - 3][1])
                    if cur_price < close_dicts[stock]:
                        continue
                    price_chg_now = cur_price / pre_3_price
                    if price_chg_now < 1.014 or price_chg_now >= 1.027:
                        continue
                    day_volume_ratio = avg_vol * 240 / volume_dicts[stock]
                    sec_volume_ratio = sec_volume / last_avg_vol
                    print("%d, %s %d %s price_up_chg: %f ,day_volume_ratio: %f ,sec_volume_ratio: %f "
                          % (idx, code, cur_len, cur_time, price_chg_now, day_volume_ratio, sec_volume_ratio))
                    if price_chg_now < 1.04 and \
                            day_volume_ratio < 6 and \
                            sec_volume_ratio < 5 and \
                            (day_volume_ratio < 2 or sec_volume_ratio < 1.8):
                        continue
                    if stock not in bought:
                        buy_num = calc_buy_num(cur_price)
                        content = format('concept model %s buy %s %d shares, price: %.2f' % (cur_time, stock, buy_num, cur_price))
                        print(content)
                        buy_queries.append(build_concept_buy_query(stock, cur_price, buy_num, day_volume_ratio,
                                                                   sec_volume_ratio, price_chg_now))
                        bought.add(stock)
                        buy_dicts[stock] = int(cur_time)
                count_dicts[stock] = total_len
            except Exception as e:
                print(e)
                print('%s error' % stock)
        time.sleep(10)
    if len(buy_queries) > 0:
        time.sleep(90 + idx * 5)
        tcon = get_conn("db8")
        execute_queries(tcon, buy_queries)

if __name__ == '__main__':
    import sys
    if not is_today_trading():
        sys.exit("not trading day")
    threads = []
    for idx in range(0, THREAD_COUNT):
        thread = BuyThread(idx)
        thread.start()
        threads.append(thread)
    for t in threads:
        t.join()
    con.close()
    print("Done")
from datetime import date
import warnings
import easyquotation
import time

warnings.simplefilter(action='ignore')
from send_wx import *
from common import *

quotation = easyquotation.use('qq')
end_date = date.today()
end_date = end_date.strftime('%Y%m%d')
con = get_conn("db8")

def obtain_position():
    cur = con.cursor()
    query_str = "select code, buy_date, price, volume from concept_position where is_sold = 0"
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [(d[0], d[1], d[2], d[3]) for d in data]

from collections import defaultdict

# Highest price reached since purchase (up to yesterday), per position.
high_dicts = defaultdict(float)
positions = obtain_position()
for (stock, buy_date, price, volume) in positions:
    start_date = (buy_date + timedelta(days=1)).strftime('%Y%m%d')
    end_date = date.today() + timedelta(days=-1)
    end_date = end_date.strftime('%Y%m%d')
    try:
        df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date=start_date, end_date=end_date)
        if df is None or len(df) < 1:
            continue
        high_dicts[stock] = float(df['high'].max())
    except Exception as e:
        print(e)
        print('obtain %s error' % (stock))

sold = set()
sell_queries = []
while not is_after_hour():
    if not is_in_trade_time():
        time.sleep(60)
        continue
    for (stock, buy_date, price, volume) in positions:
        if stock in sold:
            continue
        code = stock[0:6]
        try:
            obj = quotation.real(code)
            now = obj[code]["now"]
            stop = (now - price) / price
            is_sell = False
            msg = ""
            if stop <= -0.03:
                is_sell = True
                msg = format("concept model stop loss, loss: %.2f, price: %.2f" % (stop * 100, now))
            elif stop > 0:
                high = obj[code]["high"]
                today_max_stop = (high - price) / price
                if high_dicts[stock] > high:
                    high = high_dicts[stock]
                max_stop = (high - price) / price
                if max_stop > 0.2:
                    if stop <= 0.8 * max_stop:
                        is_sell = True
                elif max_stop > 0.1:
                    if max_stop - stop > 0.05:
                        is_sell = True
                elif max_stop > 0.06:
                    if max_stop - stop > 0.03:
                        is_sell = True
                elif today_max_stop - stop >= 0.03:
                    is_sell = True
                if is_sell:
                    msg = format("concept model take profit, current profit: %.2f ,max profit: %.2f, price: %.2f" % (stop * 100, max_stop * 100, now))
            if is_sell:
                content = format("%s: %s" % (code, msg))
                #send_to_wx(content)
                print(content)
                sold.add(stock)
                sell_query = "update concept_position set is_sold=1, sell_date=CURDATE(), sell_price = '%s', sell_volume = '%s' where code='%s' " % (
                    now, volume, stock)
                sell_queries.append(sell_query)
        except Exception as e:
            print(e)
            print('%s error' % (stock))
    time.sleep(30)

con.close()
con = get_conn("db8")
execute_queries(con, sell_queries)
print("Done")
import tushare as ts
import warnings
import threading
import time
import sys

print(ts.__version__)
warnings.simplefilter(action='ignore', category=FutureWarning)
import easyquotation

quotation = easyquotation.use("timekline")
from send_wx import *
from common import *
from collections import defaultdict

con = get_conn("db8")
max_date = get_max_date()
print(max_date)

def obtain_list_of_db_tickers(table):
    cur = con.cursor()
    query_str = "select distinct code from %s where date = '%s'" % (table, max_date)
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [d[0] for d in data]

class BuyThread(threading.Thread):
    def __init__(self, index, thread_count, model, close_dicts, volume_dicts, is_signal):
        threading.Thread.__init__(self)
        self.index = index
        self.thread_count = thread_count
        self.position_table = "%s_position" % (model)
        self.model = model
        self.close_dicts = close_dicts
        self.volume_dicts = volume_dicts
        self.is_signal = is_signal

    def run(self):
        print("Starting thread %d \n" % self.index)
        buy(self.index, self.thread_count, self.model, self.position_table, self.close_dicts, self.volume_dicts, self.is_signal)

def buy(idx, thread_count, model, position_table, close_dicts, volume_dicts, is_signal):
    count_dicts = defaultdict(int)
    buy_dicts = defaultdict(int)
    buy_queries = []
    bought = set()
    while not is_after_hour():
        if not is_in_trade_time():
            time.sleep(60)
            continue
        i = -1
        for stock in stocks:
            i += 1
            if (i % thread_count) != idx:
                continue
            if stock in bought:
                continue
            code = stock[:-3]
            sc = stock[-2:].lower()
            try:
                key = "%s%s.js" % (sc, code)
                obj = quotation.real([code])[key]['time_data']
                total_len = len(obj)
                for cur_len in range(count_dicts[stock] - 1, total_len):
                    if cur_len < 3:
                        continue
                    cur_time = obj[cur_len][0]
                    if buy_dicts[stock] == int(cur_time):
                        continue
                    cur_price = float(obj[cur_len][1])
                    cur_volume = float(obj[cur_len][2])
                    last_volume = float(obj[cur_len - 1][2])
                    sec_volume = cur_volume - last_volume
                    avg_vol = cur_volume / (1 + cur_len)
                    last_avg_vol = last_volume / cur_len
                    pre_3_price = float(obj[cur_len - 3][1])
                    if cur_price < close_dicts[stock]:
                        continue
                    price_chg_now = cur_price / pre_3_price
                    if price_chg_now < 1.02:
                        continue
                    day_volume_ratio = avg_vol * 240 / volume_dicts[stock]
                    sec_volume_ratio = sec_volume / last_avg_vol
                    print("%d, %s %d %s price_up_chg: %f ,day_volume_ratio: %f ,sec_volume_ratio: %f "
                          % (idx, code, cur_len, cur_time, price_chg_now, day_volume_ratio, sec_volume_ratio))
                    if price_chg_now < 1.04 and \
                            day_volume_ratio < 6 and \
                            sec_volume_ratio < 5 and \
                            (day_volume_ratio < 2 or sec_volume_ratio < 1.8):
                        continue
                    if stock not in bought:
                        buy_num = calc_buy_num(cur_price)
                        content = format('%s model %s buy %s %d shares, price: %.2f' % (model, cur_time, stock, buy_num, cur_price))
                        if is_signal:
                            send_to_wx(content)
                        print(content)
                        buy_queries.append(build_table_buy_query(position_table, stock, cur_price, buy_num,
                                                                 day_volume_ratio, sec_volume_ratio, price_chg_now))
                        bought.add(stock)
                        buy_dicts[stock] = int(cur_time)
                count_dicts[stock] = total_len
            except Exception as e:
                print(e)
                print('%s error' % stock)
        time.sleep(10)
    if len(buy_queries) > 0:
        time.sleep(90 + idx * 5)
        tcon = get_conn("db8")
        execute_queries(tcon, buy_queries)

if __name__ == '__main__':
    if not is_today_trading():
        sys.exit("not trading day")
    candidate_table = sys.argv[1]
    model = sys.argv[2]
    thread_count = int(sys.argv[3])
    close_dicts = defaultdict(float)
    volume_dicts = defaultdict(float)
    is_signal = True
    if len(sys.argv) >= 5:
        is_signal = ('true' == sys.argv[4].lower())
    stocks = set()
    for table in [candidate_table]:
        for stock in obtain_list_of_db_tickers(table):
            stocks.add(stock)
    print("total stocks len: %d" % len(stocks))
    if len(stocks) == 0:
        sys.exit("Empty in buy list")
    for stock in stocks:
        df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date='20200316', end_date='20300318')
        if df is None or len(df) < 1:
            continue
        close_dicts[stock] = float(df.head(1)['close'].max()) * 1.015
        volume_dicts[stock] = float(df.head(1)['vol'].max())
    print(close_dicts)
    print(volume_dicts)
    threads = []
    for idx in range(0, thread_count):
        thread = BuyThread(idx, thread_count, model, close_dicts, volume_dicts, is_signal)
        thread.start()
        threads.append(thread)
    for t in threads:
        t.join()
    con.close()
    print("Done")
import os.path
import pandas as pd
from common import *

warnings.simplefilter(action='ignore', category=FutureWarning)

def obtain_stock_basic_test():
    return [("603713.SH", "")]

def days_above(df, close):
    if df is None:
        return 0
    days = 0
    for idx in df.index:
        high = df['high'][idx]
        if close < high:
            break
        days = days + 1
    return days

def ma_days_above(df, ma='ma5', limit=5):
    # df is newest-first: count days on which the MA rose vs the prior day.
    if df is None:
        return False
    days = 0
    last = 100000000
    for idx in df.index:
        cur = df[ma][idx]
        if cur < last:
            days = days + 1
            if days >= limit:
                return True
        last = cur
    return days >= limit

def is_cross(open, close, high, low):
    # A "cross" candle: open and close within 0.8%, total range under 6%.
    if abs(open / close - 1) > 0.008:
        return False
    return (high / low - 1) < 0.06

count = 0
con = get_conn("db8")
stocks = obtain_stock_basic(con)
con.close()
stock_len = len(stocks)
from collections import defaultdict

hit_dict = defaultdict(list)
SOURCE_DIR = '/home/ruoang/astocks/'
for (stock, name) in stocks:
    if stock.startswith('688'):
        continue
    count += 1
    if count % 100 == 0:
        print('finish %d/%d' % (count, stock_len))
    path = format("%s/%s.csv" % (SOURCE_DIR, stock))
    if not os.path.isfile(path):
        continue
    try:
        dfo = pd.read_csv(path, header=0, index_col=0)
    except Exception as e:
        print(e)
        print('%s error' % stock)
        continue
    for i in range(0, 1):
        df = dfo[i:]
        last = df.head(1)
        if last is None:
            continue
        last_close = last['close'].max()
        last_high = last['high'].max()
        last_low = last['low'].max()
        last_open = last['open'].max()
        last_vol = last['vol'].max()
        last_150 = last['ma150'].max()
        if last_150 is None:
            continue
        last_50 = last['ma50'].max()
        last_5 = last['ma5'].max()
        lastv_5 = last['ma_v_5'].max()
        last_pct_chg = last['pct_chg'].max()
        if last_pct_chg > 4 or last_5 < last_50:
            continue
        if not is_cross(last_open, last_close, last_high, last_low):
            continue
        if not ma_days_above(df.head(7)):
            continue
        if last_vol / lastv_5 > 0.6:
            continue
        d = int(last['trade_date'])
        hit_dict[d].append(Stock(stock, 0))
        print('%s %s' % (stock, name))

con = get_conn("db8")
for (k, v) in hit_dict.items():
    sorted_days = sorted(v, key=lambda item: item.days)
    n = 0
    for s in reversed(sorted_days):
        if n >= 64:
            break
        print(str(s))
        column_str = """date, code, value"""
        final_str = "INSERT IGNORE INTO top_cross (%s) VALUES (%d, '%s', %d)" % \
                    (column_str, k, s.stock, s.days)
        n += 1
        cur = con.cursor()
        print(final_str)
        cur.execute(final_str)
        con.commit()
con.commit()
con.close()
print("Done")
import pandas as pd
from datetime import date
from common import *

print(ts.__version__)
warnings.simplefilter(action='ignore', category=FutureWarning)

def position_profit():
    cur = con.cursor()
    query_str = "select code, profit, profit_pct, is_sold, sell_date from position"
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [(d[0], d[1], d[2], d[3], d[4]) for d in data]

date_today = date.today().strftime('%Y-%m-%d')
con = get_conn()
position_pro = position_profit()
df = pd.DataFrame(position_pro, columns=['stock', 'profit', 'profit_pct', 'is_sold', 'sell_date'])
print(df.shape[0])
daily_profit = df[df['is_sold'] == 0].sum()['profit'] + \
               df[(df['is_sold'] == 1) & (df['sell_date'] == date_today)].sum()['profit']
daily_profit_pct = df[df['is_sold'] == 0].sum()['profit_pct'] + \
                   df[(df['is_sold'] == 1) & (df['sell_date'] == date_today)].sum()['profit_pct']
win_cnt = df[(df['is_sold'] == 0) & (df['profit'] > 0)].shape[0] + \
          df[(df['is_sold'] == 1) & (df['sell_date'] == date_today) & (df['profit'] > 0)].shape[0]
loss_cnt = df[(df['is_sold'] == 0) & (df['profit'] < 0)].shape[0] + \
           df[(df['is_sold'] == 1) & (df['sell_date'] == date_today) & (df['profit'] < 0)].shape[0]
print(daily_profit)
print(daily_profit_pct)
print(win_cnt)
print(loss_cnt)
con.close()
print("Done")
from common import *

warnings.simplefilter(action='ignore')
con = get_conn("db8")

def obtain_stock_basic():
    cur = con.cursor()
    query_str = "select distinct stock from stock_basic where total_mv > 100000"
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [d[0] for d in data]

start = (datetime.today() - timedelta(days=700)).strftime("%Y%m%d")
print(start)
end = datetime(2030, 1, 1).strftime("%Y%m%d")
for stock in obtain_stock_basic():
    try:
        df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date=start, end_date=end, ma=[5, 20, 50, 150, 200])
        df = df.head(300)
        df.to_csv("/home/ruoang/astocks/" + stock + '.csv')
        print('%s finish' % (stock))
    except Exception as e:
        print(e)
        print('%s error' % (stock))
con.close()
print("Done")
from common import *

warnings.simplefilter(action='ignore')
start = (datetime.today() - timedelta(days=500)).strftime("%Y%m%d")
print(start)
end = datetime(2030, 1, 1).strftime("%Y%m%d")
basic = pro.hk_basic()
for index, row in basic.iterrows():
    stock = row['ts_code']
    try:
        # hk_daily returns plain daily bars; it has no ma option (that belongs
        # to ts.pro_bar), so moving averages are not requested here.
        df = pro.hk_daily(ts_code=stock, start_date=start, end_date=end)
        df = df.head(300)
        df.to_csv("/home/ruoang/hstocks/" + stock + '.csv')
        print('%s finish' % (stock))
    except Exception as e:
        print(e)
        print('%s error' % (stock))
print("Done")
import pandas_datareader.data as web
from common import *
import os

warnings.simplefilter(action='ignore')
con = get_conn()
start = datetime(2017, 1, 1)
print(start)
end = datetime(2030, 1, 1)
SOURCE_DIR = '/home/ruoang/ustocks/'
for stock in obtain_stock_us(con):
    try:
        path = format("%s/%s.csv" % (SOURCE_DIR, stock))
        if os.path.isfile(path):
            continue
        df = web.DataReader(stock, 'iex', start, end, access_key='pk_88d98ab1d0344ceb8184b898313a18cc')
        df.to_csv("/home/ruoang/ustocks/" + stock + '.csv', mode='a', header=True)
        print('%s finish' % (stock))
    except Exception as e:
        print(e)
        print('%s error' % (stock))
con.close()
print("Done")
import pandas_datareader.data as web
from common import *

warnings.simplefilter(action='ignore')
con = get_conn()
yesterday = datetime.today() - timedelta(days=1)
start = datetime(yesterday.year, yesterday.month, yesterday.day)
print(start)
end = datetime(2030, 1, 1)
for stock in obtain_stock_us(con):
    try:
        df = web.DataReader(stock, 'iex', start, end, access_key='pk_88d98ab1d0344ceb8184b898313a18cc')
        df.to_csv("/home/ruoang/ustocks/" + stock + '.csv', mode='a', header=False)
        print('%s finish' % (stock))
    except Exception as e:
        print(e)
        print('%s error' % (stock))
con.close()
print("Done")
import math
import numpy as np
import pandas as pd

def days_above(df, ma):
    # Count days in the window on which `ma` rose day-over-day.
    if df is None:
        return 0
    df = df.iloc[::-1]
    days = 0
    diff = df[ma] - df[ma].shift(1)
    for idx in reversed(diff.index):
        if math.isnan(diff[idx]) or diff[idx] < 0:
            continue
        days = days + 1
    return days

def up_days(df):
    if df is None:
        return 0
    return len(df[df.pct_chg > 0.0])

def close_up_days(df):
    # Days that closed in the upper half of their daily range.
    if df is None:
        return 0
    days = 0
    for idx in range(0, len(df)):
        row = df.iloc[idx]
        if row['close'] > (row['high'] + row['low']) / 2:
            days = days + 1
    return days

def up_down_v_ratio(df):
    if df is None:
        return 0
    up_volume = 0.0
    down_volume = 1.0
    for idx in range(0, len(df)):
        row = df.iloc[idx]
        if row['pct_chg'] >= 0.0:
            up_volume += row['vol']
        else:
            down_volume += row['vol']
    return up_volume / down_volume

def up_down_v_ratio_v2(df):
    # Average up-day volume over average down-day volume ('vol' column).
    if df is None:
        return 0
    up_volume = 0.0
    down_volume = 1.0
    up_cnt = 0
    down_cnt = 0
    for idx in range(0, len(df)):
        row = df.iloc[idx]
        if row['pct_chg'] >= 0.0:
            up_volume += row['vol']
            up_cnt += 1
        else:
            down_volume += row['vol']
            down_cnt += 1
    if up_cnt == 0:
        return 0
    if down_cnt == 0 or down_volume < 100:
        return 10000
    return (up_volume / up_cnt) / (down_volume / down_cnt)

def up_down_volume_ratio_v2(df):
    # Same as up_down_v_ratio_v2, for frames that use a 'volume' column.
    if df is None:
        return 0
    up_volume = 0.0
    down_volume = 1.0
    up_cnt = 0
    down_cnt = 0
    for idx in range(0, len(df)):
        row = df.iloc[idx]
        if row['pct_chg'] >= 0.0:
            up_volume += row['volume']
            up_cnt += 1
        else:
            down_volume += row['volume']
            down_cnt += 1
    if up_cnt == 0:
        return 0
    if down_cnt == 0 or down_volume < 100:
        return 10000
    return (up_volume / up_cnt) / (down_volume / down_cnt)

def up_down_pct_chg_ratio(df):
    if df is None:
        return 0
    up_price_chg = 0.0
    down_price_chg = 0.0
    up_cnt = 0
    down_cnt = 0
    for idx in range(0, len(df)):
        row = df.iloc[idx]
        if row['pct_chg'] >= 0.0:
            up_price_chg += row['pct_chg']
            up_cnt += 1
        else:
            down_price_chg -= row['pct_chg']
            down_cnt += 1
    if up_cnt == 0:
        return 0
    if down_cnt == 0 or down_price_chg < 0.1:
        return 1000
    return (up_price_chg / up_cnt) / (down_price_chg / down_cnt)

def avg_volatility(df):
    # Mean daily high/low range, in percent.
    if df is None:
        return 0
    volatility = 0.0
    for idx in range(0, len(df)):
        row = df.iloc[idx]
        volatility += (row['high'] / row['low']) - 1
    return volatility * 100 / len(df)

def avg_pct_chg(df):
    return df['pct_chg'].mean()

def days_small_above_large(df, ma_small, ma_large):
    # Count days where the small MA sits above the large MA and the gap widened.
    if df is None:
        return 0
    df = df.iloc[::-1]
    days = 0
    diff = df[ma_small] - df[ma_large]
    diff_diff = diff - diff.shift(1)
    for idx in reversed(diff_diff.index):
        if math.isnan(diff_diff[idx]) or diff_diff[idx] < 0 or diff[idx] < 0:
            continue
        days = days + 1
    return days

def max_drawback(df):
    # Maximum drawdown in percent, measured from running close highs.
    if df is None:
        return 0
    df['max2here'] = df['close'].expanding().max()
    df['dd2here'] = df['close'] / df['max2here']
    end_date, remains = tuple(df.sort_values(by=['dd2here']).iloc[0][['trade_date', 'dd2here']])
    return round((1 - remains) * 100, 2)

def max_profit(df):
    # Best contiguous sum of daily pct_chg (a Kadane-style scan over the bars).
    col_names = ['trade_date']
    maxseq = seq = pd.DataFrame(columns=col_names)
    start, end, sum_start = -1, -1, -1
    maxsum_, sum_ = 0, 0
    i = 0
    for x in df.itertuples():
        # In tushare bar frames x[2] is trade_date and x[9] is pct_chg.
        if math.isnan(x[9]):
            continue
        seq.loc[len(seq)] = x[2]
        sum_ += x[9]
        if maxsum_ < sum_:
            maxseq = seq
            maxsum_ = sum_
            start, end = sum_start, i
        elif sum_ < 0:
            seq = pd.DataFrame(columns=col_names)
            sum_ = 0
            sum_start = i
        i += 1
    return round(maxsum_, 2)

def ma(data, n=10, val_name="close"):
    '''
    Moving Average.

    Parameters
    ----------
    data : pandas.DataFrame
        stock data, e.g. as returned by get_h_data
    n : int
        window length; the time unit follows `data`
    val_name : string
        column to average, defaults to the "close" price

    Returns
    -------
    numpy.ndarray
        the moving average series
    '''
    values = []
    MA = []
    for index, row in data.iterrows():
        values.append(row[val_name])
        if len(values) > n:
            # Keep at most the last n values in the window.
            del values[0]
        MA.append(np.average(values))
    return np.asarray(MA)

def sma(data, column, n=10, val_name="close"):
    ma = pd.Series(data[val_name].rolling(window=n).mean(), name=column)
    data = data.join(ma)
    return data
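# Screener: first limit-up days (pct_chg >= 9.9) on 2-3x volume after a quiet
# 5-day base; the top 3 per day, scored by volume expansion over base noise,
# go into first_zt5.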
from common import *
import MySQLdb as mdb
import math

with open("db8") as f:
    content = f.readlines()
content = [x.strip() for x in content]
db_host = content[0]
db_user = content[1]
db_pass = content[2]
db_name = content[3]
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True)

def obtain_stock_basic():
    cur = con.cursor()
    query_str = "select stock, name from stock_basic where total_mv > 200000"
    cur.execute(query_str)
    data = cur.fetchall()
    cur.close()
    return [(d[0], d[1]) for d in data]

def obtain_stock_basic_test():
    return [("601158.SH", "")]

def flat_days_above(df):
    if df is None:
        return False
    return range_high_chg(df.head(5), 0.35)

def range_close_chg(df, ratio):
    return df['close'].max() / df['close'].min() - 1 < ratio

def range_high_chg(df, ratio):
    return df['high'].max() / df['low'].min() - 1 < ratio

class Stock:
    def __init__(self, stock, days):
        self.stock = stock
        self.days = days
        self.days_no = 0

    def __str__(self):
        return "stock: %s ,days: %s, days_no: %d" % (self.stock, self.days, self.days_no)

count = 0
stocks = obtain_stock_basic()
con.close()
stock_len = len(stocks)
from collections import defaultdict

hit_dict = defaultdict(list)
for (stock, name) in stocks:
    count += 1
    if count % 100 == 0:
        print('finish %d/%d' % (count, stock_len))
    dfo = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date='20200101', end_date='20220710', ma=[5, 10])
    if dfo is None:
        continue
    for i in range(10, 100):
        df = dfo[i:]
        last = df.head(1)
        if last is None:
            continue
        last_pct_chg = last['pct_chg'].max()
        if last_pct_chg < 9.9:
            continue
        last_vol = last['vol'].max()
        if last_vol is None:
            continue
        lastv_10 = last['ma_v_10'].max()
        if lastv_10 is None:
            continue
        if last['trade_date'].empty:
            continue
        d = int(last['trade_date'])
        remain = df[1:]
        # Require a quiet base: the prior 5 days moved less than 10% in total.
        if remain.head(5)['pct_chg'].abs().sum() > 10:
            continue
        before_5_vol = remain.head(5)['vol'].mean()
        if before_5_vol is None:
            continue
        if last_vol < 2 * before_5_vol:
            continue
        if last_vol > 3 * before_5_vol:
            continue
        hit_dict[d].append(Stock(stock, last_vol / before_5_vol / remain.head(5)['pct_chg'].abs().mean()))

con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True)
for (k, v) in hit_dict.items():
    sorted_days = sorted(v, key=lambda item: item.days)
    n = 0
    for s in reversed(sorted_days):
        if n >= 3:
            break
        if s is None or s.days is None:
            continue
        if math.isnan(s.days):
            continue
        print(k)
        print(str(s))
        column_str = """date, code, value"""
        final_str = "INSERT IGNORE INTO first_zt5 (%s) VALUES (%d, '%s', %d)" % \
                    (column_str, k, s.stock, s.days)
        n += 1
        cur = con.cursor()
        cur.execute(final_str)
        con.commit()
con.commit()
con.close()
print("Done")
from common import *

warnings.simplefilter(action='ignore', category=FutureWarning)

def days_above(df, close):
    if df is None:
        return 0
    days = 0
    for idx in df.index:
        high = df['high'][idx]
        if close < high:
            break
        days = days + 1
    return days

def ma_days_above(df, ma='ma150', limit=7):
    # df is newest-first: count consecutive recent days with a rising MA.
    if df is None:
        return False
    days = 0
    last = 100000000
    for idx in df.index:
        cur = df[ma][idx]
        if cur > last:
            break
        days = days + 1
        if days >= limit:
            return True
        last = cur
    return days >= limit

def flat_days_above(df):
    if df is None:
        return False
    if range_high_chg(df.head(3), 0.02) or \
            range_close_chg(df.head(3), 0.025) or \
            range_high_chg(df.head(5), 0.07):
        return True
    return False

def range_close_chg(df, ratio):
    print(df['close'].max() / df['close'].min())
    return df['close'].max() / df['close'].min() - 1 < ratio

def range_high_chg(df, ratio):
    print(df)
    print(df['high'].max() / df['low'].min())
    return df['high'].max() / df['low'].min() - 1 < ratio

if __name__ == '__main__':
    import sys
    stock = format_code(sys.argv[1])
    end_date = sys.argv[2]
    if stock.startswith('688'):
        sys.exit("not support 688")
    try:
        dfo = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date='20190101', end_date=end_date, ma=[5, 50, 150])
    except Exception as e:
        print(e)
        print('%s error' % stock)
        sys.exit("read data error")
    for i in range(0, 1):
        df = dfo[i:]
        last = df.head(1)
        if last is None:
            continue
        if last['amount'].max() < 100000:
            continue
        last_close = last['close'].max()
        if last_close < 6:
            continue
        last_high = last['high'].max()
        last_open = last['open'].max()
        last_vol = last['vol'].max()
        last_150 = last['ma150'].max()
        if last_150 is None:
            continue
        last_50 = last['ma50'].max()
        last_5 = last['ma5'].max()
        last_pct_chg = last['pct_chg'].max()
        if last_pct_chg > 4 or last_5 < last_50:
            continue
        if not ma_days_above(df):
            continue
        if not flat_days_above(df):
            continue
        d = int(last['trade_date'])
        remain = df[1:]
        days = days_above(remain, last_high)
        if days >= len(df) - 1:
            continue
        days = days_above(remain, last_close * 1.03)
        if days < 20:
            continue
        print('%s hit' % (stock))
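# Screener over the local CSV dumps: the same flat-base setup as the manual
# checker above, scored by how long the breakout level had held, with hits
# stored in flat_nb.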
import os.path | |
import pandas as pd | |
from common import * | |
warnings.simplefilter(action='ignore', category=FutureWarning) | |
def obtain_stock_basic_test(): | |
return [("603713.SH", "")] | |
def days_above(df, close): | |
if df is None: | |
return 0 | |
days = 0 | |
for idx in df.index: | |
high = df['high'][idx] | |
if close < high: | |
break | |
days = days + 1 | |
return days | |
def ma_days_above(df, ma='ma150', limit=7): | |
if df is None: | |
return False | |
days = 0 | |
last = 100000000 | |
for idx in df.index: | |
cur = df[ma][idx] | |
if cur > last: | |
break | |
days = days + 1 | |
if days >= limit: | |
return True | |
last = cur | |
#print(days) | |
return days >= limit | |
def flat_days_above(df): | |
if df is None: | |
return False | |
if range_high_chg(df.head(3), 0.02) or \ | |
range_high_chg(df.head(5), 0.07): | |
return True | |
return False | |
def range_close_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
return df['close'].max() / df['close'].min() - 1 < ratio | |
def range_high_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
return df['high'].max() / df['low'].min() - 1 < ratio | |
def vcp_high_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
idxmax = df['high'].idxmax() | |
idxmin = df['low'].idxmin() | |
if df.iloc[idxmax]['trade_date'] > df.iloc[idxmin]['trade_date']: | |
return False | |
return df['high'].max() / df['low'].min() - 1 < ratio | |
count = 0 | |
date = 0 | |
con = get_conn() | |
stocks = obtain_stock_basic(con) | |
con.close() | |
stock_len = len(stocks) | |
from collections import defaultdict | |
dict = defaultdict(list) | |
SOURCE_DIR = '/home/ruoang/astocks/' | |
for (stock, name) in stocks: | |
if stock.startswith('688'): | |
continue | |
#print(stock) | |
count += 1 | |
if count % 100 == 0: | |
print('finish %d/%d' % (count, stock_len)) | |
path = format("%s/%s.csv" %(SOURCE_DIR, stock)) | |
if not os.path.isfile(path): | |
continue | |
try: | |
dfo = pd.read_csv(path, header=0, index_col=0) | |
#dfo = dfo.set_index('trade_date') | |
#dfo = dfo[::-1] | |
except Exception as e: | |
print(e) | |
print('%s error' % stock) | |
continue | |
for i in range(0, 1): | |
df = dfo[i:] | |
last = df.head(1) | |
#print(last) | |
if last is None: | |
continue | |
if last['amount'].max() < 100000: | |
continue | |
last_close = last['close'].max() | |
if last_close < 6: | |
continue | |
last_high = last['high'].max() | |
last_open = last['open'].max() | |
last_vol = last['vol'].max() | |
last_150 = last['ma150'].max() | |
        if last_150 is None or math.isnan(last_150):
continue | |
last_50 = last['ma50'].max() | |
last_5 = last['ma5'].max() | |
last_pct_chg = last['pct_chg'].max() | |
if last_pct_chg > 4 or last_5 < last_50 or last_50 < last_150: | |
continue | |
if not ma_days_above(df): | |
continue | |
if not flat_days_above(df): | |
continue | |
d = int(last['trade_date']) | |
remain = df[1:] | |
days = days_above(remain, last_high) | |
if days >= len(df) - 1: | |
continue | |
days = days_above(remain, last_close * 1.03) | |
#print(days) | |
if days < 20: | |
continue | |
dict[d].append(Stock(stock, days)) | |
print('%s %s' %(stock, name)) | |
con = get_conn() | |
for (k, v) in dict.items(): | |
sorted_days = sorted(v, key=lambda item: item.days) | |
lens = len(v) | |
n = 0 | |
for s in reversed(sorted_days): | |
if n >= 64: | |
break | |
#print(k) | |
print(str(s)) | |
column_str = """date, code, value""" | |
final_str = "INSERT IGNORE INTO flat_nb (%s) VALUES (%d, '%s', %d)" % \ | |
(column_str, k, s.stock, s.days) | |
n += 1 | |
cur = con.cursor() | |
print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
con.commit() | |
con.close() | |
print("Done") |
import os.path | |
import pandas as pd | |
from common import * | |
warnings.simplefilter(action='ignore', category=FutureWarning) | |
def obtain_stock_basic_test(): | |
return [("603713.SH", "")] | |
def days_above(df, close): | |
if df is None: | |
return 0 | |
days = 0 | |
for idx in df.index: | |
high = df['close'][idx] | |
if close < high: | |
break | |
days = days + 1 | |
return days | |
def ma_days_above(df, ma='ma150', limit=7): | |
if df is None: | |
return False | |
days = 0 | |
last = 100000000 | |
for idx in df.index: | |
cur = df[ma][idx] | |
if cur > last: | |
break | |
days = days + 1 | |
if days >= limit: | |
return True | |
last = cur | |
#print(days) | |
return days >= limit | |
def flat_days_above(df): | |
if df is None: | |
return False | |
if range_high_chg(df.head(3), 0.02) or \ | |
range_high_chg(df.head(5), 0.07): | |
return True | |
return False | |
def range_close_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
return df['close'].max() / df['close'].min() - 1 < ratio | |
def range_high_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
return df['high'].max() / df['low'].min() - 1 < ratio | |
def vcp_high_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
idxmax = df['high'].idxmax() | |
idxmin = df['low'].idxmin() | |
if df.iloc[idxmax]['trade_date'] > df.iloc[idxmin]['trade_date']: | |
return False | |
return df['high'].max() / df['low'].min() - 1 < ratio | |
count = 0 | |
date = 0 | |
con = get_conn() | |
stocks = obtain_stock_basic(con) | |
con.close() | |
stock_len = len(stocks) | |
from collections import defaultdict | |
dict = defaultdict(list) | |
SOURCE_DIR = '/home/ruoang/astocks/' | |
for (stock, name) in stocks: | |
if stock.startswith('688'): | |
continue | |
#print(stock) | |
count += 1 | |
if count % 100 == 0: | |
print('finish %d/%d' % (count, stock_len)) | |
path = format("%s/%s.csv" %(SOURCE_DIR, stock)) | |
if not os.path.isfile(path): | |
continue | |
try: | |
dfo = pd.read_csv(path, header=0, index_col=0) | |
#dfo = dfo.set_index('trade_date') | |
#dfo = dfo[::-1] | |
except Exception as e: | |
print(e) | |
print('%s error' % stock) | |
continue | |
for i in range(0, 1): | |
df = dfo[i:] | |
last = df.head(1) | |
#print(last) | |
if last is None: | |
continue | |
if last['amount'].max() < 100000: | |
continue | |
last_close = last['close'].max() | |
if last_close < 6: | |
continue | |
last_high = last['high'].max() | |
last_open = last['open'].max() | |
last_vol = last['vol'].max() | |
last_150 = last['ma150'].max() | |
        if last_150 is None or math.isnan(last_150):
continue | |
last_50 = last['ma50'].max() | |
last_5 = last['ma5'].max() | |
last_pct_chg = last['pct_chg'].max() | |
if last_pct_chg > 4 or last_5 < last_50 or last_50 < last_150: | |
continue | |
if not ma_days_above(df): | |
continue | |
if not flat_days_above(df): | |
continue | |
d = int(last['trade_date']) | |
remain = df[1:] | |
days = days_above(remain, last_high) | |
if days >= len(df) - 1: | |
continue | |
days = days_above(remain, last_close * 1.03) | |
#print(days) | |
if days < 20: | |
continue | |
dict[d].append(Stock(stock, days)) | |
print('%s %s' %(stock, name)) | |
con = get_conn("db8") | |
for (k, v) in dict.items(): | |
sorted_days = sorted(v, key=lambda item: item.days) | |
lens = len(v) | |
n = 0 | |
for s in reversed(sorted_days): | |
if n >= 64: | |
break | |
#print(k) | |
print(str(s)) | |
column_str = """date, code, value""" | |
final_str = "INSERT IGNORE INTO flat_close (%s) VALUES (%d, '%s', %d)" % \ | |
(column_str, k, s.stock, s.days) | |
n += 1 | |
cur = con.cursor() | |
print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
con.commit() | |
con.close() | |
print("Done") |
from common import * | |
warnings.simplefilter(action='ignore', category=FutureWarning) | |
import MySQLdb as mdb | |
con = get_conn() | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock, name from stock_basic where total_mv > 500000" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0], d[1]) for d in data] | |
def obtain_stock_basic_test(): | |
return [("300206.SZ", "")] | |
def days_above(df, close): | |
if df is None: | |
return 0 | |
days = 0 | |
for idx in df.index: | |
high = df['high'][idx] | |
if close < high: | |
break | |
days = days + 1 | |
return days | |
def ma_days_above(df, ma='ma150', limit=7): | |
if df is None: | |
return False | |
days = 0 | |
last = 100000000 | |
for idx in df.index: | |
cur = df[ma][idx] | |
if cur > last: | |
break | |
days = days + 1 | |
if days >= limit: | |
return True | |
last = cur | |
#print(days) | |
return days >= limit | |
def flat_days_above(df): | |
if df is None: | |
return False | |
if range_high_chg(df.head(5), 0.08) or \ | |
range_high_chg(df.head(10), 0.12): | |
return True | |
    return (vcp_high_chg(df.head(7), 0.10) and \
            vcp_high_chg(df.head(15), 0.20)) or \
            vcp_high_chg(df.head(30), 0.30)
def range_close_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
return df['close'].max() / df['close'].min() - 1 < ratio | |
def range_high_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
return df['high'].max() / df['low'].min() - 1 < ratio | |
def vcp_high_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
idxmax = df['high'].idxmax() | |
idxmin = df['low'].idxmin() | |
if df.iloc[idxmax]['trade_date'] > df.iloc[idxmin]['trade_date']: | |
return False | |
return df['high'].max() / df['low'].min() - 1 < ratio | |
class Stock: | |
def __init__(self, stock, days): | |
self.stock = stock | |
self.days = days | |
self.days_no = 0 | |
def __str__(self): | |
return "stock: %s ,days: %s, days_no: %d" % (self.stock, self.days, self.days_no) | |
count = 0 | |
date = 0 | |
stocks = obtain_stock_basic_test() | |
con.close() | |
stock_len = len(stocks) | |
from collections import defaultdict | |
dict = defaultdict(list) | |
for (stock, name) in stocks: | |
#print(stock) | |
count += 1 | |
if count % 100 == 0: | |
print('finish %d/%d' % (count, stock_len)) | |
dfo = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date='20190101', end_date='20200609', ma=[5, 50, 150]) | |
if dfo is None: | |
continue | |
for i in range(0, 1): | |
df = dfo[i:] | |
last = df.head(1) | |
if last is None: | |
continue | |
if last['amount'].max() < 100000: | |
continue | |
last_close = last['close'].max() | |
last_open = last['open'].max() | |
last_vol = last['vol'].max() | |
last_150 = last['ma150'].max() | |
        if last_150 is None or math.isnan(last_150):
continue | |
last_50 = last['ma50'].max() | |
last_5 = last['ma5'].max() | |
lastv_50 = last['ma_v_50'].max() | |
last_pct_chg = last['pct_chg'].max() | |
if last_5 < last_50 or last_50 < last_150: | |
continue | |
if last['trade_date'].empty: | |
continue | |
# if not ma_days_above(df): | |
# continue | |
if not flat_days_above(df): | |
continue | |
d = int(last['trade_date']) | |
remain = df[1:] | |
days = days_above(remain, last_close * 1.04) | |
#print(days) | |
if days < 80: | |
continue | |
dict[d].append(Stock(stock, days)) | |
# print('%s %s' %(stock, name)) | |
con = get_conn() | |
for (k, v) in dict.items(): | |
sorted_days = sorted(v, key=lambda item: item.days) | |
lens = len(v) | |
n = 0 | |
for s in reversed(sorted_days): | |
if n >= 64: | |
break | |
#print(k) | |
print(str(s)) | |
column_str = """date, code, value""" | |
final_str = "INSERT IGNORE INTO flat_test (%s) VALUES (%d, '%s', %d)" % \ | |
(column_str, k, s.stock, s.days) | |
n += 1 | |
cur = con.cursor() | |
#cur.execute(final_str) | |
con.commit() | |
con.commit() | |
con.close() | |
print("Done") |
from common import * | |
from factor import * | |
import os.path | |
import pandas as pd | |
warnings.simplefilter(action='ignore', category=FutureWarning) | |
con = get_conn() | |
def obtain_stock_basic_test(): | |
return [("601158.SH", "")] | |
def days_above(df, close): | |
if df is None: | |
return 0 | |
days = 0 | |
for idx in reversed(df.index): | |
high = df['high'][idx] | |
if close < high: | |
break | |
days = days + 1 | |
return days | |
def ma_days_above(df, ma='ma150', limit=7): | |
if df is None: | |
return False | |
days = 0 | |
last = 0 | |
for idx in df.index: | |
cur = df[ma][idx] | |
if cur < last: | |
break | |
days = days + 1 | |
if days >= limit: | |
return True | |
last = cur | |
#print(days) | |
return days >= limit | |
def flat_days_above(df): | |
if df is None: | |
return False | |
return range_close_chg(df.tail(10), 0.04) or \ | |
range_close_chg(df.tail(5), 0.02) or \ | |
range_close_chg(df.tail(20), 0.05) or \ | |
range_high_chg(df.tail(10), 0.06) or \ | |
range_high_chg(df.tail(20), 0.08) | |
def range_close_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
return df['close'].max() / df['close'].min() - 1 < ratio | |
def range_high_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
return df['high'].max() / df['low'].min() - 1 < ratio | |
class Stock: | |
def __init__(self, stock, days): | |
self.stock = stock | |
self.days = days | |
self.days_no = 0 | |
def __str__(self): | |
return "stock: %s ,days: %s, days_no: %d" % (self.stock, self.days, self.days_no) | |
count = 0 | |
date = 0 | |
stocks = obtain_stock_us(con) | |
con.close() | |
stock_len = len(stocks) | |
from collections import defaultdict | |
dict = defaultdict(list) | |
SOURCE_DIR = '/home/ruoang/ustocks/' | |
yesterday = datetime.today() - timedelta(days=1)
d = yesterday.strftime("%Y-%m-%d")
print(d) | |
candidates = [] | |
for stock in stocks: | |
#print(stock) | |
count += 1 | |
if count % 100 == 0: | |
print('finish %d/%d' % (count, stock_len)) | |
path = format("%s/%s.csv" %(SOURCE_DIR, stock)) | |
if not os.path.isfile(path): | |
continue | |
try: | |
df = pd.read_csv(path) | |
df = sma(df, "ma5", n=5) | |
df = sma(df, "ma50", n=50) | |
df = sma(df, "ma150", n=150) | |
except Exception as e: | |
#print(e) | |
#print('%s error' % stock) | |
continue | |
last = df.tail(1) | |
if last is None: | |
continue | |
if last['date'].max() != d: | |
print("Exception %s wrong date %s" % (stock, last['date'].max())) | |
continue | |
#print(last) | |
last_close = last['close'].max() | |
last_open = last['open'].max() | |
last_vol = last['volume'].max() | |
last_150 = last['ma150'].max() | |
last_50 = last['ma50'].max() | |
last_5 = last['ma5'].max() | |
if last_5 < last_50 or last_50 < last_150: | |
continue | |
if not ma_days_above(df.tail(7)): | |
continue | |
if not flat_days_above(df): | |
continue | |
remain = df[-200:-1] | |
days = days_above(remain, last_close * 1.03) | |
if days < 40: | |
continue | |
candidates.append(Stock(stock, days)) | |
con = get_conn() | |
sorted_days = sorted(candidates, key=lambda item: item.days) | |
n = 0 | |
for s in reversed(sorted_days): | |
if n >= 10: | |
break | |
print(str(s)) | |
column_str = """date, code, value""" | |
final_str = "INSERT IGNORE INTO flat_us (%s) VALUES ('%s', '%s', %d)" % \ | |
(column_str, d, s.stock, s.days) | |
n += 1 | |
cur = con.cursor() | |
cur.execute(final_str) | |
con.commit() | |
con.close() | |
print("Done") |
from common import * | |
from collections import defaultdict | |
pct_chg_dict = defaultdict(float) | |
vol_dict = defaultdict(float) | |
candidates = set() | |
def obtain_stock_basic(): | |
    # fetch Shanghai-Hong Kong Stock Connect (沪股通) constituents
df = pro.hs_const(hs_type='SH') | |
for index, row in df.iterrows(): | |
candidates.add(row['ts_code']) | |
    # fetch Shenzhen-Hong Kong Stock Connect (深股通) constituents
df = pro.hs_const(hs_type='SZ') | |
for index, row in df.iterrows(): | |
candidates.add(row['ts_code']) | |
obtain_stock_basic()
today = datetime.today().strftime("%Y%m%d")
today_daily = pro.daily(trade_date=today) | |
for index, row in today_daily.iterrows(): | |
stock = row['ts_code'] | |
if not(stock in candidates): | |
continue | |
if float(row['pct_chg']) < 1: | |
continue | |
pct_chg_dict[stock] = float(row['pct_chg']) | |
vol_dict[stock] = float(row['amount']) | |
sorted_concepts = sorted(pct_chg_dict.items(), key=lambda d: d[1]) | |
print("Done") |
git status | |
git add . | |
git commit -m "update" | |
git push |
import os.path | |
import pandas as pd | |
from common import * | |
warnings.simplefilter(action='ignore', category=FutureWarning) | |
def obtain_stock_basic_test(): | |
return [("603713.SH", "")] | |
def days_above(df, close): | |
if df is None: | |
return 0 | |
days = 0 | |
for idx in df.index: | |
high = df['high'][idx] | |
if close < high: | |
break | |
days = days + 1 | |
return days | |
def ma_days_above(df, ma='ma150', limit=7): | |
if df is None: | |
return False | |
days = 0 | |
last = 100000000 | |
for idx in df.index: | |
cur = df[ma][idx] | |
if cur > last: | |
break | |
days = days + 1 | |
if days >= limit: | |
return True | |
last = cur | |
#print(days) | |
return days >= limit | |
def flat_days_above(df): | |
if df is None: | |
return False | |
if range_high_chg(df.head(3), 0.05) or \ | |
range_high_chg(df.head(5), 0.09) or \ | |
range_high_chg(df.head(10), 0.15): | |
return True | |
return False | |
def range_close_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
return df['close'].max() / df['close'].min() - 1 < ratio | |
def range_high_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
return df['high'].max() / df['low'].min() - 1 < ratio | |
def vcp_high_chg(df, ratio): | |
#print(df['close'].max() / df['close'].min()) | |
idxmax = df['high'].idxmax() | |
idxmin = df['low'].idxmin() | |
if df.iloc[idxmax]['trade_date'] > df.iloc[idxmin]['trade_date']: | |
return False | |
return df['high'].max() / df['low'].min() - 1 < ratio | |
count = 0 | |
date = 0 | |
con = get_conn("db8") | |
max_date = datetime.today().strftime("%Y-%m-%d") | |
print(max_date) | |
def obtain_list_of_db_tickers(table): | |
cur = con.cursor() | |
query_str = "select distinct code from %s where date = '%s'" % (table, max_date) | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
stocks = set() | |
for table in ['top_concept', 'top_industry']: | |
for stock in obtain_list_of_db_tickers(table): | |
stocks.add(stock) | |
print("total stocks len: %d" % len(stocks)) | |
con.close() | |
stock_len = len(stocks) | |
from collections import defaultdict | |
dict = defaultdict(list) | |
SOURCE_DIR = '/home/ruoang/astocks/' | |
for stock in stocks: | |
#print(stock) | |
count += 1 | |
if count % 100 == 0: | |
print('finish %d/%d' % (count, stock_len)) | |
path = format("%s/%s.csv" %(SOURCE_DIR, stock)) | |
if not os.path.isfile(path): | |
continue | |
try: | |
dfo = pd.read_csv(path, header=0, index_col=0) | |
#dfo = dfo.set_index('trade_date') | |
#dfo = dfo[::-1] | |
except Exception as e: | |
print(e) | |
print('%s error' % stock) | |
continue | |
for i in range(0, 1): | |
df = dfo[i:] | |
last = df.head(1) | |
#print(last) | |
if last is None: | |
continue | |
if last['amount'].max() < 100000: | |
continue | |
last_close = last['close'].max() | |
last_high = last['high'].max() | |
last_open = last['open'].max() | |
last_vol = last['vol'].max() | |
last_150 = last['ma150'].max() | |
        if last_150 is None or math.isnan(last_150):
continue | |
last_50 = last['ma50'].max() | |
last_5 = last['ma5'].max() | |
lastv_50 = last['ma_v_50'].max() | |
last_pct_chg = last['pct_chg'].max() | |
if last_pct_chg > 3: | |
continue | |
if not flat_days_above(df): | |
continue | |
d = int(last['trade_date']) | |
remain = df[1:] | |
days = days_above(remain, last_close * 1.03) | |
#print(days) | |
if days < 20: | |
continue | |
dict[d].append(Stock(stock, days)) | |
print('%s' %(stock)) | |
con = get_conn("db8") | |
for (k, v) in dict.items(): | |
sorted_days = sorted(v, key=lambda item: item.days) | |
lens = len(v) | |
n = 0 | |
for s in reversed(sorted_days): | |
if n >= 64: | |
break | |
#print(k) | |
print(str(s)) | |
column_str = """date, code, value""" | |
final_str = "INSERT IGNORE INTO hot (%s) VALUES (%d, '%s', %d)" % \ | |
(column_str, k, s.stock, s.days) | |
n += 1 | |
cur = con.cursor() | |
print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
con.commit() | |
con.close() | |
print("Done") |
import time | |
import pandas as pd | |
from numpy import cumsum, log, polyfit, sqrt, std, subtract | |
from numpy.random import randn | |
# Import the Time Series library | |
import statsmodels.tsa.stattools as tts | |
import warnings | |
warnings.simplefilter(action='ignore') | |
def hurst(ts): | |
"""Returns the Hurst Exponent of the time series vector ts""" | |
# Create the range of lag values | |
lags = range(2, 100) | |
# Calculate the array of the variances of the lagged differences | |
tau = [sqrt(std(subtract(ts[lag:], ts[:-lag]))) for lag in lags] | |
# Use a linear fit to estimate the Hurst Exponent | |
poly = polyfit(log(lags), log(tau), 1) | |
# Return the Hurst exponent from the polyfit output | |
return poly[0] * 2.0 | |
def is_trend(df):
    # Note: despite the name, this flags *mean-reverting* behaviour -- the ADF
    # test must reject a unit root at the 5% level and the Hurst exponent must
    # be below 0.5 (anti-persistent), i.e. the close series is stationary.
    try:
        adf = tts.adfuller(df['close'], 1)
        if adf[0] < adf[4]['5%']:
            #print("adf(%s): %s" % (stock, adf))
            hurstValue = hurst(df['close'])
            if hurstValue < 0.5:
                return True
except ValueError: | |
pass | |
return False |
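# A quick sanity check of hurst() on synthetic series (illustrative only; not
# called anywhere by the pipeline). Expected values: a random walk should give
# H near 0.5, a mean-reverting series near 0.0, a trending series near 1.0.
def _demo_hurst():
    from numpy import cumsum, log
    from numpy.random import randn
    gbm = log(cumsum(randn(100000)) + 1000)      # random walk, expect H ~ 0.5
    mr = log(randn(100000) + 1000)               # mean-reverting, expect H ~ 0.0
    tr = log(cumsum(randn(100000) + 1) + 1000)   # trending, expect H ~ 1.0
    print("Hurst(random walk): %.2f" % hurst(gbm))
    print("Hurst(mean-revert): %.2f" % hurst(mr))
    print("Hurst(trending):    %.2f" % hurst(tr))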
import json | |
import requests | |
URL_PREFIX = 'https://cloud.iexapis.com/stable/stock/' | |
URL_SUFFIX = '/intraday-prices?token=pk_88d98ab1d0344ceb8184b898313a18cc&chartIEXOnly=true&chartLast=' | |
def get_intraday_prices(stock, n=390): | |
url = URL_PREFIX + stock + URL_SUFFIX + str(n) | |
r = requests.get(url) | |
s = str(r.content, encoding = "utf-8") | |
return json.loads(s) | |
if __name__ == '__main__': | |
    items = get_intraday_prices('AMZN', 3)
    print(len(items))
    for item in items:
        print(item)
        print(item['volume'])
        print(item['close'])
from common import * | |
from collections import defaultdict | |
def get_ebit_dict(period): | |
last_income = pro.income_vip(period = str(period), fields='ts_code, ebit') | |
ebit_dict = defaultdict(float) | |
for index, cash in last_income.iterrows(): | |
code = cash['ts_code'] | |
        ebit = cash['ebit']
        # guard before converting: float(None) would raise; skip NaN / small EBIT
        if ebit is None or math.isnan(ebit) or ebit < 50000000:
            continue
        ebit = float(ebit)
ebit_dict[code] = ebit | |
    # same reporting period one year later (period is an int like 20190331)
    income = pro.income_vip(period = str(period + 10000), fields='ts_code, ebit')
ebit_pct_dict = defaultdict(float) | |
for index, cash in income.iterrows(): | |
code = cash['ts_code'] | |
if not (code in ebit_dict): | |
continue | |
ebit = float(cash['ebit']) | |
if ebit / ebit_dict[code] > 1.1: | |
ebit_pct_dict[code] = ebit / ebit_dict[code] | |
#print("%s %.2f" %(code, ebit / ebit_dict[code])) | |
return ebit_pct_dict | |
if __name__ == '__main__': | |
cur_ebit_pct = get_ebit_dict(20190331) | |
last_ebit_pct = get_ebit_dict(20181231) | |
queries = [] | |
for (k, v) in cur_ebit_pct.items(): | |
if not (k in last_ebit_pct): | |
continue | |
if v > last_ebit_pct[k]: | |
print("%s %.2f %.2f" %(k, v, last_ebit_pct[k])) | |
query = "INSERT IGNORE INTO watch (code) VALUES ('%s')" % (k) | |
queries.append(query) | |
con = get_conn("db8") | |
execute_queries(con, queries) | |
print("Done") | |
煤炭开采 | |
环境保护 | |
食品 | |
水泥 | |
广告包装 | |
批发业 | |
公路 | |
旅游服务 | |
机械基件 | |
服饰 | |
矿物制品 | |
机场 | |
元器件 | |
生物制药 | |
水力发电 | |
超市连锁 | |
农药化肥 | |
钢加工 | |
橡胶 | |
饲料 | |
白酒 | |
新型电力 | |
中成药 | |
IT设备 | |
多元金融 | |
铝 | |
商品城 | |
渔业 | |
啤酒 | |
摩托车 | |
仓储物流 | |
汽车配件 | |
日用化工 | |
铅锌 | |
水务 | |
焦炭加工 | |
运输设备 | |
小金属 | |
银行 | |
其他商业 | |
家用电器 | |
黄金 | |
酒店餐饮 | |
出版业 | |
汽车整车 | |
装修装饰 | |
区域地产 | |
专用机械 | |
证券 | |
化工原料 | |
染料涂料 | |
陶瓷 | |
港口 | |
林业 | |
百货 | |
农业综合 | |
石油贸易 | |
其他建材 | |
轻工机械 | |
汽车服务 | |
软饮料 | |
电信运营 | |
乳制品 | |
农用机械 | |
旅游景点 | |
造纸 | |
家居用品 | |
房产服务 | |
综合类 | |
石油开采 | |
工程机械 | |
铜 | |
通信设备 | |
纺织机械 | |
电器仪表 | |
软件服务 | |
保险 | |
园区开发 | |
机床制造 | |
路桥 | |
建筑工程 | |
医疗保健 | |
供气供热 | |
船舶 | |
电气设备 | |
化工机械 | |
文教休闲 | |
玻璃 | |
互联网 | |
电器连锁 | |
火力发电 | |
半导体 | |
红黄酒 | |
影视音像 | |
种植业 | |
全国地产 | |
普钢 | |
公共交通 | |
纺织 | |
铁路 | |
特种钢 | |
石油加工 | |
化纤 | |
医药商业 | |
空运 | |
塑料 | |
航空 | |
水运 | |
化学制药 | |
商贸代理 |
import tushare as ts | |
import warnings | |
import threading | |
import time | |
import sys | |
print(ts.__version__) | |
warnings.simplefilter(action='ignore', category=FutureWarning) | |
import easyquotation | |
quotation = easyquotation.use("timekline") | |
from send_wx import * | |
from common import * | |
from collections import defaultdict | |
con = get_conn("db8") | |
max_date = get_max_date() | |
print(max_date) | |
def obtain_list_of_db_tickers(table): | |
cur = con.cursor() | |
query_str = "select distinct code from %s where date = '%s'" % (table, max_date) | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
class BuyThread(threading.Thread): | |
def __init__(self, index, thread_count, model, close_dicts, volume_dicts, is_signal): | |
threading.Thread.__init__(self) | |
self.index = index | |
self.thread_count = thread_count | |
self.position_table = "%s_position" %(model) | |
self.model = model | |
self.close_dicts = close_dicts | |
self.volume_dicts = volume_dicts | |
self.is_signal = is_signal | |
def run(self): | |
print("Starting thread %d \n" % self.index) | |
buy(self.index, self.thread_count, self.model, self.position_table, self.close_dicts, self.volume_dicts, self.is_signal) | |
def buy(idx, thread_count, model, position_table, close_dicts, volume_dicts, is_signal): | |
count_dicts = defaultdict(int) | |
buy_dicts = defaultdict(int) | |
buy_queries = [] | |
bought = set() | |
while not is_after_hour(): | |
if not is_in_trade_time(): | |
time.sleep(60) | |
continue | |
i = -1 | |
for stock in stocks: | |
i += 1 | |
if (i % thread_count) != idx: | |
continue | |
# print(stock) | |
if stock in bought: | |
continue | |
code = stock[:-3] | |
sc = stock[-2:].lower() | |
try: | |
key = "%s%s.js" % (sc, code) | |
obj = quotation.real([code])[key]['time_data'] | |
total_len = len(obj) | |
for cur_len in range(count_dicts[stock] - 1, total_len): | |
if cur_len < 3: | |
continue | |
cur_time = obj[cur_len][0] | |
if buy_dicts[stock] == int(cur_time): | |
continue | |
cur_price = float(obj[cur_len][1]) | |
cur_volume = float(obj[cur_len][2]) | |
last_volume = float(obj[cur_len - 1][2]) | |
sec_volume = cur_volume - last_volume | |
avg_vol = cur_volume / (1 + cur_len) | |
last_avg_vol = last_volume / cur_len | |
pre_3_price = float(obj[cur_len - 3][1]) | |
if cur_price < close_dicts[stock]: | |
continue | |
price_chg_now = cur_price / pre_3_price | |
if price_chg_now < 1.011: | |
continue | |
#print("%d, %s %d %s cur_price: %f ,cur_volume: %f ,last_volume: %f ,sec_volume: %f ,avg_vol: %f ,last_avg_vol: %f, pre_3_price: %f" \ | |
# %(idx, code, cur_len, cur_time, cur_price, cur_volume, last_volume, sec_volume, avg_vol, last_avg_vol, pre_3_price)) | |
day_volume_ratio = avg_vol * 240 / volume_dicts[stock] | |
sec_volume_ratio = sec_volume / last_avg_vol | |
print("%d, %s %d %s price_up_chg: %f ,day_volume_ratio: %f ,sec_volume_ratio: %f " \ | |
% (idx, code, cur_len, cur_time, price_chg_now, day_volume_ratio, sec_volume_ratio)) | |
if price_chg_now < 1.04 and \ | |
day_volume_ratio <= 8.1 and \ | |
sec_volume_ratio < 5 and \ | |
(day_volume_ratio < 2 or sec_volume_ratio < 1.8): | |
continue | |
                    if stock not in bought:
                        buy_num = calc_buy_num(cur_price)
                        content = format('%s model %s: buy %s, %d shares, price %.2f' % (model, cur_time, stock, buy_num, cur_price))
if is_signal: | |
send_to_wx(content) | |
print(content) | |
buy_queries.append(build_table_buy_query(position_table, stock, cur_price, buy_num, day_volume_ratio, sec_volume_ratio, | |
price_chg_now)) | |
bought.add(stock) | |
buy_dicts[stock] = int(cur_time) | |
count_dicts[stock] = total_len | |
except Exception as e: | |
print(e) | |
print('%s error' % stock) | |
time.sleep(10) | |
if len(buy_queries) > 0: | |
time.sleep(90 + idx * 5) | |
tcon = get_conn("db8") | |
execute_queries(tcon, buy_queries) | |
if __name__ == '__main__': | |
import sys | |
if not is_today_trading(): | |
sys.exit("not trading day") | |
candidate_table = sys.argv[1] | |
model = sys.argv[2] | |
thread_count = int(sys.argv[3]) | |
close_dicts = defaultdict(float) | |
volume_dicts = defaultdict(float) | |
is_signal = True | |
if len(sys.argv) >= 5: | |
is_signal = ('true' == sys.argv[4].lower()) | |
stocks = set() | |
for table in [candidate_table]: | |
for stock in obtain_list_of_db_tickers(table): | |
stocks.add(stock) | |
print("total stocks len: %d" % len(stocks)) | |
if len(stocks) == 0: | |
sys.exit("Empty in buy list") | |
for stock in stocks: | |
df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date='20200316', end_date='20300318') | |
if df is None or len(df) < 1: | |
continue | |
close_dicts[stock] = float(df.head(1)['close'].max()) * 1.015 | |
volume_dicts[stock] = float(df.head(1)['vol'].max()) | |
print(close_dicts) | |
print(volume_dicts) | |
threads = [] | |
for idx in range(0, thread_count): | |
thread = BuyThread(idx, thread_count, model, close_dicts, volume_dicts, is_signal) | |
thread.start() | |
threads.append(thread) | |
for t in threads: | |
t.join() | |
con.close() | |
print("Done") |
from common import * | |
import warnings | |
import MySQLdb as mdb | |
warnings.simplefilter(action='ignore', category=FutureWarning) | |
data = pro.stock_basic(exchange_id='', list_status='L', fields='ts_code,name,list_date') | |
stock_len = len(data) | |
count = 0 | |
start_date = (datetime.today() - timedelta(days=7)).strftime("%Y%m%d") | |
end_date = datetime.today().strftime("%Y%m%d") | |
queries = [] | |
for row in data.itertuples(): | |
listdate = int(row[3]) | |
if listdate > 20191001: | |
continue | |
stock = row[1] | |
name = row[2] | |
code = stock.split('.', 1) | |
count += 1 | |
if count % 100 == 0: | |
print('finish %d/%d' % (count, stock_len)) | |
try: | |
df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date=start_date, end_date=end_date) | |
except Exception as e: | |
continue | |
    # ts.pro_bar returns rows newest-first: head(1) is the latest bar, tail(1) the oldest
    pct_chg = df.head(1)['close'].max() / df.tail(1)['open'].max() - 1
if pct_chg > 0.35: | |
print('%s %s %.2f' %(stock, name, pct_chg)) | |
column_str = """date, code, name, pct_chg""" | |
final_str = "INSERT IGNORE INTO leader (%s) VALUES ('%s', '%s', '%s', %.2f)" % \ | |
(column_str, end_date, stock, name, pct_chg) | |
queries.append(final_str) | |
con = get_conn("db8") | |
execute_queries(con, queries) | |
print("Done") |
from common import * | |
from collections import defaultdict | |
pct_chg_dict = defaultdict(float) | |
amp_dict = defaultdict(float) | |
vol_ratio_dict = defaultdict(float) | |
candidates = set() | |
con = get_conn("db8") | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock from stock_basic where total_mv > 300000" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
for stock in obtain_stock_basic(): | |
candidates.add(stock) | |
today = datetime.today().strftime("%Y%m%d") | |
today_daily = pro.daily(trade_date=today) | |
for index, row in today_daily.iterrows(): | |
stock = row['ts_code'] | |
pct_chg_dict[stock] = float(row['pct_chg']) | |
amp_dict[stock] = (float(row['high']) / float(row['low']) - 1) * 100 | |
today_daily_basic = pro.daily_basic(trade_date=today) | |
for index, row in today_daily_basic.iterrows(): | |
stock = row['ts_code'] | |
if math.isnan(row['volume_ratio']): | |
continue | |
vol_ratio_dict[stock] = float(row['volume_ratio']) | |
sorted_pct_chg = sorted(pct_chg_dict.items(), key=lambda d: d[1]) | |
sorted_amp = sorted(amp_dict.items(), key=lambda d: d[1]) | |
sorted_vol_ratio = sorted(vol_ratio_dict.items(), key=lambda d: d[1]) | |
n = 70 | |
a = set(map(lambda t: t[0], sorted_pct_chg[-n:])) | |
b = set(map(lambda t: t[0], sorted_amp[-n:])) | |
c = set(map(lambda t: t[0], sorted_vol_ratio[-n:])) | |
queries = [] | |
column_str = """date, code, value""" | |
today = datetime.today().strftime("%Y-%m-%d") | |
for stock in a.intersection(b).intersection(c).intersection(candidates): | |
final_str = "INSERT IGNORE INTO top_leading (%s) VALUES ('%s', '%s', %d)" % \ | |
(column_str, today, stock, 0) | |
queries.append(final_str) | |
execute_queries(con, queries) | |
print("Done") |
import os | |
import glob2 | |
from send_wx import * | |
for path in glob2.glob("/home/ruoang/*.out"): | |
print(path) | |
if not os.path.isfile(path): | |
continue | |
with open(path) as f: | |
content = f.readlines() | |
    has_exception = False
    for line in content:
        if "exception" in line or "Traceback" in line or "can't" in line:
            has_exception = True
            break
    if has_exception:
        name = path.split('/')[3]
        send_to_wx("%s has errors in its log, please handle promptly" % (name))
from common import * | |
start_date = (datetime.today() - timedelta(days=8)).strftime("%Y%m%d") | |
end_date = (datetime.today() + timedelta(days=1)).strftime("%Y%m%d") | |
today = (datetime.today()).strftime("%Y-%m-%d") | |
profit_date = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d") | |
def obtain_position(con, table): | |
cur = con.cursor() | |
query_str = "select code, buy_date, profit_pct from %s where is_sold = 1 and profit > 0" %(table) | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0], d[1], d[2]) for d in data] | |
con = get_conn() | |
for table in ['position']: | |
for (stock, buy_date, profit_pct) in obtain_position(con, table): | |
pre_date = (buy_date - timedelta(days=1)).strftime("%Y%m%d") | |
df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date=pre_date, end_date=pre_date) | |
if df is None or len(df) < 1: | |
continue | |
print("%s, %s, %.2f, %.2f" %(stock, buy_date, df.head(1)['pct_chg'].max(), profit_pct)) | |
con.commit() | |
con.close() | |
print("Done") | |
# check library version numbers | |
# scipy | |
import scipy | |
print('scipy: %s' % scipy.__version__) | |
# numpy | |
import numpy | |
print('numpy: %s' % numpy.__version__) | |
# matplotlib | |
import matplotlib | |
print('matplotlib: %s' % matplotlib.__version__) | |
# pandas | |
import pandas | |
print('pandas: %s' % pandas.__version__) | |
# statsmodels | |
import statsmodels | |
print('statsmodels: %s' % statsmodels.__version__) | |
# scikit-learn | |
import sklearn | |
print('sklearn: %s' % sklearn.__version__) |
# theano | |
import theano | |
print('theano: %s' % theano.__version__) | |
# tensorflow | |
import tensorflow | |
print('tensorflow: %s' % tensorflow.__version__) | |
# keras | |
import keras | |
print('keras: %s' % keras.__version__) |
# transform univariate time series to supervised learning problem | |
from numpy import array | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define univariate time series | |
series = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) | |
print(series.shape) | |
# transform to a supervised learning problem | |
X, y = split_sequence(series, 3) | |
print(X.shape, y.shape) | |
# show each sample | |
for i in range(len(X)): | |
print(X[i], y[i]) |
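# Expected output for the example above:
#   (10,)
#   (7, 3) (7,)
#   [1 2 3] 4
#   [2 3 4] 5
#   ...
#   [7 8 9] 10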
# transform univariate 2d to 3d | |
from numpy import array | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define univariate time series | |
series = array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) | |
print(series.shape) | |
# transform to a supervised learning problem | |
X, y = split_sequence(series, 3) | |
print(X.shape, y.shape) | |
# transform input from [samples, features] to [samples, timesteps, features] | |
X = X.reshape((X.shape[0], X.shape[1], 1)) | |
print(X.shape) |
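# Expected final shape: (7, 3, 1) -- [samples, timesteps, features], the
# layout that CNN/LSTM layers expect for univariate input.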
# example of defining a dataset | |
from numpy import array | |
# define the dataset | |
data = list() | |
n = 5000 | |
for i in range(n): | |
data.append([i+1, (i+1)*10]) | |
data = array(data) | |
print(data[:5, :]) | |
print(data.shape) |
# example of dropping the time dimension from the dataset | |
from numpy import array | |
# define the dataset | |
data = list() | |
n = 5000 | |
for i in range(n): | |
data.append([i+1, (i+1)*10]) | |
data = array(data) | |
# drop time | |
data = data[:, 1] | |
print(data.shape) |
# example of splitting a univariate sequence into subsequences | |
from numpy import array | |
# define the dataset | |
data = list() | |
n = 5000 | |
for i in range(n): | |
data.append([i+1, (i+1)*10]) | |
data = array(data) | |
# drop time | |
data = data[:, 1] | |
# split into samples (e.g. 5000/200 = 25) | |
samples = list() | |
length = 200 | |
# step over the 5,000 in jumps of 200 | |
for i in range(0,n,length): | |
# grab from i to i + 200 | |
sample = data[i:i+length] | |
samples.append(sample) | |
print(len(samples)) |
# example of creating an array of subsequence | |
from numpy import array | |
# define the dataset | |
data = list() | |
n = 5000 | |
for i in range(n): | |
data.append([i+1, (i+1)*10]) | |
data = array(data) | |
# drop time | |
data = data[:, 1] | |
# split into samples (e.g. 5000/200 = 25) | |
samples = list() | |
length = 200 | |
# step over the 5,000 in jumps of 200 | |
for i in range(0,n,length): | |
# grab from i to i + 200 | |
sample = data[i:i+length] | |
samples.append(sample) | |
# convert list of arrays into 2d array | |
data = array(samples) | |
print(data.shape) |
# example of creating a 3d array of subsequences | |
from numpy import array | |
# define the dataset | |
data = list() | |
n = 5000 | |
for i in range(n): | |
data.append([i+1, (i+1)*10]) | |
data = array(data) | |
# drop time | |
data = data[:, 1] | |
# split into samples (e.g. 5000/200 = 25) | |
samples = list() | |
length = 200 | |
# step over the 5,000 in jumps of 200 | |
for i in range(0,n,length): | |
# grab from i to i + 200 | |
sample = data[i:i+length] | |
samples.append(sample) | |
# convert list of arrays into 2d array | |
data = array(samples) | |
# reshape into [samples, timesteps, features] | |
data = data.reshape((len(samples), length, 1)) | |
print(data.shape) |
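# Expected output: (25, 200, 1) -- 25 samples of 200 time steps, one feature.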
# univariate data preparation | |
from numpy import array | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps = 3 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# univariate mlp example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import Dense | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps = 3 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps) | |
# define model | |
model = Sequential() | |
model.add(Dense(100, activation='relu', input_dim=n_steps)) | |
model.add(Dense(1)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([70, 80, 90]) | |
x_input = x_input.reshape((1, n_steps)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
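# For reference: the series increments by 10 each step, so the prediction for
# [70, 80, 90] should land near [[100.]] (the exact value varies run to run
# because the network is randomly initialised).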
# multivariate data preparation | |
from numpy import array | |
from numpy import hstack | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
print(dataset) |
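# Expected output: one row per time step, columns [in_seq1, in_seq2, out_seq]:
#   [[ 10  15  25]
#    [ 20  25  45]
#    ...
#    [ 90  95 185]]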
# multivariate data preparation | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
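# Expected output: X.shape = (7, 3, 2), y.shape = (7,); the first sample is
#   [[10 15]
#    [20 25]
#    [30 35]] 65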
# multivariate mlp example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import Dense | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
# flatten input | |
n_input = X.shape[1] * X.shape[2] | |
X = X.reshape((X.shape[0], n_input)) | |
# define model | |
model = Sequential() | |
model.add(Dense(100, activation='relu', input_dim=n_input)) | |
model.add(Dense(1)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[80, 85], [90, 95], [100, 105]]) | |
x_input = x_input.reshape((1, n_input)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
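# For reference: the target is in_seq1 + in_seq2 at the last input time step,
# so the prediction for the input above should be close to [[205.]]
# (100 + 105), with small run-to-run variation.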
# multivariate mlp example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Model | |
from keras.layers import Input | |
from keras.layers import Dense | |
from keras.layers.merge import concatenate | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
# separate input data | |
X1 = X[:, :, 0] | |
X2 = X[:, :, 1] | |
# first input model | |
visible1 = Input(shape=(n_steps,)) | |
dense1 = Dense(100, activation='relu')(visible1) | |
# second input model | |
visible2 = Input(shape=(n_steps,)) | |
dense2 = Dense(100, activation='relu')(visible2) | |
# merge input models | |
merge = concatenate([dense1, dense2]) | |
output = Dense(1)(merge) | |
model = Model(inputs=[visible1, visible2], outputs=output) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit([X1, X2], y, epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[80, 85], [90, 95], [100, 105]]) | |
x1 = x_input[:, 0].reshape((1, n_steps)) | |
x2 = x_input[:, 1].reshape((1, n_steps)) | |
yhat = model.predict([x1, x2], verbose=0) | |
print(yhat) |
# multivariate output data prep | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
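# Expected output: X.shape = (6, 3, 3), y.shape = (6, 3); the first sample is
#   [[10 15 25]
#    [20 25 45]
#    [30 35 65]] [40 45 85]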
# multivariate output mlp example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import Dense | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
# flatten input | |
n_input = X.shape[1] * X.shape[2] | |
X = X.reshape((X.shape[0], n_input)) | |
n_output = y.shape[1] | |
# define model | |
model = Sequential() | |
model.add(Dense(100, activation='relu', input_dim=n_input)) | |
model.add(Dense(n_output)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[70,75,145], [80,85,165], [90,95,185]]) | |
x_input = x_input.reshape((1, n_input)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
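# For reference: the model predicts the next full row of all three series, so
# the output for the input above should be close to [[100., 105., 205.]].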
# multivariate output mlp example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Model | |
from keras.layers import Input | |
from keras.layers import Dense | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
# flatten input | |
n_input = X.shape[1] * X.shape[2] | |
X = X.reshape((X.shape[0], n_input)) | |
# separate output | |
y1 = y[:, 0].reshape((y.shape[0], 1)) | |
y2 = y[:, 1].reshape((y.shape[0], 1)) | |
y3 = y[:, 2].reshape((y.shape[0], 1)) | |
# define model | |
visible = Input(shape=(n_input,)) | |
dense = Dense(100, activation='relu')(visible) | |
# define output 1 | |
output1 = Dense(1)(dense) | |
# define output 2 | |
output2 = Dense(1)(dense) | |
# define output 3
output3 = Dense(1)(dense) | |
# tie together | |
model = Model(inputs=visible, outputs=[output1, output2, output3]) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, [y1,y2,y3], epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[70,75,145], [80,85,165], [90,95,185]]) | |
x_input = x_input.reshape((1, n_input)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
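# For reference: the three output heads predict the next value of each series
# separately, so yhat should be close to [array([[100.]]), array([[105.]]),
# array([[205.]])].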
# multi-step data preparation | |
from numpy import array | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the sequence | |
if out_end_ix > len(sequence): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps_in, n_steps_out) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
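# Expected output: five samples, e.g.
#   [10 20 30] [40 50]
#   [20 30 40] [50 60]
#   ...
#   [50 60 70] [80 90]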
# univariate multi-step vector-output mlp example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import Dense | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the sequence | |
if out_end_ix > len(sequence): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps_in, n_steps_out) | |
# define model | |
model = Sequential() | |
model.add(Dense(100, activation='relu', input_dim=n_steps_in)) | |
model.add(Dense(n_steps_out)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([70, 80, 90]) | |
x_input = x_input.reshape((1, n_steps_in)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
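# For reference: the model emits a two-step vector forecast, so the prediction
# for [70, 80, 90] should land near [[100., 110.]].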
# multivariate multi-step data preparation | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out-1 | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1:out_end_ix, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
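# Expected output: X.shape = (6, 3, 2), y.shape = (6, 2); the first sample is
#   [[10 15]
#    [20 25]
#    [30 35]] [65 85]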
# multivariate multi-step mlp example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import Dense | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out-1 | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1:out_end_ix, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
# flatten input | |
n_input = X.shape[1] * X.shape[2] | |
X = X.reshape((X.shape[0], n_input)) | |
# define model | |
model = Sequential() | |
model.add(Dense(100, activation='relu', input_dim=n_input)) | |
model.add(Dense(n_steps_out)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[70, 75], [80, 85], [90, 95]]) | |
x_input = x_input.reshape((1, n_input)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
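# For reference: y holds the output series at the last input step and the step
# after it, so the prediction here should be close to [[185., 205.]].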
# multivariate multi-step data preparation | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix:out_end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
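# Expected output: X.shape = (5, 3, 3), y.shape = (5, 2, 3); each sample's
# output is the next two full rows, e.g. for the first sample:
#   [[ 40  45  85]
#    [ 50  55 105]]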
# multivariate multi-step mlp example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import Dense | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix:out_end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
# flatten input | |
n_input = X.shape[1] * X.shape[2] | |
X = X.reshape((X.shape[0], n_input)) | |
# flatten output | |
n_output = y.shape[1] * y.shape[2] | |
y = y.reshape((y.shape[0], n_output)) | |
# define model | |
model = Sequential() | |
model.add(Dense(100, activation='relu', input_dim=n_input)) | |
model.add(Dense(n_output)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[60, 65, 125], [70, 75, 145], [80, 85, 165]]) | |
x_input = x_input.reshape((1, n_input)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
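# follow-on sketch (uses yhat and n_steps_out from above): the 6 flat values | 
# are n_steps_out rows of the 3 parallel series, so reshaping recovers the | 
# two predicted time steps for easier reading | 
print(yhat.reshape((n_steps_out, 3))) | 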
# univariate data preparation | |
from numpy import array | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps = 3 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# univariate cnn example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps = 3 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps) | |
# reshape from [samples, timesteps] into [samples, timesteps, features] | |
n_features = 1 | |
X = X.reshape((X.shape[0], X.shape[1], n_features)) | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=64, kernel_size=2, activation='relu', input_shape=(n_steps, n_features))) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(50, activation='relu')) | |
model.add(Dense(1)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=1000, verbose=0) | |
# demonstrate prediction | |
x_input = array([70, 80, 90]) | |
x_input = x_input.reshape((1, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
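# optional check (hedged): Conv1D with kernel_size=2 over 3 input steps | 
# yields 2 positions, and MaxPooling1D(pool_size=2) reduces them to 1, so | 
# Flatten passes 1*64 values to the Dense layers; summary() confirms this | 
model.summary() | 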
# multivariate data preparation | |
from numpy import array | |
from numpy import hstack | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
print(dataset) |
# multivariate data preparation | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
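		# the inputs are all columns but the last; the label is the last | 
		# column at end_ix-1, i.e. the same time step as the final input row | 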
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# multivariate cnn example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
# the dataset knows the number of features, e.g. 2 | |
n_features = X.shape[2] | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=64, kernel_size=2, activation='relu', input_shape=(n_steps, n_features))) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(50, activation='relu')) | |
model.add(Dense(1)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=1000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[80, 85], [90, 95], [100, 105]]) | |
x_input = x_input.reshape((1, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# multivariate multi-headed 1d cnn example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Model | |
from keras.layers import Input | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
from keras.layers.merge import concatenate | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
# one time series per head | |
n_features = 1 | |
# separate input data | |
X1 = X[:, :, 0].reshape(X.shape[0], X.shape[1], n_features) | |
X2 = X[:, :, 1].reshape(X.shape[0], X.shape[1], n_features) | |
# first input model | |
visible1 = Input(shape=(n_steps, n_features)) | |
cnn1 = Conv1D(filters=64, kernel_size=2, activation='relu')(visible1) | |
cnn1 = MaxPooling1D(pool_size=2)(cnn1) | |
cnn1 = Flatten()(cnn1) | |
# second input model | |
visible2 = Input(shape=(n_steps, n_features)) | |
cnn2 = Conv1D(filters=64, kernel_size=2, activation='relu')(visible2) | |
cnn2 = MaxPooling1D(pool_size=2)(cnn2) | |
cnn2 = Flatten()(cnn2) | |
# merge input models | |
merge = concatenate([cnn1, cnn2]) | |
dense = Dense(50, activation='relu')(merge) | |
output = Dense(1)(dense) | |
model = Model(inputs=[visible1, visible2], outputs=output) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit([X1, X2], y, epochs=1000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[80, 85], [90, 95], [100, 105]]) | |
x1 = x_input[:, 0].reshape((1, n_steps, n_features)) | |
x2 = x_input[:, 1].reshape((1, n_steps, n_features)) | |
yhat = model.predict([x1, x2], verbose=0) | |
print(yhat) |
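# design note (hedged): the two heads give each input series its own Conv1D | 
# filters before the flattened features are merged into one regression head; | 
# for series this similar the single multi-channel Conv1D of the previous | 
# cell is usually sufficient, so treat this form as added flexibility | 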
# multivariate output data prep | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# multivariate output 1d cnn example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
# the dataset knows the number of features, e.g. 3 | 
n_features = X.shape[2] | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=64, kernel_size=2, activation='relu', input_shape=(n_steps, n_features))) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(50, activation='relu')) | |
model.add(Dense(n_features)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=3000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[70,75,145], [80,85,165], [90,95,185]]) | |
x_input = x_input.reshape((1, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# multivariate output 1d cnn example (one output head per series) | 
from numpy import array | |
from numpy import hstack | |
from keras.models import Model | |
from keras.layers import Input | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
# the dataset knows the number of features, e.g. 3 | 
n_features = X.shape[2] | |
# separate output | |
y1 = y[:, 0].reshape((y.shape[0], 1)) | |
y2 = y[:, 1].reshape((y.shape[0], 1)) | |
y3 = y[:, 2].reshape((y.shape[0], 1)) | |
# define model | |
visible = Input(shape=(n_steps, n_features)) | |
cnn = Conv1D(filters=64, kernel_size=2, activation='relu')(visible) | |
cnn = MaxPooling1D(pool_size=2)(cnn) | |
cnn = Flatten()(cnn) | |
cnn = Dense(50, activation='relu')(cnn) | |
# define output 1 | |
output1 = Dense(1)(cnn) | |
# define output 2 | |
output2 = Dense(1)(cnn) | |
# define output 3 | |
output3 = Dense(1)(cnn) | |
# tie together | |
model = Model(inputs=visible, outputs=[output1, output2, output3]) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, [y1,y2,y3], epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[70,75,145], [80,85,165], [90,95,185]]) | |
x_input = x_input.reshape((1, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
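# note: with three output layers, predict() returns a list of three | 
# (1, 1) arrays, one per series, rather than a single (1, 3) array | 
print([a.shape for a in yhat]) | 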
# multi-step data preparation | |
from numpy import array | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the sequence | |
if out_end_ix > len(sequence): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps_in, n_steps_out) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# univariate multi-step vector-output 1d cnn example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the sequence | |
if out_end_ix > len(sequence): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps_in, n_steps_out) | |
# reshape from [samples, timesteps] into [samples, timesteps, features] | |
n_features = 1 | |
X = X.reshape((X.shape[0], X.shape[1], n_features)) | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=64, kernel_size=2, activation='relu', input_shape=(n_steps_in, n_features))) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(50, activation='relu')) | |
model.add(Dense(n_steps_out)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([70, 80, 90]) | |
x_input = x_input.reshape((1, n_steps_in, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# multivariate multi-step data preparation | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out-1 | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1:out_end_ix, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# multivariate multi-step 1d cnn example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out-1 | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1:out_end_ix, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
# the dataset knows the number of features, e.g. 2 | |
n_features = X.shape[2] | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=64, kernel_size=2, activation='relu', input_shape=(n_steps_in, n_features))) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(50, activation='relu')) | |
model.add(Dense(n_steps_out)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=2000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[70, 75], [80, 85], [90, 95]]) | |
x_input = x_input.reshape((1, n_steps_in, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# multivariate multi-step data preparation | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix:out_end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# multivariate output multi-step 1d cnn example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix:out_end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
# flatten output | |
n_output = y.shape[1] * y.shape[2] | |
y = y.reshape((y.shape[0], n_output)) | |
# the dataset knows the number of features, e.g. 3 | 
n_features = X.shape[2] | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=64, kernel_size=2, activation='relu', input_shape=(n_steps_in, n_features))) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(50, activation='relu')) | |
model.add(Dense(n_output)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=7000, verbose=0) | |
# demonstrate prediction | |
x_input = array([[60, 65, 125], [70, 75, 145], [80, 85, 165]]) | |
x_input = x_input.reshape((1, n_steps_in, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# univariate data preparation | |
from numpy import array | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps = 3 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# univariate lstm example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import LSTM | |
from keras.layers import Dense | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps = 3 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps) | |
# reshape from [samples, timesteps] into [samples, timesteps, features] | |
n_features = 1 | |
X = X.reshape((X.shape[0], X.shape[1], n_features)) | |
# define model | |
model = Sequential() | |
model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features))) | |
model.add(Dense(1)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=200, verbose=0) | |
# demonstrate prediction | |
x_input = array([70, 80, 90]) | |
x_input = x_input.reshape((1, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# univariate stacked lstm example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import LSTM | |
from keras.layers import Dense | |
# split a univariate sequence | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps = 3 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps) | |
# reshape from [samples, timesteps] into [samples, timesteps, features] | |
n_features = 1 | |
X = X.reshape((X.shape[0], X.shape[1], n_features)) | |
# define model | |
model = Sequential() | |
model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(n_steps, n_features))) | |
model.add(LSTM(50, activation='relu')) | |
model.add(Dense(1)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=200, verbose=0) | |
# demonstrate prediction | |
x_input = array([70, 80, 90]) | |
x_input = x_input.reshape((1, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# univariate bidirectional lstm example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import LSTM | |
from keras.layers import Dense | |
from keras.layers import Bidirectional | |
# split a univariate sequence | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps = 3 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps) | |
# reshape from [samples, timesteps] into [samples, timesteps, features] | |
n_features = 1 | |
X = X.reshape((X.shape[0], X.shape[1], n_features)) | |
# define model | |
model = Sequential() | |
model.add(Bidirectional(LSTM(50, activation='relu'), input_shape=(n_steps, n_features))) | |
model.add(Dense(1)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=200, verbose=0) | |
# demonstrate prediction | |
x_input = array([70, 80, 90]) | |
x_input = x_input.reshape((1, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# univariate cnn lstm example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import LSTM | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import TimeDistributed | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps = 4 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps) | |
# reshape from [samples, timesteps] into [samples, subsequences, timesteps, features] | |
n_features = 1 | |
n_seq = 2 | |
n_steps = 2 | |
X = X.reshape((X.shape[0], n_seq, n_steps, n_features)) | |
# define model | |
model = Sequential() | |
model.add(TimeDistributed(Conv1D(filters=64, kernel_size=1, activation='relu'), input_shape=(None, n_steps, n_features))) | |
model.add(TimeDistributed(MaxPooling1D(pool_size=2))) | |
model.add(TimeDistributed(Flatten())) | |
model.add(LSTM(50, activation='relu')) | |
model.add(Dense(1)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=500, verbose=0) | |
# demonstrate prediction | |
x_input = array([60, 70, 80, 90]) | |
x_input = x_input.reshape((1, n_seq, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
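# hedged sketch of the reshape above: each 4-step window is treated as | 
# n_seq=2 subsequences of n_steps=2 steps; TimeDistributed applies the same | 
# CNN to every subsequence and the LSTM reads the 2 resulting vectors | 
print(X.shape)  # (5, 2, 2, 1) | 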
# univariate convlstm example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import ConvLSTM2D | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the sequence | |
if end_ix > len(sequence)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps = 4 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps) | |
# reshape from [samples, timesteps] into [samples, timesteps, rows, columns, features] | |
n_features = 1 | |
n_seq = 2 | |
n_steps = 2 | |
X = X.reshape((X.shape[0], n_seq, 1, n_steps, n_features)) | |
# define model | |
model = Sequential() | |
model.add(ConvLSTM2D(filters=64, kernel_size=(1,2), activation='relu', input_shape=(n_seq, 1, n_steps, n_features))) | |
model.add(Flatten()) | |
model.add(Dense(1)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=500, verbose=0) | |
# demonstrate prediction | |
x_input = array([60, 70, 80, 90]) | |
x_input = x_input.reshape((1, n_seq, 1, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# multivariate data preparation | |
from numpy import array | |
from numpy import hstack | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
print(dataset) |
# multivariate data preparation | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# multivariate lstm example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import LSTM | |
from keras.layers import Dense | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
# the dataset knows the number of features, e.g. 2 | |
n_features = X.shape[2] | |
# define model | |
model = Sequential() | |
model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features))) | |
model.add(Dense(1)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=200, verbose=0) | |
# demonstrate prediction | |
x_input = array([[80, 85], [90, 95], [100, 105]]) | |
x_input = x_input.reshape((1, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# multivariate output data prep | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# multivariate output stacked lstm example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import LSTM | |
from keras.layers import Dense | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences)-1: | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps = 3 | |
# convert into input/output | |
X, y = split_sequences(dataset, n_steps) | |
# the dataset knows the number of features, e.g. 3 | 
n_features = X.shape[2] | |
# define model | |
model = Sequential() | |
model.add(LSTM(100, activation='relu', return_sequences=True, input_shape=(n_steps, n_features))) | |
model.add(LSTM(100, activation='relu')) | |
model.add(Dense(n_features)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=400, verbose=0) | |
# demonstrate prediction | |
x_input = array([[70,75,145], [80,85,165], [90,95,185]]) | |
x_input = x_input.reshape((1, n_steps, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# multi-step data preparation | |
from numpy import array | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the sequence | |
if out_end_ix > len(sequence): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps_in, n_steps_out) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# univariate multi-step vector-output stacked lstm example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import LSTM | |
from keras.layers import Dense | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the sequence | |
if out_end_ix > len(sequence): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps_in, n_steps_out) | |
# reshape from [samples, timesteps] into [samples, timesteps, features] | |
n_features = 1 | |
X = X.reshape((X.shape[0], X.shape[1], n_features)) | |
# define model | |
model = Sequential() | |
model.add(LSTM(100, activation='relu', return_sequences=True, input_shape=(n_steps_in, n_features))) | |
model.add(LSTM(100, activation='relu')) | |
model.add(Dense(n_steps_out)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=50, verbose=0) | |
# demonstrate prediction | |
x_input = array([70, 80, 90]) | |
x_input = x_input.reshape((1, n_steps_in, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# univariate multi-step encoder-decoder lstm example | |
from numpy import array | |
from keras.models import Sequential | |
from keras.layers import LSTM | |
from keras.layers import Dense | |
from keras.layers import RepeatVector | |
from keras.layers import TimeDistributed | |
# split a univariate sequence into samples | |
def split_sequence(sequence, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequence)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the sequence | |
if out_end_ix > len(sequence): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90] | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# split into samples | |
X, y = split_sequence(raw_seq, n_steps_in, n_steps_out) | |
# reshape from [samples, timesteps] into [samples, timesteps, features] | |
n_features = 1 | |
X = X.reshape((X.shape[0], X.shape[1], n_features)) | |
y = y.reshape((y.shape[0], y.shape[1], n_features)) | |
# define model | |
model = Sequential() | |
model.add(LSTM(100, activation='relu', input_shape=(n_steps_in, n_features))) | |
model.add(RepeatVector(n_steps_out)) | |
model.add(LSTM(100, activation='relu', return_sequences=True)) | |
model.add(TimeDistributed(Dense(1))) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=100, verbose=0) | |
# demonstrate prediction | |
x_input = array([70, 80, 90]) | |
x_input = x_input.reshape((1, n_steps_in, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
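# note: y was reshaped to [samples, n_steps_out, 1] above because the | 
# decoder emits one value per output step through TimeDistributed(Dense(1)), | 
# so the forecast has shape (1, 2, 1) | 
print(yhat.shape) | 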
# multivariate multi-step data preparation | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out-1 | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1:out_end_ix, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | 
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# multivariate multi-step stacked lstm example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import LSTM | |
from keras.layers import Dense | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out-1 | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1:out_end_ix, -1] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | 
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
# the dataset knows the number of features, e.g. 2 | |
n_features = X.shape[2] | |
# define model | |
model = Sequential() | |
model.add(LSTM(100, activation='relu', return_sequences=True, input_shape=(n_steps_in, n_features))) | |
model.add(LSTM(100, activation='relu')) | |
model.add(Dense(n_steps_out)) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=200, verbose=0) | |
# demonstrate prediction | |
x_input = array([[70, 75], [80, 85], [90, 95]]) | |
x_input = x_input.reshape((1, n_steps_in, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
# multivariate multi-step data preparation | |
from numpy import array | |
from numpy import hstack | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix:out_end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | 
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
print(X.shape, y.shape) | |
# summarize the data | |
for i in range(len(X)): | |
print(X[i], y[i]) |
# multivariate multi-step encoder-decoder lstm example | |
from numpy import array | |
from numpy import hstack | |
from keras.models import Sequential | |
from keras.layers import LSTM | |
from keras.layers import Dense | |
from keras.layers import RepeatVector | |
from keras.layers import TimeDistributed | |
# split a multivariate sequence into samples | |
def split_sequences(sequences, n_steps_in, n_steps_out): | |
X, y = list(), list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps_in | |
out_end_ix = end_ix + n_steps_out | |
# check if we are beyond the dataset | |
if out_end_ix > len(sequences): | |
break | |
# gather input and output parts of the pattern | |
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix:out_end_ix, :] | |
X.append(seq_x) | |
y.append(seq_y) | |
return array(X), array(y) | |
# define input sequence | |
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90]) | |
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95]) | |
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))]) | |
# convert to [rows, columns] structure | |
in_seq1 = in_seq1.reshape((len(in_seq1), 1)) | |
in_seq2 = in_seq2.reshape((len(in_seq2), 1)) | |
out_seq = out_seq.reshape((len(out_seq), 1)) | |
# horizontally stack columns | |
dataset = hstack((in_seq1, in_seq2, out_seq)) | |
# choose a number of time steps | |
n_steps_in, n_steps_out = 3, 2 | |
# convert into input/output | 
X, y = split_sequences(dataset, n_steps_in, n_steps_out) | |
# the dataset knows the number of features, e.g. 3 | 
n_features = X.shape[2] | |
# define model | |
model = Sequential() | |
model.add(LSTM(200, activation='relu', input_shape=(n_steps_in, n_features))) | |
model.add(RepeatVector(n_steps_out)) | |
model.add(LSTM(200, activation='relu', return_sequences=True)) | |
model.add(TimeDistributed(Dense(n_features))) | |
model.compile(optimizer='adam', loss='mse') | |
# fit model | |
model.fit(X, y, epochs=300, verbose=0) | |
# demonstrate prediction | |
x_input = array([[60, 65, 125], [70, 75, 145], [80, 85, 165]]) | |
x_input = x_input.reshape((1, n_steps_in, n_features)) | |
yhat = model.predict(x_input, verbose=0) | |
print(yhat) |
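# note: the decoder predicts n_features values per output step, so yhat has | 
# shape (1, 2, 3): two forecast rows for the three parallel series | 
print(yhat.shape) | 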
# example of a one-step naive forecast | |
def naive_forecast(history, n): | |
return history[-n] | |
# define dataset | |
data = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] | |
print(data) | |
# test naive forecast | |
for i in range(1, len(data)+1): | |
print(naive_forecast(data, i)) |
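# note: naive_forecast persists the value n steps back, so i=1 returns the | 
# most recent observation (100.0) and i=len(data) the oldest (10.0) | 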
# example of an average forecast | |
from numpy import mean | |
from numpy import median | |
# one-step average forecast | |
def average_forecast(history, config): | |
n, avg_type = config | |
# mean of last n values | |
	if avg_type == 'mean': | 
return mean(history[-n:]) | |
# median of last n values | |
return median(history[-n:]) | |
# define dataset | |
data = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] | |
print(data) | |
# test average forecast | 
for i in range(1, len(data)+1): | |
print(average_forecast(data, (i, 'mean'))) |
# example of an average forecast for seasonal data | |
from numpy import mean | |
from numpy import median | |
# one-step average forecast | |
def average_forecast(history, config): | |
n, offset, avg_type = config | |
values = list() | |
if offset == 1: | |
values = history[-n:] | |
else: | |
# skip bad configs | |
if n*offset > len(history): | |
raise Exception('Config beyond end of data: %d %d' % (n,offset)) | |
# try and collect n values using offset | |
for i in range(1, n+1): | |
ix = i * offset | |
values.append(history[-ix]) | |
# mean of last n values | |
	if avg_type == 'mean': | 
return mean(values) | |
# median of last n values | |
return median(values) | |
# define dataset | |
data = [10.0, 20.0, 30.0, 10.0, 20.0, 30.0, 10.0, 20.0, 30.0] | |
print(data) | |
# test seasonal average forecast | 
for i in [1, 2, 3]: | |
print(average_forecast(data, (i, 3, 'mean'))) |
# grid search simple forecasts | |
from math import sqrt | |
from numpy import mean | |
from numpy import median | |
from multiprocessing import cpu_count | |
from joblib import Parallel | |
from joblib import delayed | |
from warnings import catch_warnings | |
from warnings import filterwarnings | |
from sklearn.metrics import mean_squared_error | |
# one-step simple forecast | |
def simple_forecast(history, config): | |
n, offset, avg_type = config | |
# persist value, ignore other config | |
if avg_type == 'persist': | |
return history[-n] | |
# collect values to average | |
values = list() | |
if offset == 1: | |
values = history[-n:] | |
else: | |
# skip bad configs | |
if n*offset > len(history): | |
raise Exception('Config beyond end of data: %d %d' % (n,offset)) | |
# try and collect n values using offset | |
for i in range(1, n+1): | |
ix = i * offset | |
values.append(history[-ix]) | |
# check if we can average | |
if len(values) < 2: | |
raise Exception('Cannot calculate average') | |
# mean of last n values | |
if avg_type == 'mean': | |
return mean(values) | |
# median of last n values | |
return median(values) | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = simple_forecast(history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
return error | |
# score a model, return None on failure | |
def score_model(data, n_test, cfg, debug=False): | |
result = None | |
# convert config to a key | |
key = str(cfg) | |
# show all warnings and fail on exception if debugging | |
if debug: | |
result = walk_forward_validation(data, n_test, cfg) | |
else: | |
# one failure during model validation suggests an unstable config | |
try: | |
# never show warnings when grid searching, too noisy | |
with catch_warnings(): | |
filterwarnings("ignore") | |
result = walk_forward_validation(data, n_test, cfg) | |
except: | |
error = None | |
# check for an interesting result | |
if result is not None: | |
print(' > Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test, parallel=True): | |
scores = None | |
if parallel: | |
# execute configs in parallel | |
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing') | |
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list) | |
scores = executor(tasks) | |
else: | |
scores = [score_model(data, n_test, cfg) for cfg in cfg_list] | |
# remove empty results | |
scores = [r for r in scores if r[1] is not None] | 
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a set of simple configs to try | |
def simple_configs(max_length, offsets=[1]): | |
configs = list() | |
for i in range(1, max_length+1): | |
for o in offsets: | |
for t in ['persist', 'mean', 'median']: | |
cfg = [i, o, t] | |
configs.append(cfg) | |
return configs | |
if __name__ == '__main__': | |
# define dataset | |
data = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] | |
# data split | |
n_test = 4 | |
# model configs | |
max_length = len(data) - n_test | |
cfg_list = simple_configs(max_length) | |
# grid search | |
scores = grid_search(data, cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
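One practical note: inside the parallel run a failing config is silently dropped, so when a result looks wrong it helps to re-score that single config with debug=True (exceptions propagate, warnings show) and with multiprocessing disabled. A minimal sketch, with illustrative config values:

# re-run one config so any exception propagates
data = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
print(score_model(data, 4, (1, 1, 'persist'), debug=True))
# and grid search serially while investigating
scores = grid_search(data, [(1, 1, 'persist'), (2, 1, 'mean')], 4, parallel=False)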
# load and plot daily births dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load | |
series = read_csv('daily-total-female-births.csv', header=0, index_col=0) | |
# summarize shape | |
print(series.shape) | |
# plot | |
pyplot.plot(series) | |
pyplot.xticks([]) | |
pyplot.show() |
# grid search simple forecast for daily female births | |
from math import sqrt | |
from numpy import mean | |
from numpy import median | |
from multiprocessing import cpu_count | |
from joblib import Parallel | |
from joblib import delayed | |
from warnings import catch_warnings | |
from warnings import filterwarnings | |
from sklearn.metrics import mean_squared_error | |
from pandas import read_csv | |
# one-step simple forecast | |
def simple_forecast(history, config): | |
n, offset, avg_type = config | |
# persist value, ignore other config | |
if avg_type == 'persist': | |
return history[-n] | |
# collect values to average | |
values = list() | |
if offset == 1: | |
values = history[-n:] | |
else: | |
# skip bad configs | |
if n*offset > len(history): | |
raise Exception('Config beyond end of data: %d %d' % (n,offset)) | |
# try and collect n values using offset | |
for i in range(1, n+1): | |
ix = i * offset | |
values.append(history[-ix]) | |
# check if we can average | |
if len(values) < 2: | |
raise Exception('Cannot calculate average') | |
# mean of last n values | |
if avg_type == 'mean': | |
return mean(values) | |
# median of last n values | |
return median(values) | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = simple_forecast(history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
return error | |
# score a model, return None on failure | |
def score_model(data, n_test, cfg, debug=False): | |
result = None | |
# convert config to a key | |
key = str(cfg) | |
# show all warnings and fail on exception if debugging | |
if debug: | |
result = walk_forward_validation(data, n_test, cfg) | |
else: | |
# one failure during model validation suggests an unstable config | |
try: | |
# never show warnings when grid searching, too noisy | |
with catch_warnings(): | |
filterwarnings("ignore") | |
result = walk_forward_validation(data, n_test, cfg) | |
except: | |
error = None | |
# check for an interesting result | |
if result is not None: | |
print(' > Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test, parallel=True): | |
scores = None | |
if parallel: | |
# execute configs in parallel | |
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing') | |
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list) | |
scores = executor(tasks) | |
else: | |
scores = [score_model(data, n_test, cfg) for cfg in cfg_list] | |
# remove empty results | |
scores = [r for r in scores if r[1] is not None] | 
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a set of simple configs to try | |
def simple_configs(max_length, offsets=[1]): | |
configs = list() | |
for i in range(1, max_length+1): | |
for o in offsets: | |
for t in ['persist', 'mean', 'median']: | |
cfg = [i, o, t] | |
configs.append(cfg) | |
return configs | |
if __name__ == '__main__': | |
# define dataset | |
series = read_csv('daily-total-female-births.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 165 | |
# model configs | |
max_length = len(data) - n_test | |
cfg_list = simple_configs(max_length) | |
# grid search | |
scores = grid_search(data, cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
# load and plot monthly shampoo sales dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load | |
series = read_csv('monthly-shampoo-sales.csv', header=0, index_col=0) | |
# summarize shape | |
print(series.shape) | |
# plot | |
pyplot.plot(series) | |
pyplot.xticks([]) | |
pyplot.show() |
# grid search simple forecast for monthly shampoo sales | |
from math import sqrt | |
from numpy import mean | |
from numpy import median | |
from multiprocessing import cpu_count | |
from joblib import Parallel | |
from joblib import delayed | |
from warnings import catch_warnings | |
from warnings import filterwarnings | |
from sklearn.metrics import mean_squared_error | |
from pandas import read_csv | |
# one-step simple forecast | |
def simple_forecast(history, config): | |
n, offset, avg_type = config | |
# persist value, ignore other config | |
if avg_type == 'persist': | |
return history[-n] | |
# collect values to average | |
values = list() | |
if offset == 1: | |
values = history[-n:] | |
else: | |
# skip bad configs | |
if n*offset > len(history): | |
raise Exception('Config beyond end of data: %d %d' % (n,offset)) | |
# try and collect n values using offset | |
for i in range(1, n+1): | |
ix = i * offset | |
values.append(history[-ix]) | |
# check if we can average | |
if len(values) < 2: | |
raise Exception('Cannot calculate average') | |
# mean of last n values | |
if avg_type == 'mean': | |
return mean(values) | |
# median of last n values | |
return median(values) | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = simple_forecast(history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
return error | |
# score a model, return None on failure | |
def score_model(data, n_test, cfg, debug=False): | |
result = None | |
# convert config to a key | |
key = str(cfg) | |
# show all warnings and fail on exception if debugging | |
if debug: | |
result = walk_forward_validation(data, n_test, cfg) | |
else: | |
# one failure during model validation suggests an unstable config | |
try: | |
# never show warnings when grid searching, too noisy | |
with catch_warnings(): | |
filterwarnings("ignore") | |
result = walk_forward_validation(data, n_test, cfg) | |
except: | |
error = None | |
# check for an interesting result | |
if result is not None: | |
print(' > Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test, parallel=True): | |
scores = None | |
if parallel: | |
# execute configs in parallel | |
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing') | |
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list) | |
scores = executor(tasks) | |
else: | |
scores = [score_model(data, n_test, cfg) for cfg in cfg_list] | |
# remove empty results | |
scores = [r for r in scores if r[1] is not None] | 
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a set of simple configs to try | |
def simple_configs(max_length, offsets=[1]): | |
configs = list() | |
for i in range(1, max_length+1): | |
for o in offsets: | |
for t in ['persist', 'mean', 'median']: | |
cfg = [i, o, t] | |
configs.append(cfg) | |
return configs | |
if __name__ == '__main__': | |
# load dataset | |
series = read_csv('monthly-shampoo-sales.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# model configs | |
max_length = len(data) - n_test | |
cfg_list = simple_configs(max_length) | |
# grid search | |
scores = grid_search(data, cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
# load and plot monthly mean temp dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load | |
series = read_csv('monthly-mean-temp.csv', header=0, index_col=0) | |
# summarize shape | |
print(series.shape) | |
# plot | |
pyplot.plot(series) | |
pyplot.xticks([]) | |
pyplot.show() |
# grid search simple forecast for monthly mean temperature | |
from math import sqrt | |
from numpy import mean | |
from numpy import median | |
from multiprocessing import cpu_count | |
from joblib import Parallel | |
from joblib import delayed | |
from warnings import catch_warnings | |
from warnings import filterwarnings | |
from sklearn.metrics import mean_squared_error | |
from pandas import read_csv | |
# one-step simple forecast | |
def simple_forecast(history, config): | |
n, offset, avg_type = config | |
# persist value, ignore other config | |
if avg_type == 'persist': | |
return history[-n] | |
# collect values to average | |
values = list() | |
if offset == 1: | |
values = history[-n:] | |
else: | |
# skip bad configs | |
if n*offset > len(history): | |
raise Exception('Config beyond end of data: %d %d' % (n,offset)) | |
# try and collect n values using offset | |
for i in range(1, n+1): | |
ix = i * offset | |
values.append(history[-ix]) | |
# check if we can average | |
if len(values) < 2: | |
raise Exception('Cannot calculate average') | |
# mean of last n values | |
if avg_type == 'mean': | |
return mean(values) | |
# median of last n values | |
return median(values) | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = simple_forecast(history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
return error | |
# score a model, return None on failure | |
def score_model(data, n_test, cfg, debug=False): | |
result = None | |
# convert config to a key | |
key = str(cfg) | |
# show all warnings and fail on exception if debugging | |
if debug: | |
result = walk_forward_validation(data, n_test, cfg) | |
else: | |
# one failure during model validation suggests an unstable config | |
try: | |
# never show warnings when grid searching, too noisy | |
with catch_warnings(): | |
filterwarnings("ignore") | |
result = walk_forward_validation(data, n_test, cfg) | |
except: | |
error = None | |
# check for an interesting result | |
if result is not None: | |
print(' > Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test, parallel=True): | |
scores = None | |
if parallel: | |
# execute configs in parallel | |
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing') | |
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list) | |
scores = executor(tasks) | |
else: | |
scores = [score_model(data, n_test, cfg) for cfg in cfg_list] | |
# remove empty results | |
scores = [r for r in scores if r[1] is not None] | 
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a set of simple configs to try | |
def simple_configs(max_length, offsets=[1]): | |
configs = list() | |
for i in range(1, max_length+1): | |
for o in offsets: | |
for t in ['persist', 'mean', 'median']: | |
cfg = [i, o, t] | |
configs.append(cfg) | |
return configs | |
if __name__ == '__main__': | |
# define dataset | |
series = read_csv('monthly-mean-temp.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# model configs | |
max_length = len(data) - n_test | |
cfg_list = simple_configs(max_length, offsets=[1,12]) | |
# grid search | |
scores = grid_search(data, cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
# load and plot monthly car sales dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load | |
series = read_csv('monthly-car-sales.csv', header=0, index_col=0) | |
# summarize shape | |
print(series.shape) | |
# plot | |
pyplot.plot(series) | |
pyplot.xticks([]) | |
pyplot.show() |
# grid search simple forecast for monthly car sales | |
from math import sqrt | |
from numpy import mean | |
from numpy import median | |
from multiprocessing import cpu_count | |
from joblib import Parallel | |
from joblib import delayed | |
from warnings import catch_warnings | |
from warnings import filterwarnings | |
from sklearn.metrics import mean_squared_error | |
from pandas import read_csv | |
# one-step simple forecast | |
def simple_forecast(history, config): | |
n, offset, avg_type = config | |
# persist value, ignore other config | |
if avg_type == 'persist': | |
return history[-n] | |
# collect values to average | |
values = list() | |
if offset == 1: | |
values = history[-n:] | |
else: | |
# skip bad configs | |
if n*offset > len(history): | |
raise Exception('Config beyond end of data: %d %d' % (n,offset)) | |
# try and collect n values using offset | |
for i in range(1, n+1): | |
ix = i * offset | |
values.append(history[-ix]) | |
# check if we can average | |
if len(values) < 2: | |
raise Exception('Cannot calculate average') | |
# mean of last n values | |
if avg_type == 'mean': | |
return mean(values) | |
# median of last n values | |
return median(values) | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = simple_forecast(history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
return error | |
# score a model, return None on failure | |
def score_model(data, n_test, cfg, debug=False): | |
result = None | |
# convert config to a key | |
key = str(cfg) | |
# show all warnings and fail on exception if debugging | |
if debug: | |
result = walk_forward_validation(data, n_test, cfg) | |
else: | |
# one failure during model validation suggests an unstable config | |
try: | |
# never show warnings when grid searching, too noisy | |
with catch_warnings(): | |
filterwarnings("ignore") | |
result = walk_forward_validation(data, n_test, cfg) | |
except: | |
error = None | |
# check for an interesting result | |
if result is not None: | |
print(' > Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test, parallel=True): | |
scores = None | |
if parallel: | |
# execute configs in parallel | |
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing') | |
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list) | |
scores = executor(tasks) | |
else: | |
scores = [score_model(data, n_test, cfg) for cfg in cfg_list] | |
# remove empty results | |
scores = [r for r in scores if r[1] is not None] | 
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a set of simple configs to try | |
def simple_configs(max_length, offsets=[1]): | |
configs = list() | |
for i in range(1, max_length+1): | |
for o in offsets: | |
for t in ['persist', 'mean', 'median']: | |
cfg = [i, o, t] | |
configs.append(cfg) | |
return configs | |
if __name__ == '__main__': | |
# define dataset | |
series = read_csv('monthly-car-sales.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# model configs | |
max_length = len(data) - n_test | |
cfg_list = simple_configs(max_length, offsets=[1,12]) | |
# grid search | |
scores = grid_search(data, cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
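The four dataset scripts above repeat the harness functions verbatim and differ only in their __main__ blocks. A sketch of factoring the shared code into a module, so each script reduces to loading data and choosing configs; the module name simple_grid.py is hypothetical:

# assumes simple_forecast ... simple_configs have been moved into simple_grid.py
from simple_grid import grid_search, simple_configs
from pandas import read_csv

series = read_csv('monthly-car-sales.csv', header=0, index_col=0)
data = series.values[:, 0]  # flatten to a 1-d series
n_test = 12
cfg_list = simple_configs(len(data) - n_test, offsets=[1, 12])
scores = grid_search(data, cfg_list, n_test)
for cfg, error in scores[:3]:
    print(cfg, error)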
Date | Births | |
---|---|---|
1959-01-01 | 35 | |
1959-01-02 | 32 | |
1959-01-03 | 30 | |
1959-01-04 | 31 | |
1959-01-05 | 44 | |
1959-01-06 | 29 | |
1959-01-07 | 45 | |
1959-01-08 | 43 | |
1959-01-09 | 38 | |
1959-01-10 | 27 | |
1959-01-11 | 38 | |
1959-01-12 | 33 | |
1959-01-13 | 55 | |
1959-01-14 | 47 | |
1959-01-15 | 45 | |
1959-01-16 | 37 | |
1959-01-17 | 50 | |
1959-01-18 | 43 | |
1959-01-19 | 41 | |
1959-01-20 | 52 | |
1959-01-21 | 34 | |
1959-01-22 | 53 | |
1959-01-23 | 39 | |
1959-01-24 | 32 | |
1959-01-25 | 37 | |
1959-01-26 | 43 | |
1959-01-27 | 39 | |
1959-01-28 | 35 | |
1959-01-29 | 44 | |
1959-01-30 | 38 | |
1959-01-31 | 24 | |
1959-02-01 | 23 | |
1959-02-02 | 31 | |
1959-02-03 | 44 | |
1959-02-04 | 38 | |
1959-02-05 | 50 | |
1959-02-06 | 38 | |
1959-02-07 | 51 | |
1959-02-08 | 31 | |
1959-02-09 | 31 | |
1959-02-10 | 51 | |
1959-02-11 | 36 | |
1959-02-12 | 45 | |
1959-02-13 | 51 | |
1959-02-14 | 34 | |
1959-02-15 | 52 | |
1959-02-16 | 47 | |
1959-02-17 | 45 | |
1959-02-18 | 46 | |
1959-02-19 | 39 | |
1959-02-20 | 48 | |
1959-02-21 | 37 | |
1959-02-22 | 35 | |
1959-02-23 | 52 | |
1959-02-24 | 42 | |
1959-02-25 | 45 | |
1959-02-26 | 39 | |
1959-02-27 | 37 | |
1959-02-28 | 30 | |
1959-03-01 | 35 | |
1959-03-02 | 28 | |
1959-03-03 | 45 | |
1959-03-04 | 34 | |
1959-03-05 | 36 | |
1959-03-06 | 50 | |
1959-03-07 | 44 | |
1959-03-08 | 39 | |
1959-03-09 | 32 | |
1959-03-10 | 39 | |
1959-03-11 | 45 | |
1959-03-12 | 43 | |
1959-03-13 | 39 | |
1959-03-14 | 31 | |
1959-03-15 | 27 | |
1959-03-16 | 30 | |
1959-03-17 | 42 | |
1959-03-18 | 46 | |
1959-03-19 | 41 | |
1959-03-20 | 36 | |
1959-03-21 | 45 | |
1959-03-22 | 46 | |
1959-03-23 | 43 | |
1959-03-24 | 38 | |
1959-03-25 | 34 | |
1959-03-26 | 35 | |
1959-03-27 | 56 | |
1959-03-28 | 36 | |
1959-03-29 | 32 | |
1959-03-30 | 50 | |
1959-03-31 | 41 | |
1959-04-01 | 39 | |
1959-04-02 | 41 | |
1959-04-03 | 47 | |
1959-04-04 | 34 | |
1959-04-05 | 36 | |
1959-04-06 | 33 | |
1959-04-07 | 35 | |
1959-04-08 | 38 | |
1959-04-09 | 38 | |
1959-04-10 | 34 | |
1959-04-11 | 53 | |
1959-04-12 | 34 | |
1959-04-13 | 34 | |
1959-04-14 | 38 | |
1959-04-15 | 35 | |
1959-04-16 | 32 | |
1959-04-17 | 42 | |
1959-04-18 | 34 | |
1959-04-19 | 46 | |
1959-04-20 | 30 | |
1959-04-21 | 46 | |
1959-04-22 | 45 | |
1959-04-23 | 54 | |
1959-04-24 | 34 | |
1959-04-25 | 37 | |
1959-04-26 | 35 | |
1959-04-27 | 40 | |
1959-04-28 | 42 | |
1959-04-29 | 58 | |
1959-04-30 | 51 | |
1959-05-01 | 32 | |
1959-05-02 | 35 | |
1959-05-03 | 38 | |
1959-05-04 | 33 | |
1959-05-05 | 39 | |
1959-05-06 | 47 | |
1959-05-07 | 38 | |
1959-05-08 | 52 | |
1959-05-09 | 30 | |
1959-05-10 | 34 | |
1959-05-11 | 40 | |
1959-05-12 | 35 | |
1959-05-13 | 42 | |
1959-05-14 | 41 | |
1959-05-15 | 42 | |
1959-05-16 | 38 | |
1959-05-17 | 24 | |
1959-05-18 | 34 | |
1959-05-19 | 43 | |
1959-05-20 | 36 | |
1959-05-21 | 55 | |
1959-05-22 | 41 | |
1959-05-23 | 45 | |
1959-05-24 | 41 | |
1959-05-25 | 37 | |
1959-05-26 | 43 | |
1959-05-27 | 39 | |
1959-05-28 | 33 | |
1959-05-29 | 43 | |
1959-05-30 | 40 | |
1959-05-31 | 38 | |
1959-06-01 | 45 | |
1959-06-02 | 46 | |
1959-06-03 | 34 | |
1959-06-04 | 35 | |
1959-06-05 | 48 | |
1959-06-06 | 51 | |
1959-06-07 | 36 | |
1959-06-08 | 33 | |
1959-06-09 | 46 | |
1959-06-10 | 42 | |
1959-06-11 | 48 | |
1959-06-12 | 34 | |
1959-06-13 | 41 | |
1959-06-14 | 35 | |
1959-06-15 | 40 | |
1959-06-16 | 34 | |
1959-06-17 | 30 | |
1959-06-18 | 36 | |
1959-06-19 | 40 | |
1959-06-20 | 39 | |
1959-06-21 | 45 | |
1959-06-22 | 38 | |
1959-06-23 | 47 | |
1959-06-24 | 33 | |
1959-06-25 | 30 | |
1959-06-26 | 42 | |
1959-06-27 | 43 | |
1959-06-28 | 41 | |
1959-06-29 | 41 | |
1959-06-30 | 59 | |
1959-07-01 | 43 | |
1959-07-02 | 45 | |
1959-07-03 | 38 | |
1959-07-04 | 37 | |
1959-07-05 | 45 | |
1959-07-06 | 42 | |
1959-07-07 | 57 | |
1959-07-08 | 46 | |
1959-07-09 | 51 | |
1959-07-10 | 41 | |
1959-07-11 | 47 | |
1959-07-12 | 26 | |
1959-07-13 | 35 | |
1959-07-14 | 44 | |
1959-07-15 | 41 | |
1959-07-16 | 42 | |
1959-07-17 | 36 | |
1959-07-18 | 45 | |
1959-07-19 | 45 | |
1959-07-20 | 45 | |
1959-07-21 | 47 | |
1959-07-22 | 38 | |
1959-07-23 | 42 | |
1959-07-24 | 35 | |
1959-07-25 | 36 | |
1959-07-26 | 39 | |
1959-07-27 | 45 | |
1959-07-28 | 43 | |
1959-07-29 | 47 | |
1959-07-30 | 36 | |
1959-07-31 | 41 | |
1959-08-01 | 50 | |
1959-08-02 | 39 | |
1959-08-03 | 41 | |
1959-08-04 | 46 | |
1959-08-05 | 64 | |
1959-08-06 | 45 | |
1959-08-07 | 34 | |
1959-08-08 | 38 | |
1959-08-09 | 44 | |
1959-08-10 | 48 | |
1959-08-11 | 46 | |
1959-08-12 | 44 | |
1959-08-13 | 37 | |
1959-08-14 | 39 | |
1959-08-15 | 44 | |
1959-08-16 | 45 | |
1959-08-17 | 33 | |
1959-08-18 | 44 | |
1959-08-19 | 38 | |
1959-08-20 | 46 | |
1959-08-21 | 46 | |
1959-08-22 | 40 | |
1959-08-23 | 39 | |
1959-08-24 | 44 | |
1959-08-25 | 48 | |
1959-08-26 | 50 | |
1959-08-27 | 41 | |
1959-08-28 | 42 | |
1959-08-29 | 51 | |
1959-08-30 | 41 | |
1959-08-31 | 44 | |
1959-09-01 | 38 | |
1959-09-02 | 68 | |
1959-09-03 | 40 | |
1959-09-04 | 42 | |
1959-09-05 | 51 | |
1959-09-06 | 44 | |
1959-09-07 | 45 | |
1959-09-08 | 36 | |
1959-09-09 | 57 | |
1959-09-10 | 44 | |
1959-09-11 | 42 | |
1959-09-12 | 53 | |
1959-09-13 | 42 | |
1959-09-14 | 34 | |
1959-09-15 | 40 | |
1959-09-16 | 56 | |
1959-09-17 | 44 | |
1959-09-18 | 53 | |
1959-09-19 | 55 | |
1959-09-20 | 39 | |
1959-09-21 | 59 | |
1959-09-22 | 55 | |
1959-09-23 | 73 | |
1959-09-24 | 55 | |
1959-09-25 | 44 | |
1959-09-26 | 43 | |
1959-09-27 | 40 | |
1959-09-28 | 47 | |
1959-09-29 | 51 | |
1959-09-30 | 56 | |
1959-10-01 | 49 | |
1959-10-02 | 54 | |
1959-10-03 | 56 | |
1959-10-04 | 47 | |
1959-10-05 | 44 | |
1959-10-06 | 43 | |
1959-10-07 | 42 | |
1959-10-08 | 45 | |
1959-10-09 | 50 | |
1959-10-10 | 48 | |
1959-10-11 | 43 | |
1959-10-12 | 40 | |
1959-10-13 | 59 | |
1959-10-14 | 41 | |
1959-10-15 | 42 | |
1959-10-16 | 51 | |
1959-10-17 | 49 | |
1959-10-18 | 45 | |
1959-10-19 | 43 | |
1959-10-20 | 42 | |
1959-10-21 | 38 | |
1959-10-22 | 47 | |
1959-10-23 | 38 | |
1959-10-24 | 36 | |
1959-10-25 | 42 | |
1959-10-26 | 35 | |
1959-10-27 | 28 | |
1959-10-28 | 44 | |
1959-10-29 | 36 | |
1959-10-30 | 45 | |
1959-10-31 | 46 | |
1959-11-01 | 48 | |
1959-11-02 | 49 | |
1959-11-03 | 43 | |
1959-11-04 | 42 | |
1959-11-05 | 59 | |
1959-11-06 | 45 | |
1959-11-07 | 52 | |
1959-11-08 | 46 | |
1959-11-09 | 42 | |
1959-11-10 | 40 | |
1959-11-11 | 40 | |
1959-11-12 | 45 | |
1959-11-13 | 35 | |
1959-11-14 | 35 | |
1959-11-15 | 40 | |
1959-11-16 | 39 | |
1959-11-17 | 33 | |
1959-11-18 | 42 | |
1959-11-19 | 47 | |
1959-11-20 | 51 | |
1959-11-21 | 44 | |
1959-11-22 | 40 | |
1959-11-23 | 57 | |
1959-11-24 | 49 | |
1959-11-25 | 45 | |
1959-11-26 | 49 | |
1959-11-27 | 51 | |
1959-11-28 | 46 | |
1959-11-29 | 44 | |
1959-11-30 | 52 | |
1959-12-01 | 45 | |
1959-12-02 | 32 | |
1959-12-03 | 46 | |
1959-12-04 | 41 | |
1959-12-05 | 34 | |
1959-12-06 | 33 | |
1959-12-07 | 36 | |
1959-12-08 | 49 | |
1959-12-09 | 43 | |
1959-12-10 | 43 | |
1959-12-11 | 34 | |
1959-12-12 | 39 | |
1959-12-13 | 35 | |
1959-12-14 | 52 | |
1959-12-15 | 47 | |
1959-12-16 | 52 | |
1959-12-17 | 39 | |
1959-12-18 | 40 | |
1959-12-19 | 42 | |
1959-12-20 | 42 | |
1959-12-21 | 53 | |
1959-12-22 | 39 | |
1959-12-23 | 40 | |
1959-12-24 | 38 | |
1959-12-25 | 44 | |
1959-12-26 | 34 | |
1959-12-27 | 37 | |
1959-12-28 | 52 | |
1959-12-29 | 48 | |
1959-12-30 | 55 | |
1959-12-31 | 50 |
Month | Car Sales | |
---|---|---|
1960-01 | 6550 | |
1960-02 | 8728 | |
1960-03 | 12026 | |
1960-04 | 14395 | |
1960-05 | 14587 | |
1960-06 | 13791 | |
1960-07 | 9498 | |
1960-08 | 8251 | |
1960-09 | 7049 | |
1960-10 | 9545 | |
1960-11 | 9364 | |
1960-12 | 8456 | |
1961-01 | 7237 | |
1961-02 | 9374 | |
1961-03 | 11837 | |
1961-04 | 13784 | |
1961-05 | 15926 | |
1961-06 | 13821 | |
1961-07 | 11143 | |
1961-08 | 7975 | |
1961-09 | 7610 | |
1961-10 | 10015 | |
1961-11 | 12759 | |
1961-12 | 8816 | |
1962-01 | 10677 | |
1962-02 | 10947 | |
1962-03 | 15200 | |
1962-04 | 17010 | |
1962-05 | 20900 | |
1962-06 | 16205 | |
1962-07 | 12143 | |
1962-08 | 8997 | |
1962-09 | 5568 | |
1962-10 | 11474 | |
1962-11 | 12256 | |
1962-12 | 10583 | |
1963-01 | 10862 | |
1963-02 | 10965 | |
1963-03 | 14405 | |
1963-04 | 20379 | |
1963-05 | 20128 | |
1963-06 | 17816 | |
1963-07 | 12268 | |
1963-08 | 8642 | |
1963-09 | 7962 | |
1963-10 | 13932 | |
1963-11 | 15936 | |
1963-12 | 12628 | |
1964-01 | 12267 | |
1964-02 | 12470 | |
1964-03 | 18944 | |
1964-04 | 21259 | |
1964-05 | 22015 | |
1964-06 | 18581 | |
1964-07 | 15175 | |
1964-08 | 10306 | |
1964-09 | 10792 | |
1964-10 | 14752 | |
1964-11 | 13754 | |
1964-12 | 11738 | |
1965-01 | 12181 | |
1965-02 | 12965 | |
1965-03 | 19990 | |
1965-04 | 23125 | |
1965-05 | 23541 | |
1965-06 | 21247 | |
1965-07 | 15189 | |
1965-08 | 14767 | |
1965-09 | 10895 | |
1965-10 | 17130 | |
1965-11 | 17697 | |
1965-12 | 16611 | |
1966-01 | 12674 | |
1966-02 | 12760 | |
1966-03 | 20249 | |
1966-04 | 22135 | |
1966-05 | 20677 | |
1966-06 | 19933 | |
1966-07 | 15388 | |
1966-08 | 15113 | |
1966-09 | 13401 | |
1966-10 | 16135 | |
1966-11 | 17562 | |
1966-12 | 14720 | |
1967-01 | 12225 | |
1967-02 | 11608 | |
1967-03 | 20985 | |
1967-04 | 19692 | |
1967-05 | 24081 | |
1967-06 | 22114 | |
1967-07 | 14220 | |
1967-08 | 13434 | |
1967-09 | 13598 | |
1967-10 | 17187 | |
1967-11 | 16119 | |
1967-12 | 13713 | |
1968-01 | 13210 | |
1968-02 | 14251 | |
1968-03 | 20139 | |
1968-04 | 21725 | |
1968-05 | 26099 | |
1968-06 | 21084 | |
1968-07 | 18024 | |
1968-08 | 16722 | |
1968-09 | 14385 | |
1968-10 | 21342 | |
1968-11 | 17180 | |
1968-12 | 14577 |
Month | Temperature | |
---|---|---|
1920-01 | 40.6 | |
1920-02 | 40.8 | |
1920-03 | 44.4 | |
1920-04 | 46.7 | |
1920-05 | 54.1 | |
1920-06 | 58.5 | |
1920-07 | 57.7 | |
1920-08 | 56.4 | |
1920-09 | 54.3 | |
1920-10 | 50.5 | |
1920-11 | 42.9 | |
1920-12 | 39.8 | |
1921-01 | 44.2 | |
1921-02 | 39.8 | |
1921-03 | 45.1 | |
1921-04 | 47.0 | |
1921-05 | 54.1 | |
1921-06 | 58.7 | |
1921-07 | 66.3 | |
1921-08 | 59.9 | |
1921-09 | 57.0 | |
1921-10 | 54.2 | |
1921-11 | 39.7 | |
1921-12 | 42.8 | |
1922-01 | 37.5 | |
1922-02 | 38.7 | |
1922-03 | 39.5 | |
1922-04 | 42.1 | |
1922-05 | 55.7 | |
1922-06 | 57.8 | |
1922-07 | 56.8 | |
1922-08 | 54.3 | |
1922-09 | 54.3 | |
1922-10 | 47.1 | |
1922-11 | 41.8 | |
1922-12 | 41.7 | |
1923-01 | 41.8 | |
1923-02 | 40.1 | |
1923-03 | 42.9 | |
1923-04 | 45.8 | |
1923-05 | 49.2 | |
1923-06 | 52.7 | |
1923-07 | 64.2 | |
1923-08 | 59.6 | |
1923-09 | 54.4 | |
1923-10 | 49.2 | |
1923-11 | 36.6 | |
1923-12 | 37.6 | |
1924-01 | 39.3 | |
1924-02 | 37.5 | |
1924-03 | 38.3 | |
1924-04 | 45.5 | |
1924-05 | 53.2 | |
1924-06 | 57.7 | |
1924-07 | 60.8 | |
1924-08 | 58.2 | |
1924-09 | 56.4 | |
1924-10 | 49.8 | |
1924-11 | 44.4 | |
1924-12 | 43.6 | |
1925-01 | 40.0 | |
1925-02 | 40.5 | |
1925-03 | 40.8 | |
1925-04 | 45.1 | |
1925-05 | 53.8 | |
1925-06 | 59.4 | |
1925-07 | 63.5 | |
1925-08 | 61.0 | |
1925-09 | 53.0 | |
1925-10 | 50.0 | |
1925-11 | 38.1 | |
1925-12 | 36.3 | |
1926-01 | 39.2 | |
1926-02 | 43.4 | |
1926-03 | 43.4 | |
1926-04 | 48.9 | |
1926-05 | 50.6 | |
1926-06 | 56.8 | |
1926-07 | 62.5 | |
1926-08 | 62.0 | |
1926-09 | 57.5 | |
1926-10 | 46.7 | |
1926-11 | 41.6 | |
1926-12 | 39.8 | |
1927-01 | 39.4 | |
1927-02 | 38.5 | |
1927-03 | 45.3 | |
1927-04 | 47.1 | |
1927-05 | 51.7 | |
1927-06 | 55.0 | |
1927-07 | 60.4 | |
1927-08 | 60.5 | |
1927-09 | 54.7 | |
1927-10 | 50.3 | |
1927-11 | 42.3 | |
1927-12 | 35.2 | |
1928-01 | 40.8 | |
1928-02 | 41.1 | |
1928-03 | 42.8 | |
1928-04 | 47.3 | |
1928-05 | 50.9 | |
1928-06 | 56.4 | |
1928-07 | 62.2 | |
1928-08 | 60.5 | |
1928-09 | 55.4 | |
1928-10 | 50.2 | |
1928-11 | 43.0 | |
1928-12 | 37.3 | |
1929-01 | 34.8 | |
1929-02 | 31.3 | |
1929-03 | 41.0 | |
1929-04 | 43.9 | |
1929-05 | 53.1 | |
1929-06 | 56.9 | |
1929-07 | 62.5 | |
1929-08 | 60.3 | |
1929-09 | 59.8 | |
1929-10 | 49.2 | |
1929-11 | 42.9 | |
1929-12 | 41.9 | |
1930-01 | 41.6 | |
1930-02 | 37.1 | |
1930-03 | 41.2 | |
1930-04 | 46.9 | |
1930-05 | 51.2 | |
1930-06 | 60.4 | |
1930-07 | 60.1 | |
1930-08 | 61.6 | |
1930-09 | 57.0 | |
1930-10 | 50.9 | |
1930-11 | 43.0 | |
1930-12 | 38.8 | |
1931-01 | 37.1 | |
1931-02 | 38.4 | |
1931-03 | 38.4 | |
1931-04 | 46.5 | |
1931-05 | 53.5 | |
1931-06 | 58.4 | |
1931-07 | 60.6 | |
1931-08 | 58.2 | |
1931-09 | 53.8 | |
1931-10 | 46.6 | |
1931-11 | 45.5 | |
1931-12 | 40.6 | |
1932-01 | 42.4 | |
1932-02 | 38.4 | |
1932-03 | 40.3 | |
1932-04 | 44.6 | |
1932-05 | 50.9 | |
1932-06 | 57.0 | |
1932-07 | 62.1 | |
1932-08 | 63.5 | |
1932-09 | 56.2 | |
1932-10 | 47.3 | |
1932-11 | 43.6 | |
1932-12 | 41.8 | |
1933-01 | 36.2 | |
1933-02 | 39.3 | |
1933-03 | 44.5 | |
1933-04 | 48.7 | |
1933-05 | 54.2 | |
1933-06 | 60.8 | |
1933-07 | 65.5 | |
1933-08 | 64.9 | |
1933-09 | 60.1 | |
1933-10 | 50.2 | |
1933-11 | 42.1 | |
1933-12 | 35.6 | |
1934-01 | 39.4 | |
1934-02 | 38.2 | |
1934-03 | 40.4 | |
1934-04 | 46.9 | |
1934-05 | 53.4 | |
1934-06 | 59.6 | |
1934-07 | 66.5 | |
1934-08 | 60.4 | |
1934-09 | 59.2 | |
1934-10 | 51.2 | |
1934-11 | 42.8 | |
1934-12 | 45.8 | |
1935-01 | 40.4 | |
1935-02 | 42.6 | |
1935-03 | 43.5 | |
1935-04 | 47.1 | |
1935-05 | 50.0 | |
1935-06 | 60.5 | |
1935-07 | 64.6 | |
1935-08 | 64.0 | |
1935-09 | 56.8 | |
1935-10 | 48.6 | |
1935-11 | 44.2 | |
1935-12 | 36.4 | |
1936-01 | 37.3 | |
1936-02 | 35.0 | |
1936-03 | 44.0 | |
1936-04 | 43.9 | |
1936-05 | 52.7 | |
1936-06 | 58.6 | |
1936-07 | 60.0 | |
1936-08 | 61.1 | |
1936-09 | 58.1 | |
1936-10 | 49.6 | |
1936-11 | 41.6 | |
1936-12 | 41.3 | |
1937-01 | 40.8 | |
1937-02 | 41.0 | |
1937-03 | 38.4 | |
1937-04 | 47.4 | |
1937-05 | 54.1 | |
1937-06 | 58.6 | |
1937-07 | 61.4 | |
1937-08 | 61.8 | |
1937-09 | 56.3 | |
1937-10 | 50.9 | |
1937-11 | 41.4 | |
1937-12 | 37.1 | |
1938-01 | 42.1 | |
1938-02 | 41.2 | |
1938-03 | 47.3 | |
1938-04 | 46.6 | |
1938-05 | 52.4 | |
1938-06 | 59.0 | |
1938-07 | 59.6 | |
1938-08 | 60.4 | |
1938-09 | 57.0 | |
1938-10 | 50.7 | |
1938-11 | 47.8 | |
1938-12 | 39.2 | |
1939-01 | 39.4 | |
1939-02 | 40.9 | |
1939-03 | 42.4 | |
1939-04 | 47.8 | |
1939-05 | 52.4 | |
1939-06 | 58.0 | |
1939-07 | 60.7 | |
1939-08 | 61.8 | |
1939-09 | 58.2 | |
1939-10 | 46.7 | |
1939-11 | 46.6 | |
1939-12 | 37.8 |
Month | Sales | |
---|---|---|
1-01 | 266.0 | |
1-02 | 145.9 | |
1-03 | 183.1 | |
1-04 | 119.3 | |
1-05 | 180.3 | |
1-06 | 168.5 | |
1-07 | 231.8 | |
1-08 | 224.5 | |
1-09 | 192.8 | |
1-10 | 122.9 | |
1-11 | 336.5 | |
1-12 | 185.9 | |
2-01 | 194.3 | |
2-02 | 149.5 | |
2-03 | 210.1 | |
2-04 | 273.3 | |
2-05 | 191.4 | |
2-06 | 287.0 | |
2-07 | 226.0 | |
2-08 | 303.6 | |
2-09 | 289.9 | |
2-10 | 421.6 | |
2-11 | 264.5 | |
2-12 | 342.3 | |
3-01 | 339.7 | |
3-02 | 440.4 | |
3-03 | 315.9 | |
3-04 | 439.3 | |
3-05 | 401.3 | |
3-06 | 437.4 | |
3-07 | 575.5 | |
3-08 | 407.6 | |
3-09 | 682.0 | |
3-10 | 475.3 | |
3-11 | 581.3 | |
3-12 | 646.9 |
# grid search holt winter's exponential smoothing | |
from math import sqrt | |
from multiprocessing import cpu_count | |
from joblib import Parallel | |
from joblib import delayed | |
from warnings import catch_warnings | |
from warnings import filterwarnings | |
from statsmodels.tsa.holtwinters import ExponentialSmoothing | |
from sklearn.metrics import mean_squared_error | |
from numpy import array | |
# one-step Holt Winter’s Exponential Smoothing forecast | |
def exp_smoothing_forecast(history, config): | |
t,d,s,p,b,r = config | |
# define model | |
history = array(history) | |
model = ExponentialSmoothing(history, trend=t, damped=d, seasonal=s, seasonal_periods=p) | |
# fit model | |
model_fit = model.fit(optimized=True, use_boxcox=b, remove_bias=r) | |
# make one step forecast | |
yhat = model_fit.predict(len(history), len(history)) | |
return yhat[0] | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = exp_smoothing_forecast(history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
return error | |
# score a model, return None on failure | |
def score_model(data, n_test, cfg, debug=False): | |
result = None | |
# convert config to a key | |
key = str(cfg) | |
# show all warnings and fail on exception if debugging | |
if debug: | |
result = walk_forward_validation(data, n_test, cfg) | |
else: | |
# one failure during model validation suggests an unstable config | |
try: | |
# never show warnings when grid searching, too noisy | |
with catch_warnings(): | |
filterwarnings("ignore") | |
result = walk_forward_validation(data, n_test, cfg) | |
except: | |
error = None | |
# check for an interesting result | |
if result is not None: | |
print(' > Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test, parallel=True): | |
scores = None | |
if parallel: | |
# execute configs in parallel | |
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing') | |
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list) | |
scores = executor(tasks) | |
else: | |
scores = [score_model(data, n_test, cfg) for cfg in cfg_list] | |
# remove empty results | |
scores = [r for r in scores if r[1] is not None] | 
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a set of exponential smoothing configs to try | |
def exp_smoothing_configs(seasonal=[None]): | |
models = list() | |
# define config lists | |
t_params = ['add', 'mul', None] | |
d_params = [True, False] | |
s_params = ['add', 'mul', None] | |
p_params = seasonal | |
b_params = [True, False] | |
r_params = [True, False] | |
# create config instances | |
for t in t_params: | |
for d in d_params: | |
for s in s_params: | |
for p in p_params: | |
for b in b_params: | |
for r in r_params: | |
cfg = [t,d,s,p,b,r] | |
models.append(cfg) | |
return models | |
if __name__ == '__main__': | |
# define dataset | |
data = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] | |
print(data) | |
# data split | |
n_test = 4 | |
# model configs | |
cfg_list = exp_smoothing_configs() | |
# grid search | |
scores = grid_search(data, cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
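A portability note: in statsmodels 0.12+ the damped argument of ExponentialSmoothing was renamed damped_trend, and Box-Cox handling moved from fit() into the constructor. A sketch of the forecast function adjusted for the newer API, keeping the same config layout:

from numpy import array
from statsmodels.tsa.holtwinters import ExponentialSmoothing

def exp_smoothing_forecast_new(history, config):
    t, d, s, p, b, r = config
    history = array(history)
    # damped_trend replaces damped; use_boxcox is now a constructor argument
    model = ExponentialSmoothing(history, trend=t, damped_trend=d,
                                 seasonal=s, seasonal_periods=p, use_boxcox=b)
    model_fit = model.fit(optimized=True, remove_bias=r)
    yhat = model_fit.predict(len(history), len(history))
    return yhat[0]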
# grid search ets models for daily female births | |
from math import sqrt | |
from multiprocessing import cpu_count | |
from joblib import Parallel | |
from joblib import delayed | |
from warnings import catch_warnings | |
from warnings import filterwarnings | |
from statsmodels.tsa.holtwinters import ExponentialSmoothing | |
from sklearn.metrics import mean_squared_error | |
from pandas import read_csv | |
from numpy import array | |
# one-step Holt Winter’s Exponential Smoothing forecast | |
def exp_smoothing_forecast(history, config): | |
t,d,s,p,b,r = config | |
# define model | |
history = array(history) | |
model = ExponentialSmoothing(history, trend=t, damped=d, seasonal=s, seasonal_periods=p) | |
# fit model | |
model_fit = model.fit(optimized=True, use_boxcox=b, remove_bias=r) | |
# make one step forecast | |
yhat = model_fit.predict(len(history), len(history)) | |
return yhat[0] | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = exp_smoothing_forecast(history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
return error | |
# score a model, return None on failure | |
def score_model(data, n_test, cfg, debug=False): | |
result = None | |
# convert config to a key | |
key = str(cfg) | |
# show all warnings and fail on exception if debugging | |
if debug: | |
result = walk_forward_validation(data, n_test, cfg) | |
else: | |
# one failure during model validation suggests an unstable config | |
try: | |
# never show warnings when grid searching, too noisy | |
with catch_warnings(): | |
filterwarnings("ignore") | |
result = walk_forward_validation(data, n_test, cfg) | |
except: | |
error = None | |
# check for an interesting result | |
if result is not None: | |
print(' > Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test, parallel=True): | |
scores = None | |
if parallel: | |
# execute configs in parallel | |
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing') | |
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list) | |
scores = executor(tasks) | |
else: | |
scores = [score_model(data, n_test, cfg) for cfg in cfg_list] | |
# remove empty results | |
scores = [r for r in scores if r[1] is not None] | 
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a set of exponential smoothing configs to try | |
def exp_smoothing_configs(seasonal=[None]): | |
models = list() | |
# define config lists | |
t_params = ['add', 'mul', None] | |
d_params = [True, False] | |
s_params = ['add', 'mul', None] | |
p_params = seasonal | |
b_params = [True, False] | |
r_params = [True, False] | |
# create config instances | |
for t in t_params: | |
for d in d_params: | |
for s in s_params: | |
for p in p_params: | |
for b in b_params: | |
for r in r_params: | |
cfg = [t,d,s,p,b,r] | |
models.append(cfg) | |
return models | |
if __name__ == '__main__': | |
# load dataset | |
series = read_csv('daily-total-female-births.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 165 | |
# model configs | |
cfg_list = exp_smoothing_configs() | |
# grid search | |
scores = grid_search(data[:,0], cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
# grid search ets models for monthly shampoo sales | |
from math import sqrt | |
from multiprocessing import cpu_count | |
from joblib import Parallel | |
from joblib import delayed | |
from warnings import catch_warnings | |
from warnings import filterwarnings | |
from statsmodels.tsa.holtwinters import ExponentialSmoothing | |
from sklearn.metrics import mean_squared_error | |
from pandas import read_csv | |
from numpy import array | |
# one-step Holt Winter’s Exponential Smoothing forecast | |
def exp_smoothing_forecast(history, config): | |
t,d,s,p,b,r = config | |
# define model | |
history = array(history) | |
model = ExponentialSmoothing(history, trend=t, damped=d, seasonal=s, seasonal_periods=p) | |
# fit model | |
model_fit = model.fit(optimized=True, use_boxcox=b, remove_bias=r) | |
# make one step forecast | |
yhat = model_fit.predict(len(history), len(history)) | |
return yhat[0] | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = exp_smoothing_forecast(history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
return error | |
# score a model, return None on failure | |
def score_model(data, n_test, cfg, debug=False): | |
result = None | |
# convert config to a key | |
key = str(cfg) | |
# show all warnings and fail on exception if debugging | |
if debug: | |
result = walk_forward_validation(data, n_test, cfg) | |
else: | |
# one failure during model validation suggests an unstable config | |
try: | |
# never show warnings when grid searching, too noisy | |
with catch_warnings(): | |
filterwarnings("ignore") | |
result = walk_forward_validation(data, n_test, cfg) | |
except: | |
error = None | |
# check for an interesting result | |
if result is not None: | |
print(' > Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test, parallel=True): | |
scores = None | |
if parallel: | |
# execute configs in parallel | |
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing') | |
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list) | |
scores = executor(tasks) | |
else: | |
scores = [score_model(data, n_test, cfg) for cfg in cfg_list] | |
# remove empty results | |
scores = [r for r in scores if r[1] is not None] | 
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a set of exponential smoothing configs to try | |
def exp_smoothing_configs(seasonal=[None]): | |
models = list() | |
# define config lists | |
t_params = ['add', 'mul', None] | |
d_params = [True, False] | |
s_params = ['add', 'mul', None] | |
p_params = seasonal | |
b_params = [True, False] | |
r_params = [True, False] | |
# create config instances | |
for t in t_params: | |
for d in d_params: | |
for s in s_params: | |
for p in p_params: | |
for b in b_params: | |
for r in r_params: | |
cfg = [t,d,s,p,b,r] | |
models.append(cfg) | |
return models | |
if __name__ == '__main__': | |
# load dataset | |
series = read_csv('monthly-shampoo-sales.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# model configs | |
cfg_list = exp_smoothing_configs() | |
# grid search | |
scores = grid_search(data[:,0], cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
# grid search ets hyperparameters for monthly mean temp dataset | |
from math import sqrt | |
from multiprocessing import cpu_count | |
from joblib import Parallel | |
from joblib import delayed | |
from warnings import catch_warnings | |
from warnings import filterwarnings | |
from statsmodels.tsa.holtwinters import ExponentialSmoothing | |
from sklearn.metrics import mean_squared_error | |
from pandas import read_csv | |
from numpy import array | |
# one-step Holt Winter’s Exponential Smoothing forecast | |
def exp_smoothing_forecast(history, config): | |
t,d,s,p,b,r = config | |
# define model | |
history = array(history) | |
model = ExponentialSmoothing(history, trend=t, damped=d, seasonal=s, seasonal_periods=p) | |
# fit model | |
model_fit = model.fit(optimized=True, use_boxcox=b, remove_bias=r) | |
# make one step forecast | |
yhat = model_fit.predict(len(history), len(history)) | |
return yhat[0] | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = exp_smoothing_forecast(history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
return error | |
# score a model, return None on failure | |
def score_model(data, n_test, cfg, debug=False): | |
result = None | |
# convert config to a key | |
key = str(cfg) | |
# show all warnings and fail on exception if debugging | |
if debug: | |
result = walk_forward_validation(data, n_test, cfg) | |
else: | |
# one failure during model validation suggests an unstable config | |
try: | |
# never show warnings when grid searching, too noisy | |
with catch_warnings(): | |
filterwarnings("ignore") | |
result = walk_forward_validation(data, n_test, cfg) | |
except: | |
error = None | |
# check for an interesting result | |
if result is not None: | |
print(' > Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test, parallel=True): | |
scores = None | |
if parallel: | |
# execute configs in parallel | |
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing') | |
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list) | |
scores = executor(tasks) | |
else: | |
scores = [score_model(data, n_test, cfg) for cfg in cfg_list] | |
# remove empty results | |
scores = [r for r in scores if r[1] is not None] | 
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a set of exponential smoothing configs to try | |
def exp_smoothing_configs(seasonal=[None]): | |
models = list() | |
# define config lists | |
t_params = ['add', 'mul', None] | |
d_params = [True, False] | |
s_params = ['add', 'mul', None] | |
p_params = seasonal | |
b_params = [True, False] | |
r_params = [True, False] | |
# create config instances | |
for t in t_params: | |
for d in d_params: | |
for s in s_params: | |
for p in p_params: | |
for b in b_params: | |
for r in r_params: | |
cfg = [t,d,s,p,b,r] | |
models.append(cfg) | |
return models | |
if __name__ == '__main__': | |
# load dataset | |
series = read_csv('monthly-mean-temp.csv', header=0, index_col=0) | |
data = series.values | |
# trim dataset to 5 years | |
data = data[-(5*12):] | |
# data split | |
n_test = 12 | |
# model configs | |
cfg_list = exp_smoothing_configs(seasonal=[0,12]) | |
# grid search | |
scores = grid_search(data[:,0], cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
# grid search ets models for monthly car sales | |
from math import sqrt | |
from multiprocessing import cpu_count | |
from joblib import Parallel | |
from joblib import delayed | |
from warnings import catch_warnings | |
from warnings import filterwarnings | |
from statsmodels.tsa.holtwinters import ExponentialSmoothing | |
from sklearn.metrics import mean_squared_error | |
from pandas import read_csv | |
from numpy import array | |
# one-step Holt Winter’s Exponential Smoothing forecast | |
def exp_smoothing_forecast(history, config): | |
t,d,s,p,b,r = config | |
# define model | |
history = array(history) | |
model = ExponentialSmoothing(history, trend=t, damped=d, seasonal=s, seasonal_periods=p) | |
# fit model | |
model_fit = model.fit(optimized=True, use_boxcox=b, remove_bias=r) | |
# make one step forecast | |
yhat = model_fit.predict(len(history), len(history)) | |
return yhat[0] | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = exp_smoothing_forecast(history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
return error | |
# score a model, return None on failure | |
def score_model(data, n_test, cfg, debug=False): | |
result = None | |
# convert config to a key | |
key = str(cfg) | |
# show all warnings and fail on exception if debugging | |
if debug: | |
result = walk_forward_validation(data, n_test, cfg) | |
else: | |
# one failure during model validation suggests an unstable config | |
try: | |
# never show warnings when grid searching, too noisy | |
with catch_warnings(): | |
filterwarnings("ignore") | |
result = walk_forward_validation(data, n_test, cfg) | |
except: | |
error = None | |
# check for an interesting result | |
if result is not None: | |
print(' > Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test, parallel=True): | |
scores = None | |
if parallel: | |
# execute configs in parallel | |
executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing') | |
tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list) | |
scores = executor(tasks) | |
else: | |
scores = [score_model(data, n_test, cfg) for cfg in cfg_list] | |
# remove empty results | |
scores = [r for r in scores if r[1] is not None] | 
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a set of exponential smoothing configs to try | |
def exp_smoothing_configs(seasonal=[None]): | |
models = list() | |
# define config lists | |
t_params = ['add', 'mul', None] | |
d_params = [True, False] | |
s_params = ['add', 'mul', None] | |
p_params = seasonal | |
b_params = [True, False] | |
r_params = [True, False] | |
# create config instances | |
for t in t_params: | |
for d in d_params: | |
for s in s_params: | |
for p in p_params: | |
for b in b_params: | |
for r in r_params: | |
cfg = [t,d,s,p,b,r] | |
models.append(cfg) | |
return models | |
if __name__ == '__main__': | |
# load dataset | |
series = read_csv('monthly-car-sales.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# model configs | |
cfg_list = exp_smoothing_configs(seasonal=[0,6,12]) | |
# grid search | |
scores = grid_search(data[:,0], cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
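Note that seasonal_periods=0 (used here and in the temperature script) is not a fittable seasonal period: those configs only survive when seasonal is also None, and the rest are silently discarded by score_model's try/except, as are damped-trend configs without a trend component. An optional pre-filter sketch to drop unfittable configs up front:

# drop configs that ExponentialSmoothing cannot fit (sketch, same config layout)
def viable(cfg):
    t, d, s, p, b, r = cfg
    if t is None and d:
        return False  # damping requires a trend component
    return s is None or (p is not None and p >= 2)  # seasonal needs a real period

cfg_list = [cfg for cfg in exp_smoothing_configs(seasonal=[0, 6, 12]) if viable(cfg)]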
1959-07-07 | 57 | |
1959-07-08 | 46 | |
1959-07-09 | 51 | |
1959-07-10 | 41 | |
1959-07-11 | 47 | |
1959-07-12 | 26 | |
1959-07-13 | 35 | |
1959-07-14 | 44 | |
1959-07-15 | 41 | |
1959-07-16 | 42 | |
1959-07-17 | 36 | |
1959-07-18 | 45 | |
1959-07-19 | 45 | |
1959-07-20 | 45 | |
1959-07-21 | 47 | |
1959-07-22 | 38 | |
1959-07-23 | 42 | |
1959-07-24 | 35 | |
1959-07-25 | 36 | |
1959-07-26 | 39 | |
1959-07-27 | 45 | |
1959-07-28 | 43 | |
1959-07-29 | 47 | |
1959-07-30 | 36 | |
1959-07-31 | 41 | |
1959-08-01 | 50 | |
1959-08-02 | 39 | |
1959-08-03 | 41 | |
1959-08-04 | 46 | |
1959-08-05 | 64 | |
1959-08-06 | 45 | |
1959-08-07 | 34 | |
1959-08-08 | 38 | |
1959-08-09 | 44 | |
1959-08-10 | 48 | |
1959-08-11 | 46 | |
1959-08-12 | 44 | |
1959-08-13 | 37 | |
1959-08-14 | 39 | |
1959-08-15 | 44 | |
1959-08-16 | 45 | |
1959-08-17 | 33 | |
1959-08-18 | 44 | |
1959-08-19 | 38 | |
1959-08-20 | 46 | |
1959-08-21 | 46 | |
1959-08-22 | 40 | |
1959-08-23 | 39 | |
1959-08-24 | 44 | |
1959-08-25 | 48 | |
1959-08-26 | 50 | |
1959-08-27 | 41 | |
1959-08-28 | 42 | |
1959-08-29 | 51 | |
1959-08-30 | 41 | |
1959-08-31 | 44 | |
1959-09-01 | 38 | |
1959-09-02 | 68 | |
1959-09-03 | 40 | |
1959-09-04 | 42 | |
1959-09-05 | 51 | |
1959-09-06 | 44 | |
1959-09-07 | 45 | |
1959-09-08 | 36 | |
1959-09-09 | 57 | |
1959-09-10 | 44 | |
1959-09-11 | 42 | |
1959-09-12 | 53 | |
1959-09-13 | 42 | |
1959-09-14 | 34 | |
1959-09-15 | 40 | |
1959-09-16 | 56 | |
1959-09-17 | 44 | |
1959-09-18 | 53 | |
1959-09-19 | 55 | |
1959-09-20 | 39 | |
1959-09-21 | 59 | |
1959-09-22 | 55 | |
1959-09-23 | 73 | |
1959-09-24 | 55 | |
1959-09-25 | 44 | |
1959-09-26 | 43 | |
1959-09-27 | 40 | |
1959-09-28 | 47 | |
1959-09-29 | 51 | |
1959-09-30 | 56 | |
1959-10-01 | 49 | |
1959-10-02 | 54 | |
1959-10-03 | 56 | |
1959-10-04 | 47 | |
1959-10-05 | 44 | |
1959-10-06 | 43 | |
1959-10-07 | 42 | |
1959-10-08 | 45 | |
1959-10-09 | 50 | |
1959-10-10 | 48 | |
1959-10-11 | 43 | |
1959-10-12 | 40 | |
1959-10-13 | 59 | |
1959-10-14 | 41 | |
1959-10-15 | 42 | |
1959-10-16 | 51 | |
1959-10-17 | 49 | |
1959-10-18 | 45 | |
1959-10-19 | 43 | |
1959-10-20 | 42 | |
1959-10-21 | 38 | |
1959-10-22 | 47 | |
1959-10-23 | 38 | |
1959-10-24 | 36 | |
1959-10-25 | 42 | |
1959-10-26 | 35 | |
1959-10-27 | 28 | |
1959-10-28 | 44 | |
1959-10-29 | 36 | |
1959-10-30 | 45 | |
1959-10-31 | 46 | |
1959-11-01 | 48 | |
1959-11-02 | 49 | |
1959-11-03 | 43 | |
1959-11-04 | 42 | |
1959-11-05 | 59 | |
1959-11-06 | 45 | |
1959-11-07 | 52 | |
1959-11-08 | 46 | |
1959-11-09 | 42 | |
1959-11-10 | 40 | |
1959-11-11 | 40 | |
1959-11-12 | 45 | |
1959-11-13 | 35 | |
1959-11-14 | 35 | |
1959-11-15 | 40 | |
1959-11-16 | 39 | |
1959-11-17 | 33 | |
1959-11-18 | 42 | |
1959-11-19 | 47 | |
1959-11-20 | 51 | |
1959-11-21 | 44 | |
1959-11-22 | 40 | |
1959-11-23 | 57 | |
1959-11-24 | 49 | |
1959-11-25 | 45 | |
1959-11-26 | 49 | |
1959-11-27 | 51 | |
1959-11-28 | 46 | |
1959-11-29 | 44 | |
1959-11-30 | 52 | |
1959-12-01 | 45 | |
1959-12-02 | 32 | |
1959-12-03 | 46 | |
1959-12-04 | 41 | |
1959-12-05 | 34 | |
1959-12-06 | 33 | |
1959-12-07 | 36 | |
1959-12-08 | 49 | |
1959-12-09 | 43 | |
1959-12-10 | 43 | |
1959-12-11 | 34 | |
1959-12-12 | 39 | |
1959-12-13 | 35 | |
1959-12-14 | 52 | |
1959-12-15 | 47 | |
1959-12-16 | 52 | |
1959-12-17 | 39 | |
1959-12-18 | 40 | |
1959-12-19 | 42 | |
1959-12-20 | 42 | |
1959-12-21 | 53 | |
1959-12-22 | 39 | |
1959-12-23 | 40 | |
1959-12-24 | 38 | |
1959-12-25 | 44 | |
1959-12-26 | 34 | |
1959-12-27 | 37 | |
1959-12-28 | 52 | |
1959-12-29 | 48 | |
1959-12-30 | 55 | |
1959-12-31 | 50 |
monthly-car-sales.csv:

Month | Sales
---|---
1960-01 | 6550
1960-02 | 8728 | |
1960-03 | 12026 | |
1960-04 | 14395 | |
1960-05 | 14587 | |
1960-06 | 13791 | |
1960-07 | 9498 | |
1960-08 | 8251 | |
1960-09 | 7049 | |
1960-10 | 9545 | |
1960-11 | 9364 | |
1960-12 | 8456 | |
1961-01 | 7237 | |
1961-02 | 9374 | |
1961-03 | 11837 | |
1961-04 | 13784 | |
1961-05 | 15926 | |
1961-06 | 13821 | |
1961-07 | 11143 | |
1961-08 | 7975 | |
1961-09 | 7610 | |
1961-10 | 10015 | |
1961-11 | 12759 | |
1961-12 | 8816 | |
1962-01 | 10677 | |
1962-02 | 10947 | |
1962-03 | 15200 | |
1962-04 | 17010 | |
1962-05 | 20900 | |
1962-06 | 16205 | |
1962-07 | 12143 | |
1962-08 | 8997 | |
1962-09 | 5568 | |
1962-10 | 11474 | |
1962-11 | 12256 | |
1962-12 | 10583 | |
1963-01 | 10862 | |
1963-02 | 10965 | |
1963-03 | 14405 | |
1963-04 | 20379 | |
1963-05 | 20128 | |
1963-06 | 17816 | |
1963-07 | 12268 | |
1963-08 | 8642 | |
1963-09 | 7962 | |
1963-10 | 13932 | |
1963-11 | 15936 | |
1963-12 | 12628 | |
1964-01 | 12267 | |
1964-02 | 12470 | |
1964-03 | 18944 | |
1964-04 | 21259 | |
1964-05 | 22015 | |
1964-06 | 18581 | |
1964-07 | 15175 | |
1964-08 | 10306 | |
1964-09 | 10792 | |
1964-10 | 14752 | |
1964-11 | 13754 | |
1964-12 | 11738 | |
1965-01 | 12181 | |
1965-02 | 12965 | |
1965-03 | 19990 | |
1965-04 | 23125 | |
1965-05 | 23541 | |
1965-06 | 21247 | |
1965-07 | 15189 | |
1965-08 | 14767 | |
1965-09 | 10895 | |
1965-10 | 17130 | |
1965-11 | 17697 | |
1965-12 | 16611 | |
1966-01 | 12674 | |
1966-02 | 12760 | |
1966-03 | 20249 | |
1966-04 | 22135 | |
1966-05 | 20677 | |
1966-06 | 19933 | |
1966-07 | 15388 | |
1966-08 | 15113 | |
1966-09 | 13401 | |
1966-10 | 16135 | |
1966-11 | 17562 | |
1966-12 | 14720 | |
1967-01 | 12225 | |
1967-02 | 11608 | |
1967-03 | 20985 | |
1967-04 | 19692 | |
1967-05 | 24081 | |
1967-06 | 22114 | |
1967-07 | 14220 | |
1967-08 | 13434 | |
1967-09 | 13598 | |
1967-10 | 17187 | |
1967-11 | 16119 | |
1967-12 | 13713 | |
1968-01 | 13210 | |
1968-02 | 14251 | |
1968-03 | 20139 | |
1968-04 | 21725 | |
1968-05 | 26099 | |
1968-06 | 21084 | |
1968-07 | 18024 | |
1968-08 | 16722 | |
1968-09 | 14385 | |
1968-10 | 21342 | |
1968-11 | 17180 | |
1968-12 | 14577 |
monthly-mean-temp.csv:

Month | Mean temperature
---|---
1920-01 | 40.6
1920-02 | 40.8 | |
1920-03 | 44.4 | |
1920-04 | 46.7 | |
1920-05 | 54.1 | |
1920-06 | 58.5 | |
1920-07 | 57.7 | |
1920-08 | 56.4 | |
1920-09 | 54.3 | |
1920-10 | 50.5 | |
1920-11 | 42.9 | |
1920-12 | 39.8 | |
1921-01 | 44.2 | |
1921-02 | 39.8 | |
1921-03 | 45.1 | |
1921-04 | 47.0 | |
1921-05 | 54.1 | |
1921-06 | 58.7 | |
1921-07 | 66.3 | |
1921-08 | 59.9 | |
1921-09 | 57.0 | |
1921-10 | 54.2 | |
1921-11 | 39.7 | |
1921-12 | 42.8 | |
1922-01 | 37.5 | |
1922-02 | 38.7 | |
1922-03 | 39.5 | |
1922-04 | 42.1 | |
1922-05 | 55.7 | |
1922-06 | 57.8 | |
1922-07 | 56.8 | |
1922-08 | 54.3 | |
1922-09 | 54.3 | |
1922-10 | 47.1 | |
1922-11 | 41.8 | |
1922-12 | 41.7 | |
1923-01 | 41.8 | |
1923-02 | 40.1 | |
1923-03 | 42.9 | |
1923-04 | 45.8 | |
1923-05 | 49.2 | |
1923-06 | 52.7 | |
1923-07 | 64.2 | |
1923-08 | 59.6 | |
1923-09 | 54.4 | |
1923-10 | 49.2 | |
1923-11 | 36.6 | |
1923-12 | 37.6 | |
1924-01 | 39.3 | |
1924-02 | 37.5 | |
1924-03 | 38.3 | |
1924-04 | 45.5 | |
1924-05 | 53.2 | |
1924-06 | 57.7 | |
1924-07 | 60.8 | |
1924-08 | 58.2 | |
1924-09 | 56.4 | |
1924-10 | 49.8 | |
1924-11 | 44.4 | |
1924-12 | 43.6 | |
1925-01 | 40.0 | |
1925-02 | 40.5 | |
1925-03 | 40.8 | |
1925-04 | 45.1 | |
1925-05 | 53.8 | |
1925-06 | 59.4 | |
1925-07 | 63.5 | |
1925-08 | 61.0 | |
1925-09 | 53.0 | |
1925-10 | 50.0 | |
1925-11 | 38.1 | |
1925-12 | 36.3 | |
1926-01 | 39.2 | |
1926-02 | 43.4 | |
1926-03 | 43.4 | |
1926-04 | 48.9 | |
1926-05 | 50.6 | |
1926-06 | 56.8 | |
1926-07 | 62.5 | |
1926-08 | 62.0 | |
1926-09 | 57.5 | |
1926-10 | 46.7 | |
1926-11 | 41.6 | |
1926-12 | 39.8 | |
1927-01 | 39.4 | |
1927-02 | 38.5 | |
1927-03 | 45.3 | |
1927-04 | 47.1 | |
1927-05 | 51.7 | |
1927-06 | 55.0 | |
1927-07 | 60.4 | |
1927-08 | 60.5 | |
1927-09 | 54.7 | |
1927-10 | 50.3 | |
1927-11 | 42.3 | |
1927-12 | 35.2 | |
1928-01 | 40.8 | |
1928-02 | 41.1 | |
1928-03 | 42.8 | |
1928-04 | 47.3 | |
1928-05 | 50.9 | |
1928-06 | 56.4 | |
1928-07 | 62.2 | |
1928-08 | 60.5 | |
1928-09 | 55.4 | |
1928-10 | 50.2 | |
1928-11 | 43.0 | |
1928-12 | 37.3 | |
1929-01 | 34.8 | |
1929-02 | 31.3 | |
1929-03 | 41.0 | |
1929-04 | 43.9 | |
1929-05 | 53.1 | |
1929-06 | 56.9 | |
1929-07 | 62.5 | |
1929-08 | 60.3 | |
1929-09 | 59.8 | |
1929-10 | 49.2 | |
1929-11 | 42.9 | |
1929-12 | 41.9 | |
1930-01 | 41.6 | |
1930-02 | 37.1 | |
1930-03 | 41.2 | |
1930-04 | 46.9 | |
1930-05 | 51.2 | |
1930-06 | 60.4 | |
1930-07 | 60.1 | |
1930-08 | 61.6 | |
1930-09 | 57.0 | |
1930-10 | 50.9 | |
1930-11 | 43.0 | |
1930-12 | 38.8 | |
1931-01 | 37.1 | |
1931-02 | 38.4 | |
1931-03 | 38.4 | |
1931-04 | 46.5 | |
1931-05 | 53.5 | |
1931-06 | 58.4 | |
1931-07 | 60.6 | |
1931-08 | 58.2 | |
1931-09 | 53.8 | |
1931-10 | 46.6 | |
1931-11 | 45.5 | |
1931-12 | 40.6 | |
1932-01 | 42.4 | |
1932-02 | 38.4 | |
1932-03 | 40.3 | |
1932-04 | 44.6 | |
1932-05 | 50.9 | |
1932-06 | 57.0 | |
1932-07 | 62.1 | |
1932-08 | 63.5 | |
1932-09 | 56.2 | |
1932-10 | 47.3 | |
1932-11 | 43.6 | |
1932-12 | 41.8 | |
1933-01 | 36.2 | |
1933-02 | 39.3 | |
1933-03 | 44.5 | |
1933-04 | 48.7 | |
1933-05 | 54.2 | |
1933-06 | 60.8 | |
1933-07 | 65.5 | |
1933-08 | 64.9 | |
1933-09 | 60.1 | |
1933-10 | 50.2 | |
1933-11 | 42.1 | |
1933-12 | 35.6 | |
1934-01 | 39.4 | |
1934-02 | 38.2 | |
1934-03 | 40.4 | |
1934-04 | 46.9 | |
1934-05 | 53.4 | |
1934-06 | 59.6 | |
1934-07 | 66.5 | |
1934-08 | 60.4 | |
1934-09 | 59.2 | |
1934-10 | 51.2 | |
1934-11 | 42.8 | |
1934-12 | 45.8 | |
1935-01 | 40.4 | |
1935-02 | 42.6 | |
1935-03 | 43.5 | |
1935-04 | 47.1 | |
1935-05 | 50.0 | |
1935-06 | 60.5 | |
1935-07 | 64.6 | |
1935-08 | 64.0 | |
1935-09 | 56.8 | |
1935-10 | 48.6 | |
1935-11 | 44.2 | |
1935-12 | 36.4 | |
1936-01 | 37.3 | |
1936-02 | 35.0 | |
1936-03 | 44.0 | |
1936-04 | 43.9 | |
1936-05 | 52.7 | |
1936-06 | 58.6 | |
1936-07 | 60.0 | |
1936-08 | 61.1 | |
1936-09 | 58.1 | |
1936-10 | 49.6 | |
1936-11 | 41.6 | |
1936-12 | 41.3 | |
1937-01 | 40.8 | |
1937-02 | 41.0 | |
1937-03 | 38.4 | |
1937-04 | 47.4 | |
1937-05 | 54.1 | |
1937-06 | 58.6 | |
1937-07 | 61.4 | |
1937-08 | 61.8 | |
1937-09 | 56.3 | |
1937-10 | 50.9 | |
1937-11 | 41.4 | |
1937-12 | 37.1 | |
1938-01 | 42.1 | |
1938-02 | 41.2 | |
1938-03 | 47.3 | |
1938-04 | 46.6 | |
1938-05 | 52.4 | |
1938-06 | 59.0 | |
1938-07 | 59.6 | |
1938-08 | 60.4 | |
1938-09 | 57.0 | |
1938-10 | 50.7 | |
1938-11 | 47.8 | |
1938-12 | 39.2 | |
1939-01 | 39.4 | |
1939-02 | 40.9 | |
1939-03 | 42.4 | |
1939-04 | 47.8 | |
1939-05 | 52.4 | |
1939-06 | 58.0 | |
1939-07 | 60.7 | |
1939-08 | 61.8 | |
1939-09 | 58.2 | |
1939-10 | 46.7 | |
1939-11 | 46.6 | |
1939-12 | 37.8 |
monthly-shampoo-sales.csv:

Month | Sales
---|---
1-01 | 266.0
1-02 | 145.9 | |
1-03 | 183.1 | |
1-04 | 119.3 | |
1-05 | 180.3 | |
1-06 | 168.5 | |
1-07 | 231.8 | |
1-08 | 224.5 | |
1-09 | 192.8 | |
1-10 | 122.9 | |
1-11 | 336.5 | |
1-12 | 185.9 | |
2-01 | 194.3 | |
2-02 | 149.5 | |
2-03 | 210.1 | |
2-04 | 273.3 | |
2-05 | 191.4 | |
2-06 | 287.0 | |
2-07 | 226.0 | |
2-08 | 303.6 | |
2-09 | 289.9 | |
2-10 | 421.6 | |
2-11 | 264.5 | |
2-12 | 342.3 | |
3-01 | 339.7 | |
3-02 | 440.4 | |
3-03 | 315.9 | |
3-04 | 439.3 | |
3-05 | 401.3 | |
3-06 | 437.4 | |
3-07 | 575.5 | |
3-08 | 407.6 | |
3-09 | 682.0 | |
3-10 | 475.3 | |
3-11 | 581.3 | |
3-12 | 646.9 |
# grid search sarima hyperparameters
from math import sqrt
from multiprocessing import cpu_count
from joblib import Parallel
from joblib import delayed
from warnings import catch_warnings
from warnings import filterwarnings
from statsmodels.tsa.statespace.sarimax import SARIMAX
from sklearn.metrics import mean_squared_error

# one-step sarima forecast
def sarima_forecast(history, config):
    order, sorder, trend = config
    # define model
    model = SARIMAX(history, order=order, seasonal_order=sorder, trend=trend,
                    enforce_stationarity=False, enforce_invertibility=False)
    # fit model
    model_fit = model.fit(disp=False)
    # make one step forecast
    yhat = model_fit.predict(len(history), len(history))
    return yhat[0]

# root mean squared error or rmse
def measure_rmse(actual, predicted):
    return sqrt(mean_squared_error(actual, predicted))

# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
    return data[:-n_test], data[-n_test:]

# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
    predictions = list()
    # split dataset
    train, test = train_test_split(data, n_test)
    # seed history with training dataset
    history = [x for x in train]
    # step over each time-step in the test set
    for i in range(len(test)):
        # fit model and make forecast for history
        yhat = sarima_forecast(history, cfg)
        # store forecast in list of predictions
        predictions.append(yhat)
        # add actual observation to history for the next loop
        history.append(test[i])
    # estimate prediction error
    error = measure_rmse(test, predictions)
    return error

# score a model, return None on failure
def score_model(data, n_test, cfg, debug=False):
    result = None
    # convert config to a key
    key = str(cfg)
    # show all warnings and fail on exception if debugging
    if debug:
        result = walk_forward_validation(data, n_test, cfg)
    else:
        # one failure during model validation suggests an unstable config
        try:
            # never show warnings when grid searching, too noisy
            with catch_warnings():
                filterwarnings("ignore")
                result = walk_forward_validation(data, n_test, cfg)
        except Exception:
            result = None
    # check for an interesting result
    if result is not None:
        print(' > Model[%s] %.3f' % (key, result))
    return (key, result)

# grid search configs
def grid_search(data, cfg_list, n_test, parallel=True):
    scores = None
    if parallel:
        # execute configs in parallel
        executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing')
        tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list)
        scores = executor(tasks)
    else:
        scores = [score_model(data, n_test, cfg) for cfg in cfg_list]
    # remove failed configs
    scores = [r for r in scores if r[1] is not None]
    # sort configs by error, ascending
    scores.sort(key=lambda tup: tup[1])
    return scores

# create a set of sarima configs to try
def sarima_configs(seasonal=[0]):
    models = list()
    # define config lists
    p_params = [0, 1, 2]
    d_params = [0, 1]
    q_params = [0, 1, 2]
    t_params = ['n', 'c', 't', 'ct']
    P_params = [0, 1, 2]
    D_params = [0, 1]
    Q_params = [0, 1, 2]
    m_params = seasonal
    # create config instances
    for p in p_params:
        for d in d_params:
            for q in q_params:
                for t in t_params:
                    for P in P_params:
                        for D in D_params:
                            for Q in Q_params:
                                for m in m_params:
                                    cfg = [(p, d, q), (P, D, Q, m), t]
                                    models.append(cfg)
    return models

if __name__ == '__main__':
    # define dataset
    data = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    print(data)
    # data split
    n_test = 4
    # model configs
    cfg_list = sarima_configs()
    # grid search
    scores = grid_search(data, cfg_list, n_test)
    print('done')
    # list top 3 configs
    for cfg, error in scores[:3]:
        print(cfg, error)
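Because score_model swallows every exception during the search, a config that always fails is silently dropped. Re-scoring one suspect config with debug=True (a sketch reusing the functions and variables defined in the script above) surfaces the warnings and the underlying error:

# re-run a single config outside the grid search to see why it fails
suspect = [(1, 1, 1), (0, 0, 0, 0), 'c']
key, rmse = score_model(data, n_test, suspect, debug=True)
print(key, rmse)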
# grid search sarima hyperparameters for daily female dataset
from math import sqrt
from multiprocessing import cpu_count
from joblib import Parallel
from joblib import delayed
from warnings import catch_warnings
from warnings import filterwarnings
from statsmodels.tsa.statespace.sarimax import SARIMAX
from sklearn.metrics import mean_squared_error
from pandas import read_csv

# one-step sarima forecast
def sarima_forecast(history, config):
    order, sorder, trend = config
    # define model
    model = SARIMAX(history, order=order, seasonal_order=sorder, trend=trend,
                    enforce_stationarity=False, enforce_invertibility=False)
    # fit model
    model_fit = model.fit(disp=False)
    # make one step forecast
    yhat = model_fit.predict(len(history), len(history))
    return yhat[0]

# root mean squared error or rmse
def measure_rmse(actual, predicted):
    return sqrt(mean_squared_error(actual, predicted))

# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
    return data[:-n_test], data[-n_test:]

# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
    predictions = list()
    # split dataset
    train, test = train_test_split(data, n_test)
    # seed history with training dataset
    history = [x for x in train]
    # step over each time-step in the test set
    for i in range(len(test)):
        # fit model and make forecast for history
        yhat = sarima_forecast(history, cfg)
        # store forecast in list of predictions
        predictions.append(yhat)
        # add actual observation to history for the next loop
        history.append(test[i])
    # estimate prediction error
    error = measure_rmse(test, predictions)
    return error

# score a model, return None on failure
def score_model(data, n_test, cfg, debug=False):
    result = None
    # convert config to a key
    key = str(cfg)
    # show all warnings and fail on exception if debugging
    if debug:
        result = walk_forward_validation(data, n_test, cfg)
    else:
        # one failure during model validation suggests an unstable config
        try:
            # never show warnings when grid searching, too noisy
            with catch_warnings():
                filterwarnings("ignore")
                result = walk_forward_validation(data, n_test, cfg)
        except Exception:
            result = None
    # check for an interesting result
    if result is not None:
        print(' > Model[%s] %.3f' % (key, result))
    return (key, result)

# grid search configs
def grid_search(data, cfg_list, n_test, parallel=True):
    scores = None
    if parallel:
        # execute configs in parallel
        executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing')
        tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list)
        scores = executor(tasks)
    else:
        scores = [score_model(data, n_test, cfg) for cfg in cfg_list]
    # remove failed configs
    scores = [r for r in scores if r[1] is not None]
    # sort configs by error, ascending
    scores.sort(key=lambda tup: tup[1])
    return scores

# create a set of sarima configs to try
def sarima_configs(seasonal=[0]):
    models = list()
    # define config lists
    p_params = [0, 1, 2]
    d_params = [0, 1]
    q_params = [0, 1, 2]
    t_params = ['n', 'c', 't', 'ct']
    P_params = [0, 1, 2]
    D_params = [0, 1]
    Q_params = [0, 1, 2]
    m_params = seasonal
    # create config instances
    for p in p_params:
        for d in d_params:
            for q in q_params:
                for t in t_params:
                    for P in P_params:
                        for D in D_params:
                            for Q in Q_params:
                                for m in m_params:
                                    cfg = [(p, d, q), (P, D, Q, m), t]
                                    models.append(cfg)
    return models

if __name__ == '__main__':
    # load dataset
    series = read_csv('daily-total-female-births.csv', header=0, index_col=0)
    data = series.values
    # data split: hold out the last 165 observations
    n_test = 165
    # model configs
    cfg_list = sarima_configs()
    # grid search
    scores = grid_search(data, cfg_list, n_test)
    print('done')
    # list top 3 configs
    for cfg, error in scores[:3]:
        print(cfg, error)
# grid search sarima hyperparameters for monthly shampoo sales dataset
from math import sqrt
from multiprocessing import cpu_count
from joblib import Parallel
from joblib import delayed
from warnings import catch_warnings
from warnings import filterwarnings
from statsmodels.tsa.statespace.sarimax import SARIMAX
from sklearn.metrics import mean_squared_error
from pandas import read_csv

# one-step sarima forecast
def sarima_forecast(history, config):
    order, sorder, trend = config
    # define model
    model = SARIMAX(history, order=order, seasonal_order=sorder, trend=trend,
                    enforce_stationarity=False, enforce_invertibility=False)
    # fit model
    model_fit = model.fit(disp=False)
    # make one step forecast
    yhat = model_fit.predict(len(history), len(history))
    return yhat[0]

# root mean squared error or rmse
def measure_rmse(actual, predicted):
    return sqrt(mean_squared_error(actual, predicted))

# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
    return data[:-n_test], data[-n_test:]

# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
    predictions = list()
    # split dataset
    train, test = train_test_split(data, n_test)
    # seed history with training dataset
    history = [x for x in train]
    # step over each time-step in the test set
    for i in range(len(test)):
        # fit model and make forecast for history
        yhat = sarima_forecast(history, cfg)
        # store forecast in list of predictions
        predictions.append(yhat)
        # add actual observation to history for the next loop
        history.append(test[i])
    # estimate prediction error
    error = measure_rmse(test, predictions)
    return error

# score a model, return None on failure
def score_model(data, n_test, cfg, debug=False):
    result = None
    # convert config to a key
    key = str(cfg)
    # show all warnings and fail on exception if debugging
    if debug:
        result = walk_forward_validation(data, n_test, cfg)
    else:
        # one failure during model validation suggests an unstable config
        try:
            # never show warnings when grid searching, too noisy
            with catch_warnings():
                filterwarnings("ignore")
                result = walk_forward_validation(data, n_test, cfg)
        except Exception:
            result = None
    # check for an interesting result
    if result is not None:
        print(' > Model[%s] %.3f' % (key, result))
    return (key, result)

# grid search configs
def grid_search(data, cfg_list, n_test, parallel=True):
    scores = None
    if parallel:
        # execute configs in parallel
        executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing')
        tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list)
        scores = executor(tasks)
    else:
        scores = [score_model(data, n_test, cfg) for cfg in cfg_list]
    # remove failed configs
    scores = [r for r in scores if r[1] is not None]
    # sort configs by error, ascending
    scores.sort(key=lambda tup: tup[1])
    return scores

# create a set of sarima configs to try
def sarima_configs(seasonal=[0]):
    models = list()
    # define config lists
    p_params = [0, 1, 2]
    d_params = [0, 1]
    q_params = [0, 1, 2]
    t_params = ['n', 'c', 't', 'ct']
    P_params = [0, 1, 2]
    D_params = [0, 1]
    Q_params = [0, 1, 2]
    m_params = seasonal
    # create config instances
    for p in p_params:
        for d in d_params:
            for q in q_params:
                for t in t_params:
                    for P in P_params:
                        for D in D_params:
                            for Q in Q_params:
                                for m in m_params:
                                    cfg = [(p, d, q), (P, D, Q, m), t]
                                    models.append(cfg)
    return models

if __name__ == '__main__':
    # load dataset
    series = read_csv('monthly-shampoo-sales.csv', header=0, index_col=0)
    data = series.values
    # data split
    n_test = 12
    # model configs
    cfg_list = sarima_configs()
    # grid search
    scores = grid_search(data, cfg_list, n_test)
    print('done')
    # list top 3 configs
    for cfg, error in scores[:3]:
        print(cfg, error)
# grid search sarima hyperparameters for monthly mean temp dataset
from math import sqrt
from multiprocessing import cpu_count
from joblib import Parallel
from joblib import delayed
from warnings import catch_warnings
from warnings import filterwarnings
from statsmodels.tsa.statespace.sarimax import SARIMAX
from sklearn.metrics import mean_squared_error
from pandas import read_csv

# one-step sarima forecast
def sarima_forecast(history, config):
    order, sorder, trend = config
    # define model
    model = SARIMAX(history, order=order, seasonal_order=sorder, trend=trend,
                    enforce_stationarity=False, enforce_invertibility=False)
    # fit model
    model_fit = model.fit(disp=False)
    # make one step forecast
    yhat = model_fit.predict(len(history), len(history))
    return yhat[0]

# root mean squared error or rmse
def measure_rmse(actual, predicted):
    return sqrt(mean_squared_error(actual, predicted))

# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
    return data[:-n_test], data[-n_test:]

# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
    predictions = list()
    # split dataset
    train, test = train_test_split(data, n_test)
    # seed history with training dataset
    history = [x for x in train]
    # step over each time-step in the test set
    for i in range(len(test)):
        # fit model and make forecast for history
        yhat = sarima_forecast(history, cfg)
        # store forecast in list of predictions
        predictions.append(yhat)
        # add actual observation to history for the next loop
        history.append(test[i])
    # estimate prediction error
    error = measure_rmse(test, predictions)
    return error

# score a model, return None on failure
def score_model(data, n_test, cfg, debug=False):
    result = None
    # convert config to a key
    key = str(cfg)
    # show all warnings and fail on exception if debugging
    if debug:
        result = walk_forward_validation(data, n_test, cfg)
    else:
        # one failure during model validation suggests an unstable config
        try:
            # never show warnings when grid searching, too noisy
            with catch_warnings():
                filterwarnings("ignore")
                result = walk_forward_validation(data, n_test, cfg)
        except Exception:
            result = None
    # check for an interesting result
    if result is not None:
        print(' > Model[%s] %.3f' % (key, result))
    return (key, result)

# grid search configs
def grid_search(data, cfg_list, n_test, parallel=True):
    scores = None
    if parallel:
        # execute configs in parallel
        executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing')
        tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list)
        scores = executor(tasks)
    else:
        scores = [score_model(data, n_test, cfg) for cfg in cfg_list]
    # remove failed configs
    scores = [r for r in scores if r[1] is not None]
    # sort configs by error, ascending
    scores.sort(key=lambda tup: tup[1])
    return scores

# create a set of sarima configs to try
def sarima_configs(seasonal=[0]):
    models = list()
    # define config lists
    p_params = [0, 1, 2]
    d_params = [0, 1]
    q_params = [0, 1, 2]
    t_params = ['n', 'c', 't', 'ct']
    P_params = [0, 1, 2]
    D_params = [0, 1]
    Q_params = [0, 1, 2]
    m_params = seasonal
    # create config instances
    for p in p_params:
        for d in d_params:
            for q in q_params:
                for t in t_params:
                    for P in P_params:
                        for D in D_params:
                            for Q in Q_params:
                                for m in m_params:
                                    cfg = [(p, d, q), (P, D, Q, m), t]
                                    models.append(cfg)
    return models

if __name__ == '__main__':
    # load dataset
    series = read_csv('monthly-mean-temp.csv', header=0, index_col=0)
    data = series.values
    # trim dataset to the most recent 5 years
    data = data[-(5 * 12):]
    # data split
    n_test = 12
    # model configs
    cfg_list = sarima_configs(seasonal=[0, 12])
    # grid search
    scores = grid_search(data, cfg_list, n_test)
    print('done')
    # list top 3 configs
    for cfg, error in scores[:3]:
        print(cfg, error)
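One detail specific to this script: data = data[-(5*12):] trims the series to its most recent five years (60 monthly observations) before searching, which keeps the walk-forward grid search tractable. The slice behaves like this:

# keep only the most recent 5 years of monthly observations
vals = list(range(240))        # 20 years of toy monthly values
recent = vals[-(5 * 12):]      # last 60 observations
print(len(recent), recent[0])  # 60 180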
# grid search sarima hyperparameters for monthly car sales dataset
from math import sqrt
from multiprocessing import cpu_count
from joblib import Parallel
from joblib import delayed
from warnings import catch_warnings
from warnings import filterwarnings
from statsmodels.tsa.statespace.sarimax import SARIMAX
from sklearn.metrics import mean_squared_error
from pandas import read_csv

# one-step sarima forecast
def sarima_forecast(history, config):
    order, sorder, trend = config
    # define model
    model = SARIMAX(history, order=order, seasonal_order=sorder, trend=trend,
                    enforce_stationarity=False, enforce_invertibility=False)
    # fit model
    model_fit = model.fit(disp=False)
    # make one step forecast
    yhat = model_fit.predict(len(history), len(history))
    return yhat[0]

# root mean squared error or rmse
def measure_rmse(actual, predicted):
    return sqrt(mean_squared_error(actual, predicted))

# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
    return data[:-n_test], data[-n_test:]

# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
    predictions = list()
    # split dataset
    train, test = train_test_split(data, n_test)
    # seed history with training dataset
    history = [x for x in train]
    # step over each time-step in the test set
    for i in range(len(test)):
        # fit model and make forecast for history
        yhat = sarima_forecast(history, cfg)
        # store forecast in list of predictions
        predictions.append(yhat)
        # add actual observation to history for the next loop
        history.append(test[i])
    # estimate prediction error
    error = measure_rmse(test, predictions)
    return error

# score a model, return None on failure
def score_model(data, n_test, cfg, debug=False):
    result = None
    # convert config to a key
    key = str(cfg)
    # show all warnings and fail on exception if debugging
    if debug:
        result = walk_forward_validation(data, n_test, cfg)
    else:
        # one failure during model validation suggests an unstable config
        try:
            # never show warnings when grid searching, too noisy
            with catch_warnings():
                filterwarnings("ignore")
                result = walk_forward_validation(data, n_test, cfg)
        except Exception:
            result = None
    # check for an interesting result
    if result is not None:
        print(' > Model[%s] %.3f' % (key, result))
    return (key, result)

# grid search configs
def grid_search(data, cfg_list, n_test, parallel=True):
    scores = None
    if parallel:
        # execute configs in parallel
        executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing')
        tasks = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list)
        scores = executor(tasks)
    else:
        scores = [score_model(data, n_test, cfg) for cfg in cfg_list]
    # remove failed configs
    scores = [r for r in scores if r[1] is not None]
    # sort configs by error, ascending
    scores.sort(key=lambda tup: tup[1])
    return scores

# create a set of sarima configs to try
def sarima_configs(seasonal=[0]):
    models = list()
    # define config lists
    p_params = [0, 1, 2]
    d_params = [0, 1]
    q_params = [0, 1, 2]
    t_params = ['n', 'c', 't', 'ct']
    P_params = [0, 1, 2]
    D_params = [0, 1]
    Q_params = [0, 1, 2]
    m_params = seasonal
    # create config instances
    for p in p_params:
        for d in d_params:
            for q in q_params:
                for t in t_params:
                    for P in P_params:
                        for D in D_params:
                            for Q in Q_params:
                                for m in m_params:
                                    cfg = [(p, d, q), (P, D, Q, m), t]
                                    models.append(cfg)
    return models

if __name__ == '__main__':
    # load dataset
    series = read_csv('monthly-car-sales.csv', header=0, index_col=0)
    data = series.values
    # data split
    n_test = 12
    # model configs
    cfg_list = sarima_configs(seasonal=[0, 6, 12])
    # grid search
    scores = grid_search(data, cfg_list, n_test)
    print('done')
    # list top 3 configs
    for cfg, error in scores[:3]:
        print(cfg, error)
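After the search, the best config still has to be refit on the whole series to produce a usable forecast. A minimal sketch, assuming the SARIMAX setup from the scripts above; the config values below are placeholders standing in for the top grid-search result:

# refit a chosen sarima config on the full series and forecast the next month
best_order, best_sorder, best_trend = (1, 0, 2), (1, 0, 1, 12), 'c'  # placeholder values
model = SARIMAX(data, order=best_order, seasonal_order=best_sorder, trend=best_trend,
                enforce_stationarity=False, enforce_invertibility=False)
model_fit = model.fit(disp=False)
# index len(data) is the first out-of-sample step
print(model_fit.predict(len(data), len(data))[0])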
# persistence forecast for monthly car sales dataset
from math import sqrt
from numpy import median
from numpy import mean
from numpy import std
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot

# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
    return data[:-n_test], data[-n_test:]

# root mean squared error or rmse
def measure_rmse(actual, predicted):
    return sqrt(mean_squared_error(actual, predicted))

# difference dataset
def difference(data, interval):
    return [data[i] - data[i - interval] for i in range(interval, len(data))]

# fit a model (persistence has nothing to fit)
def model_fit(train, config):
    return None

# forecast with a pre-fit model: median of the observations at the given offsets
def model_predict(model, history, config):
    values = list()
    for offset in config:
        values.append(history[-offset])
    return median(values)

# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
    predictions = list()
    # split dataset
    train, test = train_test_split(data, n_test)
    # fit model
    model = model_fit(train, cfg)
    # seed history with training dataset
    history = [x for x in train]
    # step over each time-step in the test set
    for i in range(len(test)):
        # make forecast for history
        yhat = model_predict(model, history, cfg)
        # store forecast in list of predictions
        predictions.append(yhat)
        # add actual observation to history for the next loop
        history.append(test[i])
    # estimate prediction error
    error = measure_rmse(test, predictions)
    print(' > %.3f' % error)
    return error

# repeat evaluation of a config
def repeat_evaluate(data, config, n_test, n_repeats=30):
    # fit and evaluate the model n times
    scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)]
    return scores

# summarize model performance
def summarize_scores(name, scores):
    # print a summary
    scores_m, score_std = mean(scores), std(scores)
    print('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std))
    # box and whisker plot
    pyplot.boxplot(scores)
    pyplot.show()

# load dataset
series = read_csv('monthly-car-sales.csv', header=0, index_col=0)
data = series.values
# data split
n_test = 12
# define config: offsets of the same month in the previous 1, 2 and 3 years
config = [12, 24, 36]
# repeated evaluation
scores = repeat_evaluate(data, config, n_test)
# summarize scores
summarize_scores('persistence', scores)
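Worth noting: this "persistence" baseline is seasonal rather than naive. With config = [12, 24, 36], each forecast is the median of the same calendar month in the previous three years. A tiny worked example of that rule, assuming a toy four-year history:

# seasonal persistence by hand: median of the same month in prior years
from numpy import median
history = list(range(1, 49))  # four years of toy monthly values 1..48
offsets = [12, 24, 36]
print(median([history[-o] for o in offsets]))  # median of 37, 25, 13 -> 25.0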
# evaluate mlp for monthly car sales dataset
from math import sqrt
from numpy import array
from numpy import mean
from numpy import std
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from matplotlib import pyplot

# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
    return data[:-n_test], data[-n_test:]

# transform list into supervised learning format
def series_to_supervised(data, n_in, n_out=1):
    df = DataFrame(data)
    cols = list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
    # put it all together
    agg = concat(cols, axis=1)
    # drop rows with NaN values
    agg.dropna(inplace=True)
    return agg.values

# root mean squared error or rmse
def measure_rmse(actual, predicted):
    return sqrt(mean_squared_error(actual, predicted))

# fit a model
def model_fit(train, config):
    # unpack config
    n_input, n_nodes, n_epochs, n_batch = config
    # prepare data
    data = series_to_supervised(train, n_input)
    train_x, train_y = data[:, :-1], data[:, -1]
    # define model
    model = Sequential()
    model.add(Dense(n_nodes, activation='relu', input_dim=n_input))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    # fit
    model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0)
    return model

# forecast with a pre-fit model
def model_predict(model, history, config):
    # unpack config
    n_input, _, _, _ = config
    # prepare data
    x_input = array(history[-n_input:]).reshape(1, n_input)
    # forecast
    yhat = model.predict(x_input, verbose=0)
    return yhat[0]

# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
    predictions = list()
    # split dataset
    train, test = train_test_split(data, n_test)
    # fit model
    model = model_fit(train, cfg)
    # seed history with training dataset
    history = [x for x in train]
    # step over each time-step in the test set
    for i in range(len(test)):
        # make forecast for history
        yhat = model_predict(model, history, cfg)
        # store forecast in list of predictions
        predictions.append(yhat)
        # add actual observation to history for the next loop
        history.append(test[i])
    # estimate prediction error
    error = measure_rmse(test, predictions)
    print(' > %.3f' % error)
    return error

# repeat evaluation of a config
def repeat_evaluate(data, config, n_test, n_repeats=30):
    # fit and evaluate the model n times
    scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)]
    return scores

# summarize model performance
def summarize_scores(name, scores):
    # print a summary
    scores_m, score_std = mean(scores), std(scores)
    print('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std))
    # box and whisker plot
    pyplot.boxplot(scores)
    pyplot.show()

# load dataset
series = read_csv('monthly-car-sales.csv', header=0, index_col=0)
data = series.values
# data split
n_test = 12
# define config: n_input, n_nodes, n_epochs, n_batch
config = [24, 500, 100, 100]
# repeated evaluation
scores = repeat_evaluate(data, config, n_test)
# summarize scores
summarize_scores('mlp', scores)
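The series_to_supervised transform is what turns the forecasting task into ordinary supervised learning: each row holds the n_in previous values as inputs followed by the current value as the target. A quick check on a toy series, assuming the function defined in the script above:

# each row is [t-3, t-2, t-1, t] for a 3-lag input window
print(series_to_supervised([1, 2, 3, 4, 5, 6], n_in=3))
# [[1. 2. 3. 4.]
#  [2. 3. 4. 5.]
#  [3. 4. 5. 6.]]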
# evaluate cnn for monthly car sales dataset | |
from math import sqrt | |
from numpy import array | |
from numpy import mean | |
from numpy import std | |
from pandas import DataFrame | |
from pandas import concat | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
from matplotlib import pyplot | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# transform list into supervised learning format | |
def series_to_supervised(data, n_in, n_out=1): | |
df = DataFrame(data) | |
cols = list() | |
# input sequence (t-n, ... t-1) | |
for i in range(n_in, 0, -1): | |
cols.append(df.shift(i)) | |
# forecast sequence (t, t+1, ... t+n) | |
for i in range(0, n_out): | |
cols.append(df.shift(-i)) | |
# put it all together | |
agg = concat(cols, axis=1) | |
# drop rows with NaN values | |
agg.dropna(inplace=True) | |
return agg.values | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# fit a model | |
def model_fit(train, config): | |
# unpack config | |
n_input, n_filters, n_kernel, n_epochs, n_batch = config | |
# prepare data | |
data = series_to_supervised(train, n_input) | |
train_x, train_y = data[:, :-1], data[:, -1] | |
train_x = train_x.reshape((train_x.shape[0], train_x.shape[1], 1)) | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=n_filters, kernel_size=n_kernel, activation='relu', input_shape=(n_input, 1))) | |
model.add(Conv1D(filters=n_filters, kernel_size=n_kernel, activation='relu')) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(1)) | |
model.compile(loss='mse', optimizer='adam') | |
# fit | |
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0) | |
return model | |
# forecast with a pre-fit model | |
def model_predict(model, history, config): | |
# unpack config | |
n_input, _, _, _, _ = config | |
# prepare data | |
x_input = array(history[-n_input:]).reshape((1, n_input, 1)) | |
# forecast | |
yhat = model.predict(x_input, verbose=0) | |
return yhat[0] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# fit model | |
model = model_fit(train, cfg) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = model_predict(model, history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
print(' > %.3f' % error) | |
return error | |
# repeat evaluation of a config | |
def repeat_evaluate(data, config, n_test, n_repeats=30): | |
# fit and evaluate the model n times | |
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)] | |
return scores | |
# summarize model performance | |
def summarize_scores(name, scores): | |
# print a summary | |
scores_m, score_std = mean(scores), std(scores) | |
print('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std)) | |
# box and whisker plot | |
pyplot.boxplot(scores) | |
pyplot.show() | |
series = read_csv('monthly-car-sales.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# define config | |
config = [36, 256, 3, 100, 100] | |
# repeated evaluation of the config | |
scores = repeat_evaluate(data, config, n_test) | |
# summarize scores | |
summarize_scores('cnn', scores) |
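One data-handling detail separates the CNN script from the MLP: Conv1D consumes three-dimensional input shaped [samples, timesteps, features], hence the extra reshape after `series_to_supervised`. A minimal sketch of that reshape:

```python
from numpy import array

train_x = array([[10, 20, 30], [20, 30, 40]])  # [samples, timesteps]
train_x = train_x.reshape((train_x.shape[0], train_x.shape[1], 1))
print(train_x.shape)  # (2, 3, 1) -> [samples, timesteps, features]
```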
# evaluate lstm for monthly car sales dataset | |
from math import sqrt | |
from numpy import array | |
from numpy import mean | |
from numpy import std | |
from pandas import DataFrame | |
from pandas import concat | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import LSTM | |
from matplotlib import pyplot | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# transform list into supervised learning format | |
def series_to_supervised(data, n_in, n_out=1): | |
df = DataFrame(data) | |
cols = list() | |
# input sequence (t-n, ... t-1) | |
for i in range(n_in, 0, -1): | |
cols.append(df.shift(i)) | |
# forecast sequence (t, t+1, ... t+n) | |
for i in range(0, n_out): | |
cols.append(df.shift(-i)) | |
# put it all together | |
agg = concat(cols, axis=1) | |
# drop rows with NaN values | |
agg.dropna(inplace=True) | |
return agg.values | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# difference dataset | |
def difference(data, interval): | |
return [data[i] - data[i - interval] for i in range(interval, len(data))] | |
# fit a model | |
def model_fit(train, config): | |
# unpack config | |
n_input, n_nodes, n_epochs, n_batch, n_diff = config | |
# prepare data | |
if n_diff > 0: | |
train = difference(train, n_diff) | |
data = series_to_supervised(train, n_input) | |
train_x, train_y = data[:, :-1], data[:, -1] | |
train_x = train_x.reshape((train_x.shape[0], train_x.shape[1], 1)) | |
# define model | |
model = Sequential() | |
model.add(LSTM(n_nodes, activation='relu', input_shape=(n_input, 1))) | |
model.add(Dense(n_nodes, activation='relu')) | |
model.add(Dense(1)) | |
model.compile(loss='mse', optimizer='adam') | |
# fit | |
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0) | |
return model | |
# forecast with a pre-fit model | |
def model_predict(model, history, config): | |
# unpack config | |
n_input, _, _, _, n_diff = config | |
# prepare data | |
correction = 0.0 | |
if n_diff > 0: | |
correction = history[-n_diff] | |
history = difference(history, n_diff) | |
x_input = array(history[-n_input:]).reshape((1, n_input, 1)) | |
# forecast | |
yhat = model.predict(x_input, verbose=0) | |
return correction + yhat[0] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# fit model | |
model = model_fit(train, cfg) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = model_predict(model, history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
print(' > %.3f' % error) | |
return error | |
# repeat evaluation of a config | |
def repeat_evaluate(data, config, n_test, n_repeats=30): | |
# fit and evaluate the model n times | |
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)] | |
return scores | |
# summarize model performance | |
def summarize_scores(name, scores): | |
# print a summary | |
scores_m, score_std = mean(scores), std(scores) | |
print('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std)) | |
# box and whisker plot | |
pyplot.boxplot(scores) | |
pyplot.show() | |
series = read_csv('monthly-car-sales.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# define config | |
config = [36, 50, 100, 100, 12] | |
# repeated evaluation of the config | |
scores = repeat_evaluate(data, config, n_test) | |
# summarize scores | |
summarize_scores('lstm', scores) |
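The `n_diff=12` element of the LSTM config seasonally differences the series before training, and `model_predict` adds back the observation from `n_diff` steps earlier (`correction`) to return forecasts to the original scale. A minimal sketch of that round trip on a toy series with period 3:

```python
# seasonal differencing and its inversion, as used by model_predict above
def difference(data, interval):
    return [data[i] - data[i - interval] for i in range(interval, len(data))]

series = [100, 110, 120, 105, 115, 125]
diffed = difference(series, 3)     # [5, 5, 5]
# the model forecasts the next *differenced* value; adding the observation
# 3 steps back restores the original scale
yhat_diff = 5.0                    # pretend this came from the model
print(series[-3] + yhat_diff)      # 110.0, the forecast for the next period
```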
# evaluate cnn-lstm for monthly car sales dataset | |
from math import sqrt | |
from numpy import array | |
from numpy import mean | |
from numpy import std | |
from pandas import DataFrame | |
from pandas import concat | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import LSTM | |
from keras.layers import TimeDistributed | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
from matplotlib import pyplot | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# transform list into supervised learning format | |
def series_to_supervised(data, n_in, n_out=1): | |
df = DataFrame(data) | |
cols = list() | |
# input sequence (t-n, ... t-1) | |
for i in range(n_in, 0, -1): | |
cols.append(df.shift(i)) | |
# forecast sequence (t, t+1, ... t+n) | |
for i in range(0, n_out): | |
cols.append(df.shift(-i)) | |
# put it all together | |
agg = concat(cols, axis=1) | |
# drop rows with NaN values | |
agg.dropna(inplace=True) | |
return agg.values | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# fit a model | |
def model_fit(train, config): | |
# unpack config | |
n_seq, n_steps, n_filters, n_kernel, n_nodes, n_epochs, n_batch = config | |
n_input = n_seq * n_steps | |
# prepare data | |
data = series_to_supervised(train, n_input) | |
train_x, train_y = data[:, :-1], data[:, -1] | |
train_x = train_x.reshape((train_x.shape[0], n_seq, n_steps, 1)) | |
# define model | |
model = Sequential() | |
model.add(TimeDistributed(Conv1D(filters=n_filters, kernel_size=n_kernel, activation='relu'), input_shape=(None,n_steps,1))) | |
model.add(TimeDistributed(Conv1D(filters=n_filters, kernel_size=n_kernel, activation='relu'))) | |
model.add(TimeDistributed(MaxPooling1D(pool_size=2))) | |
model.add(TimeDistributed(Flatten())) | |
model.add(LSTM(n_nodes, activation='relu')) | |
model.add(Dense(n_nodes, activation='relu')) | |
model.add(Dense(1)) | |
model.compile(loss='mse', optimizer='adam') | |
# fit | |
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0) | |
return model | |
# forecast with a pre-fit model | |
def model_predict(model, history, config): | |
# unpack config | |
n_seq, n_steps, _, _, _, _, _ = config | |
n_input = n_seq * n_steps | |
# prepare data | |
x_input = array(history[-n_input:]).reshape((1, n_seq, n_steps, 1)) | |
# forecast | |
yhat = model.predict(x_input, verbose=0) | |
return yhat[0] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# fit model | |
model = model_fit(train, cfg) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = model_predict(model, history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
print(' > %.3f' % error) | |
return error | |
# repeat evaluation of a config | |
def repeat_evaluate(data, config, n_test, n_repeats=30): | |
# fit and evaluate the model n times | |
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)] | |
return scores | |
# summarize model performance | |
def summarize_scores(name, scores): | |
# print a summary | |
scores_m, score_std = mean(scores), std(scores) | |
print('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std)) | |
# box and whisker plot | |
pyplot.boxplot(scores) | |
pyplot.show() | |
series = read_csv('monthly-car-sales.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# define config | |
config = [3, 12, 64, 3, 100, 200, 100] | |
# repeated evaluation of the config | |
scores = repeat_evaluate(data, config, n_test) | |
# summarize scores | |
summarize_scores('cnn-lstm', scores) |
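The CNN-LSTM splits each window of `n_input = n_seq * n_steps` observations into `n_seq` subsequences of `n_steps` steps: the TimeDistributed CNN reads one subsequence at a time, and the LSTM reads the sequence of CNN outputs. A minimal sketch of the reshape with toy sizes (the config above uses n_seq=3, n_steps=12):

```python
from numpy import arange

n_seq, n_steps = 2, 3
x_input = arange(n_seq * n_steps).reshape((1, n_seq, n_steps, 1))
print(x_input.shape)  # (1, 2, 3, 1) -> [samples, subsequences, timesteps, features]
```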
# evaluate convlstm for monthly car sales dataset | |
from math import sqrt | |
from numpy import array | |
from numpy import mean | |
from numpy import std | |
from pandas import DataFrame | |
from pandas import concat | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import ConvLSTM2D | |
from matplotlib import pyplot | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# transform list into supervised learning format | |
def series_to_supervised(data, n_in, n_out=1): | |
df = DataFrame(data) | |
cols = list() | |
# input sequence (t-n, ... t-1) | |
for i in range(n_in, 0, -1): | |
cols.append(df.shift(i)) | |
# forecast sequence (t, t+1, ... t+n) | |
for i in range(0, n_out): | |
cols.append(df.shift(-i)) | |
# put it all together | |
agg = concat(cols, axis=1) | |
# drop rows with NaN values | |
agg.dropna(inplace=True) | |
return agg.values | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# difference dataset | |
def difference(data, interval): | |
return [data[i] - data[i - interval] for i in range(interval, len(data))] | |
# fit a model | |
def model_fit(train, config): | |
# unpack config | |
n_seq, n_steps, n_filters, n_kernel, n_nodes, n_epochs, n_batch = config | |
n_input = n_seq * n_steps | |
# prepare data | |
data = series_to_supervised(train, n_input) | |
train_x, train_y = data[:, :-1], data[:, -1] | |
train_x = train_x.reshape((train_x.shape[0], n_seq, 1, n_steps, 1)) | |
# define model | |
model = Sequential() | |
model.add(ConvLSTM2D(filters=n_filters, kernel_size=(1,n_kernel), activation='relu', input_shape=(n_seq, 1, n_steps, 1))) | |
model.add(Flatten()) | |
model.add(Dense(n_nodes, activation='relu')) | |
model.add(Dense(1)) | |
model.compile(loss='mse', optimizer='adam') | |
# fit | |
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0) | |
return model | |
# forecast with a pre-fit model | |
def model_predict(model, history, config): | |
# unpack config | |
n_seq, n_steps, _, _, _, _, _ = config | |
n_input = n_seq * n_steps | |
# prepare data | |
x_input = array(history[-n_input:]).reshape((1, n_seq, 1, n_steps, 1)) | |
# forecast | |
yhat = model.predict(x_input, verbose=0) | |
return yhat[0] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# fit model | |
model = model_fit(train, cfg) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = model_predict(model, history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
print(' > %.3f' % error) | |
return error | |
# repeat evaluation of a config | |
def repeat_evaluate(data, config, n_test, n_repeats=30): | |
# fit and evaluate the model n times | |
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)] | |
return scores | |
# summarize model performance | |
def summarize_scores(name, scores): | |
# print a summary | |
scores_m, score_std = mean(scores), std(scores) | |
print('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std)) | |
# box and whisker plot | |
pyplot.boxplot(scores) | |
pyplot.show() | |
series = read_csv('monthly-car-sales.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# define config | |
config = [3, 12, 256, 3, 200, 200, 100] | |
# repeated evaluation of the config | |
scores = repeat_evaluate(data, config, n_test) | |
# summarize scores | |
summarize_scores('convlstm', scores) |
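ConvLSTM2D performs its convolution inside the recurrent cell and reads each subsequence as a one-row "image", so the input gains an extra rows axis of size 1. A minimal sketch with the same toy sizes:

```python
from numpy import arange

n_seq, n_steps = 2, 3
x_input = arange(n_seq * n_steps).reshape((1, n_seq, 1, n_steps, 1))
print(x_input.shape)  # (1, 2, 1, 3, 1) -> [samples, subsequences, rows, timesteps, features]
```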
Month | Sales |
---|---|
1960-01 | 6550 |
1960-02 | 8728 | |
1960-03 | 12026 | |
1960-04 | 14395 | |
1960-05 | 14587 | |
1960-06 | 13791 | |
1960-07 | 9498 | |
1960-08 | 8251 | |
1960-09 | 7049 | |
1960-10 | 9545 | |
1960-11 | 9364 | |
1960-12 | 8456 | |
1961-01 | 7237 | |
1961-02 | 9374 | |
1961-03 | 11837 | |
1961-04 | 13784 | |
1961-05 | 15926 | |
1961-06 | 13821 | |
1961-07 | 11143 | |
1961-08 | 7975 | |
1961-09 | 7610 | |
1961-10 | 10015 | |
1961-11 | 12759 | |
1961-12 | 8816 | |
1962-01 | 10677 | |
1962-02 | 10947 | |
1962-03 | 15200 | |
1962-04 | 17010 | |
1962-05 | 20900 | |
1962-06 | 16205 | |
1962-07 | 12143 | |
1962-08 | 8997 | |
1962-09 | 5568 | |
1962-10 | 11474 | |
1962-11 | 12256 | |
1962-12 | 10583 | |
1963-01 | 10862 | |
1963-02 | 10965 | |
1963-03 | 14405 | |
1963-04 | 20379 | |
1963-05 | 20128 | |
1963-06 | 17816 | |
1963-07 | 12268 | |
1963-08 | 8642 | |
1963-09 | 7962 | |
1963-10 | 13932 | |
1963-11 | 15936 | |
1963-12 | 12628 | |
1964-01 | 12267 | |
1964-02 | 12470 | |
1964-03 | 18944 | |
1964-04 | 21259 | |
1964-05 | 22015 | |
1964-06 | 18581 | |
1964-07 | 15175 | |
1964-08 | 10306 | |
1964-09 | 10792 | |
1964-10 | 14752 | |
1964-11 | 13754 | |
1964-12 | 11738 | |
1965-01 | 12181 | |
1965-02 | 12965 | |
1965-03 | 19990 | |
1965-04 | 23125 | |
1965-05 | 23541 | |
1965-06 | 21247 | |
1965-07 | 15189 | |
1965-08 | 14767 | |
1965-09 | 10895 | |
1965-10 | 17130 | |
1965-11 | 17697 | |
1965-12 | 16611 | |
1966-01 | 12674 | |
1966-02 | 12760 | |
1966-03 | 20249 | |
1966-04 | 22135 | |
1966-05 | 20677 | |
1966-06 | 19933 | |
1966-07 | 15388 | |
1966-08 | 15113 | |
1966-09 | 13401 | |
1966-10 | 16135 | |
1966-11 | 17562 | |
1966-12 | 14720 | |
1967-01 | 12225 | |
1967-02 | 11608 | |
1967-03 | 20985 | |
1967-04 | 19692 | |
1967-05 | 24081 | |
1967-06 | 22114 | |
1967-07 | 14220 | |
1967-08 | 13434 | |
1967-09 | 13598 | |
1967-10 | 17187 | |
1967-11 | 16119 | |
1967-12 | 13713 | |
1968-01 | 13210 | |
1968-02 | 14251 | |
1968-03 | 20139 | |
1968-04 | 21725 | |
1968-05 | 26099 | |
1968-06 | 21084 | |
1968-07 | 18024 | |
1968-08 | 16722 | |
1968-09 | 14385 | |
1968-10 | 21342 | |
1968-11 | 17180 | |
1968-12 | 14577 |
# load and plot monthly airline passengers dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load | |
series = read_csv('monthly-airline-passengers.csv', header=0, index_col=0) | |
# summarize shape | |
print(series.shape) | |
# plot | |
pyplot.plot(series) | |
pyplot.xticks([]) | |
pyplot.show() |
# grid search persistence models for monthly airline passengers dataset | |
from math import sqrt | |
from numpy import mean | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# fit a model | |
def model_fit(train, config): | |
return None | |
# forecast with a pre-fit model | |
def model_predict(model, history, offset): | |
return history[-offset] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# fit model | |
model = model_fit(train, cfg) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = model_predict(model, history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
print(' > %.3f' % error) | |
return error | |
# score a model by averaging repeated evaluations | |
def repeat_evaluate(data, config, n_test, n_repeats=10): | |
# convert config to a key | |
key = str(config) | |
# fit and evaluate the model n times | |
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)] | |
# summarize score | |
result = mean(scores) | |
print('> Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test): | |
# evaluate configs | |
scores = [repeat_evaluate(data, cfg, n_test) for cfg in cfg_list] | |
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# define dataset | |
series = read_csv('monthly-airline-passengers.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# model configs | |
cfg_list = [1, 6, 12, 24, 36] | |
# grid search | |
scores = grid_search(data, cfg_list, n_test) | |
print('done') | |
# list top 10 configs | |
for cfg, error in scores[:10]: | |
print(cfg, error) |
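In this script `cfg` is just an integer offset: `model_predict` returns the observation `offset` steps back, so the grid over `[1, 6, 12, 24, 36]` compares persisting last month against the values 6, 12, 24 and 36 months ago. For monthly data with yearly seasonality, the 12-month offset is usually the baseline to beat. A minimal illustration on a toy history:

```python
# persistence is a lookup into recent history
history = [112, 118, 132, 129, 121, 135]

def model_predict(model, history, offset):
    return history[-offset]

print(model_predict(None, history, 1))  # 135, persist the last value
print(model_predict(None, history, 6))  # 112, persist the value 6 steps back
```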
# grid search mlps for monthly airline passengers dataset | |
from math import sqrt | |
from numpy import array | |
from numpy import mean | |
from pandas import DataFrame | |
from pandas import concat | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from keras.models import Sequential | |
from keras.layers import Dense | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# transform list into supervised learning format | |
def series_to_supervised(data, n_in, n_out=1): | |
df = DataFrame(data) | |
cols = list() | |
# input sequence (t-n, ... t-1) | |
for i in range(n_in, 0, -1): | |
cols.append(df.shift(i)) | |
# forecast sequence (t, t+1, ... t+n) | |
for i in range(0, n_out): | |
cols.append(df.shift(-i)) | |
# put it all together | |
agg = concat(cols, axis=1) | |
# drop rows with NaN values | |
agg.dropna(inplace=True) | |
return agg.values | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# difference dataset | |
def difference(data, order): | |
return [data[i] - data[i - order] for i in range(order, len(data))] | |
# fit a model | |
def model_fit(train, config): | |
# unpack config | |
n_input, n_nodes, n_epochs, n_batch, n_diff = config | |
# prepare data | |
if n_diff > 0: | |
train = difference(train, n_diff) | |
# transform series into supervised format | |
data = series_to_supervised(train, n_in=n_input) | |
# separate inputs and outputs | |
train_x, train_y = data[:, :-1], data[:, -1] | |
# define model | |
model = Sequential() | |
model.add(Dense(n_nodes, activation='relu', input_dim=n_input)) | |
model.add(Dense(1)) | |
model.compile(loss='mse', optimizer='adam') | |
# fit model | |
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0) | |
return model | |
# forecast with the fit model | |
def model_predict(model, history, config): | |
# unpack config | |
n_input, _, _, _, n_diff = config | |
# prepare data | |
correction = 0.0 | |
if n_diff > 0: | |
correction = history[-n_diff] | |
history = difference(history, n_diff) | |
# shape input for model | |
x_input = array(history[-n_input:]).reshape((1, n_input)) | |
# make forecast | |
yhat = model.predict(x_input, verbose=0) | |
# correct forecast if it was differenced | |
return correction + yhat[0] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# fit model | |
model = model_fit(train, cfg) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = model_predict(model, history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
print(' > %.3f' % error) | |
return error | |
# score a model by averaging repeated evaluations | |
def repeat_evaluate(data, config, n_test, n_repeats=10): | |
# convert config to a key | |
key = str(config) | |
# fit and evaluate the model n times | |
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)] | |
# summarize score | |
result = mean(scores) | |
print('> Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test): | |
# evaluate configs | |
scores = [repeat_evaluate(data, cfg, n_test) for cfg in cfg_list] | |
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a list of configs to try | |
def model_configs(): | |
# define scope of configs | |
n_input = [12] | |
n_nodes = [50, 100] | |
n_epochs = [100] | |
n_batch = [1, 150] | |
n_diff = [0, 12] | |
# create configs | |
configs = list() | |
for i in n_input: | |
for j in n_nodes: | |
for k in n_epochs: | |
for l in n_batch: | |
for m in n_diff: | |
cfg = [i, j, k, l, m] | |
configs.append(cfg) | |
print('Total configs: %d' % len(configs)) | |
return configs | |
# define dataset | |
series = read_csv('monthly-airline-passengers.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# model configs | |
cfg_list = model_configs() | |
# grid search | |
scores = grid_search(data, cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
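One weak point of the grid search as written: a single config that raises (for example, a batch size incompatible with the training data) halts the whole run. A hedged sketch of a fault-tolerant variant, assuming the `walk_forward_validation` and `mean` defined above; `repeat_evaluate_safe` and `grid_search_safe` are illustrative names, not part of the original script:

```python
# score a config, returning None instead of raising on failure
def repeat_evaluate_safe(data, config, n_test, n_repeats=10):
    key = str(config)
    try:
        scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)]
        result = mean(scores)
    except Exception:
        result = None
    print('> Model[%s] %s' % (key, result))
    return (key, result)

# grid search that drops failed configs before sorting by error
def grid_search_safe(data, cfg_list, n_test):
    scores = [repeat_evaluate_safe(data, cfg, n_test) for cfg in cfg_list]
    scores = [r for r in scores if r[1] is not None]
    scores.sort(key=lambda tup: tup[1])
    return scores
```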
# grid search cnn for monthly airline passengers dataset | |
from math import sqrt | |
from numpy import array | |
from numpy import mean | |
from pandas import DataFrame | |
from pandas import concat | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# transform list into supervised learning format | |
def series_to_supervised(data, n_in, n_out=1): | |
df = DataFrame(data) | |
cols = list() | |
# input sequence (t-n, ... t-1) | |
for i in range(n_in, 0, -1): | |
cols.append(df.shift(i)) | |
# forecast sequence (t, t+1, ... t+n) | |
for i in range(0, n_out): | |
cols.append(df.shift(-i)) | |
# put it all together | |
agg = concat(cols, axis=1) | |
# drop rows with NaN values | |
agg.dropna(inplace=True) | |
return agg.values | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# difference dataset | |
def difference(data, order): | |
return [data[i] - data[i - order] for i in range(order, len(data))] | |
# fit a model | |
def model_fit(train, config): | |
# unpack config | |
n_input, n_filters, n_kernel, n_epochs, n_batch, n_diff = config | |
# prepare data | |
if n_diff > 0: | |
train = difference(train, n_diff) | |
# transform series into supervised format | |
data = series_to_supervised(train, n_in=n_input) | |
# separate inputs and outputs | |
train_x, train_y = data[:, :-1], data[:, -1] | |
# reshape input data into [samples, timesteps, features] | |
n_features = 1 | |
train_x = train_x.reshape((train_x.shape[0], train_x.shape[1], n_features)) | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=n_filters, kernel_size=n_kernel, activation='relu', input_shape=(n_input, n_features))) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(1)) | |
model.compile(loss='mse', optimizer='adam') | |
# fit | |
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0) | |
return model | |
# forecast with the fit model | |
def model_predict(model, history, config): | |
# unpack config | |
n_input, _, _, _, _, n_diff = config | |
# prepare data | |
correction = 0.0 | |
if n_diff > 0: | |
correction = history[-n_diff] | |
history = difference(history, n_diff) | |
x_input = array(history[-n_input:]).reshape((1, n_input, 1)) | |
# forecast | |
yhat = model.predict(x_input, verbose=0) | |
return correction + yhat[0] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# fit model | |
model = model_fit(train, cfg) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = model_predict(model, history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
print(' > %.3f' % error) | |
return error | |
# score a model by averaging repeated evaluations | |
def repeat_evaluate(data, config, n_test, n_repeats=10): | |
# convert config to a key | |
key = str(config) | |
# fit and evaluate the model n times | |
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)] | |
# summarize score | |
result = mean(scores) | |
print('> Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test): | |
# evaluate configs | |
scores = [repeat_evaluate(data, cfg, n_test) for cfg in cfg_list] | |
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a list of configs to try | |
def model_configs(): | |
# define scope of configs | |
n_input = [12] | |
n_filters = [64] | |
n_kernels = [3, 5] | |
n_epochs = [100] | |
n_batch = [1, 150] | |
n_diff = [0, 12] | |
# create configs | |
configs = list() | |
for a in n_input: | |
for b in n_filters: | |
for c in n_kernels: | |
for d in n_epochs: | |
for e in n_batch: | |
for f in n_diff: | |
cfg = [a,b,c,d,e,f] | |
configs.append(cfg) | |
print('Total configs: %d' % len(configs)) | |
return configs | |
# define dataset | |
series = read_csv('monthly-airline-passengers.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# model configs | |
cfg_list = model_configs() | |
# grid search | |
scores = grid_search(data, cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
# grid search lstm for monthly airline passengers dataset | |
from math import sqrt | |
from numpy import array | |
from numpy import mean | |
from pandas import DataFrame | |
from pandas import concat | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import LSTM | |
# split a univariate dataset into train/test sets | |
def train_test_split(data, n_test): | |
return data[:-n_test], data[-n_test:] | |
# transform list into supervised learning format | |
def series_to_supervised(data, n_in, n_out=1): | |
df = DataFrame(data) | |
cols = list() | |
# input sequence (t-n, ... t-1) | |
for i in range(n_in, 0, -1): | |
cols.append(df.shift(i)) | |
# forecast sequence (t, t+1, ... t+n) | |
for i in range(0, n_out): | |
cols.append(df.shift(-i)) | |
# put it all together | |
agg = concat(cols, axis=1) | |
# drop rows with NaN values | |
agg.dropna(inplace=True) | |
return agg.values | |
# root mean squared error or rmse | |
def measure_rmse(actual, predicted): | |
return sqrt(mean_squared_error(actual, predicted)) | |
# difference dataset | |
def difference(data, order): | |
return [data[i] - data[i - order] for i in range(order, len(data))] | |
# fit a model | |
def model_fit(train, config): | |
# unpack config | |
n_input, n_nodes, n_epochs, n_batch, n_diff = config | |
# prepare data | |
if n_diff > 0: | |
train = difference(train, n_diff) | |
# transform series into supervised format | |
data = series_to_supervised(train, n_in=n_input) | |
# separate inputs and outputs | |
train_x, train_y = data[:, :-1], data[:, -1] | |
# reshape input data into [samples, timesteps, features] | |
n_features = 1 | |
train_x = train_x.reshape((train_x.shape[0], train_x.shape[1], n_features)) | |
# define model | |
model = Sequential() | |
model.add(LSTM(n_nodes, activation='relu', input_shape=(n_input, n_features))) | |
model.add(Dense(n_nodes, activation='relu')) | |
model.add(Dense(1)) | |
model.compile(loss='mse', optimizer='adam') | |
# fit model | |
model.fit(train_x, train_y, epochs=n_epochs, batch_size=n_batch, verbose=0) | |
return model | |
# forecast with the fit model | |
def model_predict(model, history, config): | |
# unpack config | |
n_input, _, _, _, n_diff = config | |
# prepare data | |
correction = 0.0 | |
if n_diff > 0: | |
correction = history[-n_diff] | |
history = difference(history, n_diff) | |
# reshape sample into [samples, timesteps, features] | |
x_input = array(history[-n_input:]).reshape((1, n_input, 1)) | |
# forecast | |
yhat = model.predict(x_input, verbose=0) | |
return correction + yhat[0] | |
# walk-forward validation for univariate data | |
def walk_forward_validation(data, n_test, cfg): | |
predictions = list() | |
# split dataset | |
train, test = train_test_split(data, n_test) | |
# fit model | |
model = model_fit(train, cfg) | |
# seed history with training dataset | |
history = [x for x in train] | |
# step over each time-step in the test set | |
for i in range(len(test)): | |
# fit model and make forecast for history | |
yhat = model_predict(model, history, cfg) | |
# store forecast in list of predictions | |
predictions.append(yhat) | |
# add actual observation to history for the next loop | |
history.append(test[i]) | |
# estimate prediction error | |
error = measure_rmse(test, predictions) | |
print(' > %.3f' % error) | |
return error | |
# score a model by averaging repeated evaluations | |
def repeat_evaluate(data, config, n_test, n_repeats=10): | |
# convert config to a key | |
key = str(config) | |
# fit and evaluate the model n times | |
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)] | |
# summarize score | |
result = mean(scores) | |
print('> Model[%s] %.3f' % (key, result)) | |
return (key, result) | |
# grid search configs | |
def grid_search(data, cfg_list, n_test): | |
# evaluate configs | |
scores = [repeat_evaluate(data, cfg, n_test) for cfg in cfg_list] | |
# sort configs by error, asc | |
scores.sort(key=lambda tup: tup[1]) | |
return scores | |
# create a list of configs to try | |
def model_configs(): | |
# define scope of configs | |
n_input = [12] | |
n_nodes = [100] | |
n_epochs = [50] | |
n_batch = [1, 150] | |
n_diff = [12] | |
# create configs | |
configs = list() | |
for i in n_input: | |
for j in n_nodes: | |
for k in n_epochs: | |
for l in n_batch: | |
for m in n_diff: | |
cfg = [i, j, k, l, m] | |
configs.append(cfg) | |
print('Total configs: %d' % len(configs)) | |
return configs | |
# define dataset | |
series = read_csv('monthly-airline-passengers.csv', header=0, index_col=0) | |
data = series.values | |
# data split | |
n_test = 12 | |
# model configs | |
cfg_list = model_configs() | |
# grid search | |
scores = grid_search(data, cfg_list, n_test) | |
print('done') | |
# list top 3 configs | |
for cfg, error in scores[:3]: | |
print(cfg, error) |
Month | Passengers | |
---|---|
1949-01 | 112 | |
1949-02 | 118 | |
1949-03 | 132 | |
1949-04 | 129 | |
1949-05 | 121 | |
1949-06 | 135 | |
1949-07 | 148 | |
1949-08 | 148 | |
1949-09 | 136 | |
1949-10 | 119 | |
1949-11 | 104 | |
1949-12 | 118 | |
1950-01 | 115 | |
1950-02 | 126 | |
1950-03 | 141 | |
1950-04 | 135 | |
1950-05 | 125 | |
1950-06 | 149 | |
1950-07 | 170 | |
1950-08 | 170 | |
1950-09 | 158 | |
1950-10 | 133 | |
1950-11 | 114 | |
1950-12 | 140 | |
1951-01 | 145 | |
1951-02 | 150 | |
1951-03 | 178 | |
1951-04 | 163 | |
1951-05 | 172 | |
1951-06 | 178 | |
1951-07 | 199 | |
1951-08 | 199 | |
1951-09 | 184 | |
1951-10 | 162 | |
1951-11 | 146 | |
1951-12 | 166 | |
1952-01 | 171 | |
1952-02 | 180 | |
1952-03 | 193 | |
1952-04 | 181 | |
1952-05 | 183 | |
1952-06 | 218 | |
1952-07 | 230 | |
1952-08 | 242 | |
1952-09 | 209 | |
1952-10 | 191 | |
1952-11 | 172 | |
1952-12 | 194 | |
1953-01 | 196 | |
1953-02 | 196 | |
1953-03 | 236 | |
1953-04 | 235 | |
1953-05 | 229 | |
1953-06 | 243 | |
1953-07 | 264 | |
1953-08 | 272 | |
1953-09 | 237 | |
1953-10 | 211 | |
1953-11 | 180 | |
1953-12 | 201 | |
1954-01 | 204 | |
1954-02 | 188 | |
1954-03 | 235 | |
1954-04 | 227 | |
1954-05 | 234 | |
1954-06 | 264 | |
1954-07 | 302 | |
1954-08 | 293 | |
1954-09 | 259 | |
1954-10 | 229 | |
1954-11 | 203 | |
1954-12 | 229 | |
1955-01 | 242 | |
1955-02 | 233 | |
1955-03 | 267 | |
1955-04 | 269 | |
1955-05 | 270 | |
1955-06 | 315 | |
1955-07 | 364 | |
1955-08 | 347 | |
1955-09 | 312 | |
1955-10 | 274 | |
1955-11 | 237 | |
1955-12 | 278 | |
1956-01 | 284 | |
1956-02 | 277 | |
1956-03 | 317 | |
1956-04 | 313 | |
1956-05 | 318 | |
1956-06 | 374 | |
1956-07 | 413 | |
1956-08 | 405 | |
1956-09 | 355 | |
1956-10 | 306 | |
1956-11 | 271 | |
1956-12 | 306 | |
1957-01 | 315 | |
1957-02 | 301 | |
1957-03 | 356 | |
1957-04 | 348 | |
1957-05 | 355 | |
1957-06 | 422 | |
1957-07 | 465 | |
1957-08 | 467 | |
1957-09 | 404 | |
1957-10 | 347 | |
1957-11 | 305 | |
1957-12 | 336 | |
1958-01 | 340 | |
1958-02 | 318 | |
1958-03 | 362 | |
1958-04 | 348 | |
1958-05 | 363 | |
1958-06 | 435 | |
1958-07 | 491 | |
1958-08 | 505 | |
1958-09 | 404 | |
1958-10 | 359 | |
1958-11 | 310 | |
1958-12 | 337 | |
1959-01 | 360 | |
1959-02 | 342 | |
1959-03 | 406 | |
1959-04 | 396 | |
1959-05 | 420 | |
1959-06 | 472 | |
1959-07 | 548 | |
1959-08 | 559 | |
1959-09 | 463 | |
1959-10 | 407 | |
1959-11 | 362 | |
1959-12 | 405 | |
1960-01 | 417 | |
1960-02 | 391 | |
1960-03 | 419 | |
1960-04 | 461 | |
1960-05 | 472 | |
1960-06 | 535 | |
1960-07 | 622 | |
1960-08 | 606 | |
1960-09 | 508 | |
1960-10 | 461 | |
1960-11 | 390 | |
1960-12 | 432 |
# load and clean-up power usage data | |
from numpy import nan | |
from pandas import read_csv | |
# load all data | |
dataset = read_csv('household_power_consumption.txt', sep=';', header=0, low_memory=False, infer_datetime_format=True, parse_dates={'datetime':[0,1]}, index_col=['datetime']) | |
# summarize | |
print(dataset.shape) | |
print(dataset.head()) | |
# mark all missing values | |
dataset.replace('?', nan, inplace=True) | |
# add a column for the remainder of sub-metering | |
values = dataset.values.astype('float32') | |
dataset['sub_metering_4'] = (values[:,0] * 1000 / 60) - (values[:,4] + values[:,5] + values[:,6]) | |
# save updated dataset | |
dataset.to_csv('household_power_consumption.csv') | |
# load the new dataset and summarize | |
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
print(dataset.head()) |
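The `sub_metering_4` formula deserves a word on units: `Global_active_power` is the minute-averaged active power in kilowatts, so multiplying by 1000 converts to watts and dividing by 60 gives watt-hours consumed in that minute, the same unit as the three sub-metering columns; subtracting them leaves the unmetered remainder. A quick arithmetic check on one toy row:

```python
# one toy minute: 4.2 kW average power, sub meters in watt-hours
global_active_power_kw = 4.2
sub1, sub2, sub3 = 0.0, 1.0, 17.0
watt_hours = global_active_power_kw * 1000 / 60     # 70.0 Wh this minute
sub_metering_4 = watt_hours - (sub1 + sub2 + sub3)  # 52.0 Wh unmetered
print(watt_hours, sub_metering_4)
```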
# line plots for power usage dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load the new file | |
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# line plot for each variable | |
pyplot.figure() | |
for i in range(len(dataset.columns)): | |
# create subplot | |
pyplot.subplot(len(dataset.columns), 1, i+1) | |
# get variable name | |
name = dataset.columns[i] | |
# plot data | |
pyplot.plot(dataset[name]) | |
# set title | |
pyplot.title(name, y=0) | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
pyplot.show() |
# yearly line plots for power usage dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load the new file | |
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# plot active power for each year | |
years = ['2007', '2008', '2009', '2010'] | |
pyplot.figure() | |
for i in range(len(years)): | |
# prepare subplot | |
ax = pyplot.subplot(len(years), 1, i+1) | |
# determine the year to plot | |
year = years[i] | |
# get all observations for the year | |
result = dataset[str(year)] | |
# plot the active power for the year | |
pyplot.plot(result['Global_active_power']) | |
# add a title to the subplot | |
pyplot.title(str(year), y=0, loc='left') | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
pyplot.show() |
# monthly line plots for power usage dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load the new file | |
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# plot active power for each month of 2007 | |
months = [x for x in range(1, 13)] | |
pyplot.figure() | |
for i in range(len(months)): | |
# prepare subplot | |
ax = pyplot.subplot(len(months), 1, i+1) | |
# determine the month to plot | |
month = '2007-' + str(months[i]) | |
# get all observations for the month | |
result = dataset[month] | |
# plot the active power for the month | |
pyplot.plot(result['Global_active_power']) | |
# add a title to the subplot | |
pyplot.title(month, y=0, loc='left') | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
pyplot.show() |
# daily line plots for power usage dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load the new file | |
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# plot active power for each day | |
days = [x for x in range(1, 20)] | |
pyplot.figure() | |
for i in range(len(days)): | |
# prepare subplot | |
ax = pyplot.subplot(len(days), 1, i+1) | |
# determine the day to plot | |
day = '2007-01-' + str(days[i]) | |
# get all observations for the day | |
result = dataset[day] | |
# plot the active power for the day | |
pyplot.plot(result['Global_active_power']) | |
# add a title to the subplot | |
pyplot.title(day, y=0, loc='left', size=6) | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
pyplot.show() |
# histogram plots for power usage dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load the new file | |
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# histogram plot for each variable | |
pyplot.figure() | |
for i in range(len(dataset.columns)): | |
# create subplot | |
pyplot.subplot(len(dataset.columns), 1, i+1) | |
# get variable name | |
name = dataset.columns[i] | |
# create histogram | |
dataset[name].hist(bins=100) | |
# set title | |
pyplot.title(name, y=0, loc='right') | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
pyplot.show() |
# yearly histogram plots for power usage dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load the new file | |
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# plot active power for each year | |
years = ['2007', '2008', '2009', '2010'] | |
pyplot.figure() | |
for i in range(len(years)): | |
# prepare subplot | |
ax = pyplot.subplot(len(years), 1, i+1) | |
# determine the year to plot | |
year = years[i] | |
# get all observations for the year | |
result = dataset[str(year)] | |
# plot the active power for the year | |
result['Global_active_power'].hist(bins=100) | |
# zoom in on the distribution | |
ax.set_xlim(0, 5) | |
# add a title to the subplot | |
pyplot.title(str(year), y=0, loc='right') | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
pyplot.show() |
# monthly histogram plots for power usage dataset | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load the new file | |
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# plot active power for each month | |
months = [x for x in range(1, 13)] | |
pyplot.figure() | |
for i in range(len(months)): | |
# prepare subplot | |
ax = pyplot.subplot(len(months), 1, i+1) | |
# determine the month to plot | |
month = '2007-' + str(months[i]) | |
# get all observations for the month | |
result = dataset[month] | |
# plot the active power for the month | |
result['Global_active_power'].hist(bins=100) | |
# zoom in on the distribution | |
ax.set_xlim(0, 5) | |
# add a title to the subplot | |
pyplot.title(month, y=0, loc='right') | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
pyplot.show() |
# load and clean-up the power usage dataset | |
from numpy import nan | |
from numpy import isnan | |
from pandas import read_csv | |
# fill missing values with the value at the same time one day earlier (assumes the first day is complete) | |
def fill_missing(values): | |
one_day = 60 * 24 | |
for row in range(values.shape[0]): | |
for col in range(values.shape[1]): | |
if isnan(values[row, col]): | |
values[row, col] = values[row - one_day, col] | |
# load all data | |
dataset = read_csv('household_power_consumption.txt', sep=';', header=0, low_memory=False, infer_datetime_format=True, parse_dates={'datetime':[0,1]}, index_col=['datetime']) | |
# mark all missing values | |
dataset.replace('?', nan, inplace=True) | |
# make dataset numeric | |
dataset = dataset.astype('float32') | |
# fill missing | |
fill_missing(dataset.values) | |
# add a column for the remainder of sub-metering | |
values = dataset.values | |
dataset['sub_metering_4'] = (values[:,0] * 1000 / 60) - (values[:,4] + values[:,5] + values[:,6]) | |
# save updated dataset | |
dataset.to_csv('household_power_consumption.csv') |
# resample minute data to total for each day for the power usage dataset | |
from pandas import read_csv | |
# load the new file | |
dataset = read_csv('household_power_consumption.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# resample data to daily | |
daily_groups = dataset.resample('D') | |
daily_data = daily_groups.sum() | |
# summarize | |
print(daily_data.shape) | |
print(daily_data.head()) | |
# save | |
daily_data.to_csv('household_power_consumption_days.csv') |
# split the power usage dataset into standard weeks | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
train, test = split_dataset(dataset.values) | |
# validate train data | |
print(train.shape) | |
print(train[0, 0, 0], train[-1, -1, 0]) | |
# validate test | |
print(test.shape) | |
print(test[0, 0, 0], test[-1, -1, 0]) |
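The magic numbers in `split_dataset` exist so that both slices contain whole weeks; `numpy.split` raises if its argument does not divide evenly. A minimal sanity check, assuming the daily dataset loaded above (for this dataset the expected result is 159 training weeks and 46 test weeks):

```python
# verify the slices divide evenly into weeks before calling numpy.split
data = dataset.values
train_days, test_days = data[1:-328], data[-328:-6]
assert len(train_days) % 7 == 0 and len(test_days) % 7 == 0
print(len(train_days) // 7, 'train weeks,', len(test_days) // 7, 'test weeks')
```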
# naive forecast strategies for the power usage dataset | |
from math import sqrt | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from matplotlib import pyplot | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# evaluate one or more weekly forecasts against expected values | |
def evaluate_forecasts(actual, predicted): | |
scores = list() | |
# calculate an RMSE score for each day | |
for i in range(actual.shape[1]): | |
# calculate mse | |
mse = mean_squared_error(actual[:, i], predicted[:, i]) | |
# calculate rmse | |
rmse = sqrt(mse) | |
# store | |
scores.append(rmse) | |
# calculate overall RMSE | |
s = 0 | |
for row in range(actual.shape[0]): | |
for col in range(actual.shape[1]): | |
s += (actual[row, col] - predicted[row, col])**2 | |
score = sqrt(s / (actual.shape[0] * actual.shape[1])) | |
return score, scores | |
# summarize scores | |
def summarize_scores(name, score, scores): | |
s_scores = ', '.join(['%.1f' % s for s in scores]) | |
print('%s: [%.3f] %s' % (name, score, s_scores)) | |
# evaluate a single model | |
def evaluate_model(model_func, train, test): | |
# history is a list of weekly data | |
history = [x for x in train] | |
# walk-forward validation over each week | |
predictions = list() | |
for i in range(len(test)): | |
# predict the week | |
yhat_sequence = model_func(history) | |
# store the predictions | |
predictions.append(yhat_sequence) | |
# get real observation and add to history for predicting the next week | |
history.append(test[i, :]) | |
predictions = array(predictions) | |
# evaluate predictions days for each week | |
score, scores = evaluate_forecasts(test[:, :, 0], predictions) | |
return score, scores | |
# daily persistence model | |
def daily_persistence(history): | |
# get the data for the prior week | |
last_week = history[-1] | |
# get the total active power for the last day | |
value = last_week[-1, 0] | |
# prepare 7 day forecast | |
forecast = [value for _ in range(7)] | |
return forecast | |
# weekly persistence model | |
def weekly_persistence(history): | |
# get the data for the prior week | |
last_week = history[-1] | |
return last_week[:, 0] | |
# week one year ago persistence model | |
def week_one_year_ago_persistence(history): | |
# get the data for the prior week | |
last_week = history[-52] | |
return last_week[:, 0] | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# define the names and functions for the models we wish to evaluate | |
models = dict() | |
models['daily'] = daily_persistence | |
models['weekly'] = weekly_persistence | |
models['week-oya'] = week_one_year_ago_persistence | |
# evaluate each model | |
days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'] | |
for name, func in models.items(): | |
# evaluate and get scores | |
score, scores = evaluate_model(func, train, test) | |
# summarize scores | |
summarize_scores(name, score, scores) | |
# plot scores | |
pyplot.plot(days, scores, marker='o', label=name) | |
# show plot | |
pyplot.legend() | |
pyplot.show() |
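The nested loops in `evaluate_forecasts` compute the overall RMSE across every (week, day) cell. The same quantity can be computed in one vectorized expression, which makes a handy cross-check; a minimal sketch on equal-shaped toy arrays:

```python
from math import sqrt
from numpy import array

actual = array([[1.0, 2.0], [3.0, 4.0]])
predicted = array([[1.5, 2.0], [2.0, 4.0]])
# vectorized overall RMSE, identical to the loop-based score
print('%.3f' % sqrt(((actual - predicted) ** 2).mean()))  # 0.559
```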
# acf and pacf plots of total power usage | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from matplotlib import pyplot | |
from statsmodels.graphics.tsaplots import plot_acf | |
from statsmodels.graphics.tsaplots import plot_pacf | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# convert windows of weekly multivariate data into a series of total power | |
def to_series(data): | |
# extract just the total power from each week | |
series = [week[:, 0] for week in data] | |
# flatten into a single series | |
series = array(series).flatten() | |
return series | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# convert training data into a series | |
series = to_series(train) | |
# plots | |
pyplot.figure() | |
lags = 365 | |
# acf | |
axis = pyplot.subplot(2, 1, 1) | |
plot_acf(series, ax=axis, lags=lags) | |
# pacf | |
axis = pyplot.subplot(2, 1, 2) | |
plot_pacf(series, ax=axis, lags=lags) | |
# show plot | |
pyplot.show() |
# zoomed acf and pacf plots of total power usage | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from matplotlib import pyplot | |
from statsmodels.graphics.tsaplots import plot_acf | |
from statsmodels.graphics.tsaplots import plot_pacf | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# convert windows of weekly multivariate data into a series of total power | |
def to_series(data): | |
# extract just the total power from each week | |
series = [week[:, 0] for week in data] | |
# flatten into a single series | |
series = array(series).flatten() | |
return series | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# convert training data into a series | |
series = to_series(train) | |
# plots | |
pyplot.figure() | |
lags = 50 | |
# acf | |
axis = pyplot.subplot(2, 1, 1) | |
plot_acf(series, ax=axis, lags=lags) | |
# pacf | |
axis = pyplot.subplot(2, 1, 2) | |
plot_pacf(series, ax=axis, lags=lags) | |
# show plot | |
pyplot.show() |
# arima forecast for the power usage dataset | |
from math import sqrt | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from matplotlib import pyplot | |
from statsmodels.tsa.arima_model import ARIMA | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# evaluate one or more weekly forecasts against expected values | |
def evaluate_forecasts(actual, predicted): | |
scores = list() | |
# calculate an RMSE score for each day | |
for i in range(actual.shape[1]): | |
# calculate mse | |
mse = mean_squared_error(actual[:, i], predicted[:, i]) | |
# calculate rmse | |
rmse = sqrt(mse) | |
# store | |
scores.append(rmse) | |
# calculate overall RMSE | |
s = 0 | |
for row in range(actual.shape[0]): | |
for col in range(actual.shape[1]): | |
s += (actual[row, col] - predicted[row, col])**2 | |
score = sqrt(s / (actual.shape[0] * actual.shape[1])) | |
return score, scores | |
# summarize scores | |
def summarize_scores(name, score, scores): | |
s_scores = ', '.join(['%.1f' % s for s in scores]) | |
print('%s: [%.3f] %s' % (name, score, s_scores)) | |
# evaluate a single model | |
def evaluate_model(model_func, train, test): | |
# history is a list of weekly data | |
history = [x for x in train] | |
# walk-forward validation over each week | |
predictions = list() | |
for i in range(len(test)): | |
# predict the week | |
yhat_sequence = model_func(history) | |
# store the predictions | |
predictions.append(yhat_sequence) | |
# get real observation and add to history for predicting the next week | |
history.append(test[i, :]) | |
predictions = array(predictions) | |
# evaluate predictions days for each week | |
score, scores = evaluate_forecasts(test[:, :, 0], predictions) | |
return score, scores | |
# convert windows of weekly multivariate data into a series of total power | |
def to_series(data): | |
# extract just the total power from each week | |
series = [week[:, 0] for week in data] | |
# flatten into a single series | |
series = array(series).flatten() | |
return series | |
# arima forecast | |
def arima_forecast(history): | |
# convert history into a univariate series | |
series = to_series(history) | |
# define the model | |
model = ARIMA(series, order=(7,0,0)) | |
# fit the model | |
model_fit = model.fit(disp=False) | |
# make forecast | |
yhat = model_fit.predict(len(series), len(series)+6) | |
return yhat | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# define the names and functions for the models we wish to evaluate | |
models = dict() | |
models['arima'] = arima_forecast | |
# evaluate each model | |
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat'] | |
for name, func in models.items(): | |
# evaluate and get scores | |
score, scores = evaluate_model(func, train, test) | |
# summarize scores | |
summarize_scores(name, score, scores) | |
# plot scores | |
pyplot.plot(days, scores, marker='o', label=name) | |
# show plot | |
pyplot.legend() | |
pyplot.show() |
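# note (added): statsmodels removed statsmodels.tsa.arima_model.ARIMA in
# version 0.13; a minimal sketch of the same AR(7) forecast with the
# maintained API, assuming statsmodels >= 0.12 is installed
from statsmodels.tsa.arima.model import ARIMA as ARIMAv2
def arima_forecast_v2(history):
	# convert history into a univariate series
	series = to_series(history)
	# fit with the newer interface (its fit() takes no disp argument)
	model_fit = ARIMAv2(series, order=(7,0,0)).fit()
	# forecast the next seven days
	return model_fit.predict(len(series), len(series)+6)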
# univariate multi-step cnn for the power usage dataset | |
from math import sqrt | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from matplotlib import pyplot | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# evaluate one or more weekly forecasts against expected values | |
def evaluate_forecasts(actual, predicted): | |
scores = list() | |
# calculate an RMSE score for each day | |
for i in range(actual.shape[1]): | |
# calculate mse | |
mse = mean_squared_error(actual[:, i], predicted[:, i]) | |
# calculate rmse | |
rmse = sqrt(mse) | |
# store | |
scores.append(rmse) | |
# calculate overall RMSE | |
s = 0 | |
for row in range(actual.shape[0]): | |
for col in range(actual.shape[1]): | |
s += (actual[row, col] - predicted[row, col])**2 | |
score = sqrt(s / (actual.shape[0] * actual.shape[1])) | |
return score, scores | |
# summarize scores | |
def summarize_scores(name, score, scores): | |
s_scores = ', '.join(['%.1f' % s for s in scores]) | |
print('%s: [%.3f] %s' % (name, score, s_scores)) | |
# convert history into inputs and outputs | |
def to_supervised(train, n_input, n_out=7): | |
# flatten data | |
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2])) | |
X, y = list(), list() | |
in_start = 0 | |
# step over the entire history one time step at a time | |
for _ in range(len(data)): | |
# define the end of the input sequence | |
in_end = in_start + n_input | |
out_end = in_end + n_out | |
# ensure we have enough data for this instance | |
if out_end < len(data): | |
x_input = data[in_start:in_end, 0] | |
x_input = x_input.reshape((len(x_input), 1)) | |
X.append(x_input) | |
y.append(data[in_end:out_end, 0]) | |
# move along one time step | |
in_start += 1 | |
return array(X), array(y) | |
# train the model | |
def build_model(train, n_input): | |
# prepare data | |
train_x, train_y = to_supervised(train, n_input) | |
# define parameters | |
verbose, epochs, batch_size = 0, 20, 4 | |
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1] | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=16, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features))) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(10, activation='relu')) | |
model.add(Dense(n_outputs)) | |
model.compile(loss='mse', optimizer='adam') | |
# fit network | |
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
return model | |
# make a forecast | |
def forecast(model, history, n_input): | |
# flatten data | |
data = array(history) | |
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2])) | |
# retrieve last observations for input data | |
input_x = data[-n_input:, 0] | |
# reshape into [1, n_input, 1] | |
input_x = input_x.reshape((1, len(input_x), 1)) | |
# forecast the next week | |
yhat = model.predict(input_x, verbose=0) | |
# we only want the vector forecast | |
yhat = yhat[0] | |
return yhat | |
# evaluate a single model | |
def evaluate_model(train, test, n_input): | |
# fit model | |
model = build_model(train, n_input) | |
# history is a list of weekly data | |
history = [x for x in train] | |
# walk-forward validation over each week | |
predictions = list() | |
for i in range(len(test)): | |
# predict the week | |
yhat_sequence = forecast(model, history, n_input) | |
# store the predictions | |
predictions.append(yhat_sequence) | |
# get real observation and add to history for predicting the next week | |
history.append(test[i, :]) | |
	# evaluate the predictions for each day of each week
predictions = array(predictions) | |
score, scores = evaluate_forecasts(test[:, :, 0], predictions) | |
return score, scores | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# evaluate model and get scores | |
n_input = 7 | |
score, scores = evaluate_model(train, test, n_input) | |
# summarize scores | |
summarize_scores('cnn', score, scores) | |
# plot scores | |
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat'] | |
pyplot.plot(days, scores, marker='o', label='cnn') | |
pyplot.show() |
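# note (added): a single training run of a neural network gives a noisy score;
# a minimal sketch of repeating the walk-forward evaluation and summarizing
# the spread of the overall RMSE, assuming the functions above are in scope
from numpy import mean
from numpy import std
def repeat_evaluate(train, test, n_input, n_repeats=5):
	overall = [evaluate_model(train, test, n_input)[0] for _ in range(n_repeats)]
	print('cnn: %.3f RMSE (+/-%.3f) over %d runs' % (mean(overall), std(overall), n_repeats))
	return overall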
# multichannel multi-step cnn for the power usage dataset | |
from math import sqrt | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from matplotlib import pyplot | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# evaluate one or more weekly forecasts against expected values | |
def evaluate_forecasts(actual, predicted): | |
scores = list() | |
# calculate an RMSE score for each day | |
for i in range(actual.shape[1]): | |
# calculate mse | |
mse = mean_squared_error(actual[:, i], predicted[:, i]) | |
# calculate rmse | |
rmse = sqrt(mse) | |
# store | |
scores.append(rmse) | |
# calculate overall RMSE | |
s = 0 | |
for row in range(actual.shape[0]): | |
for col in range(actual.shape[1]): | |
s += (actual[row, col] - predicted[row, col])**2 | |
score = sqrt(s / (actual.shape[0] * actual.shape[1])) | |
return score, scores | |
# summarize scores | |
def summarize_scores(name, score, scores): | |
s_scores = ', '.join(['%.1f' % s for s in scores]) | |
print('%s: [%.3f] %s' % (name, score, s_scores)) | |
# convert history into inputs and outputs | |
def to_supervised(train, n_input, n_out=7): | |
# flatten data | |
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2])) | |
X, y = list(), list() | |
in_start = 0 | |
# step over the entire history one time step at a time | |
for _ in range(len(data)): | |
# define the end of the input sequence | |
in_end = in_start + n_input | |
out_end = in_end + n_out | |
# ensure we have enough data for this instance | |
if out_end < len(data): | |
X.append(data[in_start:in_end, :]) | |
y.append(data[in_end:out_end, 0]) | |
# move along one time step | |
in_start += 1 | |
return array(X), array(y) | |
# train the model | |
def build_model(train, n_input): | |
# prepare data | |
train_x, train_y = to_supervised(train, n_input) | |
# define parameters | |
verbose, epochs, batch_size = 0, 70, 16 | |
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1] | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=32, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features))) | |
model.add(Conv1D(filters=32, kernel_size=3, activation='relu')) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Conv1D(filters=16, kernel_size=3, activation='relu')) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(100, activation='relu')) | |
model.add(Dense(n_outputs)) | |
model.compile(loss='mse', optimizer='adam') | |
# fit network | |
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
return model | |
# make a forecast | |
def forecast(model, history, n_input): | |
# flatten data | |
data = array(history) | |
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2])) | |
# retrieve last observations for input data | |
input_x = data[-n_input:, :] | |
# reshape into [1, n_input, n] | |
input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1])) | |
# forecast the next week | |
yhat = model.predict(input_x, verbose=0) | |
# we only want the vector forecast | |
yhat = yhat[0] | |
return yhat | |
# evaluate a single model | |
def evaluate_model(train, test, n_input): | |
# fit model | |
model = build_model(train, n_input) | |
# history is a list of weekly data | |
history = [x for x in train] | |
# walk-forward validation over each week | |
predictions = list() | |
for i in range(len(test)): | |
# predict the week | |
yhat_sequence = forecast(model, history, n_input) | |
# store the predictions | |
predictions.append(yhat_sequence) | |
# get real observation and add to history for predicting the next week | |
history.append(test[i, :]) | |
	# evaluate the predictions for each day of each week
predictions = array(predictions) | |
score, scores = evaluate_forecasts(test[:, :, 0], predictions) | |
return score, scores | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# evaluate model and get scores | |
n_input = 14 | |
score, scores = evaluate_model(train, test, n_input) | |
# summarize scores | |
summarize_scores('cnn', score, scores) | |
# plot scores | |
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat'] | |
pyplot.plot(days, scores, marker='o', label='cnn') | |
pyplot.show() |
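# note (added): the eight input series sit on very different scales; a minimal
# sketch of standardizing the weekly windows before to_supervised(), fitting
# the scaler on the training windows only to avoid leakage (sklearn assumed);
# forecasts made on scaled data would need scaler.inverse_transform afterwards
from sklearn.preprocessing import StandardScaler
def scale_weeks(train, test):
	flat_train = train.reshape((-1, train.shape[2]))
	scaler = StandardScaler().fit(flat_train)
	scaled_train = scaler.transform(flat_train).reshape(train.shape)
	scaled_test = scaler.transform(test.reshape((-1, test.shape[2]))).reshape(test.shape)
	return scaled_train, scaled_test, scaler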
# multi headed multi-step cnn for the power usage dataset | |
from math import sqrt | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from matplotlib import pyplot | |
from keras.utils.vis_utils import plot_model | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
from keras.models import Model | |
from keras.layers import Input | |
from keras.layers.merge import concatenate | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# evaluate one or more weekly forecasts against expected values | |
def evaluate_forecasts(actual, predicted): | |
scores = list() | |
# calculate an RMSE score for each day | |
for i in range(actual.shape[1]): | |
# calculate mse | |
mse = mean_squared_error(actual[:, i], predicted[:, i]) | |
# calculate rmse | |
rmse = sqrt(mse) | |
# store | |
scores.append(rmse) | |
# calculate overall RMSE | |
s = 0 | |
for row in range(actual.shape[0]): | |
for col in range(actual.shape[1]): | |
s += (actual[row, col] - predicted[row, col])**2 | |
score = sqrt(s / (actual.shape[0] * actual.shape[1])) | |
return score, scores | |
# summarize scores | |
def summarize_scores(name, score, scores): | |
s_scores = ', '.join(['%.1f' % s for s in scores]) | |
print('%s: [%.3f] %s' % (name, score, s_scores)) | |
# convert history into inputs and outputs | |
def to_supervised(train, n_input, n_out=7): | |
# flatten data | |
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2])) | |
X, y = list(), list() | |
in_start = 0 | |
# step over the entire history one time step at a time | |
for _ in range(len(data)): | |
# define the end of the input sequence | |
in_end = in_start + n_input | |
out_end = in_end + n_out | |
# ensure we have enough data for this instance | |
if out_end < len(data): | |
X.append(data[in_start:in_end, :]) | |
y.append(data[in_end:out_end, 0]) | |
# move along one time step | |
in_start += 1 | |
return array(X), array(y) | |
# plot training history (unused helper below; it assumes fit() was called with
# validation_data and an 'rmse' metric so that these history keys exist)
def plot_history(history):
# plot loss | |
pyplot.subplot(2, 1, 1) | |
pyplot.plot(history.history['loss'], label='train') | |
pyplot.plot(history.history['val_loss'], label='test') | |
pyplot.title('loss', y=0, loc='center') | |
pyplot.legend() | |
# plot rmse | |
pyplot.subplot(2, 1, 2) | |
pyplot.plot(history.history['rmse'], label='train') | |
pyplot.plot(history.history['val_rmse'], label='test') | |
pyplot.title('rmse', y=0, loc='center') | |
pyplot.legend() | |
pyplot.show() | |
# train the model | |
def build_model(train, n_input): | |
# prepare data | |
train_x, train_y = to_supervised(train, n_input) | |
# define parameters | |
verbose, epochs, batch_size = 0, 25, 16 | |
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1] | |
# create a channel for each variable | |
in_layers, out_layers = list(), list() | |
for _ in range(n_features): | |
inputs = Input(shape=(n_timesteps,1)) | |
conv1 = Conv1D(filters=32, kernel_size=3, activation='relu')(inputs) | |
conv2 = Conv1D(filters=32, kernel_size=3, activation='relu')(conv1) | |
pool1 = MaxPooling1D(pool_size=2)(conv2) | |
flat = Flatten()(pool1) | |
# store layers | |
in_layers.append(inputs) | |
out_layers.append(flat) | |
# merge heads | |
merged = concatenate(out_layers) | |
# interpretation | |
dense1 = Dense(200, activation='relu')(merged) | |
dense2 = Dense(100, activation='relu')(dense1) | |
outputs = Dense(n_outputs)(dense2) | |
model = Model(inputs=in_layers, outputs=outputs) | |
# compile model | |
model.compile(loss='mse', optimizer='adam') | |
	# plot the model (requires the optional pydot and graphviz packages)
	plot_model(model, show_shapes=True, to_file='multiheaded_cnn.png')
# fit network | |
input_data = [train_x[:,:,i].reshape((train_x.shape[0],n_timesteps,1)) for i in range(n_features)] | |
model.fit(input_data, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
return model | |
# make a forecast | |
def forecast(model, history, n_input): | |
# flatten data | |
data = array(history) | |
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2])) | |
# retrieve last observations for input data | |
input_x = data[-n_input:, :] | |
# reshape into n input arrays | |
input_x = [input_x[:,i].reshape((1,input_x.shape[0],1)) for i in range(input_x.shape[1])] | |
# forecast the next week | |
yhat = model.predict(input_x, verbose=0) | |
# we only want the vector forecast | |
yhat = yhat[0] | |
return yhat | |
# evaluate a single model | |
def evaluate_model(train, test, n_input): | |
# fit model | |
model = build_model(train, n_input) | |
# history is a list of weekly data | |
history = [x for x in train] | |
# walk-forward validation over each week | |
predictions = list() | |
for i in range(len(test)): | |
# predict the week | |
yhat_sequence = forecast(model, history, n_input) | |
# store the predictions | |
predictions.append(yhat_sequence) | |
# get real observation and add to history for predicting the next week | |
history.append(test[i, :]) | |
	# evaluate the predictions for each day of each week
predictions = array(predictions) | |
score, scores = evaluate_forecasts(test[:, :, 0], predictions) | |
return score, scores | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# evaluate model and get scores | |
n_input = 14 | |
score, scores = evaluate_model(train, test, n_input) | |
# summarize scores | |
summarize_scores('cnn', score, scores) | |
# plot scores | |
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat'] | |
pyplot.plot(days, scores, marker='o', label='cnn') | |
pyplot.show() |
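# note (added): the multi-headed network expects a list with one array per input
# head; a minimal helper that splits any [samples, timesteps, features] batch by
# channel, mirroring the reshapes done inside build_model() and forecast() above
def split_by_channel(batch):
	return [batch[:, :, i].reshape((batch.shape[0], batch.shape[1], 1)) for i in range(batch.shape[2])]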
# univariate multi-step lstm for the power usage dataset | |
from math import sqrt | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from matplotlib import pyplot | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import LSTM | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# evaluate one or more weekly forecasts against expected values | |
def evaluate_forecasts(actual, predicted): | |
scores = list() | |
# calculate an RMSE score for each day | |
for i in range(actual.shape[1]): | |
# calculate mse | |
mse = mean_squared_error(actual[:, i], predicted[:, i]) | |
# calculate rmse | |
rmse = sqrt(mse) | |
# store | |
scores.append(rmse) | |
# calculate overall RMSE | |
s = 0 | |
for row in range(actual.shape[0]): | |
for col in range(actual.shape[1]): | |
s += (actual[row, col] - predicted[row, col])**2 | |
score = sqrt(s / (actual.shape[0] * actual.shape[1])) | |
return score, scores | |
# summarize scores | |
def summarize_scores(name, score, scores): | |
s_scores = ', '.join(['%.1f' % s for s in scores]) | |
print('%s: [%.3f] %s' % (name, score, s_scores)) | |
# convert history into inputs and outputs | |
def to_supervised(train, n_input, n_out=7): | |
# flatten data | |
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2])) | |
X, y = list(), list() | |
in_start = 0 | |
# step over the entire history one time step at a time | |
for _ in range(len(data)): | |
# define the end of the input sequence | |
in_end = in_start + n_input | |
out_end = in_end + n_out | |
# ensure we have enough data for this instance | |
if out_end < len(data): | |
x_input = data[in_start:in_end, 0] | |
x_input = x_input.reshape((len(x_input), 1)) | |
X.append(x_input) | |
y.append(data[in_end:out_end, 0]) | |
# move along one time step | |
in_start += 1 | |
return array(X), array(y) | |
# train the model | |
def build_model(train, n_input): | |
# prepare data | |
train_x, train_y = to_supervised(train, n_input) | |
# define parameters | |
verbose, epochs, batch_size = 0, 70, 16 | |
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1] | |
# define model | |
model = Sequential() | |
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features))) | |
model.add(Dense(100, activation='relu')) | |
model.add(Dense(n_outputs)) | |
model.compile(loss='mse', optimizer='adam') | |
# fit network | |
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
return model | |
# make a forecast | |
def forecast(model, history, n_input): | |
# flatten data | |
data = array(history) | |
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2])) | |
# retrieve last observations for input data | |
input_x = data[-n_input:, 0] | |
# reshape into [1, n_input, 1] | |
input_x = input_x.reshape((1, len(input_x), 1)) | |
# forecast the next week | |
yhat = model.predict(input_x, verbose=0) | |
# we only want the vector forecast | |
yhat = yhat[0] | |
return yhat | |
# evaluate a single model | |
def evaluate_model(train, test, n_input): | |
# fit model | |
model = build_model(train, n_input) | |
# history is a list of weekly data | |
history = [x for x in train] | |
# walk-forward validation over each week | |
predictions = list() | |
for i in range(len(test)): | |
# predict the week | |
yhat_sequence = forecast(model, history, n_input) | |
# store the predictions | |
predictions.append(yhat_sequence) | |
# get real observation and add to history for predicting the next week | |
history.append(test[i, :]) | |
	# evaluate the predictions for each day of each week
predictions = array(predictions) | |
score, scores = evaluate_forecasts(test[:, :, 0], predictions) | |
return score, scores | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# evaluate model and get scores | |
n_input = 7 | |
score, scores = evaluate_model(train, test, n_input) | |
# summarize scores | |
summarize_scores('lstm', score, scores) | |
# plot scores | |
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat'] | |
pyplot.plot(days, scores, marker='o', label='lstm') | |
pyplot.show() |
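# note (added): a minimal sketch of persisting the fitted network so it can be
# reused without retraining, assuming HDF5 support (h5py) is installed
from keras.models import load_model
def save_and_reload(model, path='lstm_power.h5'):
	# a single file holds the architecture, weights and optimizer state
	model.save(path)
	return load_model(path)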
# univariate multi-step encoder-decoder lstm for the power usage dataset | |
from math import sqrt | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from matplotlib import pyplot | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import LSTM | |
from keras.layers import RepeatVector | |
from keras.layers import TimeDistributed | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# evaluate one or more weekly forecasts against expected values | |
def evaluate_forecasts(actual, predicted): | |
scores = list() | |
# calculate an RMSE score for each day | |
for i in range(actual.shape[1]): | |
# calculate mse | |
mse = mean_squared_error(actual[:, i], predicted[:, i]) | |
# calculate rmse | |
rmse = sqrt(mse) | |
# store | |
scores.append(rmse) | |
# calculate overall RMSE | |
s = 0 | |
for row in range(actual.shape[0]): | |
for col in range(actual.shape[1]): | |
s += (actual[row, col] - predicted[row, col])**2 | |
score = sqrt(s / (actual.shape[0] * actual.shape[1])) | |
return score, scores | |
# summarize scores | |
def summarize_scores(name, score, scores): | |
s_scores = ', '.join(['%.1f' % s for s in scores]) | |
print('%s: [%.3f] %s' % (name, score, s_scores)) | |
# convert history into inputs and outputs | |
def to_supervised(train, n_input, n_out=7): | |
# flatten data | |
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2])) | |
X, y = list(), list() | |
in_start = 0 | |
# step over the entire history one time step at a time | |
for _ in range(len(data)): | |
# define the end of the input sequence | |
in_end = in_start + n_input | |
out_end = in_end + n_out | |
# ensure we have enough data for this instance | |
if out_end < len(data): | |
x_input = data[in_start:in_end, 0] | |
x_input = x_input.reshape((len(x_input), 1)) | |
X.append(x_input) | |
y.append(data[in_end:out_end, 0]) | |
# move along one time step | |
in_start += 1 | |
return array(X), array(y) | |
# train the model | |
def build_model(train, n_input): | |
# prepare data | |
train_x, train_y = to_supervised(train, n_input) | |
# define parameters | |
verbose, epochs, batch_size = 0, 20, 16 | |
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1] | |
# reshape output into [samples, timesteps, features] | |
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1)) | |
# define model | |
model = Sequential() | |
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features))) | |
model.add(RepeatVector(n_outputs)) | |
model.add(LSTM(200, activation='relu', return_sequences=True)) | |
model.add(TimeDistributed(Dense(100, activation='relu'))) | |
model.add(TimeDistributed(Dense(1))) | |
model.compile(loss='mse', optimizer='adam') | |
# fit network | |
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
return model | |
# make a forecast | |
def forecast(model, history, n_input): | |
# flatten data | |
data = array(history) | |
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2])) | |
# retrieve last observations for input data | |
input_x = data[-n_input:, 0] | |
# reshape into [1, n_input, 1] | |
input_x = input_x.reshape((1, len(input_x), 1)) | |
# forecast the next week | |
yhat = model.predict(input_x, verbose=0) | |
# we only want the vector forecast | |
yhat = yhat[0] | |
return yhat | |
# evaluate a single model | |
def evaluate_model(train, test, n_input): | |
# fit model | |
model = build_model(train, n_input) | |
# history is a list of weekly data | |
history = [x for x in train] | |
# walk-forward validation over each week | |
predictions = list() | |
for i in range(len(test)): | |
# predict the week | |
yhat_sequence = forecast(model, history, n_input) | |
# store the predictions | |
predictions.append(yhat_sequence) | |
# get real observation and add to history for predicting the next week | |
history.append(test[i, :]) | |
	# evaluate the predictions for each day of each week
predictions = array(predictions) | |
score, scores = evaluate_forecasts(test[:, :, 0], predictions) | |
return score, scores | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# evaluate model and get scores | |
n_input = 14 | |
score, scores = evaluate_model(train, test, n_input) | |
# summarize scores | |
summarize_scores('lstm', score, scores) | |
# plot scores | |
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat'] | |
pyplot.plot(days, scores, marker='o', label='lstm') | |
pyplot.show() |
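# note (added): the tutorial fits for a fixed number of epochs; a minimal sketch
# of validation-based early stopping instead, an assumption rather than part of
# the original recipe (restore_best_weights needs a recent Keras)
from keras.callbacks import EarlyStopping
def fit_with_early_stopping(model, train_x, train_y, epochs=50, batch_size=16):
	# hold out 10% of the windows and stop once validation loss stalls
	es = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
	model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, validation_split=0.1, callbacks=[es], verbose=0)
	return model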
# multivariate multi-step encoder-decoder lstm for the power usage dataset | |
from math import sqrt | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from matplotlib import pyplot | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import LSTM | |
from keras.layers import RepeatVector | |
from keras.layers import TimeDistributed | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# evaluate one or more weekly forecasts against expected values | |
def evaluate_forecasts(actual, predicted): | |
scores = list() | |
# calculate an RMSE score for each day | |
for i in range(actual.shape[1]): | |
# calculate mse | |
mse = mean_squared_error(actual[:, i], predicted[:, i]) | |
# calculate rmse | |
rmse = sqrt(mse) | |
# store | |
scores.append(rmse) | |
# calculate overall RMSE | |
s = 0 | |
for row in range(actual.shape[0]): | |
for col in range(actual.shape[1]): | |
s += (actual[row, col] - predicted[row, col])**2 | |
score = sqrt(s / (actual.shape[0] * actual.shape[1])) | |
return score, scores | |
# summarize scores | |
def summarize_scores(name, score, scores): | |
s_scores = ', '.join(['%.1f' % s for s in scores]) | |
print('%s: [%.3f] %s' % (name, score, s_scores)) | |
# convert history into inputs and outputs | |
def to_supervised(train, n_input, n_out=7): | |
# flatten data | |
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2])) | |
X, y = list(), list() | |
in_start = 0 | |
# step over the entire history one time step at a time | |
for _ in range(len(data)): | |
# define the end of the input sequence | |
in_end = in_start + n_input | |
out_end = in_end + n_out | |
# ensure we have enough data for this instance | |
if out_end < len(data): | |
X.append(data[in_start:in_end, :]) | |
y.append(data[in_end:out_end, 0]) | |
# move along one time step | |
in_start += 1 | |
return array(X), array(y) | |
# train the model | |
def build_model(train, n_input): | |
# prepare data | |
train_x, train_y = to_supervised(train, n_input) | |
# define parameters | |
verbose, epochs, batch_size = 0, 50, 16 | |
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1] | |
# reshape output into [samples, timesteps, features] | |
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1)) | |
# define model | |
model = Sequential() | |
model.add(LSTM(200, activation='relu', input_shape=(n_timesteps, n_features))) | |
model.add(RepeatVector(n_outputs)) | |
model.add(LSTM(200, activation='relu', return_sequences=True)) | |
model.add(TimeDistributed(Dense(100, activation='relu'))) | |
model.add(TimeDistributed(Dense(1))) | |
model.compile(loss='mse', optimizer='adam') | |
# fit network | |
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
return model | |
# make a forecast | |
def forecast(model, history, n_input): | |
# flatten data | |
data = array(history) | |
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2])) | |
# retrieve last observations for input data | |
input_x = data[-n_input:, :] | |
# reshape into [1, n_input, n] | |
input_x = input_x.reshape((1, input_x.shape[0], input_x.shape[1])) | |
# forecast the next week | |
yhat = model.predict(input_x, verbose=0) | |
# we only want the vector forecast | |
yhat = yhat[0] | |
return yhat | |
# evaluate a single model | |
def evaluate_model(train, test, n_input): | |
# fit model | |
model = build_model(train, n_input) | |
# history is a list of weekly data | |
history = [x for x in train] | |
# walk-forward validation over each week | |
predictions = list() | |
for i in range(len(test)): | |
# predict the week | |
yhat_sequence = forecast(model, history, n_input) | |
# store the predictions | |
predictions.append(yhat_sequence) | |
# get real observation and add to history for predicting the next week | |
history.append(test[i, :]) | |
	# evaluate the predictions for each day of each week
predictions = array(predictions) | |
score, scores = evaluate_forecasts(test[:, :, 0], predictions) | |
return score, scores | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# evaluate model and get scores | |
n_input = 14 | |
score, scores = evaluate_model(train, test, n_input) | |
# summarize scores | |
summarize_scores('lstm', score, scores) | |
# plot scores | |
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat'] | |
pyplot.plot(days, scores, marker='o', label='lstm') | |
pyplot.show() |
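# note (added): the only change from the univariate encoder-decoder is that each
# input window now carries all eight variables; a quick confirmation of the
# supervised shapes, assuming train and n_input from the run above
check_x, check_y = to_supervised(train, n_input)
print(check_x.shape, check_y.shape)  # expect (samples, 14, 8) and (samples, 7)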
# univariate multi-step encoder-decoder cnn-lstm for the power usage dataset | |
from math import sqrt | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from matplotlib import pyplot | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import LSTM | |
from keras.layers import RepeatVector | |
from keras.layers import TimeDistributed | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# evaluate one or more weekly forecasts against expected values | |
def evaluate_forecasts(actual, predicted): | |
scores = list() | |
# calculate an RMSE score for each day | |
for i in range(actual.shape[1]): | |
# calculate mse | |
mse = mean_squared_error(actual[:, i], predicted[:, i]) | |
# calculate rmse | |
rmse = sqrt(mse) | |
# store | |
scores.append(rmse) | |
# calculate overall RMSE | |
s = 0 | |
for row in range(actual.shape[0]): | |
for col in range(actual.shape[1]): | |
s += (actual[row, col] - predicted[row, col])**2 | |
score = sqrt(s / (actual.shape[0] * actual.shape[1])) | |
return score, scores | |
# summarize scores | |
def summarize_scores(name, score, scores): | |
s_scores = ', '.join(['%.1f' % s for s in scores]) | |
print('%s: [%.3f] %s' % (name, score, s_scores)) | |
# convert history into inputs and outputs | |
def to_supervised(train, n_input, n_out=7): | |
# flatten data | |
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2])) | |
X, y = list(), list() | |
in_start = 0 | |
# step over the entire history one time step at a time | |
for _ in range(len(data)): | |
# define the end of the input sequence | |
in_end = in_start + n_input | |
out_end = in_end + n_out | |
# ensure we have enough data for this instance | |
if out_end < len(data): | |
x_input = data[in_start:in_end, 0] | |
x_input = x_input.reshape((len(x_input), 1)) | |
X.append(x_input) | |
y.append(data[in_end:out_end, 0]) | |
# move along one time step | |
in_start += 1 | |
return array(X), array(y) | |
# train the model | |
def build_model(train, n_input): | |
# prepare data | |
train_x, train_y = to_supervised(train, n_input) | |
# define parameters | |
verbose, epochs, batch_size = 0, 20, 16 | |
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1] | |
# reshape output into [samples, timesteps, features] | |
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1)) | |
# define model | |
model = Sequential() | |
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features))) | |
model.add(Conv1D(filters=64, kernel_size=3, activation='relu')) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(RepeatVector(n_outputs)) | |
model.add(LSTM(200, activation='relu', return_sequences=True)) | |
model.add(TimeDistributed(Dense(100, activation='relu'))) | |
model.add(TimeDistributed(Dense(1))) | |
model.compile(loss='mse', optimizer='adam') | |
# fit network | |
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
return model | |
# make a forecast | |
def forecast(model, history, n_input): | |
# flatten data | |
data = array(history) | |
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2])) | |
# retrieve last observations for input data | |
input_x = data[-n_input:, 0] | |
# reshape into [1, n_input, 1] | |
input_x = input_x.reshape((1, len(input_x), 1)) | |
# forecast the next week | |
yhat = model.predict(input_x, verbose=0) | |
# we only want the vector forecast | |
yhat = yhat[0] | |
return yhat | |
# evaluate a single model | |
def evaluate_model(train, test, n_input): | |
# fit model | |
model = build_model(train, n_input) | |
# history is a list of weekly data | |
history = [x for x in train] | |
# walk-forward validation over each week | |
predictions = list() | |
for i in range(len(test)): | |
# predict the week | |
yhat_sequence = forecast(model, history, n_input) | |
# store the predictions | |
predictions.append(yhat_sequence) | |
# get real observation and add to history for predicting the next week | |
history.append(test[i, :]) | |
	# evaluate the predictions for each day of each week
predictions = array(predictions) | |
score, scores = evaluate_forecasts(test[:, :, 0], predictions) | |
return score, scores | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# evaluate model and get scores | |
n_input = 14 | |
score, scores = evaluate_model(train, test, n_input) | |
# summarize scores | |
summarize_scores('lstm', score, scores) | |
# plot scores | |
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat'] | |
pyplot.plot(days, scores, marker='o', label='lstm') | |
pyplot.show() |
# univariate multi-step encoder-decoder convlstm for the power usage dataset | |
from math import sqrt | |
from numpy import split | |
from numpy import array | |
from pandas import read_csv | |
from sklearn.metrics import mean_squared_error | |
from matplotlib import pyplot | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import LSTM | |
from keras.layers import RepeatVector | |
from keras.layers import TimeDistributed | |
from keras.layers import ConvLSTM2D | |
# split a univariate dataset into train/test sets | |
def split_dataset(data): | |
# split into standard weeks | |
train, test = data[1:-328], data[-328:-6] | |
# restructure into windows of weekly data | |
train = array(split(train, len(train)/7)) | |
test = array(split(test, len(test)/7)) | |
return train, test | |
# evaluate one or more weekly forecasts against expected values | |
def evaluate_forecasts(actual, predicted): | |
scores = list() | |
# calculate an RMSE score for each day | |
for i in range(actual.shape[1]): | |
# calculate mse | |
mse = mean_squared_error(actual[:, i], predicted[:, i]) | |
# calculate rmse | |
rmse = sqrt(mse) | |
# store | |
scores.append(rmse) | |
# calculate overall RMSE | |
s = 0 | |
for row in range(actual.shape[0]): | |
for col in range(actual.shape[1]): | |
s += (actual[row, col] - predicted[row, col])**2 | |
score = sqrt(s / (actual.shape[0] * actual.shape[1])) | |
return score, scores | |
# summarize scores | |
def summarize_scores(name, score, scores): | |
s_scores = ', '.join(['%.1f' % s for s in scores]) | |
print('%s: [%.3f] %s' % (name, score, s_scores)) | |
# convert history into inputs and outputs | |
def to_supervised(train, n_input, n_out=7): | |
# flatten data | |
data = train.reshape((train.shape[0]*train.shape[1], train.shape[2])) | |
X, y = list(), list() | |
in_start = 0 | |
# step over the entire history one time step at a time | |
for _ in range(len(data)): | |
# define the end of the input sequence | |
in_end = in_start + n_input | |
out_end = in_end + n_out | |
# ensure we have enough data for this instance | |
if out_end < len(data): | |
x_input = data[in_start:in_end, 0] | |
x_input = x_input.reshape((len(x_input), 1)) | |
X.append(x_input) | |
y.append(data[in_end:out_end, 0]) | |
# move along one time step | |
in_start += 1 | |
return array(X), array(y) | |
# train the model | |
def build_model(train, n_steps, n_length, n_input): | |
# prepare data | |
train_x, train_y = to_supervised(train, n_input) | |
# define parameters | |
verbose, epochs, batch_size = 0, 20, 16 | |
n_features, n_outputs = train_x.shape[2], train_y.shape[1] | |
# reshape into subsequences [samples, timesteps, rows, cols, channels] | |
train_x = train_x.reshape((train_x.shape[0], n_steps, 1, n_length, n_features)) | |
# reshape output into [samples, timesteps, features] | |
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1)) | |
# define model | |
model = Sequential() | |
model.add(ConvLSTM2D(filters=64, kernel_size=(1,3), activation='relu', input_shape=(n_steps, 1, n_length, n_features))) | |
model.add(Flatten()) | |
model.add(RepeatVector(n_outputs)) | |
model.add(LSTM(200, activation='relu', return_sequences=True)) | |
model.add(TimeDistributed(Dense(100, activation='relu'))) | |
model.add(TimeDistributed(Dense(1))) | |
model.compile(loss='mse', optimizer='adam') | |
# fit network | |
model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
return model | |
# make a forecast | |
def forecast(model, history, n_steps, n_length, n_input): | |
# flatten data | |
data = array(history) | |
data = data.reshape((data.shape[0]*data.shape[1], data.shape[2])) | |
# retrieve last observations for input data | |
input_x = data[-n_input:, 0] | |
# reshape into [samples, timesteps, rows, cols, channels] | |
input_x = input_x.reshape((1, n_steps, 1, n_length, 1)) | |
# forecast the next week | |
yhat = model.predict(input_x, verbose=0) | |
# we only want the vector forecast | |
yhat = yhat[0] | |
return yhat | |
# evaluate a single model | |
def evaluate_model(train, test, n_steps, n_length, n_input): | |
# fit model | |
model = build_model(train, n_steps, n_length, n_input) | |
# history is a list of weekly data | |
history = [x for x in train] | |
# walk-forward validation over each week | |
predictions = list() | |
for i in range(len(test)): | |
# predict the week | |
yhat_sequence = forecast(model, history, n_steps, n_length, n_input) | |
# store the predictions | |
predictions.append(yhat_sequence) | |
# get real observation and add to history for predicting the next week | |
history.append(test[i, :]) | |
	# evaluate the predictions for each day of each week
predictions = array(predictions) | |
score, scores = evaluate_forecasts(test[:, :, 0], predictions) | |
return score, scores | |
# load the new file | |
dataset = read_csv('household_power_consumption_days.csv', header=0, infer_datetime_format=True, parse_dates=['datetime'], index_col=['datetime']) | |
# split into train and test | |
train, test = split_dataset(dataset.values) | |
# define the number of subsequences and the length of subsequences | |
n_steps, n_length = 2, 7 | |
# define the total days to use as input | |
n_input = n_length * n_steps | |
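# note (added): the ConvLSTM reshapes assume the input window factors exactly
# into subsequences; a minimal guard that fails fast here rather than with an
# opaque reshape error inside Keras
assert n_input == n_steps * n_length, 'n_input must equal n_steps * n_length'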
score, scores = evaluate_model(train, test, n_steps, n_length, n_input) | |
# summarize scores | |
summarize_scores('lstm', score, scores) | |
# plot scores | |
days = ['sun', 'mon', 'tue', 'wed', 'thr', 'fri', 'sat'] | |
pyplot.plot(days, scores, marker='o', label='lstm') | |
pyplot.show() |
# load one file from the har dataset | |
from pandas import read_csv | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
data = load_file('HARDataset/train/Inertial Signals/total_acc_y_train.txt') | |
print(data.shape) |
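# note (added): each row of the file is one fixed-width window of 128
# observations (2.56 seconds of data at 50Hz); a quick peek at the first few
# values of the first window loaded above
print(data[0, :5])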
# load group of files from the har dataset | |
from numpy import dstack | |
from pandas import read_csv | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files, such as x, y, z data for a given variable | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load the total acc data | |
filenames = ['total_acc_x_train.txt', 'total_acc_y_train.txt', 'total_acc_z_train.txt'] | |
total_acc = load_group(filenames, prefix='HARDataset/train/Inertial Signals/') | |
print(total_acc.shape) |
# load all train and test data from the har dataset | |
from numpy import dstack | |
from pandas import read_csv | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files, such as x, y, z data for a given variable | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load all train | |
trainX, trainy = load_dataset('train', 'HARDataset/') | |
print(trainX.shape, trainy.shape) | |
# load all test | |
testX, testy = load_dataset('test', 'HARDataset/') | |
print(testX.shape, testy.shape) |
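# note (added): the class labels are the integers 1-6; a minimal sketch of the
# usual next step of zero-offsetting and one-hot encoding them for a Keras
# model (keras assumed available, as in the earlier scripts)
from keras.utils import to_categorical
trainy_enc = to_categorical(trainy - 1)
testy_enc = to_categorical(testy - 1)
print(trainy_enc.shape, testy_enc.shape)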
# summarize class balance from the har dataset | |
from numpy import vstack | |
from pandas import read_csv | |
from pandas import DataFrame | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# summarize the balance of classes in an output variable column | |
def class_breakdown(data): | |
# convert the numpy array into a dataframe | |
df = DataFrame(data) | |
# group data by the class value and calculate the number of rows | |
counts = df.groupby(0).size() | |
# retrieve raw rows | |
counts = counts.values | |
# summarize | |
for i in range(len(counts)): | |
percent = counts[i] / len(df) * 100 | |
print('Class=%d, total=%d, percentage=%.3f' % (i+1, counts[i], percent)) | |
# load train file | |
trainy = load_file('HARDataset/train/y_train.txt') | |
# summarize class breakdown | |
print('Train Dataset') | |
class_breakdown(trainy) | |
# load test file | |
testy = load_file('HARDataset/test/y_test.txt') | |
# summarize class breakdown | |
print('Test Dataset') | |
class_breakdown(testy) | |
# summarize combined class breakdown | |
print('Both') | |
combined = vstack((trainy, testy)) | |
class_breakdown(combined) |
# plot all vars for one subject in the har dataset | |
from numpy import dstack | |
from numpy import unique | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files, such as x, y, z data for a given variable | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# get all data for one subject | |
def data_for_subject(X, y, sub_map, sub_id): | |
# get row indexes for the subject id | |
ix = [i for i in range(len(sub_map)) if sub_map[i]==sub_id] | |
# return the selected samples | |
return X[ix, :, :], y[ix] | |
# convert a series of windows to a 1D list | |
def to_series(windows): | |
series = list() | |
for window in windows: | |
# remove the overlap from the window | |
half = int(len(window) / 2) - 1 | |
for value in window[-half:]: | |
series.append(value) | |
return series | |
# plot the data for one subject | |
def plot_subject(X, y): | |
pyplot.figure() | |
# determine the total number of plots | |
n, off = X.shape[2] + 1, 0 | |
# plot total acc | |
for i in range(3): | |
pyplot.subplot(n, 1, off+1) | |
pyplot.plot(to_series(X[:, :, off])) | |
pyplot.title('total acc '+str(i), y=0, loc='left', size=7) | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
off += 1 | |
# plot body acc | |
for i in range(3): | |
pyplot.subplot(n, 1, off+1) | |
pyplot.plot(to_series(X[:, :, off])) | |
pyplot.title('body acc '+str(i), y=0, loc='left', size=7) | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
off += 1 | |
# plot body gyro | |
for i in range(3): | |
pyplot.subplot(n, 1, off+1) | |
pyplot.plot(to_series(X[:, :, off])) | |
pyplot.title('body gyro '+str(i), y=0, loc='left', size=7) | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
off += 1 | |
# plot activities | |
pyplot.subplot(n, 1, n) | |
pyplot.plot(y) | |
pyplot.title('activity', y=0, loc='left', size=7) | |
# turn off ticks to remove clutter | |
pyplot.yticks([]) | |
pyplot.xticks([]) | |
pyplot.show() | |
# load data | |
trainX, trainy = load_dataset('train', 'HARDataset/') | |
# load mapping of rows to subjects | |
sub_map = load_file('HARDataset/train/subject_train.txt') | |
train_subjects = unique(sub_map) | |
print(train_subjects) | |
# get the data for one subject | |
sub_id = train_subjects[0] | |
subX, suby = data_for_subject(trainX, trainy, sub_map, sub_id) | |
print(subX.shape, suby.shape) | |
# plot data for subject | |
plot_subject(subX, suby) |
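# note (added): a quick way to eyeball between-subject variation is to repeat
# the plot for a second subject, assuming train_subjects holds more than one id
sub_id2 = train_subjects[1]
subX2, suby2 = data_for_subject(trainX, trainy, sub_map, sub_id2)
plot_subject(subX2, suby2)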
# plot histograms for multiple subjects from the har dataset | |
from numpy import unique | |
from numpy import dstack | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files, such as x, y, z data for a given variable | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# get all data for one subject | |
def data_for_subject(X, y, sub_map, sub_id): | |
# get row indexes for the subject id | |
ix = [i for i in range(len(sub_map)) if sub_map[i]==sub_id] | |
# return the selected samples | |
return X[ix, :, :], y[ix] | |
# convert a series of windows to a 1D list | |
def to_series(windows): | |
series = list() | |
for window in windows: | |
# remove the overlap from the window | |
half = int(len(window) / 2) - 1 | |
for value in window[-half:]: | |
series.append(value) | |
return series | |
# plot histograms for multiple subjects | |
def plot_subject_histograms(X, y, sub_map, offset, n=10): | |
pyplot.figure() | |
# get unique subjects | |
subject_ids = unique(sub_map[:,0]) | |
# enumerate subjects | |
for k in range(n): | |
sub_id = subject_ids[k] | |
# get data for one subject | |
subX, _ = data_for_subject(X, y, sub_map, sub_id) | |
# total acc | |
for i in range(3): | |
ax = pyplot.subplot(n, 1, k+1) | |
ax.set_xlim(-1,1) | |
ax.hist(to_series(subX[:,:,offset+i]), bins=100) | |
pyplot.yticks([]) | |
pyplot.xticks([-1,0,1]) | |
pyplot.show() | |
# load training dataset | |
X, y = load_dataset('train', 'HARDataset/') | |
# load mapping of rows to subjects | |
sub_map = load_file('HARDataset/train/subject_train.txt') | |
# plot total acceleration histograms for subjects | |
plot_subject_histograms(X, y, sub_map, 0) | |
# plot body acceleration histograms for subjects | |
plot_subject_histograms(X, y, sub_map, 3) | |
# plot gyroscopic histograms for subjects | |
plot_subject_histograms(X, y, sub_map, 6)

# plot histograms per activity for a subject from the har dataset
from numpy import dstack | |
from numpy import unique | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files, such as x, y, z data for a given variable | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# get all data for one subject | |
def data_for_subject(X, y, sub_map, sub_id): | |
# get row indexes for the subject id | |
ix = [i for i in range(len(sub_map)) if sub_map[i]==sub_id] | |
# return the selected samples | |
return X[ix, :, :], y[ix] | |
# convert a series of windows to a 1D list | |
def to_series(windows): | |
series = list() | |
for window in windows: | |
# remove the overlap from the window | |
half = int(len(window) / 2) - 1 | |
for value in window[-half:]: | |
series.append(value) | |
return series | |
# group data by activity | |
def data_by_activity(X, y, activities): | |
# group windows by activity | |
return {a:X[y[:,0]==a, :, :] for a in activities} | |
# plot histograms for each activity for a subject | |
def plot_activity_histograms(X, y, offset): | |
# get a list of unique activities for the subject | |
activity_ids = unique(y[:,0]) | |
# group windows by activity | |
grouped = data_by_activity(X, y, activity_ids) | |
# plot per activity, histograms for each axis | |
pyplot.figure() | |
for k in range(len(activity_ids)): | |
act_id = activity_ids[k] | |
# total acceleration | |
for i in range(3): | |
ax = pyplot.subplot(len(activity_ids), 1, k+1) | |
ax.set_xlim(-1,1) | |
			# create histogram
pyplot.hist(to_series(grouped[act_id][:,:,offset+i]), bins=100) | |
# create title | |
pyplot.title('activity '+str(act_id), y=0, loc='left', size=10) | |
# simplify axis | |
pyplot.yticks([]) | |
pyplot.xticks([-1,0,1]) | |
pyplot.show() | |
# load data | |
trainX, trainy = load_dataset('train', 'HARDataset/') | |
# load mapping of rows to subjects | |
sub_map = load_file('HARDataset/train/subject_train.txt') | |
train_subjects = unique(sub_map) | |
# get the data for one subject | |
sub_id = train_subjects[0] | |
subX, suby = data_for_subject(trainX, trainy, sub_map, sub_id) | |
# plot total acceleration histograms per activity for a subject | |
plot_activity_histograms(subX, suby, 0) | |
# plot body acceleration histograms per activity for a subject | |
plot_activity_histograms(subX, suby, 3) | |
# plot gyroscopic histograms per activity for a subject | |
plot_activity_histograms(subX, suby, 6) |
# plot durations of each activity by subject from the har dataset | |
from numpy import dstack | |
from numpy import unique | |
from pandas import read_csv | |
from matplotlib import pyplot | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files, such as x, y, z data for a given variable | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# get all data for one subject | |
def data_for_subject(X, y, sub_map, sub_id): | |
# get row indexes for the subject id | |
ix = [i for i in range(len(sub_map)) if sub_map[i]==sub_id] | |
# return the selected samples | |
return X[ix, :, :], y[ix] | |
# convert a series of windows to a 1D list | |
def to_series(windows): | |
series = list() | |
for window in windows: | |
# remove the overlap from the window | |
half = int(len(window) / 2) - 1 | |
for value in window[-half:]: | |
series.append(value) | |
return series | |
# group data by activity | |
def data_by_activity(X, y, activities): | |
# group windows by activity | |
return {a:X[y[:,0]==a, :, :] for a in activities} | |
# plot activity durations by subject | |
def plot_activity_durations_by_subject(X, y, sub_map): | |
# get unique subjects and activities | |
subject_ids = unique(sub_map[:,0]) | |
activity_ids = unique(y[:,0]) | |
# enumerate subjects | |
activity_windows = {a:list() for a in activity_ids} | |
for sub_id in subject_ids: | |
# get data for one subject | |
_, subj_y = data_for_subject(X, y, sub_map, sub_id) | |
# count windows by activity | |
for a in activity_ids: | |
activity_windows[a].append(len(subj_y[subj_y[:,0]==a])) | |
# organize durations into a list of lists | |
durations = [activity_windows[a] for a in activity_ids] | |
pyplot.boxplot(durations, labels=activity_ids) | |
pyplot.show() | |
# load training dataset | |
X, y = load_dataset('train', 'HARDataset/') | |
# load mapping of rows to subjects | |
sub_map = load_file('HARDataset/train/subject_train.txt') | |
# plot durations | |
plot_activity_durations_by_subject(X, y, sub_map) |
# spot check ml algorithms on the engineered features from the har dataset
from pandas import read_csv | |
from sklearn.metrics import accuracy_score | |
from sklearn.neighbors import KNeighborsClassifier | |
from sklearn.tree import DecisionTreeClassifier | |
from sklearn.svm import SVC | |
from sklearn.naive_bayes import GaussianNB | |
from sklearn.ensemble import BaggingClassifier | |
from sklearn.ensemble import RandomForestClassifier | |
from sklearn.ensemble import ExtraTreesClassifier | |
from sklearn.ensemble import GradientBoostingClassifier | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
# load input data | |
X = load_file(prefix + group + '/X_'+group+'.txt') | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# flatten y | |
trainy, testy = trainy[:,0], testy[:,0] | |
return trainX, trainy, testX, testy | |
# create a dict of standard models to evaluate {name:object} | |
def define_models(models=dict()): | |
# nonlinear models | |
models['knn'] = KNeighborsClassifier(n_neighbors=7) | |
models['cart'] = DecisionTreeClassifier() | |
models['svm'] = SVC() | |
models['bayes'] = GaussianNB() | |
# ensemble models | |
models['bag'] = BaggingClassifier(n_estimators=100) | |
models['rf'] = RandomForestClassifier(n_estimators=100) | |
models['et'] = ExtraTreesClassifier(n_estimators=100) | |
models['gbm'] = GradientBoostingClassifier(n_estimators=100) | |
print('Defined %d models' % len(models)) | |
return models | |
# evaluate a single model | |
def evaluate_model(trainX, trainy, testX, testy, model): | |
# fit the model | |
model.fit(trainX, trainy) | |
# make predictions | |
yhat = model.predict(testX) | |
# evaluate predictions | |
accuracy = accuracy_score(testy, yhat) | |
return accuracy * 100.0 | |
# evaluate a dict of models {name:object}, returns {name:score} | |
def evaluate_models(trainX, trainy, testX, testy, models): | |
results = dict() | |
for name, model in models.items(): | |
# evaluate the model | |
results[name] = evaluate_model(trainX, trainy, testX, testy, model) | |
# show progress
print('>%s: %.3f' % (name, results[name])) | |
return results | |
# print the results, best first
def summarize_results(results, maximize=True):
# create a list of (name, score) tuples
mean_scores = [(k,v) for k,v in results.items()]
# sort tuples by mean score | |
mean_scores = sorted(mean_scores, key=lambda x: x[1]) | |
# reverse for descending order (e.g. for accuracy) | |
if maximize: | |
mean_scores = list(reversed(mean_scores)) | |
print() | |
for name, score in mean_scores: | |
print('Name=%s, Score=%.3f' % (name, score)) | |
# load dataset | |
trainX, trainy, testX, testy = load_dataset() | |
# get model list | |
models = define_models() | |
# evaluate models | |
results = evaluate_models(trainX, trainy, testX, testy, models) | |
# summarize results | |
summarize_results(results) |
# spot check on raw data from the har dataset | |
from numpy import dstack | |
from pandas import read_csv | |
from sklearn.metrics import accuracy_score | |
from sklearn.neighbors import KNeighborsClassifier | |
from sklearn.tree import DecisionTreeClassifier | |
from sklearn.svm import SVC | |
from sklearn.naive_bayes import GaussianNB | |
from sklearn.ensemble import BaggingClassifier | |
from sklearn.ensemble import RandomForestClassifier | |
from sklearn.ensemble import ExtraTreesClassifier | |
from sklearn.ensemble import GradientBoostingClassifier | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files into a 3D array of [samples, timesteps, features] | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# flatten X | |
trainX = trainX.reshape((trainX.shape[0], trainX.shape[1] * trainX.shape[2])) | |
testX = testX.reshape((testX.shape[0], testX.shape[1] * testX.shape[2])) | |
# flatten y | |
trainy, testy = trainy[:,0], testy[:,0] | |
return trainX, trainy, testX, testy | |
# create a dict of standard models to evaluate {name:object} | |
def define_models(models=dict()): | |
# nonlinear models | |
models['knn'] = KNeighborsClassifier(n_neighbors=7) | |
models['cart'] = DecisionTreeClassifier() | |
models['svm'] = SVC() | |
models['bayes'] = GaussianNB() | |
# ensemble models | |
models['bag'] = BaggingClassifier(n_estimators=100) | |
models['rf'] = RandomForestClassifier(n_estimators=100) | |
models['et'] = ExtraTreesClassifier(n_estimators=100) | |
models['gbm'] = GradientBoostingClassifier(n_estimators=100) | |
print('Defined %d models' % len(models)) | |
return models | |
# evaluate a single model | |
def evaluate_model(trainX, trainy, testX, testy, model): | |
# fit the model | |
model.fit(trainX, trainy) | |
# make predictions | |
yhat = model.predict(testX) | |
# evaluate predictions | |
accuracy = accuracy_score(testy, yhat) | |
return accuracy * 100.0 | |
# evaluate a dict of models {name:object}, returns {name:score} | |
def evaluate_models(trainX, trainy, testX, testy, models): | |
results = dict() | |
for name, model in models.items(): | |
# evaluate the model | |
results[name] = evaluate_model(trainX, trainy, testX, testy, model) | |
# show progress
print('>%s: %.3f' % (name, results[name])) | |
return results | |
# print the results, best first
def summarize_results(results, maximize=True):
# create a list of (name, score) tuples
mean_scores = [(k,v) for k,v in results.items()]
# sort tuples by mean score | |
mean_scores = sorted(mean_scores, key=lambda x: x[1]) | |
# reverse for descending order (e.g. for accuracy) | |
if maximize: | |
mean_scores = list(reversed(mean_scores)) | |
print() | |
for name, score in mean_scores: | |
print('Name=%s, Score=%.3f' % (name, score)) | |
# load dataset | |
trainX, trainy, testX, testy = load_dataset() | |
# get model list | |
models = define_models() | |
# evaluate models | |
results = evaluate_models(trainX, trainy, testX, testy, models) | |
# summarize results | |
summarize_results(results) |
# cnn model for the har dataset | |
from numpy import mean | |
from numpy import std | |
from numpy import dstack | |
from pandas import read_csv | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import Dropout | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
from keras.utils import to_categorical | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files and return as a 3d numpy array | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# zero-offset class values | |
trainy = trainy - 1 | |
testy = testy - 1 | |
# one hot encode y | |
trainy = to_categorical(trainy) | |
testy = to_categorical(testy) | |
return trainX, trainy, testX, testy | |
# fit and evaluate a model | |
def evaluate_model(trainX, trainy, testX, testy): | |
verbose, epochs, batch_size = 0, 10, 32 | |
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1] | |
model = Sequential() | |
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features))) | |
model.add(Conv1D(filters=64, kernel_size=3, activation='relu')) | |
model.add(Dropout(0.5)) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(100, activation='relu')) | |
model.add(Dense(n_outputs, activation='softmax')) | |
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) | |
# fit network | |
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
# evaluate model | |
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0) | |
return accuracy | |
# summarize scores | |
def summarize_results(scores): | |
print(scores) | |
m, s = mean(scores), std(scores) | |
print('Accuracy: %.3f%% (+/-%.3f)' % (m, s)) | |
# run an experiment | |
def run_experiment(repeats=10): | |
# load data | |
trainX, trainy, testX, testy = load_dataset() | |
# repeat experiment | |
scores = list() | |
for r in range(repeats): | |
score = evaluate_model(trainX, trainy, testX, testy) | |
score = score * 100.0 | |
print('>#%d: %.3f' % (r+1, score)) | |
scores.append(score) | |
# summarize results | |
summarize_results(scores) | |
# run the experiment | |
run_experiment() |
# plot distributions for the har dataset | |
from numpy import dstack | |
from pandas import read_csv | |
from keras.utils import to_categorical | |
from matplotlib import pyplot | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files and return as a 3d numpy array | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# zero-offset class values | |
trainy = trainy - 1 | |
testy = testy - 1 | |
# one hot encode y | |
trainy = to_categorical(trainy) | |
testy = to_categorical(testy) | |
return trainX, trainy, testX, testy | |
# plot a histogram of each variable in the dataset | |
def plot_variable_distributions(trainX): | |
# remove overlap | |
cut = int(trainX.shape[1] / 2) | |
longX = trainX[:, -cut:, :] | |
# flatten windows | |
longX = longX.reshape((longX.shape[0] * longX.shape[1], longX.shape[2])) | |
pyplot.figure() | |
for i in range(longX.shape[1]): | |
# create figure | |
ax = pyplot.subplot(longX.shape[1], 1, i+1) | |
ax.set_xlim(-1, 1) | |
# create histogram | |
pyplot.hist(longX[:, i], bins=100) | |
# simplify axis to remove clutter
pyplot.yticks([]) | |
pyplot.xticks([-1,0,1]) | |
pyplot.show() | |
# load data | |
trainX, trainy, testX, testy = load_dataset() | |
# plot histograms | |
plot_variable_distributions(trainX) |
# cnn model with standardization for the har dataset | |
from numpy import mean | |
from numpy import std | |
from numpy import dstack | |
from pandas import read_csv | |
from matplotlib import pyplot | |
from sklearn.preprocessing import StandardScaler | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import Dropout | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
from keras.utils import to_categorical | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files and return as a 3d numpy array | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# zero-offset class values | |
trainy = trainy - 1 | |
testy = testy - 1 | |
# one hot encode y | |
trainy = to_categorical(trainy) | |
testy = to_categorical(testy) | |
return trainX, trainy, testX, testy | |
# standardize data | |
def scale_data(trainX, testX, standardize): | |
# remove overlap | |
cut = int(trainX.shape[1] / 2) | |
longX = trainX[:, -cut:, :] | |
# flatten windows | |
longX = longX.reshape((longX.shape[0] * longX.shape[1], longX.shape[2])) | |
# flatten train and test | |
flatTrainX = trainX.reshape((trainX.shape[0] * trainX.shape[1], trainX.shape[2])) | |
flatTestX = testX.reshape((testX.shape[0] * testX.shape[1], testX.shape[2])) | |
# standardize | |
if standardize: | |
s = StandardScaler() | |
# fit on training data | |
s.fit(longX) | |
# apply to training and test data | |
longX = s.transform(longX) | |
flatTrainX = s.transform(flatTrainX) | |
flatTestX = s.transform(flatTestX) | |
# reshape | |
flatTrainX = flatTrainX.reshape((trainX.shape)) | |
flatTestX = flatTestX.reshape((testX.shape)) | |
return flatTrainX, flatTestX | |
# fit and evaluate a model | |
def evaluate_model(trainX, trainy, testX, testy, param): | |
verbose, epochs, batch_size = 0, 10, 32 | |
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1] | |
# scale data | |
trainX, testX = scale_data(trainX, testX, param) | |
model = Sequential() | |
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features))) | |
model.add(Conv1D(filters=64, kernel_size=3, activation='relu')) | |
model.add(Dropout(0.5)) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(100, activation='relu')) | |
model.add(Dense(n_outputs, activation='softmax')) | |
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) | |
# fit network | |
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
# evaluate model | |
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0) | |
return accuracy | |
# summarize scores | |
def summarize_results(scores, params): | |
print(scores, params) | |
# summarize mean and standard deviation | |
for i in range(len(scores)): | |
m, s = mean(scores[i]), std(scores[i]) | |
print('Param=%s: %.3f%% (+/-%.3f)' % (params[i], m, s)) | |
# boxplot of scores | |
pyplot.boxplot(scores, labels=params) | |
pyplot.savefig('exp_cnn_standardize.png') | |
# run an experiment | |
def run_experiment(params, repeats=10): | |
# load data | |
trainX, trainy, testX, testy = load_dataset() | |
# test each parameter | |
all_scores = list() | |
for p in params: | |
# repeat experiment | |
scores = list() | |
for r in range(repeats): | |
score = evaluate_model(trainX, trainy, testX, testy, p) | |
score = score * 100.0 | |
print('>p=%s #%d: %.3f' % (p, r+1, score)) | |
scores.append(score) | |
all_scores.append(scores) | |
# summarize results | |
summarize_results(all_scores, params) | |
# run the experiment | |
n_params = [False, True] | |
run_experiment(n_params) |
# cnn model with filters for the har dataset | |
from numpy import mean | |
from numpy import std | |
from numpy import dstack | |
from pandas import read_csv | |
from matplotlib import pyplot | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import Dropout | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
from keras.utils import to_categorical | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files and return as a 3d numpy array | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# zero-offset class values | |
trainy = trainy - 1 | |
testy = testy - 1 | |
# one hot encode y | |
trainy = to_categorical(trainy) | |
testy = to_categorical(testy) | |
return trainX, trainy, testX, testy | |
# fit and evaluate a model | |
def evaluate_model(trainX, trainy, testX, testy, n_filters): | |
verbose, epochs, batch_size = 0, 10, 32 | |
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1] | |
model = Sequential() | |
model.add(Conv1D(filters=n_filters, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features))) | |
model.add(Conv1D(filters=n_filters, kernel_size=3, activation='relu')) | |
model.add(Dropout(0.5)) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(100, activation='relu')) | |
model.add(Dense(n_outputs, activation='softmax')) | |
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) | |
# fit network | |
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
# evaluate model | |
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0) | |
return accuracy | |
# summarize scores | |
def summarize_results(scores, params): | |
print(scores, params) | |
# summarize mean and standard deviation | |
for i in range(len(scores)): | |
m, s = mean(scores[i]), std(scores[i]) | |
print('Param=%d: %.3f%% (+/-%.3f)' % (params[i], m, s)) | |
# boxplot of scores | |
pyplot.boxplot(scores, labels=params) | |
pyplot.savefig('exp_cnn_filters.png') | |
# run an experiment | |
def run_experiment(params, repeats=10): | |
# load data | |
trainX, trainy, testX, testy = load_dataset() | |
# test each parameter | |
all_scores = list() | |
for p in params: | |
# repeat experiment | |
scores = list() | |
for r in range(repeats): | |
score = evaluate_model(trainX, trainy, testX, testy, p) | |
score = score * 100.0 | |
print('>p=%d #%d: %.3f' % (p, r+1, score)) | |
scores.append(score) | |
all_scores.append(scores) | |
# summarize results | |
summarize_results(all_scores, params) | |
# run the experiment | |
n_params = [8, 16, 32, 64, 128, 256] | |
run_experiment(n_params) |
# cnn model vary kernel size for the har dataset | |
from numpy import mean | |
from numpy import std | |
from numpy import dstack | |
from pandas import read_csv | |
from matplotlib import pyplot | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import Dropout | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
from keras.utils import to_categorical | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files and return as a 3d numpy array | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# zero-offset class values | |
trainy = trainy - 1 | |
testy = testy - 1 | |
# one hot encode y | |
trainy = to_categorical(trainy) | |
testy = to_categorical(testy) | |
return trainX, trainy, testX, testy | |
# fit and evaluate a model | |
def evaluate_model(trainX, trainy, testX, testy, n_kernel): | |
verbose, epochs, batch_size = 0, 15, 32 | |
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1] | |
model = Sequential() | |
model.add(Conv1D(filters=64, kernel_size=n_kernel, activation='relu', input_shape=(n_timesteps,n_features))) | |
model.add(Conv1D(filters=64, kernel_size=n_kernel, activation='relu')) | |
model.add(Dropout(0.5)) | |
model.add(MaxPooling1D(pool_size=2)) | |
model.add(Flatten()) | |
model.add(Dense(100, activation='relu')) | |
model.add(Dense(n_outputs, activation='softmax')) | |
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) | |
# fit network | |
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
# evaluate model | |
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0) | |
return accuracy | |
# summarize scores | |
def summarize_results(scores, params): | |
print(scores, params) | |
# summarize mean and standard deviation | |
for i in range(len(scores)): | |
m, s = mean(scores[i]), std(scores[i]) | |
print('Param=%d: %.3f%% (+/-%.3f)' % (params[i], m, s)) | |
# boxplot of scores | |
pyplot.boxplot(scores, labels=params) | |
pyplot.savefig('exp_cnn_kernel.png') | |
# run an experiment | |
def run_experiment(params, repeats=10): | |
# load data | |
trainX, trainy, testX, testy = load_dataset() | |
# test each parameter | |
all_scores = list() | |
for p in params: | |
# repeat experiment | |
scores = list() | |
for r in range(repeats): | |
score = evaluate_model(trainX, trainy, testX, testy, p) | |
score = score * 100.0 | |
print('>p=%d #%d: %.3f' % (p, r+1, score)) | |
scores.append(score) | |
all_scores.append(scores) | |
# summarize results | |
summarize_results(all_scores, params) | |
# run the experiment | |
n_params = [2, 3, 5, 7, 11] | |
run_experiment(n_params) |
# multi-headed cnn model for the har dataset | |
from numpy import mean | |
from numpy import std | |
from numpy import dstack | |
from pandas import read_csv | |
from keras.utils import to_categorical | |
from keras.utils.vis_utils import plot_model | |
from keras.models import Model | |
from keras.layers import Input | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import Dropout | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
from keras.layers.merge import concatenate | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files and return as a 3d numpy array | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# zero-offset class values | |
trainy = trainy - 1 | |
testy = testy - 1 | |
# one hot encode y | |
trainy = to_categorical(trainy) | |
testy = to_categorical(testy) | |
return trainX, trainy, testX, testy | |
# fit and evaluate a model | |
def evaluate_model(trainX, trainy, testX, testy): | |
verbose, epochs, batch_size = 0, 10, 32 | |
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1] | |
# head 1 | |
inputs1 = Input(shape=(n_timesteps,n_features)) | |
conv1 = Conv1D(filters=64, kernel_size=3, activation='relu')(inputs1) | |
drop1 = Dropout(0.5)(conv1) | |
pool1 = MaxPooling1D(pool_size=2)(drop1) | |
flat1 = Flatten()(pool1) | |
# head 2 | |
inputs2 = Input(shape=(n_timesteps,n_features)) | |
conv2 = Conv1D(filters=64, kernel_size=5, activation='relu')(inputs2) | |
drop2 = Dropout(0.5)(conv2) | |
pool2 = MaxPooling1D(pool_size=2)(drop2) | |
flat2 = Flatten()(pool2) | |
# head 3 | |
inputs3 = Input(shape=(n_timesteps,n_features)) | |
conv3 = Conv1D(filters=64, kernel_size=11, activation='relu')(inputs3) | |
drop3 = Dropout(0.5)(conv3) | |
pool3 = MaxPooling1D(pool_size=2)(drop3) | |
flat3 = Flatten()(pool3) | |
# merge | |
merged = concatenate([flat1, flat2, flat3]) | |
# interpretation | |
dense1 = Dense(100, activation='relu')(merged) | |
outputs = Dense(n_outputs, activation='softmax')(dense1) | |
model = Model(inputs=[inputs1, inputs2, inputs3], outputs=outputs) | |
# save a plot of the model | |
plot_model(model, show_shapes=True, to_file='multiheaded.png') | |
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) | |
# fit network | |
model.fit([trainX,trainX,trainX], trainy, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
# evaluate model | |
_, accuracy = model.evaluate([testX,testX,testX], testy, batch_size=batch_size, verbose=0) | |
return accuracy | |
# summarize scores | |
def summarize_results(scores): | |
print(scores) | |
m, s = mean(scores), std(scores) | |
print('Accuracy: %.3f%% (+/-%.3f)' % (m, s)) | |
# run an experiment | |
def run_experiment(repeats=10): | |
# load data | |
trainX, trainy, testX, testy = load_dataset() | |
# repeat experiment | |
scores = list() | |
for r in range(repeats): | |
score = evaluate_model(trainX, trainy, testX, testy) | |
score = score * 100.0 | |
print('>#%d: %.3f' % (r+1, score)) | |
scores.append(score) | |
# summarize results | |
summarize_results(scores) | |
# run the experiment | |
run_experiment() |
# lstm model for the har dataset | |
from numpy import mean | |
from numpy import std | |
from numpy import dstack | |
from pandas import read_csv | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Dropout | |
from keras.layers import LSTM | |
from keras.utils import to_categorical | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files and return as a 3d numpy array | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# zero-offset class values | |
trainy = trainy - 1 | |
testy = testy - 1 | |
# one hot encode y | |
trainy = to_categorical(trainy) | |
testy = to_categorical(testy) | |
return trainX, trainy, testX, testy | |
# fit and evaluate a model | |
def evaluate_model(trainX, trainy, testX, testy): | |
verbose, epochs, batch_size = 0, 15, 64 | |
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1] | |
model = Sequential() | |
model.add(LSTM(100, input_shape=(n_timesteps,n_features))) | |
model.add(Dropout(0.5)) | |
model.add(Dense(100, activation='relu')) | |
model.add(Dense(n_outputs, activation='softmax')) | |
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) | |
# fit network | |
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
# evaluate model | |
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0) | |
return accuracy | |
# summarize scores | |
def summarize_results(scores): | |
print(scores) | |
m, s = mean(scores), std(scores) | |
print('Accuracy: %.3f%% (+/-%.3f)' % (m, s)) | |
# run an experiment | |
def run_experiment(repeats=10): | |
# load data | |
trainX, trainy, testX, testy = load_dataset() | |
# repeat experiment | |
scores = list() | |
for r in range(repeats): | |
score = evaluate_model(trainX, trainy, testX, testy) | |
score = score * 100.0 | |
print('>#%d: %.3f' % (r+1, score)) | |
scores.append(score) | |
# summarize results | |
summarize_results(scores) | |
# run the experiment | |
run_experiment() |
# cnn lstm model for the har dataset | |
from numpy import mean | |
from numpy import std | |
from numpy import dstack | |
from pandas import read_csv | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import Dropout | |
from keras.layers import LSTM | |
from keras.layers import TimeDistributed | |
from keras.layers.convolutional import Conv1D | |
from keras.layers.convolutional import MaxPooling1D | |
from keras.utils import to_categorical | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files and return as a 3d numpy array | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# zero-offset class values | |
trainy = trainy - 1 | |
testy = testy - 1 | |
# one hot encode y | |
trainy = to_categorical(trainy) | |
testy = to_categorical(testy) | |
return trainX, trainy, testX, testy | |
# fit and evaluate a model | |
def evaluate_model(trainX, trainy, testX, testy): | |
# define model | |
verbose, epochs, batch_size = 0, 25, 64 | |
n_features, n_outputs = trainX.shape[2], trainy.shape[1] | |
# reshape data into time steps of sub-sequences | |
n_steps, n_length = 4, 32 | |
trainX = trainX.reshape((trainX.shape[0], n_steps, n_length, n_features)) | |
testX = testX.reshape((testX.shape[0], n_steps, n_length, n_features)) | |
# define model | |
model = Sequential() | |
model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu'), input_shape=(None,n_length,n_features))) | |
model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu'))) | |
model.add(TimeDistributed(Dropout(0.5))) | |
model.add(TimeDistributed(MaxPooling1D(pool_size=2))) | |
model.add(TimeDistributed(Flatten())) | |
model.add(LSTM(100)) | |
model.add(Dropout(0.5)) | |
model.add(Dense(100, activation='relu')) | |
model.add(Dense(n_outputs, activation='softmax')) | |
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) | |
# fit network | |
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
# evaluate model | |
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0) | |
return accuracy | |
# summarize scores | |
def summarize_results(scores): | |
print(scores) | |
m, s = mean(scores), std(scores) | |
print('Accuracy: %.3f%% (+/-%.3f)' % (m, s)) | |
# run an experiment | |
def run_experiment(repeats=10): | |
# load data | |
trainX, trainy, testX, testy = load_dataset() | |
# repeat experiment | |
scores = list() | |
for r in range(repeats): | |
score = evaluate_model(trainX, trainy, testX, testy) | |
score = score * 100.0 | |
print('>#%d: %.3f' % (r+1, score)) | |
scores.append(score) | |
# summarize results | |
summarize_results(scores) | |
# run the experiment | |
run_experiment() |
# convlstm model for the har dataset | |
from numpy import mean | |
from numpy import std | |
from numpy import dstack | |
from pandas import read_csv | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import Flatten | |
from keras.layers import Dropout | |
from keras.layers import ConvLSTM2D | |
from keras.utils import to_categorical | |
# load a single file as a numpy array | |
def load_file(filepath): | |
dataframe = read_csv(filepath, header=None, delim_whitespace=True) | |
return dataframe.values | |
# load a list of files and return as a 3d numpy array | |
def load_group(filenames, prefix=''): | |
loaded = list() | |
for name in filenames: | |
data = load_file(prefix + name) | |
loaded.append(data) | |
# stack group so that features are the 3rd dimension | |
loaded = dstack(loaded) | |
return loaded | |
# load a dataset group, such as train or test | |
def load_dataset_group(group, prefix=''): | |
filepath = prefix + group + '/Inertial Signals/' | |
# load all 9 files as a single array | |
filenames = list() | |
# total acceleration | |
filenames += ['total_acc_x_'+group+'.txt', 'total_acc_y_'+group+'.txt', 'total_acc_z_'+group+'.txt'] | |
# body acceleration | |
filenames += ['body_acc_x_'+group+'.txt', 'body_acc_y_'+group+'.txt', 'body_acc_z_'+group+'.txt'] | |
# body gyroscope | |
filenames += ['body_gyro_x_'+group+'.txt', 'body_gyro_y_'+group+'.txt', 'body_gyro_z_'+group+'.txt'] | |
# load input data | |
X = load_group(filenames, filepath) | |
# load class output | |
y = load_file(prefix + group + '/y_'+group+'.txt') | |
return X, y | |
# load the dataset, returns train and test X and y elements | |
def load_dataset(prefix=''): | |
# load all train | |
trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/') | |
# load all test | |
testX, testy = load_dataset_group('test', prefix + 'HARDataset/') | |
# zero-offset class values | |
trainy = trainy - 1 | |
testy = testy - 1 | |
# one hot encode y | |
trainy = to_categorical(trainy) | |
testy = to_categorical(testy) | |
return trainX, trainy, testX, testy | |
# fit and evaluate a model | |
def evaluate_model(trainX, trainy, testX, testy): | |
# define model | |
verbose, epochs, batch_size = 0, 25, 64 | |
n_features, n_outputs = trainX.shape[2], trainy.shape[1] | |
# reshape into subsequences (samples, time steps, rows, cols, channels) | |
n_steps, n_length = 4, 32 | |
trainX = trainX.reshape((trainX.shape[0], n_steps, 1, n_length, n_features)) | |
testX = testX.reshape((testX.shape[0], n_steps, 1, n_length, n_features)) | |
# define model | |
model = Sequential() | |
model.add(ConvLSTM2D(filters=64, kernel_size=(1,3), activation='relu', input_shape=(n_steps, 1, n_length, n_features))) | |
model.add(Dropout(0.5)) | |
model.add(Flatten()) | |
model.add(Dense(100, activation='relu')) | |
model.add(Dense(n_outputs, activation='softmax')) | |
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) | |
# fit network | |
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
# evaluate model | |
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0) | |
return accuracy | |
# summarize scores | |
def summarize_results(scores): | |
print(scores) | |
m, s = mean(scores), std(scores) | |
print('Accuracy: %.3f%% (+/-%.3f)' % (m, s)) | |
# run an experiment | |
def run_experiment(repeats=10): | |
# load data | |
trainX, trainy, testX, testy = load_dataset() | |
# repeat experiment | |
scores = list() | |
for r in range(repeats): | |
score = evaluate_model(trainX, trainy, testX, testy) | |
score = score * 100.0 | |
print('>#%d: %.3f' % (r+1, score)) | |
scores.append(score) | |
# summarize results | |
summarize_results(scores) | |
# run the experiment | |
run_experiment() |
# lstm model predicting n-day close-to-close direction for a single stock csv
from numpy import array | |
from numpy import mean | |
from numpy import std | |
from pandas import read_csv | |
from keras.models import Sequential | |
from keras.layers import Dense | |
from keras.layers import LSTM | |
from keras.utils.np_utils import to_categorical | |
import warnings | |
warnings.simplefilter(action='ignore', category=FutureWarning) | |
warnings.simplefilter(action='ignore', category=DeprecationWarning) | |
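# slice the multivariate price table into overlapping windows of n_steps rows;
# only the X windows are returned here - the direction labels are built separately in load_dataset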
def split_sequences(sequences, n_steps): | |
X = list() | |
for i in range(len(sequences)): | |
# find the end of this pattern | |
end_ix = i + n_steps | |
# check if we are beyond the dataset | |
if end_ix > len(sequences) - 1: | |
break | |
# gather input and output parts of the pattern | |
seq_x = sequences[i:end_ix, :] | |
X.append(seq_x) | |
return array(X) | |
# load the per-stock csv and build aligned windowed train/test splits
def load_dataset(nday, n_steps):
series = read_csv('/Users/ruoang/Company/astocks/600868.SH.csv', header=0, index_col=0)
nday_close = series['Close'].shift(nday) | |
nday_pct_chg = (nday_close - series['Close']) > 0 | |
nday_pct_chg = nday_pct_chg[:(0 - n_steps)] | |
nday_pct_chg = nday_pct_chg.values | |
y = nday_pct_chg.reshape((len(nday_pct_chg), 1)) | |
y = to_categorical(y) | |
#print(y) | |
dataset = series.values | |
X = split_sequences(dataset, n_steps) | |
#print(X.shape, y.shape) | |
train_len = int(0.88 * len(X)) | |
train_X = X[:train_len, :] | |
test_X = X[train_len:, :] | |
train_y = y[:train_len, :] | |
test_y = y[train_len:, :] | |
#print(train_X.shape, train_y.shape, test_X.shape, test_y.shape) | |
return train_X, train_y, test_X, test_y | |
# fit and evaluate a model | |
def evaluate_model(trainX, trainy, testX, testy): | |
verbose, epochs, batch_size = 0, 15, 32 | |
n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1] | |
model = Sequential() | |
#model.add(LSTM(10, input_shape=(n_timesteps, n_features), return_sequences=True)) | |
#model.add(LSTM(10, input_shape=(n_timesteps, n_features), return_sequences=True)) | |
model.add(LSTM(10, input_shape=(n_timesteps, n_features))) | |
model.add(Dense(10, activation='relu')) | |
model.add(Dense(n_outputs, activation='softmax')) | |
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) | |
# fit network | |
model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose) | |
# evaluate model | |
_, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=0) | |
return accuracy | |
# summarize scores | |
def summarize_results(scores): | |
print(scores) | |
m, s = mean(scores), std(scores) | |
print('Accuracy: %.3f%% (+/-%.3f)' % (m, s)) | |
# run an experiment | |
def run_experiment(repeats=1): | |
for ndays in range(1, 2): | |
for nsteps in range(10, 20): | |
# load data | |
trainX, trainy, testX, testy = load_dataset(ndays, nsteps) | |
# repeat experiment | |
scores = list() | |
for r in range(repeats): | |
score = evaluate_model(trainX, trainy, testX, testy) | |
score = score * 100.0 | |
#print('>#%d: %.3f' % (r + 1, score)) | |
scores.append(score) | |
print('ndays: %d, nsteps: %d, score: %.3f' % (ndays, nsteps, mean(scores))) | |
if repeats > 1: | |
# summarize results | |
summarize_results(scores) | |
# run the experiment | |
run_experiment() |
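# download qfq daily bars from tushare and load them into the mysql daily table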
import tushare as ts | |
import math | |
import warnings | |
warnings.simplefilter(action='ignore') | |
import MySQLdb as mdb | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
# Obtain a database connection to the MySQL instance | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
data = pro.stock_basic(exchange='', list_status='L', fields='ts_code')
ts_code = data['ts_code'] | |
api = ts.pro_api() | |
for stock in ts_code: | |
df = ts.pro_bar(pro_api = api, ts_code = stock, adj = 'qfq', start_date = '20130101', end_date = '20191231', ma=[5, 20, 50, 200]) | |
if df is None: | |
continue | |
#print(df) | |
cur = con.cursor() | |
column_str = """date, code, open, close, high, low, volume, pct_chg, ma5, ma20, ma50, ma200, v_ma5, v_ma20, v_ma50, v_ma200, day_price_chg, week_price_chg, month_price_chg""" | |
for i in range(0, len(df)): | |
if i < 20: | |
continue | |
row = df.iloc[i] | |
if math.isnan(row['ma50']) or math.isnan(row['ma200']): | |
continue | |
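# pro_bar returns rows newest-first, so iloc[i-1], iloc[i-5] and iloc[i-20] are 1, 5 and 20
# trading days after row i: the stored changes are forward-looking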
day_price_chg = (df.iloc[i-1]['close']/row['close'] - 1) * 100 | |
week_price_chg = (df.iloc[i-5]['close']/row['close'] - 1) * 100 | |
month_price_chg = (df.iloc[i-20]['close']/row['close'] - 1) * 100 | |
final_str = "REPLACE INTO daily (%s) VALUES ('%s', '%s', '%s', '%s','%s', '%s', '%s', '%s','%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', '%s')" % \ | |
(column_str, | |
row['trade_date'], | |
row['ts_code'], | |
row['open'], | |
row['close'], | |
row['high'], | |
row['low'], | |
row['vol'], | |
row['pct_chg'], | |
row['ma5'], | |
row['ma20'], | |
row['ma50'], | |
row['ma200'], | |
row['ma_v_5'], | |
row['ma_v_20'], | |
row['ma_v_50'], | |
row['ma_v_200'], | |
day_price_chg, | |
week_price_chg, | |
month_price_chg) | |
cur.execute(final_str) | |
con.commit() | |
print('%s finish' %(stock)) | |
con.commit() | |
con.close() | |
print("Done") |
from hurst import * | |
from common import * | |
import math | |
import warnings | |
warnings.simplefilter(action='ignore') | |
import MySQLdb as mdb | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
with open("whole") as f: | |
content = f.readlines() | |
whole = [x.strip() for x in content] | |
for stock in whole: | |
df = None | |
while True: | |
try: | |
df = pro.daily_basic(ts_code=stock, | |
fields='trade_date, ts_code, turnover_rate_f,volume_ratio,pe,pb,ps,circ_mv, total_mv', | |
start_date='20130801', end_date='20191231') | |
break | |
except Exception:  # keep retrying on api/network errors without swallowing KeyboardInterrupt
print('%s error' % stock) | |
time.sleep(5) | |
if df is None: | |
continue | |
cur = con.cursor() | |
for i in range(0, len(df) - 1): | |
row = df.iloc[i] | |
try: | |
if math.isnan(row['turnover_rate_f']) or \ | |
row['volume_ratio'] is None or \ | |
math.isnan(row['volume_ratio']) or \ | |
math.isnan(row['pe']) or \ | |
math.isnan(row['pb']) or \ | |
math.isnan(row['circ_mv']) or \ | |
math.isnan(row['total_mv']) or \ | |
math.isnan(row['ps']): | |
continue | |
except Exception:  # skip rows where a field is missing or non-numeric
continue | |
final_str = "UPDATE daily SET turnover_rate_f={}, volume_ratio={}, pe={}, pb={}, ps={}, circ_mv={}, total_mv={} WHERE date = '{}' AND code = '{}'".format( | |
row['turnover_rate_f'], | |
row['volume_ratio'], | |
row['pe'], | |
row['pb'], | |
row['ps'], | |
row['circ_mv'], | |
row['total_mv'], | |
row['trade_date'], | |
row['ts_code']) | |
# print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
print('%s finish' % (stock)) | |
con.commit() | |
con.close() | |
print("Done") |
import numpy as np | |
from sklearn.preprocessing import MinMaxScaler # library required for scaling the data | |
import pandas as pd | |
import warnings | |
warnings.filterwarnings("ignore") | |
data = pd.read_csv('nn/Recurrent Neural Networks/SBIN.csv',index_col = 0,parse_dates = True, na_values = 0).dropna() | |
max_=data[['Open','High','Low','Close']].max().max() | |
min_=data[['Open','High','Low','Close']].min().min() | |
X1=(data[['Open','High','Low','Close']]-min_)/(max_-min_) | |
X1=np.array(X1) | |
data[['Volume']] = data[['Volume']].astype(float) | |
scl=MinMaxScaler() | |
X2=scl.fit_transform(data[['Volume']].values.reshape(-1,1)) | |
data=data.assign(Open=X1[:,0]) | |
data=data.assign(High=X1[:,1]) | |
data=data.assign(Low=X1[:,2]) | |
data=data.assign(Close=X1[:,3]) | |
data=data.assign(Volume=X2[:,0]) | |
X=data[['Open','High','Low','Close','Volume']] | |
y=np.where(data.Close.shift(-5)>data.Close,1,0)
test_size=100 | |
X_train=np.array(X.iloc[:-test_size]) | |
y_train=np.array(y[:-test_size]) | |
X_test=np.array(X.iloc[-test_size:]) | |
y_test=np.array(y[-test_size:]) | |
class_1=y_train.sum() | |
class_0=len(y_train)-class_1 | |
class_0_percentage=class_0/len(y_train) | |
class_1_percentage=class_1/len(y_train) | |
class_0_weight=class_1_percentage | |
class_1_weight=class_0_percentage | |
class_weight={0:class_0_weight,1:class_1_weight} | |
# Libraries | |
from keras.models import Sequential | |
from keras.layers import Dropout | |
from keras.layers import Dense, Activation | |
from keras.callbacks import ModelCheckpoint | |
from keras.layers import BatchNormalization | |
# Parameters | |
dropout_ratio=0.20 | |
momentum=0.99 | |
np.random.seed(42) | |
neurons=175 | |
act_1='tanh' | |
act_2='softmax' | |
# The DNN Model | |
print(X_train.shape[1:]) | |
model=Sequential()  # layers are stacked sequentially, one after another
# Please Type Your Code Below | |
model.add(Dense(neurons, use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros',input_shape=X_train.shape[1:])) |
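# the script stops mid-model above; what follows is a minimal sketch that finishes it
# using only the parameters already defined (neurons, dropout_ratio, momentum, act_1,
# act_2, class_weight) - the 2-unit output, loss, epochs and batch size are assumptions
model.add(Activation(act_1))
model.add(BatchNormalization(momentum=momentum))
model.add(Dropout(dropout_ratio))
# two output units so the softmax in act_2 matches the integer 0/1 labels
model.add(Dense(2, kernel_initializer='he_normal', bias_initializer='zeros'))
model.add(Activation(act_2))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit with the class weights computed earlier and validate on the held-out tail
model.fit(X_train, y_train, epochs=50, batch_size=32, class_weight=class_weight, validation_data=(X_test, y_test), verbose=0)
print('test accuracy: %.3f' % model.evaluate(X_test, y_test, verbose=0)[1])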
from datetime import datetime | |
from hurst import * | |
import tushare as ts | |
import warnings | |
from factor import * | |
warnings.simplefilter(action='ignore') | |
import requests | |
import MySQLdb as mdb | |
url = 'http://pai-eas-vpc.cn-shanghai.aliyuncs.com/api/predict/short' | |
headers = {"Authorization": 'MDU4NDgwOWNjNjY5ODY1NmFiMmE4MzRmN2EzZjYzNTVhMGVhMDgzMw=='} | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock, list_date, industry, market, is_hs, name from stock_basic" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0], datetime.combine(d[1], datetime.min.time()), d[2], d[3], d[4], d[5]) for d in data] | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
predict_results = [] | |
date = 0 | |
start_date = '20190901' | |
end_date = '20201231' | |
for (stock, list_date, industry, market, is_hs, name) in obtain_stock_basic():
# skip ST stocks and STAR-market listings; 688 is a ts_code prefix, so test the code, not the name
if 'ST' in name or stock.startswith('688'):
continue
df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date=start_date, end_date=end_date, ma=[5, 10, 20, 60]) | |
if df is None or len(df) < 60: | |
continue | |
if df.head(60)['amount'].mean() < 30000: | |
continue | |
# print(df) | |
for i in range(0, 1): | |
row = df.iloc[i] | |
if row['trade_date'] is None: | |
continue | |
if math.isnan(row['ma60']) or math.isnan(row['ma20']) or math.isnan(row['ma10']): | |
continue | |
if row.pct_chg > 9.9: | |
continue | |
# print(row['trade_date']) | |
last_60_days = df[i: i + 60][::-1] | |
last_30_days = last_60_days.tail(30) | |
last_20_days = last_30_days.tail(20) | |
last_15_days = last_20_days.tail(15) | |
last_10_days = last_15_days.tail(10) | |
last_7_days = last_10_days.tail(7) | |
last_5_days = last_10_days.tail(5) | |
last_3_days = last_10_days.tail(3) | |
last_day = last_3_days.tail(1) | |
md10 = max_drawback(last_10_days) | |
md20 = max_drawback(last_20_days) | |
md30 = max_drawback(last_30_days) | |
md60 = max_drawback(last_60_days) | |
mp10 = max_profit(last_10_days) | |
mp20 = max_profit(last_20_days) | |
mp30 = max_profit(last_30_days) | |
mp60 = max_profit(last_60_days) | |
udvr10 = up_down_v_ratio_v2(last_10_days) | |
udvr20 = up_down_v_ratio_v2(last_20_days) | |
udvr30 = up_down_v_ratio_v2(last_30_days) | |
udvr60 = up_down_v_ratio_v2(last_60_days) | |
udpr10 = up_down_pct_chg_ratio(last_10_days) | |
udpr20 = up_down_pct_chg_ratio(last_20_days) | |
udpr30 = up_down_pct_chg_ratio(last_30_days) | |
udpr60 = up_down_pct_chg_ratio(last_60_days) | |
av1 = avg_volatility(last_day) | |
av3 = avg_volatility(last_3_days) | |
av5 = avg_volatility(last_5_days) | |
av7 = avg_volatility(last_7_days) | |
av10 = avg_volatility(last_10_days) | |
av20 = avg_volatility(last_20_days) | |
av60 = avg_volatility(last_60_days) | |
ma5 = row['ma5'] | |
ma10 = row['ma10'] | |
ma20 = row['ma20'] | |
ma60 = row['ma60'] | |
v_ma5 = row['ma_v_5'] | |
v_ma10 = row['ma_v_10'] | |
v_ma20 = row['ma_v_20'] | |
v_ma60 = row['ma_v_60'] | |
close = row['close'] | |
volume = row['vol'] | |
pct_chg = row['pct_chg'] | |
pr_5c = ma5 / close | |
pr_510 = ma5 / ma10 | |
pr_1020 = ma10 / ma20 | |
pr_2060 = ma20 / ma60 | |
vr_5c = v_ma5 / volume | |
vr_510 = v_ma5 / v_ma10 | |
vr_1020 = v_ma10 / v_ma20 | |
vr_2060 = v_ma20 / v_ma60 | |
pc1 = avg_pct_chg(last_day) | |
pc3 = avg_pct_chg(last_3_days) | |
pc5 = avg_pct_chg(last_5_days) | |
pc7 = avg_pct_chg(last_7_days) | |
pc10 = avg_pct_chg(last_10_days) | |
pc20 = avg_pct_chg(last_20_days) | |
pc60 = avg_pct_chg(last_60_days) | |
cur = con.cursor() | |
data = '[{"md10":%s, "md20":%s, "md30":%s, "md60":%s, "mp10":%s, "mp20":%s, \
"mp30":%s, "mp60":%s, "udvr10":%s, "udvr20":%s, "udvr30":%s, "udvr60":%s, "udpr10":%s, "udpr20":%s, \ | |
"udpr30":%s, "udpr60":%s, "av1":%s, "av3":%s, "av5":%s, "av7":%s, "av10":%s, "av20":%s, "av60":%s, \ | |
"pc1":%s, "pc3":%s, "pc5":%s, "pc7":%s, "pc10":%s, "pc20":%s, "pc60":%s,\ | |
"pr_5c":%s, "pr_510":%s, "pr_1020":%s, "pr_2060":%s,\ | |
"vr_5c":%s, "vr_510":%s, "vr_1020":%s, "vr_2060":%s }]' % \ | |
(md10, | |
md20, | |
md30, | |
md60, | |
mp10, | |
mp20, | |
mp30, | |
mp60, | |
udvr10, | |
udvr20, | |
udvr30, | |
udvr60, | |
udpr10, | |
udpr20, | |
udpr30, | |
udpr60, | |
av1, | |
av3, | |
av5, | |
av7, | |
av10, | |
av20, | |
av60, | |
pc1, | |
pc3, | |
pc5, | |
pc7, | |
pc10, | |
pc20, | |
pc60, | |
pr_5c, | |
pr_510, | |
pr_1020, | |
pr_2060, | |
vr_5c, | |
vr_510, | |
vr_1020, | |
vr_2060) | |
# print(data) | |
resp = requests.post(url, data=data, headers=headers) | |
if resp.status_code != 200: | |
print(resp.content) | |
else: | |
#print(resp.content) | |
value = float(str(resp.content).split(":")[1].split("}")[0]) | |
if value > 5: | |
query = "'%s', '%s', '%s'" % (row['trade_date'], stock, value) | |
predict_results.append(query) | |
print(query) | |
print('%s finish' % (stock)) | |
print(predict_results) | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
# Obtain a database connection to the MySQL instance | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
cur = con.cursor() | |
for q in predict_results: | |
query = "INSERT IGNORE INTO predict_short (date, code, value) VALUES (%s)" % (q) | |
print(query) | |
cur.execute(query) | |
con.commit() | |
con.close() | |
print("Done") |
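# A hedged sketch of a sturdier request/insert pattern for the loop above:
# build the payload with json.dumps (the hand-rolled format string breaks on
# nan values), parse the reply with resp.json(), and use a parameterized
# INSERT. The 'prediction' key is hypothetical -- substitute whatever key the
# deployed model actually returns.
import json

features = {"md10": md10, "md20": md20, "md30": md30, "md60": md60}  # ...plus the remaining factors from the loop
resp = requests.post(url, data=json.dumps([features]), headers=headers)
if resp.status_code == 200:
    value = float(resp.json()[0]['prediction'])  # hypothetical response key
    cur.execute("INSERT IGNORE INTO predict_short (date, code, value) VALUES (%s, %s, %s)",
                (row['trade_date'], stock, value))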
import tushare as ts | |
import warnings | |
from factor import * | |
warnings.simplefilter(action='ignore') | |
import requests | |
import MySQLdb as mdb | |
url = 'http://pai-eas-vpc.cn-shanghai.aliyuncs.com/api/predict/short_hk' | |
headers = {"Authorization": 'Y2E3YWEzYzM3ZThjZGFlYTdlZDc2NmQwNjQwMTBmZjg0NTkyZjk5MQ=='} | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock from stock_hk_basic where amount > 300000" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
predict_results = [] | |
date = 0 | |
start_date = '20190901' | |
end_date = '20201231' | |
for stock in obtain_stock_basic(): | |
try: | |
        df = pro.hk_daily(ts_code=stock, start_date=start_date, end_date=end_date)  # hk_daily takes no ma argument; MAs are computed below
    except:
        print("%s error" % stock)
        continue  # otherwise df from the previous iteration (or nothing) would be used
if df is None or len(df) < 60: | |
continue | |
#print(df) | |
rdf = df[::-1] | |
rdf = sma(rdf, "ma5", n=5) | |
rdf = sma(rdf, "ma10", n=10) | |
rdf = sma(rdf, "ma20", n=20) | |
rdf = sma(rdf, "ma60", n=60) | |
rdf = sma(rdf, "ma_v_5", n=5, val_name='vol') | |
rdf = sma(rdf, "ma_v_10", n=10, val_name='vol') | |
rdf = sma(rdf, "ma_v_20", n=20, val_name='vol') | |
rdf = sma(rdf, "ma_v_60", n=60, val_name='vol') | |
df = rdf[::-1] | |
for i in range(0, 1): | |
row = df.iloc[i] | |
#print(row) | |
if row['trade_date'] is None: | |
continue | |
if math.isnan(row['ma60']) or math.isnan(row['ma20']) or math.isnan(row['ma10']): | |
continue | |
last_60_days = df[i: i + 60][::-1] | |
last_30_days = last_60_days.tail(30) | |
last_20_days = last_30_days.tail(20) | |
last_15_days = last_20_days.tail(15) | |
last_10_days = last_15_days.tail(10) | |
last_7_days = last_10_days.tail(7) | |
last_5_days = last_10_days.tail(5) | |
last_3_days = last_10_days.tail(3) | |
last_day = last_3_days.tail(1) | |
md10 = max_drawback(last_10_days) | |
md20 = max_drawback(last_20_days) | |
md30 = max_drawback(last_30_days) | |
md60 = max_drawback(last_60_days) | |
mp10 = max_profit(last_10_days) | |
mp20 = max_profit(last_20_days) | |
mp30 = max_profit(last_30_days) | |
mp60 = max_profit(last_60_days) | |
udvr10 = up_down_v_ratio_v2(last_10_days) | |
udvr20 = up_down_v_ratio_v2(last_20_days) | |
udvr30 = up_down_v_ratio_v2(last_30_days) | |
udvr60 = up_down_v_ratio_v2(last_60_days) | |
udpr10 = up_down_pct_chg_ratio(last_10_days) | |
udpr20 = up_down_pct_chg_ratio(last_20_days) | |
udpr30 = up_down_pct_chg_ratio(last_30_days) | |
udpr60 = up_down_pct_chg_ratio(last_60_days) | |
av1 = avg_volatility(last_day) | |
av3 = avg_volatility(last_3_days) | |
av5 = avg_volatility(last_5_days) | |
av7 = avg_volatility(last_7_days) | |
av10 = avg_volatility(last_10_days) | |
av20 = avg_volatility(last_20_days) | |
av60 = avg_volatility(last_60_days) | |
ma5 = row['ma5'] | |
ma10 = row['ma10'] | |
ma20 = row['ma20'] | |
ma60 = row['ma60'] | |
v_ma5 = row['ma_v_5'] | |
v_ma10 = row['ma_v_10'] | |
v_ma20 = row['ma_v_20'] | |
v_ma60 = row['ma_v_60'] | |
close = row['close'] | |
volume = row['vol'] | |
pct_chg = row['pct_chg'] | |
pr_5c = ma5 / close | |
pr_510 = ma5 / ma10 | |
pr_1020 = ma10 / ma20 | |
pr_2060 = ma20 / ma60 | |
vr_5c = v_ma5 / volume | |
vr_510 = v_ma5 / v_ma10 | |
vr_1020 = v_ma10 / v_ma20 | |
vr_2060 = v_ma20 / v_ma60 | |
pc1 = avg_pct_chg(last_day) | |
pc3 = avg_pct_chg(last_3_days) | |
pc5 = avg_pct_chg(last_5_days) | |
pc7 = avg_pct_chg(last_7_days) | |
pc10 = avg_pct_chg(last_10_days) | |
pc20 = avg_pct_chg(last_20_days) | |
pc60 = avg_pct_chg(last_60_days) | |
cur = con.cursor() | |
data = '[{"md10":%s, "md20":%s, "md30":%s, "md60":%s, "mp10":%s, "mp20":%s, \
"mp30":%s, "mp60":%s, "udvr10":%s, "udvr20":%s, "udvr30":%s, "udvr60":%s, "udpr10":%s, "udpr20":%s, \ | |
"udpr30":%s, "udpr60":%s, "av1":%s, "av3":%s, "av5":%s, "av7":%s, "av10":%s, "av20":%s, "av60":%s, \ | |
"pc1":%s, "pc3":%s, "pc5":%s, "pc7":%s, "pc10":%s, "pc20":%s, "pc60":%s,\ | |
"pr_5c":%s, "pr_510":%s, "pr_1020":%s, "pr_2060":%s,\ | |
"vr_5c":%s, "vr_510":%s, "vr_1020":%s, "vr_2060":%s }]' % \ | |
(md10, | |
md20, | |
md30, | |
md60, | |
mp10, | |
mp20, | |
mp30, | |
mp60, | |
udvr10, | |
udvr20, | |
udvr30, | |
udvr60, | |
udpr10, | |
udpr20, | |
udpr30, | |
udpr60, | |
av1, | |
av3, | |
av5, | |
av7, | |
av10, | |
av20, | |
av60, | |
pc1, | |
pc3, | |
pc5, | |
pc7, | |
pc10, | |
pc20, | |
pc60, | |
pr_5c, | |
pr_510, | |
pr_1020, | |
pr_2060, | |
vr_5c, | |
vr_510, | |
vr_1020, | |
vr_2060) | |
#print(data) | |
resp = requests.post(url, data=data, headers=headers) | |
if resp.status_code != 200: | |
print(resp.content) | |
else: | |
#print(resp.content) | |
value = float(str(resp.content).split(":")[1].split("}")[0]) | |
if value > 7 or value < -5: | |
query = "'%s', '%s', '%s'" % (row['trade_date'], stock, value) | |
predict_results.append(query) | |
print(query) | |
print('%s finish' % (stock)) | |
print(predict_results) | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
# Obtain a database connection to the MySQL instance | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
cur = con.cursor() | |
for q in predict_results: | |
query = "INSERT IGNORE INTO predict_short_hk (date, code, value) VALUES (%s)" % (q) | |
print(query) | |
cur.execute(query) | |
con.commit() | |
con.close() | |
print("Done") |
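# The HK scripts import sma() from factor.py, which is not shown in this
# section. A minimal sketch of what the call sites imply (a rolling mean of
# df[val_name] over n rows, stored under the new column name); the real
# factor.sma may differ.
def sma(df, col, n=5, val_name='close'):
    df = df.copy()
    df[col] = df[val_name].rolling(window=n).mean()  # simple moving average
    return df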
import warnings | |
from factor import * | |
import pandas_datareader.data as web | |
from datetime import datetime | |
warnings.simplefilter(action='ignore') | |
import requests | |
import MySQLdb as mdb | |
url = 'http://pai-eas-vpc.cn-shanghai.aliyuncs.com/api/predict/short_us' | |
headers = {"Authorization": 'OGNjNjQyNjlmNjFlMzEzMTIwNTc5NjY0MWE3MGU3Y2QxZmNiMzExZA=='} | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock from stock_us limit 1000" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
predict_results = [] | |
date = 0 | |
start = datetime(2019, 10, 1) | |
end = datetime(2020, 12, 31) | |
for stock in obtain_stock_basic(): | |
try: | |
df = web.DataReader(stock, 'iex', start, end, access_key='pk_88d98ab1d0344ceb8184b898313a18cc') | |
if df is None or len(df) < 60: | |
continue | |
df['pct_chg'] = df['close'].pct_change() | |
df = sma(df, "ma5", n=5) | |
df = sma(df, "ma10", n=10) | |
df = sma(df, "ma20", n=20) | |
df = sma(df, "ma60", n=60) | |
df = sma(df, "ma_v_5", n=5, val_name='volume') | |
df = sma(df, "ma_v_10", n=10, val_name='volume') | |
df = sma(df, "ma_v_20", n=20, val_name='volume') | |
df = sma(df, "ma_v_60", n=60, val_name='volume') | |
for i in range(len(df) - 1, len(df)): | |
row = df.iloc[i] | |
if math.isnan(row['ma60']) or math.isnan(row['ma20']) or math.isnan(row['ma10']): | |
continue | |
last_60_days = df.tail(60) | |
last_30_days = last_60_days.tail(30) | |
last_20_days = last_30_days.tail(20) | |
last_15_days = last_20_days.tail(15) | |
last_10_days = last_15_days.tail(10) | |
last_7_days = last_10_days.tail(7) | |
last_5_days = last_10_days.tail(5) | |
last_3_days = last_10_days.tail(3) | |
last_day = last_3_days.tail(1) | |
md10 = max_drawback(last_10_days) | |
md20 = max_drawback(last_20_days) | |
md30 = max_drawback(last_30_days) | |
md60 = max_drawback(last_60_days) | |
mp10 = max_profit(last_10_days) | |
mp20 = max_profit(last_20_days) | |
mp30 = max_profit(last_30_days) | |
mp60 = max_profit(last_60_days) | |
udvr10 = up_down_volume_ratio_v2(last_10_days) | |
udvr20 = up_down_volume_ratio_v2(last_20_days) | |
udvr30 = up_down_volume_ratio_v2(last_30_days) | |
udvr60 = up_down_volume_ratio_v2(last_60_days) | |
udpr10 = up_down_pct_chg_ratio(last_10_days) | |
udpr20 = up_down_pct_chg_ratio(last_20_days) | |
udpr30 = up_down_pct_chg_ratio(last_30_days) | |
udpr60 = up_down_pct_chg_ratio(last_60_days) | |
av1 = avg_volatility(last_day) | |
av3 = avg_volatility(last_3_days) | |
av5 = avg_volatility(last_5_days) | |
av7 = avg_volatility(last_7_days) | |
av10 = avg_volatility(last_10_days) | |
av20 = avg_volatility(last_20_days) | |
av60 = avg_volatility(last_60_days) | |
ma5 = row['ma5'] | |
ma10 = row['ma10'] | |
ma20 = row['ma20'] | |
ma60 = row['ma60'] | |
v_ma5 = row['ma_v_5'] | |
v_ma10 = row['ma_v_10'] | |
v_ma20 = row['ma_v_20'] | |
v_ma60 = row['ma_v_60'] | |
close = row['close'] | |
volume = row['volume'] | |
pct_chg = row['pct_chg'] | |
pr_5c = ma5 / close | |
pr_510 = ma5 / ma10 | |
pr_1020 = ma10 / ma20 | |
pr_2060 = ma20 / ma60 | |
vr_5c = v_ma5 / volume | |
vr_510 = v_ma5 / v_ma10 | |
vr_1020 = v_ma10 / v_ma20 | |
vr_2060 = v_ma20 / v_ma60 | |
pc1 = avg_pct_chg(last_day) | |
pc3 = avg_pct_chg(last_3_days) | |
pc5 = avg_pct_chg(last_5_days) | |
pc7 = avg_pct_chg(last_7_days) | |
pc10 = avg_pct_chg(last_10_days) | |
pc20 = avg_pct_chg(last_20_days) | |
pc60 = avg_pct_chg(last_60_days) | |
cur = con.cursor() | |
data = '[{"md10":%s, "md20":%s, "md30":%s, "md60":%s, "mp10":%s, "mp20":%s, \
"mp30":%s, "mp60":%s, "udvr10":%s, "udvr20":%s, "udvr30":%s, "udvr60":%s, "udpr10":%s, "udpr20":%s, \ | |
"udpr30":%s, "udpr60":%s, "av1":%s, "av3":%s, "av5":%s, "av7":%s, "av10":%s, "av20":%s, "av60":%s, \ | |
"pc1":%s, "pc3":%s, "pc5":%s, "pc7":%s, "pc10":%s, "pc20":%s, "pc60":%s,\ | |
"pr_5c":%s, "pr_510":%s, "pr_1020":%s, "pr_2060":%s,\ | |
"vr_5c":%s, "vr_510":%s, "vr_1020":%s, "vr_2060":%s }]' % \ | |
(md10, | |
md20, | |
md30, | |
md60, | |
mp10, | |
mp20, | |
mp30, | |
mp60, | |
udvr10, | |
udvr20, | |
udvr30, | |
udvr60, | |
udpr10, | |
udpr20, | |
udpr30, | |
udpr60, | |
av1, | |
av3, | |
av5, | |
av7, | |
av10, | |
av20, | |
av60, | |
pc1, | |
pc3, | |
pc5, | |
pc7, | |
pc10, | |
pc20, | |
pc60, | |
pr_5c, | |
pr_510, | |
pr_1020, | |
pr_2060, | |
vr_5c, | |
vr_510, | |
vr_1020, | |
vr_2060) | |
print(data) | |
resp = requests.post(url, data=data, headers=headers) | |
if resp.status_code != 200: | |
print(resp.content) | |
else: | |
# print(resp.content) | |
value = float(str(resp.content).split(":")[1].split("}")[0]) | |
if value > 2 or value < -1: | |
query = "'%s', '%s', '%s'" % (row.name, stock, value) | |
predict_results.append(query) | |
print(query) | |
print('%s finish' % (stock)) | |
except Exception as e: | |
print(e) | |
print('%s error' % (stock)) | |
print(predict_results) | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
# Obtain a database connection to the MySQL instance | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
cur = con.cursor() | |
for q in predict_results: | |
query = "INSERT IGNORE INTO predict_short_us (date, code, value) VALUES (%s)" % (q) | |
print(query) | |
cur.execute(query) | |
con.commit() | |
con.close() | |
print("Done") |
import requests | |
url = 'http://pai-eas-vpc.cn-shanghai.aliyuncs.com/api/predict/daily_lb' | |
headers = {"Authorization": 'MGQ1MDgxNTExYjllZTg2MDk3YmM4MWQwMzhkNTdjZDZlZWRmNTMwOA=='} | |
data = '[{"open":7.63, "close":7.43, "high":7.79, "low":7.43, "volume":122927, "pct_chg":-4.12903225806, "ma5":7.92, "ma20":8.84, "ma50":7.04,"ma200":5.79, "v_ma5":152690.88, "v_ma20":357717.28, "v_ma50":249808.42, "v_ma200":78657.53, "turnover_rate_f":6.4754, "circ_mv":305770.8073, "total_mv":306844.27, "volume_ratio":0.72, "pe":81.3303, "pb":3.1623, "ps":3}]' | |
resp = requests.post(url, data=data, headers=headers) | |
if resp.status_code != 200: | |
print(resp.content) | |
else: | |
print(resp.content) | |
print(str(resp.content).split(":")[1].split("}")[0]) | |
print(str(resp.content).split(":}")) |
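# The string splitting above is brittle; since the service replies with JSON,
# resp.json() recovers the same number directly. The key name below is an
# assumption about the deployed model's response shape.
payload = resp.json()                      # e.g. {"prediction": 1.23}
value = float(next(iter(payload.values())))
print(value)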
import requests | |
url = 'http://pai-eas-vpc.cn-shanghai.aliyuncs.com/api/predict/daily_lb' | |
headers = {"Authorization": 'MGQ1MDgxNTExYjllZTg2MDk3YmM4MWQwMzhkNTdjZDZlZWRmNTMwOA=='} | |
url_month = 'http://pai-eas-vpc.cn-shanghai.aliyuncs.com/api/predict/month' | |
headers_month = {"Authorization": 'MzM2MGRiMzQ5ZmM0OWUzZTZhMzYzMjE4ZWM4ZGUwN2MxMDA5MjQ0YQ=='} | |
data = '[{"open":7.63, "close":7.43, "high":7.79, "low":7.43, "volume":122927, "pct_chg":-4.12903225806, "ma5":7.92, "ma20":8.84, "ma50":7.04,"ma200":5.79, "v_ma5":152690.88, "v_ma20":357717.28, "v_ma50":249808.42, "v_ma200":78657.53, "turnover_rate_f":6.4754, "circ_mv":305770.8073, "total_mv":306844.27, "volume_ratio":0.72, "pe":81.3303, "pb":3.1623, "ps":3}]' | |
#resp = requests.post(url, data=data, headers=headers) | |
resp = requests.post(url_month, data=data, headers=headers_month) | |
if resp.status_code != 200: | |
print(resp.content) | |
else: | |
print(resp.content) | |
print(str(resp.content).split(":")[1].split("}")[0]) | |
print(str(resp.content).split(":}")) |
from datetime import datetime | |
from hurst import * | |
import tushare as ts | |
import warnings | |
from factor import * | |
warnings.simplefilter(action='ignore') | |
import time
import MySQLdb as mdb
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock, list_date, industry, market, is_hs from stock_basic" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0], datetime.combine(d[1], datetime.min.time()), d[2], d[3], d[4]) for d in data] | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
start_date = '20130101' | |
end_date = '20191231' | |
for (stock, list_date, industry, market, is_hs) in obtain_stock_basic(): | |
df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date = start_date, end_date = end_date, ma=[50, 150, 200]) | |
if df is None or len(df) < 260: | |
continue | |
# print(df) | |
    basic_df = None
while True: | |
try: | |
basic_df = pro.daily_basic(ts_code=stock, | |
fields='trade_date, turnover_rate_f,volume_ratio,pe,pe_ttm, pb,ps,circ_mv, total_mv', | |
start_date=start_date, end_date=end_date) | |
break | |
except: | |
print('%s error' % stock) | |
time.sleep(5) | |
if basic_df is None or len(basic_df) == 0: | |
continue | |
basic_df = basic_df.set_index('trade_date') | |
for i in range(15, len(df) - 260): | |
row = df.iloc[i] | |
if row['trade_date'] is None: | |
continue | |
# print(row) | |
if math.isnan(row['ma200']) or math.isnan(row['ma50']) or math.isnan(row['ma150']): | |
continue | |
basic_row = basic_df.loc[row['trade_date']] | |
#print(basic_row) | |
if basic_row['pe'] is None or \ | |
basic_row['pb'] is None or \ | |
basic_row['ps'] is None or \ | |
math.isnan(basic_row['pe']) or \ | |
math.isnan(basic_row['pe_ttm']) or \ | |
math.isnan(basic_row['pb']) or math.isnan(basic_row['ps']): | |
continue | |
trade_date = datetime.strptime(row['trade_date'], "%Y%m%d") | |
list_months = int((trade_date - list_date).days / 30) | |
ma50 = row['ma50'] | |
ma150 = row['ma150'] | |
ma200 = row['ma200'] | |
v_ma50 = row['ma_v_50'] | |
v_ma150 = row['ma_v_150'] | |
v_ma200 = row['ma_v_200'] | |
close = row['close'] | |
volume = row['vol'] | |
pct_chg = row['pct_chg'] | |
price_50_150_ratio = ma50 / ma150 | |
price_150_close_ratio = ma150 / close | |
price_200_close_ratio = ma200 / close | |
v_50_volume_ratio = v_ma50 / volume | |
v_200_volume_ratio = v_ma200 / volume | |
v_50_150_ratio = v_ma50 / v_ma150 | |
volatility = (row['high'] / row['low'] - 1) * 100 | |
df_30 = df[i: i + 30] | |
up_days_200 = days_above(df_30, 'ma200') | |
up_days_50 = days_above(df_30, 'ma50') | |
df_260 = df[i: i + 260] | |
pct_below_highest = 1 - close / df_260['close'].max() | |
pct_up_lowest = close / df_260['close'].min() - 1 | |
df_last_week = df[i: i + 5] | |
up_days_last_week = up_days(df_last_week) | |
close_up_days_last_week = close_up_days(df_last_week) | |
df_last_two_week = df[i: i + 10] | |
up_days_last_two_week = up_days(df_last_two_week) | |
close_up_days_last_two_week = close_up_days(df_last_two_week) | |
up_down_v_ratio_last_two_week = up_down_v_ratio(df_last_two_week) | |
df_last_15_days = df[i: i + 15] | |
up_days_last_15_days = up_days(df_last_15_days) | |
pct_chg_last_15_days = df_last_15_days['pct_chg'].mean() | |
up_down_v_ratio_last_15_days = up_down_v_ratio(df_last_15_days) | |
days_15_avg_volatility = avg_volatility(df_last_15_days) | |
        # Rows come newest-first from pro_bar, so iloc[i-15] is 15 trading days
        # in the future: this is a forward return (a label, not a feature)
        price_chg_15_days = ((df.iloc[i-15]['close'] / close) - 1) * 100
cur = con.cursor() | |
column_str = """date, code, list_month, market, is_hs, 50_150_ratio, 150_close_ratio, 200_close_ratio, 50_volume_ratio, pct_chg, volatility, 200_up_days, 50_up_days, pct_below_highest, pct_up_lowest, industry, \ | |
200_volume_ratio, 50_150_v_ratio, turnover_rate_f, circ_mv, total_mv, volume_ratio, pe, pe_ttm, pb, ps, \ | |
up_days_last_week, up_days_last_two_week, close_up_days_last_week, close_up_days_last_two_week,\ | |
up_down_v_ratio_last_two_week, up_down_v_ratio_last_15_days, pct_chg_last_15_days, up_days_last_15_days, 15days_avg_volatility, 15days_price_chg""" | |
final_str = "REPLACE INTO sepa (%s) VALUES ('%s', '%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s','%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')" % \ | |
(column_str, | |
row['trade_date'], | |
stock, | |
list_months, | |
market, | |
is_hs, | |
price_50_150_ratio, | |
price_150_close_ratio, | |
price_200_close_ratio, | |
v_50_volume_ratio, | |
pct_chg, | |
volatility, | |
up_days_200, | |
up_days_50, | |
pct_below_highest, | |
pct_up_lowest, | |
industry, | |
v_200_volume_ratio, | |
v_50_150_ratio, | |
basic_row['turnover_rate_f'], | |
basic_row['circ_mv'], | |
basic_row['total_mv'], | |
basic_row['volume_ratio'], | |
basic_row['pe'], | |
basic_row['pe_ttm'], | |
basic_row['pb'], | |
basic_row['ps'], | |
up_days_last_week, | |
up_days_last_two_week, | |
close_up_days_last_week, | |
close_up_days_last_two_week, | |
up_down_v_ratio_last_two_week, | |
up_down_v_ratio_last_15_days, | |
pct_chg_last_15_days, | |
up_days_last_15_days, | |
days_15_avg_volatility, | |
price_chg_15_days) | |
#print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
print('%s finish' % (stock)) | |
con.commit() | |
con.close() | |
print("Done") |
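# days_above(), up_days() and close_up_days() also come from factor.py and are
# not shown here. Hedged sketches consistent with the call sites above (column
# names follow tushare's pro_bar output); the real implementations may differ.
def days_above(df, ma_col):
    # number of rows whose close sits above the given moving-average column
    return int((df['close'] > df[ma_col]).sum())

def up_days(df):
    # number of rows with a positive daily pct_chg
    return int((df['pct_chg'] > 0).sum())

def close_up_days(df):
    # number of rows that closed above their open
    return int((df['close'] > df['open']).sum())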
from datetime import datetime | |
from hurst import * | |
import tushare as ts | |
import warnings | |
from factor import * | |
warnings.simplefilter(action='ignore') | |
import MySQLdb as mdb | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock, list_date, industry, market, is_hs from stock_basic WHERE stock LIKE '%SZ'" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0], datetime.combine(d[1], datetime.min.time()), d[2], d[3], d[4]) for d in data] | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
start_date = '20130101' | |
end_date = '20191231' | |
for (stock, list_date, industry, market, is_hs) in obtain_stock_basic(): | |
df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date = start_date, end_date = end_date, ma=[50, 150, 200]) | |
if df is None or len(df) < 260: | |
continue | |
#print(df) | |
for i in range(15, len(df) - 260): | |
row = df.iloc[i] | |
if row['trade_date'] is None: | |
continue | |
if math.isnan(row['ma200']) or math.isnan(row['ma50']) or math.isnan(row['ma150']): | |
continue | |
#print(row['trade_date']) | |
trade_date = datetime.strptime(row['trade_date'], "%Y%m%d") | |
new_year_date = trade_date.replace(month=1, day=1) | |
days = (trade_date - new_year_date).days | |
#print("%s %s %d %s" %(trade_date, new_year_date, days, row['trade_date'])) | |
list_months = int((trade_date - list_date).days / 30) | |
df_last_5_days = df[i: i + 5] | |
df_last_10_days = df[i: i + 10] | |
df_last_15_days = df[i: i + 15] | |
df_last_30_days = df[i: i + 30] | |
df_last_60_days = df[i: i + 60] | |
up_down_v_ratio_last_two_week = up_down_v_ratio_v2(df_last_10_days) | |
up_down_v_ratio_last_15_days = up_down_v_ratio_v2(df_last_15_days) | |
up_down_v_ratio_last_30_days = up_down_v_ratio_v2(df_last_30_days) | |
days_5_avg_volatility = avg_volatility(df_last_5_days) | |
days_15_avg_volatility = avg_volatility(df_last_15_days) | |
days_30_avg_volatility = avg_volatility(df_last_30_days) | |
days_60_avg_volatility = avg_volatility(df_last_60_days) | |
up_down_ratio_last_10_days = up_down_pct_chg_ratio(df_last_10_days) | |
up_down_ratio_last_15_days = up_down_pct_chg_ratio(df_last_15_days) | |
up_down_ratio_last_30_days = up_down_pct_chg_ratio(df_last_30_days) | |
cur = con.cursor() | |
query = " UPDATE sepa SET days = {}, up_down_v_ratio_last_two_week = {}, up_down_v_ratio_last_15_days = {}, up_down_v_ratio_last_30_days = {}, " \ | |
"5days_avg_volatility = {}, 15days_avg_volatility = {}, 30days_avg_volatility = {}, 60days_avg_volatility = {}, " \ | |
"up_down_ratio_last_10_days = {}, up_down_ratio_last_15_days = {}, up_down_ratio_last_30_days = {}, list_month= {} WHERE date = '{}' AND code = '{}'"\ | |
.format(days, up_down_v_ratio_last_two_week, up_down_v_ratio_last_15_days, up_down_v_ratio_last_30_days, | |
days_5_avg_volatility, days_15_avg_volatility, days_30_avg_volatility, days_60_avg_volatility, | |
up_down_ratio_last_10_days, up_down_ratio_last_15_days, up_down_ratio_last_30_days, list_months, row.trade_date, stock) | |
#print(query) | |
cur.execute(query) | |
con.commit() | |
print('%s finish' % (stock)) | |
con.commit() | |
con.close() | |
print("Done") |
import tushare as ts | |
import warnings | |
from factor import * | |
warnings.simplefilter(action='ignore') | |
import MySQLdb as mdb | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock from stock_hk_basic where amount > 30000000 limit 160, 500" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
start_date = '20170101' | |
end_date = '20191231' | |
for stock in obtain_stock_basic(): | |
try: | |
        df = pro.hk_daily(ts_code=stock, start_date=start_date, end_date=end_date)  # hk_daily takes no ma argument; MAs are computed below
    except:
        print("%s error" % stock)
        continue  # otherwise df from the previous iteration (or nothing) would be used
if df is None or len(df) < 60: | |
continue | |
df['ma5'] = ma(df, n=5) | |
df['ma10'] = ma(df, n=10) | |
df['ma20'] = ma(df, n=20) | |
df['ma60'] = ma(df, n=60) | |
df['ma_v_5'] = ma(df, n=5, val_name='vol') | |
df['ma_v_10'] = ma(df, n=10, val_name='vol') | |
df['ma_v_20'] = ma(df, n=20, val_name='vol') | |
df['ma_v_60'] = ma(df, n=60, val_name='vol') | |
#print(df.head(10)) | |
for i in range(6, len(df) - 60): | |
row = df.iloc[i] | |
if row['trade_date'] is None: | |
continue | |
if math.isnan(row['ma60']) or math.isnan(row['ma20']) or math.isnan(row['ma10']): | |
continue | |
# print(row['trade_date']) | |
last_60_days = df[i: i + 60][::-1] | |
last_30_days = last_60_days.tail(30) | |
last_20_days = last_30_days.tail(20) | |
last_15_days = last_20_days.tail(15) | |
last_10_days = last_15_days.tail(10) | |
last_7_days = last_10_days.tail(7) | |
last_5_days = last_10_days.tail(5) | |
last_3_days = last_10_days.tail(3) | |
last_day = last_3_days.tail(1) | |
md10 = max_drawback(last_10_days) | |
md20 = max_drawback(last_20_days) | |
md30 = max_drawback(last_30_days) | |
md60 = max_drawback(last_60_days) | |
mp10 = max_profit(last_10_days) | |
mp20 = max_profit(last_20_days) | |
mp30 = max_profit(last_30_days) | |
mp60 = max_profit(last_60_days) | |
udvr10 = up_down_v_ratio_v2(last_10_days) | |
udvr20 = up_down_v_ratio_v2(last_20_days) | |
udvr30 = up_down_v_ratio_v2(last_30_days) | |
udvr60 = up_down_v_ratio_v2(last_60_days) | |
udpr10 = up_down_pct_chg_ratio(last_10_days) | |
udpr20 = up_down_pct_chg_ratio(last_20_days) | |
udpr30 = up_down_pct_chg_ratio(last_30_days) | |
udpr60 = up_down_pct_chg_ratio(last_60_days) | |
av1 = avg_volatility(last_day) | |
av3 = avg_volatility(last_3_days) | |
av5 = avg_volatility(last_5_days) | |
av7 = avg_volatility(last_7_days) | |
av10 = avg_volatility(last_10_days) | |
av20 = avg_volatility(last_20_days) | |
av60 = avg_volatility(last_60_days) | |
ma5 = row['ma5'] | |
ma10 = row['ma10'] | |
ma20 = row['ma20'] | |
ma60 = row['ma60'] | |
v_ma5 = row['ma_v_5'] | |
v_ma10 = row['ma_v_10'] | |
v_ma20 = row['ma_v_20'] | |
v_ma60 = row['ma_v_60'] | |
close = row['close'] | |
volume = row['vol'] | |
pct_chg = row['pct_chg'] | |
pr_5c = ma5 / close | |
pr_510 = ma5 / ma10 | |
pr_1020 = ma10 / ma20 | |
pr_2060 = ma20 / ma60 | |
vr_5c = v_ma5 / volume | |
vr_510 = v_ma5 / v_ma10 | |
vr_1020 = v_ma10 / v_ma20 | |
vr_2060 = v_ma20 / v_ma60 | |
pc1 = avg_pct_chg(last_day) | |
pc3 = avg_pct_chg(last_3_days) | |
pc5 = avg_pct_chg(last_5_days) | |
pc7 = avg_pct_chg(last_7_days) | |
pc10 = avg_pct_chg(last_10_days) | |
pc20 = avg_pct_chg(last_20_days) | |
pc60 = avg_pct_chg(last_60_days) | |
        # Rows are newest-first: iloc[i-6] is six sessions ahead and iloc[i-1] the
        # next session, so target is the forward five-day open-to-open return in percent
        target = round((df.iloc[i - 6]['open'] / df.iloc[i - 1]['open'] - 1) * 100, 2)
cur = con.cursor() | |
column_str = """date, code, md10,md20,md30,md60,mp10,mp20,mp30,mp60,\ | |
udvr10,udvr20,udvr30,udvr60,udpr10,udpr20,udpr30,udpr60,\ | |
av1,av3,av5,av7,av10,av20,av60,pc1,pc3,pc5,pc7,pc10,pc20,pc60,\ | |
pr_5c,pr_510,pr_1020,pr_2060,vr_5c,vr_510,vr_1020,vr_2060,target""" | |
final_str = "REPLACE INTO short_hk (%s) VALUES ('%s', '%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s',\ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s', '%s', '%s', '%s')" % \ | |
(column_str, | |
row['trade_date'], | |
stock, | |
md10, | |
md20, | |
md30, | |
md60, | |
mp10, | |
mp20, | |
mp30, | |
mp60, | |
udvr10, | |
udvr20, | |
udvr30, | |
udvr60, | |
udpr10, | |
udpr20, | |
udpr30, | |
udpr60, | |
av1, | |
av3, | |
av5, | |
av7, | |
av10, | |
av20, | |
av60, | |
pc1, | |
pc3, | |
pc5, | |
pc7, | |
pc10, | |
pc20, | |
pc60, | |
pr_5c, | |
pr_510, | |
pr_1020, | |
pr_2060, | |
vr_5c, | |
vr_510, | |
vr_1020, | |
vr_2060, | |
target) | |
#print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
print('%s finish' % (stock)) | |
con.commit() | |
con.close() | |
print("Done") |
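# max_drawback() and max_profit() are imported from factor.py and not shown.
# Plausible sketches, assuming the window frames arrive oldest-first (the
# loops reverse their slices with [::-1] before calling them):
def max_drawback(df):
    # worst peak-to-trough decline of close within the window, in percent
    running_max = df['close'].cummax()
    return float(((df['close'] / running_max - 1) * 100).min())

def max_profit(df):
    # best trough-to-peak rise of close within the window, in percent
    running_min = df['close'].cummin()
    return float(((df['close'] / running_min - 1) * 100).max())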
from datetime import datetime | |
import tushare as ts | |
import warnings | |
from factor import * | |
warnings.simplefilter(action='ignore') | |
import MySQLdb as mdb | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock, list_date, industry, market, is_hs from stock_basic" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0], datetime.combine(d[1], datetime.min.time()), d[2], d[3], d[4]) for d in data] | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
start_date = '20170101' | |
end_date = '20191231' | |
for (stock, list_date, industry, market, is_hs) in obtain_stock_basic(): | |
df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date=start_date, end_date=end_date, ma=[5, 10, 20, 60]) | |
if df is None or len(df) < 60: | |
continue | |
# print(df) | |
for i in range(470, len(df) - 60): | |
row = df.iloc[i] | |
if row['trade_date'] is None: | |
continue | |
if math.isnan(row['ma60']) or math.isnan(row['ma20']) or math.isnan(row['ma10']): | |
continue | |
# print(row['trade_date']) | |
last_60_days = df[i: i + 60][::-1] | |
last_30_days = last_60_days.tail(30) | |
last_20_days = last_30_days.tail(20) | |
last_15_days = last_20_days.tail(15) | |
last_10_days = last_15_days.tail(10) | |
last_7_days = last_10_days.tail(7) | |
last_5_days = last_10_days.tail(5) | |
last_3_days = last_10_days.tail(3) | |
last_day = last_3_days.tail(1) | |
md10 = max_drawback(last_10_days) | |
md20 = max_drawback(last_20_days) | |
md30 = max_drawback(last_30_days) | |
md60 = max_drawback(last_60_days) | |
mp10 = max_profit(last_10_days) | |
mp20 = max_profit(last_20_days) | |
mp30 = max_profit(last_30_days) | |
mp60 = max_profit(last_60_days) | |
udvr10 = up_down_v_ratio_v2(last_10_days) | |
udvr20 = up_down_v_ratio_v2(last_20_days) | |
udvr30 = up_down_v_ratio_v2(last_30_days) | |
udvr60 = up_down_v_ratio_v2(last_60_days) | |
udpr10 = up_down_pct_chg_ratio(last_10_days) | |
udpr20 = up_down_pct_chg_ratio(last_20_days) | |
udpr30 = up_down_pct_chg_ratio(last_30_days) | |
udpr60 = up_down_pct_chg_ratio(last_60_days) | |
av1 = avg_volatility(last_day) | |
av3 = avg_volatility(last_3_days) | |
av5 = avg_volatility(last_5_days) | |
av7 = avg_volatility(last_7_days) | |
av10 = avg_volatility(last_10_days) | |
av20 = avg_volatility(last_20_days) | |
av60 = avg_volatility(last_60_days) | |
ma5 = row['ma5'] | |
ma10 = row['ma10'] | |
ma20 = row['ma20'] | |
ma60 = row['ma60'] | |
v_ma5 = row['ma_v_5'] | |
v_ma10 = row['ma_v_10'] | |
v_ma20 = row['ma_v_20'] | |
v_ma60 = row['ma_v_60'] | |
close = row['close'] | |
volume = row['vol'] | |
pct_chg = row['pct_chg'] | |
pr_5c = ma5 / close | |
pr_510 = ma5 / ma10 | |
pr_1020 = ma10 / ma20 | |
pr_2060 = ma20 / ma60 | |
vr_5c = v_ma5 / volume | |
vr_510 = v_ma5 / v_ma10 | |
vr_1020 = v_ma10 / v_ma20 | |
vr_2060 = v_ma20 / v_ma60 | |
pc1 = avg_pct_chg(last_day) | |
pc3 = avg_pct_chg(last_3_days) | |
pc5 = avg_pct_chg(last_5_days) | |
pc7 = avg_pct_chg(last_7_days) | |
pc10 = avg_pct_chg(last_10_days) | |
pc20 = avg_pct_chg(last_20_days) | |
pc60 = avg_pct_chg(last_60_days) | |
        # Forward three-day open-to-open return in percent (rows are newest-first)
        target = round((df.iloc[i - 4]['open'] / df.iloc[i - 1]['open'] - 1) * 100, 2)
cur = con.cursor() | |
column_str = """date, code, md10,md20,md30,md60,mp10,mp20,mp30,mp60,\ | |
udvr10,udvr20,udvr30,udvr60,udpr10,udpr20,udpr30,udpr60,\ | |
av1,av3,av5,av7,av10,av20,av60,pc1,pc3,pc5,pc7,pc10,pc20,pc60,\ | |
pr_5c,pr_510,pr_1020,pr_2060,vr_5c,vr_510,vr_1020,vr_2060,target""" | |
final_str = "REPLACE INTO short (%s) VALUES ('%s', '%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s',\ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s', '%s', '%s', '%s')" % \ | |
(column_str, | |
row['trade_date'], | |
stock, | |
md10, | |
md20, | |
md30, | |
md60, | |
mp10, | |
mp20, | |
mp30, | |
mp60, | |
udvr10, | |
udvr20, | |
udvr30, | |
udvr60, | |
udpr10, | |
udpr20, | |
udpr30, | |
udpr60, | |
av1, | |
av3, | |
av5, | |
av7, | |
av10, | |
av20, | |
av60, | |
pc1, | |
pc3, | |
pc5, | |
pc7, | |
pc10, | |
pc20, | |
pc60, | |
pr_5c, | |
pr_510, | |
pr_1020, | |
pr_2060, | |
vr_5c, | |
vr_510, | |
vr_1020, | |
vr_2060, | |
target) | |
#print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
print('%s finish' % (stock)) | |
con.commit() | |
con.close() | |
print("Done") |
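# The remaining factor.py helpers used throughout these loops, sketched under
# the same caveat (tushare columns vol, pct_chg, high, low; real code may differ):
def up_down_v_ratio_v2(df):
    # volume traded on up days over volume traded on down days
    up_vol = df.loc[df['pct_chg'] > 0, 'vol'].sum()
    down_vol = df.loc[df['pct_chg'] < 0, 'vol'].sum()
    return float(up_vol / down_vol) if down_vol > 0 else 0.0

def up_down_pct_chg_ratio(df):
    # summed gains on up days over summed losses on down days
    gains = df.loc[df['pct_chg'] > 0, 'pct_chg'].sum()
    losses = -df.loc[df['pct_chg'] < 0, 'pct_chg'].sum()
    return float(gains / losses) if losses > 0 else 0.0

def avg_volatility(df):
    # mean daily high/low range, in percent
    return float(((df['high'] / df['low'] - 1) * 100).mean())

def avg_pct_chg(df):
    # mean daily percentage change
    return float(df['pct_chg'].mean())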
import tushare as ts | |
import warnings | |
from factor import * | |
warnings.simplefilter(action='ignore') | |
import MySQLdb as mdb | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock from stock_hk_basic where amount > 30000000 limit 0, 160" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
start_date = '20170101' | |
end_date = '20191231' | |
for stock in obtain_stock_basic(): | |
try: | |
        df = pro.hk_daily(ts_code=stock, start_date=start_date, end_date=end_date)  # hk_daily takes no ma argument; MAs are computed below
    except:
        print("%s error" % stock)
        continue  # otherwise df from the previous iteration (or nothing) would be used
if df is None or len(df) < 60: | |
continue | |
rdf = df[::-1] | |
rdf = sma(rdf, "ma5", n=5) | |
rdf = sma(rdf, "ma10", n=10) | |
rdf = sma(rdf, "ma20", n=20) | |
rdf = sma(rdf, "ma60", n=60) | |
rdf = sma(rdf, "ma_v_5", n=5, val_name='vol') | |
rdf = sma(rdf, "ma_v_10", n=10, val_name='vol') | |
rdf = sma(rdf, "ma_v_20", n=20, val_name='vol') | |
rdf = sma(rdf, "ma_v_60", n=60, val_name='vol') | |
df = rdf[::-1] | |
#print(df.head(10)) | |
for i in range(6, len(df) - 60): | |
row = df.iloc[i] | |
if row['trade_date'] is None: | |
continue | |
if math.isnan(row['ma60']) or math.isnan(row['ma20']) or math.isnan(row['ma10']): | |
continue | |
# print(row['trade_date']) | |
last_60_days = df[i: i + 60][::-1] | |
last_30_days = last_60_days.tail(30) | |
last_20_days = last_30_days.tail(20) | |
last_15_days = last_20_days.tail(15) | |
last_10_days = last_15_days.tail(10) | |
last_7_days = last_10_days.tail(7) | |
last_5_days = last_10_days.tail(5) | |
last_3_days = last_10_days.tail(3) | |
last_day = last_3_days.tail(1) | |
md10 = max_drawback(last_10_days) | |
md20 = max_drawback(last_20_days) | |
md30 = max_drawback(last_30_days) | |
md60 = max_drawback(last_60_days) | |
mp10 = max_profit(last_10_days) | |
mp20 = max_profit(last_20_days) | |
mp30 = max_profit(last_30_days) | |
mp60 = max_profit(last_60_days) | |
udvr10 = up_down_v_ratio_v2(last_10_days) | |
udvr20 = up_down_v_ratio_v2(last_20_days) | |
udvr30 = up_down_v_ratio_v2(last_30_days) | |
udvr60 = up_down_v_ratio_v2(last_60_days) | |
udpr10 = up_down_pct_chg_ratio(last_10_days) | |
udpr20 = up_down_pct_chg_ratio(last_20_days) | |
udpr30 = up_down_pct_chg_ratio(last_30_days) | |
udpr60 = up_down_pct_chg_ratio(last_60_days) | |
av1 = avg_volatility(last_day) | |
av3 = avg_volatility(last_3_days) | |
av5 = avg_volatility(last_5_days) | |
av7 = avg_volatility(last_7_days) | |
av10 = avg_volatility(last_10_days) | |
av20 = avg_volatility(last_20_days) | |
av60 = avg_volatility(last_60_days) | |
ma5 = row['ma5'] | |
ma10 = row['ma10'] | |
ma20 = row['ma20'] | |
ma60 = row['ma60'] | |
v_ma5 = row['ma_v_5'] | |
v_ma10 = row['ma_v_10'] | |
v_ma20 = row['ma_v_20'] | |
v_ma60 = row['ma_v_60'] | |
close = row['close'] | |
volume = row['vol'] | |
pct_chg = row['pct_chg'] | |
pr_5c = ma5 / close | |
pr_510 = ma5 / ma10 | |
pr_1020 = ma10 / ma20 | |
pr_2060 = ma20 / ma60 | |
vr_5c = v_ma5 / volume | |
vr_510 = v_ma5 / v_ma10 | |
vr_1020 = v_ma10 / v_ma20 | |
vr_2060 = v_ma20 / v_ma60 | |
pc1 = avg_pct_chg(last_day) | |
pc3 = avg_pct_chg(last_3_days) | |
pc5 = avg_pct_chg(last_5_days) | |
pc7 = avg_pct_chg(last_7_days) | |
pc10 = avg_pct_chg(last_10_days) | |
pc20 = avg_pct_chg(last_20_days) | |
pc60 = avg_pct_chg(last_60_days) | |
target = round((df.iloc[i - 6]['open'] / df.iloc[i - 1]['open'] - 1) * 100, 2) | |
cur = con.cursor() | |
column_str = """date, code, md10,md20,md30,md60,mp10,mp20,mp30,mp60,\ | |
udvr10,udvr20,udvr30,udvr60,udpr10,udpr20,udpr30,udpr60,\ | |
av1,av3,av5,av7,av10,av20,av60,pc1,pc3,pc5,pc7,pc10,pc20,pc60,\ | |
pr_5c,pr_510,pr_1020,pr_2060,vr_5c,vr_510,vr_1020,vr_2060,target""" | |
final_str = "REPLACE INTO short_hk (%s) VALUES ('%s', '%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s',\ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s', '%s', '%s', '%s')" % \ | |
(column_str, | |
row['trade_date'], | |
stock, | |
md10, | |
md20, | |
md30, | |
md60, | |
mp10, | |
mp20, | |
mp30, | |
mp60, | |
udvr10, | |
udvr20, | |
udvr30, | |
udvr60, | |
udpr10, | |
udpr20, | |
udpr30, | |
udpr60, | |
av1, | |
av3, | |
av5, | |
av7, | |
av10, | |
av20, | |
av60, | |
pc1, | |
pc3, | |
pc5, | |
pc7, | |
pc10, | |
pc20, | |
pc60, | |
pr_5c, | |
pr_510, | |
pr_1020, | |
pr_2060, | |
vr_5c, | |
vr_510, | |
vr_1020, | |
vr_2060, | |
target) | |
#print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
print('%s finish' % (stock)) | |
con.commit() | |
con.close() | |
print("Done") |
import warnings | |
from factor import * | |
import pandas_datareader.data as web | |
from datetime import datetime | |
import math | |
warnings.simplefilter(action='ignore') | |
import MySQLdb as mdb | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock from stock_us limit 0, 261" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
start = datetime(2017, 1, 1) | |
end = datetime(2020, 1, 21) | |
for stock in obtain_stock_basic(): | |
try: | |
df = web.DataReader(stock, 'iex', start, end, access_key='pk_88d98ab1d0344ceb8184b898313a18cc') | |
if df is None or len(df) < 60: | |
continue | |
df['pct_chg'] = df['close'].pct_change() | |
df = sma(df, "ma5", n=5) | |
df = sma(df, "ma10", n=10) | |
df = sma(df, "ma20", n=20) | |
df = sma(df, "ma60", n=60) | |
df = sma(df, "ma_v_5", n=5, val_name='volume') | |
df = sma(df, "ma_v_10", n=10, val_name='volume') | |
df = sma(df, "ma_v_20", n=20, val_name='volume') | |
df = sma(df, "ma_v_60", n=60, val_name='volume') | |
for i in range(60, len(df) - 6): | |
row = df.iloc[i] | |
if math.isnan(row['ma60']) or math.isnan(row['ma20']) or math.isnan(row['ma10']): | |
continue | |
last_60_days = df[i-59: i+1] | |
last_30_days = last_60_days.tail(30) | |
last_20_days = last_30_days.tail(20) | |
last_15_days = last_20_days.tail(15) | |
last_10_days = last_15_days.tail(10) | |
last_7_days = last_10_days.tail(7) | |
last_5_days = last_10_days.tail(5) | |
last_3_days = last_10_days.tail(3) | |
last_day = last_3_days.tail(1) | |
md10 = max_drawback(last_10_days) | |
md20 = max_drawback(last_20_days) | |
md30 = max_drawback(last_30_days) | |
md60 = max_drawback(last_60_days) | |
mp10 = max_profit(last_10_days) | |
mp20 = max_profit(last_20_days) | |
mp30 = max_profit(last_30_days) | |
mp60 = max_profit(last_60_days) | |
udvr10 = up_down_volume_ratio_v2(last_10_days) | |
udvr20 = up_down_volume_ratio_v2(last_20_days) | |
udvr30 = up_down_volume_ratio_v2(last_30_days) | |
udvr60 = up_down_volume_ratio_v2(last_60_days) | |
udpr10 = up_down_pct_chg_ratio(last_10_days) | |
udpr20 = up_down_pct_chg_ratio(last_20_days) | |
udpr30 = up_down_pct_chg_ratio(last_30_days) | |
udpr60 = up_down_pct_chg_ratio(last_60_days) | |
av1 = avg_volatility(last_day) | |
av3 = avg_volatility(last_3_days) | |
av5 = avg_volatility(last_5_days) | |
av7 = avg_volatility(last_7_days) | |
av10 = avg_volatility(last_10_days) | |
av20 = avg_volatility(last_20_days) | |
av60 = avg_volatility(last_60_days) | |
ma5 = row['ma5'] | |
ma10 = row['ma10'] | |
ma20 = row['ma20'] | |
ma60 = row['ma60'] | |
v_ma5 = row['ma_v_5'] | |
v_ma10 = row['ma_v_10'] | |
v_ma20 = row['ma_v_20'] | |
v_ma60 = row['ma_v_60'] | |
close = row['close'] | |
volume = row['volume'] | |
pct_chg = row['pct_chg'] | |
pr_5c = ma5 / close | |
pr_510 = ma5 / ma10 | |
pr_1020 = ma10 / ma20 | |
pr_2060 = ma20 / ma60 | |
vr_5c = v_ma5 / volume | |
vr_510 = v_ma5 / v_ma10 | |
vr_1020 = v_ma10 / v_ma20 | |
vr_2060 = v_ma20 / v_ma60 | |
pc1 = avg_pct_chg(last_day) | |
pc3 = avg_pct_chg(last_3_days) | |
pc5 = avg_pct_chg(last_5_days) | |
pc7 = avg_pct_chg(last_7_days) | |
pc10 = avg_pct_chg(last_10_days) | |
pc20 = avg_pct_chg(last_20_days) | |
pc60 = avg_pct_chg(last_60_days) | |
            # IEX frames are oldest-first: open[i+1] is the next session and open[i+6]
            # five sessions later, so target is the forward five-day return in percent
            target = round((df.iloc[i+6]['open'] / df.iloc[i+1]['open'] - 1) * 100, 2)
cur = con.cursor() | |
column_str = """date, code, md10,md20,md30,md60,mp10,mp20,mp30,mp60,\ | |
udvr10,udvr20,udvr30,udvr60,udpr10,udpr20,udpr30,udpr60,\ | |
av1,av3,av5,av7,av10,av20,av60,pc1,pc3,pc5,pc7,pc10,pc20,pc60,\ | |
pr_5c,pr_510,pr_1020,pr_2060,vr_5c,vr_510,vr_1020,vr_2060,target""" | |
final_str = "REPLACE INTO short_us (%s) VALUES ('%s', '%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s',\ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s', '%s', '%s', '%s')" % \ | |
(column_str, | |
row.name, | |
stock, | |
md10, | |
md20, | |
md30, | |
md60, | |
mp10, | |
mp20, | |
mp30, | |
mp60, | |
udvr10, | |
udvr20, | |
udvr30, | |
udvr60, | |
udpr10, | |
udpr20, | |
udpr30, | |
udpr60, | |
av1, | |
av3, | |
av5, | |
av7, | |
av10, | |
av20, | |
av60, | |
pc1, | |
pc3, | |
pc5, | |
pc7, | |
pc10, | |
pc20, | |
pc60, | |
pr_5c, | |
pr_510, | |
pr_1020, | |
pr_2060, | |
vr_5c, | |
vr_510, | |
vr_1020, | |
vr_2060, | |
target) | |
#print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
print('%s finish' % (stock)) | |
except Exception as e: | |
print(e) | |
print('%s error' % (stock)) | |
con.commit() | |
con.close() | |
print("Done") |
import warnings | |
from factor import * | |
import pandas_datareader.data as web | |
from datetime import datetime | |
import math | |
warnings.simplefilter(action='ignore') | |
import MySQLdb as mdb | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock from stock_us limit 261, 500" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
start = datetime(2017, 1, 1) | |
end = datetime(2020, 1, 21) | |
for stock in obtain_stock_basic(): | |
try: | |
df = web.DataReader(stock, 'iex', start, end, access_key='pk_88d98ab1d0344ceb8184b898313a18cc') | |
if df is None or len(df) < 60: | |
continue | |
df['pct_chg'] = df['close'].pct_change() | |
df = sma(df, "ma5", n=5) | |
df = sma(df, "ma10", n=10) | |
df = sma(df, "ma20", n=20) | |
df = sma(df, "ma60", n=60) | |
df = sma(df, "ma_v_5", n=5, val_name='volume') | |
df = sma(df, "ma_v_10", n=10, val_name='volume') | |
df = sma(df, "ma_v_20", n=20, val_name='volume') | |
df = sma(df, "ma_v_60", n=60, val_name='volume') | |
for i in range(60, len(df) - 6): | |
row = df.iloc[i] | |
if math.isnan(row['ma60']) or math.isnan(row['ma20']) or math.isnan(row['ma10']): | |
continue | |
last_60_days = df[i-59: i+1] | |
last_30_days = last_60_days.tail(30) | |
last_20_days = last_30_days.tail(20) | |
last_15_days = last_20_days.tail(15) | |
last_10_days = last_15_days.tail(10) | |
last_7_days = last_10_days.tail(7) | |
last_5_days = last_10_days.tail(5) | |
last_3_days = last_10_days.tail(3) | |
last_day = last_3_days.tail(1) | |
md10 = max_drawback(last_10_days) | |
md20 = max_drawback(last_20_days) | |
md30 = max_drawback(last_30_days) | |
md60 = max_drawback(last_60_days) | |
mp10 = max_profit(last_10_days) | |
mp20 = max_profit(last_20_days) | |
mp30 = max_profit(last_30_days) | |
mp60 = max_profit(last_60_days) | |
udvr10 = up_down_volume_ratio_v2(last_10_days) | |
udvr20 = up_down_volume_ratio_v2(last_20_days) | |
udvr30 = up_down_volume_ratio_v2(last_30_days) | |
udvr60 = up_down_volume_ratio_v2(last_60_days) | |
udpr10 = up_down_pct_chg_ratio(last_10_days) | |
udpr20 = up_down_pct_chg_ratio(last_20_days) | |
udpr30 = up_down_pct_chg_ratio(last_30_days) | |
udpr60 = up_down_pct_chg_ratio(last_60_days) | |
av1 = avg_volatility(last_day) | |
av3 = avg_volatility(last_3_days) | |
av5 = avg_volatility(last_5_days) | |
av7 = avg_volatility(last_7_days) | |
av10 = avg_volatility(last_10_days) | |
av20 = avg_volatility(last_20_days) | |
av60 = avg_volatility(last_60_days) | |
ma5 = row['ma5'] | |
ma10 = row['ma10'] | |
ma20 = row['ma20'] | |
ma60 = row['ma60'] | |
v_ma5 = row['ma_v_5'] | |
v_ma10 = row['ma_v_10'] | |
v_ma20 = row['ma_v_20'] | |
v_ma60 = row['ma_v_60'] | |
close = row['close'] | |
volume = row['volume'] | |
pct_chg = row['pct_chg'] | |
pr_5c = ma5 / close | |
pr_510 = ma5 / ma10 | |
pr_1020 = ma10 / ma20 | |
pr_2060 = ma20 / ma60 | |
vr_5c = v_ma5 / volume | |
vr_510 = v_ma5 / v_ma10 | |
vr_1020 = v_ma10 / v_ma20 | |
vr_2060 = v_ma20 / v_ma60 | |
pc1 = avg_pct_chg(last_day) | |
pc3 = avg_pct_chg(last_3_days) | |
pc5 = avg_pct_chg(last_5_days) | |
pc7 = avg_pct_chg(last_7_days) | |
pc10 = avg_pct_chg(last_10_days) | |
pc20 = avg_pct_chg(last_20_days) | |
pc60 = avg_pct_chg(last_60_days) | |
            # Forward five-day open-to-open return in percent (frames are oldest-first)
            target = round((df.iloc[i+6]['open'] / df.iloc[i+1]['open'] - 1) * 100, 2)
cur = con.cursor() | |
column_str = """date, code, md10,md20,md30,md60,mp10,mp20,mp30,mp60,\ | |
udvr10,udvr20,udvr30,udvr60,udpr10,udpr20,udpr30,udpr60,\ | |
av1,av3,av5,av7,av10,av20,av60,pc1,pc3,pc5,pc7,pc10,pc20,pc60,\ | |
pr_5c,pr_510,pr_1020,pr_2060,vr_5c,vr_510,vr_1020,vr_2060,target""" | |
final_str = "REPLACE INTO short_us (%s) VALUES ('%s', '%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s',\ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', \ | |
'%s', '%s', '%s', '%s', '%s', '%s', '%s')" % \ | |
(column_str, | |
row.name, | |
stock, | |
md10, | |
md20, | |
md30, | |
md60, | |
mp10, | |
mp20, | |
mp30, | |
mp60, | |
udvr10, | |
udvr20, | |
udvr30, | |
udvr60, | |
udpr10, | |
udpr20, | |
udpr30, | |
udpr60, | |
av1, | |
av3, | |
av5, | |
av7, | |
av10, | |
av20, | |
av60, | |
pc1, | |
pc3, | |
pc5, | |
pc7, | |
pc10, | |
pc20, | |
pc60, | |
pr_5c, | |
pr_510, | |
pr_1020, | |
pr_2060, | |
vr_5c, | |
vr_510, | |
vr_1020, | |
vr_2060, | |
target) | |
#print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
print('%s finish' % (stock)) | |
except Exception as e: | |
print(e) | |
print('%s error' % (stock)) | |
con.commit() | |
con.close() | |
print("Done") |
import tushare as ts | |
import warnings | |
from factor import * | |
warnings.simplefilter(action='ignore') | |
import MySQLdb as mdb | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock from stock_hk_basic where amount > 30000000 limit 160, 200" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
start_date = '20170101' | |
end_date = '20191231' | |
for stock in obtain_stock_basic(): | |
try: | |
        df = pro.hk_daily(ts_code=stock, start_date=start_date, end_date=end_date)  # hk_daily takes no ma argument; MAs are computed below
    except:
        print("%s error" % stock)
        continue  # otherwise df from the previous iteration (or nothing) would be used
if df is None or len(df) < 60: | |
continue | |
rdf = df[::-1] | |
rdf = sma(rdf, "ma5", n=5) | |
rdf = sma(rdf, "ma10", n=10) | |
rdf = sma(rdf, "ma20", n=20) | |
rdf = sma(rdf, "ma60", n=60) | |
rdf = sma(rdf, "ma_v_5", n=5, val_name='vol') | |
rdf = sma(rdf, "ma_v_10", n=10, val_name='vol') | |
rdf = sma(rdf, "ma_v_20", n=20, val_name='vol') | |
rdf = sma(rdf, "ma_v_60", n=60, val_name='vol') | |
df = rdf[::-1] | |
#print(df.head(10)) | |
for i in range(6, len(df) - 60): | |
row = df.iloc[i] | |
if row['trade_date'] is None: | |
continue | |
if math.isnan(row['ma60']) or math.isnan(row['ma20']) or math.isnan(row['ma10']): | |
continue | |
ma5 = row['ma5'] | |
ma10 = row['ma10'] | |
ma20 = row['ma20'] | |
ma60 = row['ma60'] | |
v_ma5 = row['ma_v_5'] | |
v_ma10 = row['ma_v_10'] | |
v_ma20 = row['ma_v_20'] | |
v_ma60 = row['ma_v_60'] | |
close = row['close'] | |
volume = row['vol'] | |
pct_chg = row['pct_chg'] | |
pr_5c = ma5 / close | |
pr_510 = ma5 / ma10 | |
pr_1020 = ma10 / ma20 | |
pr_2060 = ma20 / ma60 | |
vr_5c = v_ma5 / volume | |
vr_510 = v_ma5 / v_ma10 | |
vr_1020 = v_ma10 / v_ma20 | |
vr_2060 = v_ma20 / v_ma60 | |
cur = con.cursor() | |
final_str = "UPDATE short_hk set pr_5c='%s', pr_510='%s', pr_1020='%s', pr_2060='%s', \ | |
vr_5c='%s', vr_510='%s', vr_1020='%s', vr_2060='%s' where date='%s' and code='%s'" % \ | |
(pr_5c, | |
pr_510, | |
pr_1020, | |
pr_2060, | |
vr_5c, | |
vr_510, | |
vr_1020, | |
vr_2060, | |
row['trade_date'], | |
stock) | |
#print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
print('%s finish' % (stock)) | |
con.commit() | |
con.close() | |
print("Done") |
import tushare as ts | |
import warnings | |
from factor import * | |
warnings.simplefilter(action='ignore') | |
import MySQLdb as mdb | |
with open("db") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
db_host = content[0] | |
db_user = content[1] | |
db_pass = content[2] | |
db_name = content[3] | |
con = mdb.connect(db_host, db_user, db_pass, db_name, port=3306, charset="utf8", use_unicode=True) | |
def obtain_stock_basic(): | |
cur = con.cursor() | |
query_str = "select stock from stock_hk_basic where amount > 30000000 limit 0, 160" | |
cur.execute(query_str) | |
data = cur.fetchall() | |
cur.close() | |
return [(d[0]) for d in data] | |
ts.set_token('d014b0f46a93f4d9472162c809649eb34b0e9770b8a6d0de3155df04') | |
pro = ts.pro_api() | |
start_date = '20170101' | |
end_date = '20191231' | |
for stock in obtain_stock_basic(): | |
try: | |
        df = pro.hk_daily(ts_code=stock, start_date=start_date, end_date=end_date)  # hk_daily takes no ma argument; MAs are computed below
    except:
        print("%s error" % stock)
        continue  # otherwise df from the previous iteration (or nothing) would be used
if df is None or len(df) < 60: | |
continue | |
rdf = df[::-1] | |
rdf = sma(rdf, "ma5", n=5) | |
rdf = sma(rdf, "ma10", n=10) | |
rdf = sma(rdf, "ma20", n=20) | |
rdf = sma(rdf, "ma60", n=60) | |
rdf = sma(rdf, "ma_v_5", n=5, val_name='vol') | |
rdf = sma(rdf, "ma_v_10", n=10, val_name='vol') | |
rdf = sma(rdf, "ma_v_20", n=20, val_name='vol') | |
rdf = sma(rdf, "ma_v_60", n=60, val_name='vol') | |
df = rdf[::-1] | |
#print(df.head(10)) | |
for i in range(6, len(df) - 60): | |
row = df.iloc[i] | |
if row['trade_date'] is None: | |
continue | |
if math.isnan(row['ma60']) or math.isnan(row['ma20']) or math.isnan(row['ma10']): | |
continue | |
ma5 = row['ma5'] | |
ma10 = row['ma10'] | |
ma20 = row['ma20'] | |
ma60 = row['ma60'] | |
v_ma5 = row['ma_v_5'] | |
v_ma10 = row['ma_v_10'] | |
v_ma20 = row['ma_v_20'] | |
v_ma60 = row['ma_v_60'] | |
close = row['close'] | |
volume = row['vol'] | |
pct_chg = row['pct_chg'] | |
pr_5c = ma5 / close | |
pr_510 = ma5 / ma10 | |
pr_1020 = ma10 / ma20 | |
pr_2060 = ma20 / ma60 | |
vr_5c = v_ma5 / volume | |
vr_510 = v_ma5 / v_ma10 | |
vr_1020 = v_ma10 / v_ma20 | |
vr_2060 = v_ma20 / v_ma60 | |
cur = con.cursor() | |
final_str = "UPDATE short_hk set pr_5c='%s', pr_510='%s', pr_1020='%s', pr_2060='%s', \ | |
vr_5c='%s', vr_510='%s', vr_1020='%s', vr_2060='%s' where date='%s' and code='%s'" % \ | |
(pr_5c, | |
pr_510, | |
pr_1020, | |
pr_2060, | |
vr_5c, | |
vr_510, | |
vr_1020, | |
vr_2060, | |
row['trade_date'], | |
stock) | |
#print(final_str) | |
cur.execute(final_str) | |
con.commit() | |
print('%s finish' % (stock)) | |
con.commit() | |
con.close() | |
print("Done") |
from common import * | |
import warnings | |
import math | |
warnings.simplefilter(action='ignore') | |
def is_cross(df): |
for index, row in df.iterrows(): |
# a doji-like bar: open within 0.5% of the close, volume below 90% of its |
# 5-day average, and close no more than 1.4% above the 5-day price MA |
if math.fabs(row['open'] / row['close'] - 1) < 0.005 and \ |
row['vol'] / row['ma_v_5'] < 0.9 and \ |
row['close'] / row['ma5'] < 1.014: |
print(row['trade_date']) |
return True |
return False |
stocks = pro.stock_basic(exchange='', list_status='L', fields='ts_code, name, list_date') |
start_date = (datetime.today() - timedelta(days=120)).strftime("%Y%m%d") | |
end_date = (datetime.today() - timedelta(days=30)).strftime("%Y%m%d") | |
column_str = """date, code, ocr, v5r, c5r""" | |
queries = [] | |
for row in stocks.itertuples(): | |
if 'ST' in row[2]: | |
continue | |
# if not('688' in row[1]): | |
# continue | |
if row[3] < start_date or row[3] > end_date: | |
continue | |
df = ts.pro_bar(api=pro, ts_code=row[1], adj='qfq', start_date=start_date, end_date='20220423', ma=[5, 10]) |
if df is None or df.empty: |
continue |
if is_cross(df.head(1)): |
print("%s" % (row[1])) |
last = df.head(1) |
ocr = math.fabs(last['open'].max() / last['close'].max() - 1) * 100 |
v5r = last['vol'].max() / last['ma_v_5'].max() |
c5r = last['close'].max() / last['ma5'].max()  # .max() collapses the one-row Series to a scalar |
query = "INSERT IGNORE INTO new (%s) VALUES (%s, '%s', %.2f, %.2f, %.2f)" % \ | |
(column_str, df.head(1)['trade_date'].max(), row[1], ocr, v5r, c5r) | |
queries.append(query) | |
print('%s finish' % (row[1])) | |
con = get_conn("db8") | |
execute_queries(con, queries) | |
print("Done") |
from common import * | |
import warnings | |
import datetime | |
warnings.simplefilter(action='ignore', category=FutureWarning) | |
data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,name,list_date') |
with open("1800") as f: | |
content = f.readlines() | |
content = [x.strip() for x in content] | |
stocks_300 = set(content) | |
stock_len = len(stocks_300) | |
count = 0 | |
new_high = 0 | |
new_low = 0 | |
date = 0 | |
for row in data.itertuples(): | |
listdate = int(row[3]) | |
if listdate > 20190101: | |
continue | |
stock = row[1] | |
name = row[2] | |
code = stock.split('.', 1) | |
if not (code[0] in stocks_300): | |
continue | |
count += 1 | |
if count % 50 == 0: | |
print('finish %d/%d' % (count, stock_len)) | |
try: |
last_year = datetime.date.today() - datetime.timedelta(days=365) |
start_date = last_year.strftime("%Y%m%d") |
df = ts.pro_bar(api=pro, ts_code=stock, adj='qfq', start_date=start_date, end_date='20201230') |
except Exception: |
continue |
if df is None or df.empty: |
continue  # pro_bar can return None or an empty frame; skip rather than crash below |
d = int(df.head(1)['trade_date'].max()) |
if d > date: | |
date = d | |
last_close = df.head(1)['close'].max() | |
max_close = df['close'].max() | |
min_close = df['close'].min() | |
if last_close >= max_close: | |
new_high += 1 | |
elif last_close <= min_close: | |
new_low += 1 | |
print('high/low %d/%d' % (new_high, new_low)) | |
con = get_conn() | |
query = "INSERT IGNORE INTO high_vs_low (date, high, low) VALUES (%d, %d, %d)" % (date, new_high, new_low) |
execute_query(con, query) | |
print("Done") |
date | open | high | low | close | volume | |
---|---|---|---|---|---|---|
2011-01-03 00:00:00+00:00 | 14.19 | 14.23 | 13.8 | 13.85 | 354322300 | |
2011-01-04 00:00:00+00:00 | 14.24 | 14.25 | 14.02 | 14.23 | 218978200 | |
2011-01-05 00:00:00+00:00 | 14.5 | 14.6 | 14.15 | 14.19 | 246151200 | |
2011-01-06 00:00:00+00:00 | 14.44 | 14.69 | 14.34 | 14.54 | 241658500 | |
2011-01-07 00:00:00+00:00 | 14.25 | 14.68 | 13.98 | 14.54 | 392328700 | |
2011-01-10 00:00:00+00:00 | 14.4 | 14.43 | 14.09 | 14.17 | 185382600 | |
2011-01-11 00:00:00+00:00 | 14.69 | 14.73 | 14.53 | 14.61 | 212239400 | |
2011-01-12 00:00:00+00:00 | 14.99 | 14.99 | 14.85 | 14.89 | 204060800 | |
2011-01-13 00:00:00+00:00 | 14.77 | 15.02 | 14.72 | 15.01 | 158944400 | |
2011-01-14 00:00:00+00:00 | 15.25 | 15.31 | 14.68 | 14.735 | 282493800 | |
2011-01-18 00:00:00+00:00 | 15 | 15.16 | 14.85 | 15.08 | 198400500 | |
2011-01-19 00:00:00+00:00 | 14.37 | 14.95 | 14.35 | 14.85 | 247013200 | |
2011-01-20 00:00:00+00:00 | 14.54 | 14.605 | 13.94 | 14.27 | 245219600 | |
2011-01-21 00:00:00+00:00 | 14.25 | 14.71 | 14.22 | 14.41 | 291812600 | |
2011-01-24 00:00:00+00:00 | 13.92 | 14.26 | 13.88 | 14.25 | 225424700 | |
2011-01-25 00:00:00+00:00 | 13.63 | 13.84 | 13.4 | 13.78 | 303642100 | |
2011-01-26 00:00:00+00:00 | 13.55 | 13.77 | 13.55 | 13.71 | 145810300 | |
2011-01-27 00:00:00+00:00 | 13.67 | 13.67 | 13.48 | 13.58 | 153086100 | |
2011-01-28 00:00:00+00:00 | 13.6 | 14.06 | 13.58 | 13.83 | 226452500 | |
2011-01-31 00:00:00+00:00 | 13.73 | 13.79 | 13.64 | 13.71 | 118000800 | |
2011-02-01 00:00:00+00:00 | 14.31 | 14.37 | 13.87 | 13.895 | 211978400 | |
2011-02-02 00:00:00+00:00 | 14.24 | 14.35 | 14.13 | 14.33 | 140312700 | |
2011-02-03 00:00:00+00:00 | 14.43 | 14.47 | 14.15 | 14.16 | 145885300 | |
2011-02-04 00:00:00+00:00 | 14.29 | 14.47 | 14.11 | 14.43 | 141015200 | |
2011-02-07 00:00:00+00:00 | 14.67 | 14.77 | 14.43 | 14.51 | 149423500 | |
2011-02-08 00:00:00+00:00 | 14.61 | 14.76 | 14.5 | 14.64 | 158426200 | |
2011-02-09 00:00:00+00:00 | 14.64 | 14.69 | 14.41 | 14.46 | 150179100 | |
2011-02-10 00:00:00+00:00 | 14.49 | 14.64 | 14.47 | 14.505 | 132240800 | |
2011-02-11 00:00:00+00:00 | 14.77 | 14.87 | 14.35 | 14.37 | 156195600 | |
2011-02-14 00:00:00+00:00 | 14.89 | 14.95 | 14.71 | 14.775 | 112571100 | |
2011-02-15 00:00:00+00:00 | 14.77 | 14.88 | 14.69 | 14.8 | 109532300 | |
2011-02-16 00:00:00+00:00 | 14.84 | 14.88 | 14.7 | 14.81 | 132821900 | |
2011-02-17 00:00:00+00:00 | 14.81 | 14.91 | 14.73 | 14.75 | 103495400 | |
2011-02-18 00:00:00+00:00 | 14.75 | 14.89 | 14.67 | 14.84 | 98369000 | |
2011-02-22 00:00:00+00:00 | 14.18 | 14.52 | 14.09 | 14.38 | 187522800 | |
2011-02-23 00:00:00+00:00 | 14.17 | 14.44 | 13.92 | 14.17 | 196355700 | |
2011-02-24 00:00:00+00:00 | 13.97 | 14.16 | 13.79 | 14.11 | 201697200 | |
2011-02-25 00:00:00+00:00 | 14.2 | 14.32 | 14.12 | 14.16 | 126869100 | |
2011-02-28 00:00:00+00:00 | 14.29 | 14.48 | 14.16 | 14.27 | 137039100 | |
2011-03-01 00:00:00+00:00 | 13.93 | 14.35 | 13.91 | 14.315 | 161263900 | |
2011-03-02 00:00:00+00:00 | 13.83 | 14.07 | 13.81 | 13.92 | 115609400 | |
2011-03-03 00:00:00+00:00 | 14.27 | 14.29 | 14.05 | 14.05 | 139513500 | |
2011-03-04 00:00:00+00:00 | 14.12 | 14.31 | 13.98 | 14.3 | 146184700 | |
2011-03-07 00:00:00+00:00 | 14.03 | 14.27 | 13.92 | 14.18 | 139010300 | |
2011-03-08 00:00:00+00:00 | 14.69 | 14.7 | 14.2 | 14.27 | 250033200 | |
2011-03-09 00:00:00+00:00 | 14.59 | 14.69 | 14.48 | 14.66 | 148290400 | |
2011-03-10 00:00:00+00:00 | 14.26 | 14.46 | 14.26 | 14.42 | 155462700 | |
2011-03-11 00:00:00+00:00 | 14.38 | 14.43 | 14.1 | 14.11 | 111555000 | |
2011-03-14 00:00:00+00:00 | 14.23 | 14.35 | 14.07 | 14.26 | 112139700 | |
2011-03-15 00:00:00+00:00 | 13.96 | 14.06 | 13.71 | 13.77 | 167837200 | |
2011-03-16 00:00:00+00:00 | 13.69 | 14.1 | 13.66 | 14.01 | 178630900 | |
2011-03-17 00:00:00+00:00 | 13.98 | 14.04 | 13.75 | 13.89 | 131745300 | |
2011-03-18 00:00:00+00:00 | 14.04 | 14.29 | 13.98 | 14.2 | 199209100 | |
2011-03-21 00:00:00+00:00 | 14.05 | 14.22 | 13.9 | 14.2 | 114391500 | |
2011-03-22 00:00:00+00:00 | 13.88 | 14.05 | 13.88 | 14.04 | 86286500 | |
2011-03-23 00:00:00+00:00 | 13.65 | 13.74 | 13.37 | 13.72 | 230626600 | |
2011-03-24 00:00:00+00:00 | 13.48 | 13.59 | 13.32 | 13.56 | 170761100 | |
2011-03-25 00:00:00+00:00 | 13.34 | 13.53 | 13.32 | 13.49 | 115322600 | |
2011-03-28 00:00:00+00:00 | 13.37 | 13.56 | 13.37 | 13.42 | 78640400 | |
2011-03-29 00:00:00+00:00 | 13.35 | 13.41 | 13.16 | 13.41 | 117737500 | |
2011-03-30 00:00:00+00:00 | 13.45 | 13.56 | 13.27 | 13.4 | 121082200 | |
2011-03-31 00:00:00+00:00 | 13.33 | 13.39 | 13.29 | 13.35 | 86180900 | |
2011-04-01 00:00:00+00:00 | 13.37 | 13.61 | 13.35 | 13.45 | 95017100 | |
2011-04-04 00:00:00+00:00 | 13.44 | 13.59 | 13.4 | 13.4 | 71113900 | |
2011-04-05 00:00:00+00:00 | 13.47 | 13.5 | 13.37 | 13.43 | 64968800 | |
2011-04-06 00:00:00+00:00 | 13.72 | 13.78 | 13.53 | 13.6 | 136151600 | |
2011-04-07 00:00:00+00:00 | 13.61 | 13.88 | 13.54 | 13.79 | 119206000 | |
2011-04-08 00:00:00+00:00 | 13.48 | 13.72 | 13.45 | 13.63 | 87592400 | |
2011-04-11 00:00:00+00:00 | 13.49 | 13.59 | 13.43 | 13.5 | 63348300 | |
2011-04-12 00:00:00+00:00 | 13.47 | 13.58 | 13.31 | 13.4 | 100372600 | |
2011-04-13 00:00:00+00:00 | 13.27 | 13.64 | 13.21 | 13.61 | 124630800 | |
2011-04-14 00:00:00+00:00 | 13.13 | 13.26 | 13.07 | 13.16 | 115624900 | |
2011-04-15 00:00:00+00:00 | 12.82 | 13.33 | 12.82 | 13.22 | 266336200 | |
2011-04-18 00:00:00+00:00 | 12.42 | 12.6 | 12.24 | 12.59 | 262012500 | |
2011-04-19 00:00:00+00:00 | 12.34 | 12.53 | 12.15 | 12.53 | 182612900 | |
2011-04-20 00:00:00+00:00 | 12.27 | 12.48 | 12.21 | 12.43 | 179989100 | |
2011-04-21 00:00:00+00:00 | 12.31 | 12.4 | 12.24 | 12.33 | 100820900 | |
2011-04-25 00:00:00+00:00 | 12.44 | 12.6 | 12.32 | 12.325 | 112414300 | |
2011-04-26 00:00:00+00:00 | 12.23 | 12.53 | 12.23 | 12.49 | 146800000 | |
2011-04-27 00:00:00+00:00 | 12.33 | 12.4 | 12.23 | 12.24 | 118705100 | |
2011-04-28 00:00:00+00:00 | 12.42 | 12.45 | 12.25 | 12.31 | 92414200 | |
2011-04-29 00:00:00+00:00 | 12.28 | 12.42 | 12.28 | 12.39 | 87169300 | |
2011-05-02 00:00:00+00:00 | 12.34 | 12.47 | 12.33 | 12.36 | 89940100 | |
2011-05-03 00:00:00+00:00 | 12.6 | 12.71 | 12.33 | 12.35 | 145530300 | |
2011-05-04 00:00:00+00:00 | 12.49 | 12.68 | 12.42 | 12.64 | 99234300 | |
2011-05-05 00:00:00+00:00 | 12.3 | 12.47 | 12.2 | 12.41 | 130921200 | |
2011-05-06 00:00:00+00:00 | 12.31 | 12.45 | 12.28 | 12.41 | 111324100 | |
2011-05-09 00:00:00+00:00 | 12.18 | 12.315 | 12.11 | 12.28 | 110668200 | |
2011-05-10 00:00:00+00:00 | 12.28 | 12.37 | 12.18 | 12.21 | 91577700 | |
2011-05-11 00:00:00+00:00 | 12.25 | 12.43 | 12.23 | 12.275 | 150517800 | |
2011-05-12 00:00:00+00:00 | 12.2 | 12.27 | 12.09 | 12.2 | 111941900 | |
2011-05-13 00:00:00+00:00 | 11.93 | 12.21 | 11.91 | 12.195 | 159056700 | |
2011-05-16 00:00:00+00:00 | 11.86 | 12.11 | 11.82 | 11.89 | 123646400 | |
2011-05-17 00:00:00+00:00 | 11.9 | 11.94 | 11.8 | 11.81 | 145394700 | |
2011-05-18 00:00:00+00:00 | 11.79 | 11.9 | 11.73 | 11.9 | 152963400 | |
2011-05-19 00:00:00+00:00 | 11.69 | 11.89 | 11.68 | 11.88 | 121082700 | |
2011-05-20 00:00:00+00:00 | 11.58 | 11.78 | 11.53 | 11.64 | 115821500 | |
2011-05-23 00:00:00+00:00 | 11.42 | 11.52 | 11.38 | 11.47 | 122134100 | |
2011-05-24 00:00:00+00:00 | 11.46 | 11.51 | 11.29 | 11.45 | 129513400 | |
2011-05-25 00:00:00+00:00 | 11.38 | 11.58 | 11.35 | 11.35 | 103583400 | |
2011-05-26 00:00:00+00:00 | 11.46 | 11.56 | 11.38 | 11.39 | 124072200 | |
2011-05-27 00:00:00+00:00 | 11.69 | 11.78 | 11.54 | 11.57 | 107370400 | |
2011-05-31 00:00:00+00:00 | 11.75 | 11.92 | 11.62 | 11.87 | 106228200 | |
2011-06-01 00:00:00+00:00 | 11.24 | 11.74 | 11.23 | 11.71 | 176331800 | |
2011-06-02 00:00:00+00:00 | 11.29 | 11.42 | 11.08 | 11.29 | 198074000 | |
2011-06-03 00:00:00+00:00 | 11.28 | 11.45 | 11.16 | 11.2 | 115786700 | |
2011-06-06 00:00:00+00:00 | 10.83 | 11.2 | 10.75 | 11.18 | 213289400 | |
2011-06-07 00:00:00+00:00 | 10.65 | 11.05 | 10.6 | 10.96 | 160224800 | |
2011-06-08 00:00:00+00:00 | 10.54 | 10.79 | 10.495 | 10.59 | 159384200 | |
2011-06-09 00:00:00+00:00 | 10.65 | 10.75 | 10.5 | 10.58 | 129533300 | |
2011-06-10 00:00:00+00:00 | 10.8 | 11.03 | 10.41 | 10.62 | 210796300 | |
2011-06-13 00:00:00+00:00 | 10.97 | 11.02 | 10.64 | 10.89 | 186095900 | |
2011-06-14 00:00:00+00:00 | 10.8 | 11.12 | 10.75 | 11.12 | 173051000 | |
2011-06-15 00:00:00+00:00 | 10.5 | 10.67 | 10.41 | 10.6 | 213408200 | |
2011-06-16 00:00:00+00:00 | 10.6 | 10.69 | 10.4 | 10.45 | 200127400 | |
2011-06-17 00:00:00+00:00 | 10.68 | 10.77 | 10.58 | 10.75 | 116367000 | |
2011-06-20 00:00:00+00:00 | 10.6 | 10.66 | 10.53 | 10.59 | 88402500 | |
2011-06-21 00:00:00+00:00 | 10.83 | 10.87 | 10.53 | 10.68 | 109392600 | |
2011-06-22 00:00:00+00:00 | 10.79 | 10.94 | 10.78 | 10.79 | 102448600 | |
2011-06-23 00:00:00+00:00 | 10.71 | 10.77 | 10.56 | 10.65 | 180604400 | |
2011-06-24 00:00:00+00:00 | 10.52 | 10.77 | 10.485 | 10.73 | 122210200 | |
2011-06-27 00:00:00+00:00 | 10.85 | 10.95 | 10.52 | 10.52 | 202814500 | |
2011-06-28 00:00:00+00:00 | 10.82 | 10.92 | 10.77 | 10.91 | 113027000 | |
2011-06-29 00:00:00+00:00 | 11.14 | 11.25 | 11 | 11.17 | 301628700 | |
2011-06-30 00:00:00+00:00 | 10.96 | 11.18 | 10.84 | 11.17 | 255993900 | |
2011-07-01 00:00:00+00:00 | 11.09 | 11.14 | 10.92 | 10.98 | 137487700 | |
2011-07-05 00:00:00+00:00 | 11 | 11.07 | 10.91 | 11.06 | 110261100 | |
2011-07-06 00:00:00+00:00 | 10.74 | 10.85 | 10.66 | 10.83 | 145455700 | |
2011-07-07 00:00:00+00:00 | 10.92 | 10.96 | 10.85 | 10.9 | 113940400 | |
2011-07-08 00:00:00+00:00 | 10.7 | 10.8 | 10.61 | 10.75 | 129514100 | |
2011-07-11 00:00:00+00:00 | 10.35 | 10.55 | 10.3 | 10.53 | 145323900 | |
2011-07-12 00:00:00+00:00 | 10.21 | 10.4 | 10.2 | 10.27 | 145156100 | |
2011-07-13 00:00:00+00:00 | 10.2 | 10.36 | 10.15 | 10.27 | 164765500 | |
2011-07-14 00:00:00+00:00 | 10.07 | 10.33 | 10.06 | 10.3 | 148536100 | |
2011-07-15 00:00:00+00:00 | 10 | 10.18 | 9.88 | 10.12 | 183416000 | |
2011-07-18 00:00:00+00:00 | 9.72 | 9.93 | 9.53 | 9.88 | 226934200 | |
2011-07-19 00:00:00+00:00 | 9.57 | 9.85 | 9.4 | 9.75 | 322430300 | |
2011-07-20 00:00:00+00:00 | 9.85 | 10 | 9.63 | 9.66 | 249783800 | |
2011-07-21 00:00:00+00:00 | 10.23 | 10.28 | 10 | 10.03 | 238464700 | |
2011-07-22 00:00:00+00:00 | 10.13 | 10.27 | 10.05 | 10.26 | 138441500 | |
2011-07-25 00:00:00+00:00 | 10.01 | 10.07 | 9.88 | 9.97 | 130582700 | |
2011-07-26 00:00:00+00:00 | 10 | 10.09 | 9.91 | 9.97 | 121168700 | |
2011-07-27 00:00:00+00:00 | 9.68 | 9.94 | 9.68 | 9.92 | 151491700 | |
2011-07-28 00:00:00+00:00 | 9.79 | 9.85 | 9.69 | 9.72 | 113623300 | |
2011-07-29 00:00:00+00:00 | 9.71 | 9.95 | 9.6 | 9.64 | 187926700 | |
2011-08-01 00:00:00+00:00 | 9.81 | 10.05 | 9.7 | 10.04 | 159616700 | |
2011-08-02 00:00:00+00:00 | 9.49 | 9.86 | 9.47 | 9.76 | 173972600 | |
2011-08-03 00:00:00+00:00 | 9.54 | 9.59 | 9.32 | 9.49 | 175691200 | |
2011-08-04 00:00:00+00:00 | 8.83 | 9.46 | 8.77 | 9.37 | 305309300 | |
2011-08-05 00:00:00+00:00 | 8.17 | 9.05 | 8.029 | 8.97 | 546781600 | |
2011-08-08 00:00:00+00:00 | 6.51 | 7.7 | 6.31 | 7.4 | 682052900 | |
2011-08-09 00:00:00+00:00 | 7.6 | 7.66 | 6.68 | 7.02 | 496094500 | |
2011-08-10 00:00:00+00:00 | 6.77 | 7.59 | 6.74 | 7.53 | 493986500 | |
2011-08-11 00:00:00+00:00 | 7.25 | 7.42 | 6.96 | 7.1 | 344079000 | |
2011-08-12 00:00:00+00:00 | 7.19 | 7.71 | 7.13 | 7.49 | 298997800 | |
2011-08-15 00:00:00+00:00 | 7.76 | 7.84 | 7.35 | 7.46 | 268370400 | |
2011-08-16 00:00:00+00:00 | 7.4 | 7.66 | 7.34 | 7.61 | 275997300 | |
2011-08-17 00:00:00+00:00 | 7.46 | 7.59 | 7.4 | 7.46 | 159346200 | |
2011-08-18 00:00:00+00:00 | 7.01 | 7.09 | 6.78 | 7.05 | 335205500 | |
2011-08-19 00:00:00+00:00 | 6.97 | 7.12 | 6.75 | 6.8 | 289217900 | |
2011-08-22 00:00:00+00:00 | 6.42 | 7.22 | 6.42 | 7.2 | 398777800 | |
2011-08-23 00:00:00+00:00 | 6.3 | 6.44 | 6.01 | 6.37 | 563788800 | |
2011-08-24 00:00:00+00:00 | 6.99 | 7.05 | 6.32 | 6.36 | 602753000 | |
2011-08-25 00:00:00+00:00 | 7.65 | 8.8 | 7.38 | 8.29 | 859643400 | |
2011-08-26 00:00:00+00:00 | 7.76 | 7.98 | 7.45 | 7.62 | 424194600 | |
2011-08-29 00:00:00+00:00 | 8.39 | 8.41 | 7.96 | 8.1 | 363420500 | |
2011-08-30 00:00:00+00:00 | 8.119 | 8.39 | 8.05 | 8.29 | 297220400 | |
2011-08-31 00:00:00+00:00 | 8.17 | 8.29 | 8.109 | 8.28 | 282263500 | |
2011-09-01 00:00:00+00:00 | 7.91 | 8.18 | 7.91 | 8.18 | 243549600 | |
2011-09-02 00:00:00+00:00 | 7.25 | 7.45 | 7.17 | 7.31 | 305988600 | |
2011-09-06 00:00:00+00:00 | 6.99 | 7.07 | 6.8 | 6.91 | 283101800 | |
2011-09-07 00:00:00+00:00 | 7.48 | 7.52 | 7.1 | 7.23 | 257251800 | |
2011-09-08 00:00:00+00:00 | 7.2 | 7.44 | 7.2 | 7.38 | 204593600 | |
2011-09-09 00:00:00+00:00 | 6.98 | 7.21 | 6.96 | 7.11 | 255828800 | |
2011-09-12 00:00:00+00:00 | 7.05 | 7.1 | 6.81 | 7 | 286856000 | |
2011-09-13 00:00:00+00:00 | 7 | 7.18 | 6.97 | 7.09 | 220657500 | |
2011-09-14 00:00:00+00:00 | 7.05 | 7.13 | 6.92 | 7.09 | 212940200 | |
2011-09-15 00:00:00+00:00 | 7.33 | 7.34 | 7.13 | 7.18 | 230299700 | |
2011-09-16 00:00:00+00:00 | 7.23 | 7.39 | 7.08 | 7.38 | 240523700 | |
2011-09-19 00:00:00+00:00 | 6.99 | 7.07 | 6.935 | 7.06 | 199254700 | |
2011-09-20 00:00:00+00:00 | 6.9 | 7.06 | 6.9 | 7.04 | 142828700 | |
2011-09-21 00:00:00+00:00 | 6.38 | 6.97 | 6.36 | 6.95 | 381953600 | |
2011-09-22 00:00:00+00:00 | 6.06 | 6.28 | 6 | 6.24 | 384622500 | |
2011-09-23 00:00:00+00:00 | 6.31 | 6.39 | 6.07 | 6.09 | 503374000 | |
2011-09-26 00:00:00+00:00 | 6.6 | 6.6 | 6.31 | 6.48 | 228406400 | |
2011-09-27 00:00:00+00:00 | 6.48 | 6.85 | 6.41 | 6.84 | 231598900 | |
2011-09-28 00:00:00+00:00 | 6.16 | 6.53 | 6.16 | 6.51 | 222449300 | |
2011-09-29 00:00:00+00:00 | 6.35 | 6.45 | 6.15 | 6.4 | 224013100 | |
2011-09-30 00:00:00+00:00 | 6.12 | 6.315 | 6.11 | 6.18 | 175213100 | |
2011-10-03 00:00:00+00:00 | 5.53 | 6.18 | 5.52 | 6.08 | 369335100 | |
2011-10-04 00:00:00+00:00 | 5.76 | 5.76 | 5.13 | 5.48 | 448300000 | |
2011-10-05 00:00:00+00:00 | 5.77 | 5.83 | 5.51 | 5.71 | 291785700 | |
2011-10-06 00:00:00+00:00 | 6.28 | 6.31 | 5.65 | 5.77 | 336087400 | |
2011-10-07 00:00:00+00:00 | 5.9 | 6.33 | 5.88 | 6.32 | 285598300 | |
2011-10-10 00:00:00+00:00 | 6.28 | 6.29 | 6.12 | 6.14 | 225326600 | |
2011-10-11 00:00:00+00:00 | 6.37 | 6.47 | 6.13 | 6.18 | 204431100 | |
2011-10-12 00:00:00+00:00 | 6.58 | 6.74 | 6.34 | 6.51 | 284617700 | |
2011-10-13 00:00:00+00:00 | 6.22 | 6.44 | 6.17 | 6.44 | 230115400 | |
2011-10-14 00:00:00+00:00 | 6.19 | 6.36 | 6.12 | 6.31 | 203564500 | |
2011-10-17 00:00:00+00:00 | 6.03 | 6.28 | 6.03 | 6.18 | 175512200 | |
2011-10-18 00:00:00+00:00 | 6.64 | 6.79 | 6.16 | 6.27 | 496883800 | |
2011-10-19 00:00:00+00:00 | 6.4 | 6.86 | 6.37 | 6.68 | 318554300 | |
2011-10-20 00:00:00+00:00 | 6.47 | 6.48 | 6.18 | 6.43 | 254971700 | |
2011-10-21 00:00:00+00:00 | 6.46 | 6.65 | 6.38 | 6.59 | 252586400 | |
2011-10-24 00:00:00+00:00 | 6.72 | 6.74 | 6.49 | 6.59 | 217032400 | |
2011-10-25 00:00:00+00:00 | 6.46 | 6.67 | 6.46 | 6.65 | 200837500 | |
2011-10-26 00:00:00+00:00 | 6.59 | 6.66 | 6.44 | 6.58 | 208197400 | |
2011-10-27 00:00:00+00:00 | 7.05 | 7.23 | 6.9 | 7.11 | 408959900 | |
2011-10-28 00:00:00+00:00 | 7.35 | 7.43 | 7.05 | 7.08 | 273167300 | |
2011-10-31 00:00:00+00:00 | 6.83 | 7.16 | 6.82 | 7.09 | 252026900 | |
2011-11-01 00:00:00+00:00 | 6.4 | 6.68 | 6.32 | 6.38 | 371823200 | |
2011-11-02 00:00:00+00:00 | 6.72 | 6.74 | 6.57 | 6.72 | 216830700 | |
2011-11-03 00:00:00+00:00 | 6.91 | 6.98 | 6.58 | 6.91 | 244340400 | |
2011-11-04 00:00:00+00:00 | 6.49 | 6.75 | 6.45 | 6.73 | 266446300 | |
2011-11-07 00:00:00+00:00 | 6.45 | 6.59 | 6.298 | 6.44 | 192433500 | |
2011-11-08 00:00:00+00:00 | 6.53 | 6.59 | 6.4 | 6.54 | 219000600 | |
2011-11-09 00:00:00+00:00 | 6.16 | 6.36 | 6.15 | 6.31 | 265539000 | |
2011-11-10 00:00:00+00:00 | 6.03 | 6.33 | 6.01 | 6.32 | 324988000 | |
2011-11-11 00:00:00+00:00 | 6.21 | 6.29 | 6.12 | 6.12 | 210025900 | |
2011-11-14 00:00:00+00:00 | 6.05 | 6.19 | 6.02 | 6.16 | 225481700 | |
2011-11-15 00:00:00+00:00 | 6.13 | 6.24 | 6 | 6.01 | 268228500 | |
2011-11-16 00:00:00+00:00 | 5.9 | 6.09 | 5.9 | 6.09 | 292502400 | |
2011-11-17 00:00:00+00:00 | 5.8 | 6.03 | 5.79 | 5.98 | 285881900 | |
2011-11-18 00:00:00+00:00 | 5.78 | 5.89 | 5.75 | 5.86 | 227038600 | |
2011-11-21 00:00:00+00:00 | 5.49 | 5.68 | 5.48 | 5.66 | 269658300 | |
2011-11-22 00:00:00+00:00 | 5.37 | 5.585 | 5.32 | 5.52 | 270959000 | |
2011-11-23 00:00:00+00:00 | 5.14 | 5.31 | 5.13 | 5.3 | 264978500 | |
2011-11-25 00:00:00+00:00 | 5.17 | 5.33 | 5.12 | 5.16 | 138425100 | |
2011-11-28 00:00:00+00:00 | 5.25 | 5.53 | 5.14 | 5.5 | 345537900 | |
2011-11-29 00:00:00+00:00 | 5.08 | 5.28 | 5.03 | 5.19 | 333749700 | |
2011-11-30 00:00:00+00:00 | 5.44 | 5.44 | 5.18 | 5.4 | 436268200 | |
2011-12-01 00:00:00+00:00 | 5.53 | 5.63 | 5.29 | 5.37 | 315495500 | |
2011-12-02 00:00:00+00:00 | 5.64 | 5.88 | 5.61 | 5.67 | 283132900 | |
2011-12-05 00:00:00+00:00 | 5.79 | 5.95 | 5.73 | 5.86 | 293977700 | |
2011-12-06 00:00:00+00:00 | 5.78 | 5.92 | 5.7 | 5.74 | 254787200 | |
2011-12-07 00:00:00+00:00 | 5.89 | 5.92 | 5.7 | 5.73 | 244700100 | |
2011-12-08 00:00:00+00:00 | 5.59 | 5.88 | 5.53 | 5.84 | 286031100 | |
2011-12-09 00:00:00+00:00 | 5.72 | 5.85 | 5.63 | 5.67 | 293654100 | |
2011-12-12 00:00:00+00:00 | 5.45 | 5.6 | 5.4 | 5.59 | 191824000 | |
2011-12-13 00:00:00+00:00 | 5.32 | 5.6 | 5.22 | 5.56 | 227194200 | |
2011-12-14 00:00:00+00:00 | 5.23 | 5.37 | 5.2 | 5.24 | 226723100 | |
2011-12-15 00:00:00+00:00 | 5.26 | 5.37 | 5.213 | 5.33 | 196092100 | |
2011-12-16 00:00:00+00:00 | 5.2 | 5.42 | 5.16 | 5.31 | 227111300 | |
2011-12-19 00:00:00+00:00 | 4.985 | 5.22 | 4.92 | 5.2 | 344317700 | |
2011-12-20 00:00:00+00:00 | 5.17 | 5.22 | 5.04 | 5.11 | 239246100 | |
2011-12-21 00:00:00+00:00 | 5.23 | 5.25 | 5.1 | 5.18 | 203711800 | |
2011-12-22 00:00:00+00:00 | 5.47 | 5.51 | 5.21 | 5.26 | 303085900 | |
2011-12-23 00:00:00+00:00 | 5.6 | 5.63 | 5.47 | 5.54 | 190712000 | |
2011-12-27 00:00:00+00:00 | 5.48 | 5.58 | 5.46 | 5.55 | 158575400 | |
2011-12-28 00:00:00+00:00 | 5.285 | 5.46 | 5.27 | 5.45 | 146104500 | |
2011-12-29 00:00:00+00:00 | 5.46 | 5.48 | 5.28 | 5.29 | 168098500 | |
2011-12-30 00:00:00+00:00 | 5.56 | 5.58 | 5.37 | 5.39 | 176441000 | |
2012-01-03 00:00:00+00:00 | 5.8 | 5.89 | 5.74 | 5.75 | 246293200 | |
2012-01-04 00:00:00+00:00 | 5.81 | 5.88 | 5.62 | 5.71 | 243711200 | |
2012-01-05 00:00:00+00:00 | 6.31 | 6.35 | 5.71 | 5.75 | 547780000 | |
2012-01-06 00:00:00+00:00 | 6.18 | 6.3 | 6.06 | 6.21 | 299630600 | |
2012-01-09 00:00:00+00:00 | 6.27 | 6.37 | 6.19 | 6.26 | 240614400 | |
2012-01-10 00:00:00+00:00 | 6.63 | 6.66 | 6.44 | 6.44 | 354292100 | |
2012-01-11 00:00:00+00:00 | 6.87 | 6.9 | 6.52 | 6.6 | 354039600 | |
2012-01-12 00:00:00+00:00 | 6.79 | 7.02 | 6.66 | 6.99 | 362099600 | |
2012-01-13 00:00:00+00:00 | 6.61 | 6.69 | 6.41 | 6.49 | 337048400 | |
2012-01-17 00:00:00+00:00 | 6.48 | 6.82 | 6.44 | 6.63 | 294196500 | |
2012-01-18 00:00:00+00:00 | 6.8 | 6.8 | 6.46 | 6.5 | 302497700 | |
2012-01-19 00:00:00+00:00 | 6.96 | 7.29 | 6.745 | 7.21 | 491008100 | |
2012-01-20 00:00:00+00:00 | 7.07 | 7.08 | 6.83 | 6.95 | 236630700 | |
2012-01-23 00:00:00+00:00 | 7.25 | 7.37 | 7.11 | 7.13 | 339942200 | |
2012-01-24 00:00:00+00:00 | 7.29 | 7.35 | 7.06 | 7.11 | 228810300 | |
2012-01-25 00:00:00+00:00 | 7.35 | 7.37 | 7.15 | 7.2 | 249340100 | |
2012-01-26 00:00:00+00:00 | 7.3 | 7.5 | 7.23 | 7.45 | 265938800 | |
2012-01-27 00:00:00+00:00 | 7.29 | 7.35 | 7.2 | 7.21 | 230914000 | |
2012-01-30 00:00:00+00:00 | 7.07 | 7.15 | 7.02 | 7.13 | 231240100 | |
2012-01-31 00:00:00+00:00 | 7.13 | 7.19 | 7.05 | 7.17 | 212736300 | |
2012-02-01 00:00:00+00:00 | 7.36 | 7.44 | 7.21 | 7.25 | 318811000 | |
2012-02-02 00:00:00+00:00 | 7.45 | 7.49 | 7.33 | 7.43 | 232425400 | |
2012-02-03 00:00:00+00:00 | 7.84 | 7.89 | 7.63 | 7.66 | 364195700 | |
2012-02-06 00:00:00+00:00 | 7.97 | 7.97 | 7.77 | 7.785 | 236316000 | |
2012-02-07 00:00:00+00:00 | 7.85 | 7.99 | 7.8 | 7.95 | 253694200 | |
2012-02-08 00:00:00+00:00 | 8.13 | 8.15 | 7.93 | 7.96 | 436439700 | |
2012-02-09 00:00:00+00:00 | 8.18 | 8.35 | 8.1 | 8.31 | 480206700 | |
2012-02-10 00:00:00+00:00 | 8.07 | 8.119 | 7.98 | 8.05 | 254420800 | |
2012-02-13 00:00:00+00:00 | 8.25 | 8.31 | 8.21 | 8.27 | 308490200 | |
2012-02-14 00:00:00+00:00 | 7.98 | 8.17 | 7.95 | 8.15 | 384682800 | |
2012-02-15 00:00:00+00:00 | 7.78 | 8.119 | 7.77 | 8.01 | 372981400 | |
2012-02-16 00:00:00+00:00 | 8.09 | 8.13 | 7.66 | 7.71 | 337131800 | |
2012-02-17 00:00:00+00:00 | 8.02 | 8.08 | 7.92 | 8.039 | 355445600 | |
2012-02-21 00:00:00+00:00 | 8.109 | 8.2 | 8 | 8.025 | 333407700 | |
2012-02-22 00:00:00+00:00 | 7.95 | 8.13 | 7.95 | 8.06 | 228867800 | |
2012-02-23 00:00:00+00:00 | 8.02 | 8.05 | 7.9 | 7.97 | 144231800 | |
2012-02-24 00:00:00+00:00 | 7.88 | 8.09 | 7.88 | 8.05 | 163128200 | |
2012-02-27 00:00:00+00:00 | 8.039 | 8.05 | 7.67 | 7.79 | 298930800 | |
2012-02-28 00:00:00+00:00 | 8.119 | 8.15 | 8 | 8.05 | 193443200 | |
2012-02-29 00:00:00+00:00 | 7.97 | 8.24 | 7.97 | 8.16 | 266720500 | |
2012-03-01 00:00:00+00:00 | 8.119 | 8.17 | 8.05 | 8.09 | 197850100 | |
2012-03-02 00:00:00+00:00 | 8.13 | 8.21 | 8.095 | 8.109 | 143887500 | |
2012-03-05 00:00:00+00:00 | 7.97 | 8.13 | 7.95 | 8.09 | 196328300 | |
2012-03-06 00:00:00+00:00 | 7.71 | 7.79 | 7.66 | 7.78 | 262871200 | |
2012-03-07 00:00:00+00:00 | 8.02 | 8.02 | 7.77 | 7.81 | 328331900 | |
2012-03-08 00:00:00+00:00 | 8.06 | 8.119 | 8 | 8.07 | 160282000 | |
2012-03-09 00:00:00+00:00 | 8.05 | 8.189 | 8.01 | 8.115 | 195281400 | |
2012-03-12 00:00:00+00:00 | 7.99 | 8.06 | 7.91 | 8.01 | 165298900 | |
2012-03-13 00:00:00+00:00 | 8.49 | 8.5 | 8.05 | 8.07 | 385984100 | |
2012-03-14 00:00:00+00:00 | 8.84 | 8.9 | 8.56 | 8.66 | 488077000 | |
2012-03-15 00:00:00+00:00 | 9.24 | 9.25 | 8.85 | 8.98 | 489593000 | |
2012-03-16 00:00:00+00:00 | 9.8 | 9.8 | 9.33 | 9.41 | 582294100 | |
2012-03-19 00:00:00+00:00 | 9.53 | 10.1 | 9.51 | 9.78 | 669479900 | |
2012-03-20 00:00:00+00:00 | 9.81 | 9.97 | 9.59 | 9.63 | 451213900 | |
2012-03-21 00:00:00+00:00 | 9.82 | 10.03 | 9.738 | 9.96 | 326573800 | |
2012-03-22 00:00:00+00:00 | 9.6 | 9.77 | 9.52 | 9.66 | 264520500 | |
2012-03-23 00:00:00+00:00 | 9.85 | 9.85 | 9.4 | 9.47 | 283110900 | |
2012-03-26 00:00:00+00:00 | 9.93 | 10.04 | 9.85 | 10.01 | 241605700 | |
2012-03-27 00:00:00+00:00 | 9.6 | 9.9 | 9.58 | 9.865 | 250069600 | |
2012-03-28 00:00:00+00:00 | 9.75 | 9.78 | 9.58 | 9.62 | 234021500 | |
2012-03-29 00:00:00+00:00 | 9.53 | 9.68 | 9.43 | 9.63 | 253625800 | |
2012-03-30 00:00:00+00:00 | 9.57 | 9.64 | 9.35 | 9.61 | 250234500 | |
2012-04-02 00:00:00+00:00 | 9.68 | 9.78 | 9.41 | 9.54 | 179229000 | |
2012-04-03 00:00:00+00:00 | 9.49 | 9.67 | 9.41 | 9.67 | 190453400 | |
2012-04-04 00:00:00+00:00 | 9.2 | 9.36 | 9.15 | 9.35 | 228560600 | |
2012-04-05 00:00:00+00:00 | 9.23 | 9.4 | 9.11 | 9.14 | 180868100 | |
2012-04-09 00:00:00+00:00 | 8.93 | 9.04 | 8.83 | 9.04 | 211427600 | |
2012-04-10 00:00:00+00:00 | 8.54 | 9.09 | 8.5 | 8.97 | 377220000 | |
2012-04-11 00:00:00+00:00 | 8.86 | 8.91 | 8.72 | 8.78 | 252563700 | |
2012-04-12 00:00:00+00:00 | 9.17 | 9.18 | 8.91 | 8.93 | 223979900 | |
2012-04-13 00:00:00+00:00 | 8.68 | 9.08 | 8.68 | 9.08 | 282354600 | |
2012-04-16 00:00:00+00:00 | 8.79 | 8.93 | 8.62 | 8.87 | 217284500 | |
2012-04-17 00:00:00+00:00 | 8.92 | 9 | 8.87 | 8.965 | 193424400 | |
2012-04-18 00:00:00+00:00 | 8.92 | 9 | 8.84 | 8.88 | 160064100 | |
2012-04-19 00:00:00+00:00 | 8.77 | 9.17 | 8.67 | 9.16 | 348804500 | |
2012-04-20 00:00:00+00:00 | 8.36 | 8.78 | 8.33 | 8.78 | 277965300 | |
2012-04-23 00:00:00+00:00 | 8.18 | 8.3 | 7.95 | 8.02 | 256073900 | |
2012-04-24 00:00:00+00:00 | 8.21 | 8.27 | 8.1 | 8.25 | 192443800 | |
2012-04-25 00:00:00+00:00 | 8.26 | 8.35 | 8.17 | 8.3 | 164902400 | |
2012-04-26 00:00:00+00:00 | 8.27 | 8.4 | 8.17 | 8.189 | 131586300 | |
2012-04-27 00:00:00+00:00 | 8.25 | 8.34 | 8.189 | 8.34 | 120948200 | |
2012-04-30 00:00:00+00:00 | 8.109 | 8.24 | 8.039 | 8.22 | 137422300 | |
2012-05-01 00:00:00+00:00 | 8.31 | 8.4 | 8.08 | 8.109 | 177953400 | |
2012-05-02 00:00:00+00:00 | 8.16 | 8.23 | 8.119 | 8.21 | 136208900 | |
2012-05-03 00:00:00+00:00 | 8 | 8.18 | 7.91 | 8.175 | 204584500 | |
2012-05-04 00:00:00+00:00 | 7.74 | 7.92 | 7.7 | 7.9 | 194857200 | |
2012-05-07 00:00:00+00:00 | 7.96 | 8 | 7.66 | 7.71 | 167662600 | |
2012-05-08 00:00:00+00:00 | 7.79 | 7.9 | 7.69 | 7.87 | 180627900 | |
2012-05-09 00:00:00+00:00 | 7.73 | 7.82 | 7.65 | 7.67 | 174742300 | |
2012-05-10 00:00:00+00:00 | 7.7 | 7.94 | 7.67 | 7.91 | 156787300 | |
2012-05-11 00:00:00+00:00 | 7.55 | 7.77 | 7.41 | 7.48 | 245533600 | |
2012-05-14 00:00:00+00:00 | 7.35 | 7.52 | 7.35 | 7.37 | 170716800 | |
2012-05-15 00:00:00+00:00 | 7.3 | 7.55 | 7.25 | 7.37 | 201254900 | |
2012-05-16 00:00:00+00:00 | 7.11 | 7.47 | 7.08 | 7.4 | 216226100 | |
2012-05-17 00:00:00+00:00 | 6.98 | 7.25 | 6.93 | 7.1 | 241468300 | |
2012-05-18 00:00:00+00:00 | 7.02 | 7.03 | 6.89 | 7.01 | 223783500 | |
2012-05-21 00:00:00+00:00 | 6.83 | 7.11 | 6.72 | 7.03 | 229980800 | |
2012-05-22 00:00:00+00:00 | 6.98 | 7.15 | 6.86 | 6.92 | 221551500 | |
2012-05-23 00:00:00+00:00 | 7.17 | 7.17 | 6.85 | 6.9 | 194441500 | |
2012-05-24 00:00:00+00:00 | 7.14 | 7.33 | 7.01 | 7.26 | 206041500 | |
2012-05-25 00:00:00+00:00 | 7.15 | 7.25 | 7.07 | 7.07 | 108661200 | |
2012-05-29 00:00:00+00:00 | 7.44 | 7.45 | 7.22 | 7.28 | 159872200 | |
2012-05-30 00:00:00+00:00 | 7.2 | 7.41 | 7.1 | 7.33 | 206801500 | |
2012-05-31 00:00:00+00:00 | 7.35 | 7.37 | 7.04 | 7.21 | 203267300 | |
2012-06-01 00:00:00+00:00 | 7.02 | 7.19 | 6.94 | 7.12 | 240859000 | |
2012-06-04 00:00:00+00:00 | 6.9 | 7.1 | 6.85 | 7.1 | 163821500 | |
2012-06-05 00:00:00+00:00 | 7.1 | 7.14 | 6.9 | 6.91 | 147346700 | |
2012-06-06 00:00:00+00:00 | 7.64 | 7.77 | 7.18 | 7.24 | 356852900 | |
2012-06-07 00:00:00+00:00 | 7.42 | 7.9 | 7.38 | 7.81 | 276170400 | |
2012-06-08 00:00:00+00:00 | 7.56 | 7.58 | 7.2 | 7.35 | 234555000 | |
2012-06-11 00:00:00+00:00 | 7.28 | 7.77 | 7.28 | 7.72 | 204590700 | |
2012-06-12 00:00:00+00:00 | 7.49 | 7.5 | 7.22 | 7.32 | 148255600 | |
2012-06-13 00:00:00+00:00 | 7.5 | 7.61 | 7.35 | 7.4 | 162374500 | |
2012-06-14 00:00:00+00:00 | 7.66 | 7.69 | 7.446 | 7.52 | 157528300 | |
2012-06-15 00:00:00+00:00 | 7.9 | 7.9 | 7.55 | 7.72 | 211751900 | |
2012-06-18 00:00:00+00:00 | 7.76 | 7.91 | 7.73 | 7.77 | 140319900 | |
2012-06-19 00:00:00+00:00 | 8.109 | 8.207 | 7.86 | 7.88 | 248771500 | |
2012-06-20 00:00:00+00:00 | 8.14 | 8.22 | 8.02 | 8.16 | 213582800 | |
2012-06-21 00:00:00+00:00 | 7.82 | 8.17 | 7.8 | 8.109 | 227157600 | |
2012-06-22 00:00:00+00:00 | 7.94 | 8.01 | 7.79 | 7.965 | 142219300 | |
2012-06-25 00:00:00+00:00 | 7.6 | 7.74 | 7.56 | 7.735 | 151907200 | |
2012-06-26 00:00:00+00:00 | 7.615 | 7.68 | 7.48 | 7.65 | 129776500 | |
2012-06-27 00:00:00+00:00 | 7.77 | 7.82 | 7.61 | 7.68 | 111335500 | |
2012-06-28 00:00:00+00:00 | 7.74 | 7.75 | 7.53 | 7.62 | 133125300 | |
2012-06-29 00:00:00+00:00 | 8.18 | 8.2 | 7.95 | 8 | 258769800 | |
2012-07-02 00:00:00+00:00 | 8.05 | 8.21 | 7.87 | 8.2 | 152974000 | |
2012-07-03 00:00:00+00:00 | 8.06 | 8.119 | 8.01 | 8.06 | 57653700 | |
2012-07-05 00:00:00+00:00 | 7.82 | 8.06 | 7.82 | 8.029 | 120162000 | |
2012-07-06 00:00:00+00:00 | 7.66 | 7.8 | 7.65 | 7.69 | 116595200 | |
2012-07-09 00:00:00+00:00 | 7.56 | 7.7 | 7.53 | 7.63 | 87078300 | |
2012-07-10 00:00:00+00:00 | 7.48 | 7.67 | 7.4 | 7.65 | 101758300 | |
2012-07-11 00:00:00+00:00 | 7.63 | 7.69 | 7.45 | 7.49 | 128619400 | |
2012-07-12 00:00:00+00:00 | 7.48 | 7.55 | 7.43 | 7.53 | 107637000 | |
2012-07-13 00:00:00+00:00 | 7.82 | 7.83 | 7.55 | 7.56 | 177154500 | |
2012-07-16 00:00:00+00:00 | 7.81 | 7.96 | 7.77 | 7.925 | 109439900 | |
2012-07-17 00:00:00+00:00 | 7.92 | 7.93 | 7.75 | 7.92 | 127266800 | |
2012-07-18 00:00:00+00:00 | 7.53 | 7.93 | 7.5 | 7.88 | 254341100 | |
2012-07-19 00:00:00+00:00 | 7.26 | 7.65 | 7.12 | 7.56 | 260959200 | |
2012-07-20 00:00:00+00:00 | 7.07 | 7.21 | 7.06 | 7.205 | 160463000 | |
2012-07-23 00:00:00+00:00 | 7.09 | 7.15 | 6.9 | 6.94 | 168638000 | |
2012-07-24 00:00:00+00:00 | 7.04 | 7.18 | 6.97 | 7.16 | 138286700 | |
2012-07-25 00:00:00+00:00 | 7.07 | 7.161 | 7.01 | 7.11 | 117241300 | |
2012-07-26 00:00:00+00:00 | 7.17 | 7.24 | 7.11 | 7.22 | 121121500 | |
2012-07-27 00:00:00+00:00 | 7.31 | 7.4 | 7.14 | 7.2 | 146210900 | |
2012-07-30 00:00:00+00:00 | 7.28 | 7.38 | 7.26 | 7.28 | 74683800 | |
2012-07-31 00:00:00+00:00 | 7.34 | 7.34 | 7.21 | 7.28 | 86093200 | |
2012-08-01 00:00:00+00:00 | 7.22 | 7.36 | 7.21 | 7.35 | 97187800 | |
2012-08-02 00:00:00+00:00 | 7.18 | 7.26 | 7.1 | 7.12 | 112848000 | |
2012-08-03 00:00:00+00:00 | 7.43 | 7.49 | 7.27 | 7.3 | 130250800 | |
2012-08-06 00:00:00+00:00 | 7.64 | 7.68 | 7.44 | 7.45 | 112657000 | |
2012-08-07 00:00:00+00:00 | 7.67 | 7.85 | 7.66 | 7.71 | 119572200 | |
2012-08-08 00:00:00+00:00 | 7.67 | 7.77 | 7.57 | 7.58 | 73353500 | |
2012-08-09 00:00:00+00:00 | 7.72 | 7.76 | 7.67 | 7.68 | 59676500 | |
2012-08-10 00:00:00+00:00 | 7.74 | 7.76 | 7.63 | 7.66 | 50443800 | |
2012-08-13 00:00:00+00:00 | 7.72 | 7.83 | 7.68 | 7.72 | 58341300 | |
2012-08-14 00:00:00+00:00 | 7.78 | 7.9 | 7.74 | 7.78 | 104644000 | |
2012-08-15 00:00:00+00:00 | 7.87 | 7.87 | 7.73 | 7.75 | 73559200 | |
2012-08-16 00:00:00+00:00 | 7.93 | 7.96 | 7.83 | 7.89 | 78181200 | |
2012-08-17 00:00:00+00:00 | 8 | 8.119 | 7.94 | 7.97 | 138307600 | |
2012-08-20 00:00:00+00:00 | 8.15 | 8.189 | 7.98 | 7.98 | 100276900 | |
2012-08-21 00:00:00+00:00 | 8.189 | 8.4 | 8.16 | 8.25 | 190820700 | |
2012-08-22 00:00:00+00:00 | 8.22 | 8.32 | 8.119 | 8.14 | 140333800 | |
2012-08-23 00:00:00+00:00 | 8.15 | 8.27 | 8.119 | 8.22 | 98866200 | |
2012-08-24 00:00:00+00:00 | 8.16 | 8.21 | 8.029 | 8.119 | 88822200 | |
2012-08-27 00:00:00+00:00 | 8.07 | 8.2 | 8.05 | 8.2 | 96142100 | |
2012-08-28 00:00:00+00:00 | 7.96 | 8.109 | 7.95 | 8.039 | 96280400 | |
2012-08-29 00:00:00+00:00 | 8 | 8.1 | 7.98 | 7.98 | 106910900 | |
2012-08-30 00:00:00+00:00 | 7.91 | 7.95 | 7.83 | 7.95 | 91900100 | |
2012-08-31 00:00:00+00:00 | 7.99 | 8.02 | 7.88 | 8 | 91730500 | |
2012-09-04 00:00:00+00:00 | 8 | 8.109 | 7.95 | 8 | 81461800 | |
2012-09-05 00:00:00+00:00 | 7.95 | 8.02 | 7.93 | 7.99 | 55769500 | |
2012-09-06 00:00:00+00:00 | 8.35 | 8.35 | 8.029 | 8.035 | 200570300 | |
2012-09-07 00:00:00+00:00 | 8.8 | 8.8 | 8.45 | 8.465 | 232843000 | |
2012-09-10 00:00:00+00:00 | 8.58 | 8.92 | 8.53 | 8.835 | 190045600 | |
2012-09-11 00:00:00+00:00 | 9.03 | 9.05 | 8.58 | 8.63 | 201630400 | |
2012-09-12 00:00:00+00:00 | 8.97 | 9.19 | 8.87 | 9.15 | 204167100 | |
2012-09-13 00:00:00+00:00 | 9.4 | 9.48 | 8.81 | 8.89 | 330759200 | |
2012-09-14 00:00:00+00:00 | 9.55 | 9.79 | 9.45 | 9.6 | 329619800 | |
2012-09-17 00:00:00+00:00 | 9.3 | 9.49 | 9.27 | 9.39 | 141409300 | |
2012-09-18 00:00:00+00:00 | 9.23 | 9.29 | 9.09 | 9.16 | 150503600 | |
2012-09-19 00:00:00+00:00 | 9.29 | 9.46 | 9.26 | 9.33 | 126360100 | |
2012-09-20 00:00:00+00:00 | 9.19 | 9.25 | 9.08 | 9.14 | 111354300 | |
2012-09-21 00:00:00+00:00 | 9.11 | 9.35 | 9.08 | 9.35 | 155865300 | |
2012-09-24 00:00:00+00:00 | 9.1 | 9.2 | 8.95 | 8.99 | 113331700 | |
2012-09-25 00:00:00+00:00 | 8.925 | 9.21 | 8.91 | 9.165 | 146678000 | |
2012-09-26 00:00:00+00:00 | 8.815 | 8.91 | 8.7 | 8.855 | 158640100 | |
2012-09-27 00:00:00+00:00 | 8.97 | 9.07 | 8.899 | 8.935 | 119023800 | |
2012-09-28 00:00:00+00:00 | 8.83 | 8.93 | 8.79 | 8.89 | 119039200 | |
2012-10-01 00:00:00+00:00 | 8.96 | 9.13 | 8.85 | 8.87 | 133194500 | |
2012-10-02 00:00:00+00:00 | 8.93 | 9.105 | 8.88 | 9.1 | 109601600 | |
2012-10-03 00:00:00+00:00 | 9.11 | 9.12 | 8.91 | 8.975 | 115670100 | |
2012-10-04 00:00:00+00:00 | 9.41 | 9.42 | 9.15 | 9.2 | 160486300 | |
2012-10-05 00:00:00+00:00 | 9.32 | 9.65 | 9.23 | 9.56 | 204255800 | |
2012-10-08 00:00:00+00:00 | 9.28 | 9.38 | 9.13 | 9.16 | 81944200 | |
2012-10-09 00:00:00+00:00 | 9.21 | 9.38 | 9.04 | 9.325 | 153036100 | |
2012-10-10 00:00:00+00:00 | 9.21 | 9.27 | 9.06 | 9.19 | 110640900 | |
2012-10-11 00:00:00+00:00 | 9.34 | 9.42 | 9.3 | 9.39 | 122925800 | |
2012-10-12 00:00:00+00:00 | 9.12 | 9.28 | 9.05 | 9.16 | 158562500 | |
2012-10-15 00:00:00+00:00 | 9.44 | 9.44 | 9.16 | 9.235 | 154458500 | |
2012-10-16 00:00:00+00:00 | 9.46 | 9.6 | 9.38 | 9.53 | 172663600 | |
2012-10-17 00:00:00+00:00 | 9.44 | 9.6 | 9.3 | 9.42 | 229733500 | |
2012-10-18 00:00:00+00:00 | 9.47 | 9.57 | 9.37 | 9.38 | 149739000 | |
2012-10-19 00:00:00+00:00 | 9.44 | 9.55 | 9.39 | 9.42 | 169451300 | |
2012-10-22 00:00:00+00:00 | 9.55 | 9.56 | 9.41 | 9.47 | 132617600 | |
2012-10-23 00:00:00+00:00 | 9.36 | 9.47 | 9.28 | 9.44 | 160627600 | |
2012-10-24 00:00:00+00:00 | 9.31 | 9.49 | 9.3 | 9.45 | 120868300 | |
2012-10-25 00:00:00+00:00 | 9.24 | 9.415 | 9.17 | 9.365 | 121778900 | |
2012-10-26 00:00:00+00:00 | 9.12 | 9.27 | 9.047 | 9.165 | 124687200 | |
2012-10-31 00:00:00+00:00 | 9.32 | 9.35 | 9.15 | 9.2 | 94925500 | |
2012-11-01 00:00:00+00:00 | 9.74 | 9.75 | 9.272 | 9.34 | 205700000 | |
2012-11-02 00:00:00+00:00 | 9.85 | 9.97 | 9.77 | 9.87 | 220993300 | |
2012-11-05 00:00:00+00:00 | 9.75 | 9.925 | 9.62 | 9.83 | 121104400 | |
2012-11-06 00:00:00+00:00 | 9.94 | 9.97 | 9.75 | 9.825 | 132533500 | |
2012-11-07 00:00:00+00:00 | 9.23 | 9.67 | 9.22 | 9.63 | 286124300 | |
2012-11-08 00:00:00+00:00 | 9.39 | 9.6 | 9.38 | 9.49 | 223927000 | |
2012-11-09 00:00:00+00:00 | 9.43 | 9.59 | 9.27 | 9.295 | 141093100 | |
2012-11-12 00:00:00+00:00 | 9.39 | 9.52 | 9.38 | 9.5 | 68498300 | |
2012-11-13 00:00:00+00:00 | 9.33 | 9.55 | 9.28 | 9.31 | 119629700 | |
2012-11-14 00:00:00+00:00 | 8.99 | 9.42 | 8.95 | 9.38 | 197392900 | |
2012-11-15 00:00:00+00:00 | 9.09 | 9.2 | 9.02 | 9.03 | 144569800 | |
2012-11-16 00:00:00+00:00 | 9.12 | 9.21 | 8.92 | 9.12 | 178995000 | |
2012-11-19 00:00:00+00:00 | 9.49 | 9.533 | 9.32 | 9.36 | 146244000 | |
2012-11-20 00:00:00+00:00 | 9.63 | 9.68 | 9.42 | 9.465 | 150696200 | |
2012-11-21 00:00:00+00:00 | 9.77 | 9.78 | 9.63 | 9.67 | 132499300 | |
2012-11-23 00:00:00+00:00 | 9.9 | 9.9 | 9.8 | 9.835 | 59048000 | |
2012-11-26 00:00:00+00:00 | 9.835 | 9.87 | 9.75 | 9.82 | 99730900 | |
2012-11-27 00:00:00+00:00 | 9.66 | 9.95 | 9.66 | 9.89 | 149684000 | |
2012-11-28 00:00:00+00:00 | 9.76 | 9.76 | 9.38 | 9.56 | 152946500 | |
2012-11-29 00:00:00+00:00 | 9.83 | 9.88 | 9.76 | 9.84 | 125905400 | |
2012-11-30 00:00:00+00:00 | 9.86 | 9.87 | 9.755 | 9.78 | 108739700 | |
2012-12-03 00:00:00+00:00 | 9.8 | 9.94 | 9.78 | 9.93 | 99872800 | |
2012-12-04 00:00:00+00:00 | 9.91 | 9.91 | 9.77 | 9.79 | 144124500 | |
2012-12-05 00:00:00+00:00 | 10.46 | 10.56 | 9.95 | 9.97 | 463491000 | |
2012-12-06 00:00:00+00:00 | 10.455 | 10.58 | 10.29 | 10.51 | 176607900 | |
2012-12-07 00:00:00+00:00 | 10.635 | 10.68 | 10.48 | 10.56 | 192055100 | |
2012-12-10 00:00:00+00:00 | 10.57 | 10.63 | 10.46 | 10.6 | 148065200 | |
2012-12-11 00:00:00+00:00 | 10.51 | 10.71 | 10.5 | 10.64 | 159121300 | |
2012-12-12 00:00:00+00:00 | 10.61 | 10.7 | 10.51 | 10.59 | 166819300 | |
2012-12-13 00:00:00+00:00 | 10.54 | 10.66 | 10.51 | 10.59 | 106232100 | |
2012-12-14 00:00:00+00:00 | 10.58 | 10.6 | 10.52 | 10.545 | 91707200 | |
2012-12-17 00:00:00+00:00 | 11 | 11 | 10.64 | 10.645 | 170087700 | |
2012-12-18 00:00:00+00:00 | 11.36 | 11.36 | 11.05 | 11.24 | 255238400 | |
2012-12-19 00:00:00+00:00 | 11.19 | 11.49 | 11.17 | 11.4 | 193013700 | |
2012-12-20 00:00:00+00:00 | 11.52 | 11.52 | 11.08 | 11.1 | 184449000 | |
2012-12-21 00:00:00+00:00 | 11.29 | 11.35 | 11.12 | 11.2 | 244892300 | |
2012-12-24 00:00:00+00:00 | 11.25 | 11.29 | 11.21 | 11.27 | 50657100 | |
2012-12-26 00:00:00+00:00 | 11.54 | 11.63 | 11.27 | 11.29 | 146097900 | |
2012-12-27 00:00:00+00:00 | 11.47 | 11.69 | 11.23 | 11.66 | 210411400 | |
2012-12-28 00:00:00+00:00 | 11.36 | 11.49 | 11.27 | 11.32 | 131872200 | |
2012-12-31 00:00:00+00:00 | 11.61 | 11.65 | 11.3 | 11.37 | 170837500 | |
2013-01-02 00:00:00+00:00 | 12.03 | 12.15 | 11.9 | 12.05 | 236021400 | |
2013-01-03 00:00:00+00:00 | 11.96 | 12.05 | 11.88 | 12.01 | 157149700 | |
2013-01-04 00:00:00+00:00 | 12.11 | 12.11 | 11.93 | 11.97 | 132601900 | |
2013-01-07 00:00:00+00:00 | 12.09 | 12.2 | 12 | 12.15 | 201403500 | |
2013-01-08 00:00:00+00:00 | 11.98 | 12.1 | 11.89 | 12.09 | 168461100 | |
2013-01-09 00:00:00+00:00 | 11.43 | 12 | 11.33 | 11.87 | 335692000 | |
2013-01-10 00:00:00+00:00 | 11.78 | 11.81 | 11.54 | 11.61 | 199964900 | |
2013-01-11 00:00:00+00:00 | 11.63 | 11.72 | 11.51 | 11.7 | 146136700 | |
2013-01-14 00:00:00+00:00 | 11.47 | 11.63 | 11.38 | 11.61 | 110032900 | |
2013-01-15 00:00:00+00:00 | 11.55 | 11.62 | 11.3 | 11.38 | 126058300 | |
2013-01-16 00:00:00+00:00 | 11.78 | 11.79 | 11.47 | 11.58 | 164579300 | |
2013-01-17 00:00:00+00:00 | 11.28 | 11.7 | 11.17 | 11.69 | 323622200 | |
2013-01-18 00:00:00+00:00 | 11.14 | 11.33 | 11.02 | 11.26 | 179971800 | |
2013-01-22 00:00:00+00:00 | 11.35 | 11.36 | 11.09 | 11.12 | 137242700 | |
2013-01-23 00:00:00+00:00 | 11.42 | 11.44 | 11.23 | 11.38 | 121638200 | |
2013-01-24 00:00:00+00:00 | 11.53 | 11.67 | 11.42 | 11.45 | 127870800 | |
2013-01-25 00:00:00+00:00 | 11.62 | 11.72 | 11.51 | 11.69 | 100171600 | |
2013-01-28 00:00:00+00:00 | 11.48 | 11.67 | 11.43 | 11.64 | 91304200 | |
2013-01-29 00:00:00+00:00 | 11.49 | 11.58 | 11.4 | 11.42 | 96824100 | |
2013-01-30 00:00:00+00:00 | 11.38 | 11.54 | 11.35 | 11.49 | 89130200 | |
2013-01-31 00:00:00+00:00 | 11.32 | 11.36 | 11.22 | 11.32 | 97024700 | |
2013-02-01 00:00:00+00:00 | 11.71 | 11.73 | 11.37 | 11.41 | 161141300 | |
2013-02-04 00:00:00+00:00 | 11.48 | 11.69 | 11.46 | 11.58 | 139733000 | |
2013-02-05 00:00:00+00:00 | 11.88 | 11.98 | 11.56 | 11.59 | 187785900 | |
2013-02-06 00:00:00+00:00 | 11.93 | 11.97 | 11.73 | 11.73 | 173626800 | |
2013-02-07 00:00:00+00:00 | 11.84 | 11.98 | 11.73 | 11.97 | 173209900 | |
2013-02-08 00:00:00+00:00 | 11.76 | 11.9 | 11.72 | 11.86 | 145321100 | |
2013-02-11 00:00:00+00:00 | 11.86 | 11.9 | 11.67 | 11.73 | 103510800 | |
2013-02-12 00:00:00+00:00 | 12.245 | 12.34 | 11.78 | 11.87 | 232173900 | |
2013-02-13 00:00:00+00:00 | 12.17 | 12.42 | 12.05 | 12.35 | 192742500 | |
2013-02-14 00:00:00+00:00 | 12.13 | 12.27 | 12.07 | 12.09 | 144104800 | |
2013-02-15 00:00:00+00:00 | 12.03 | 12.21 | 11.97 | 12.205 | 158242700 | |
2013-02-19 00:00:00+00:00 | 12.19 | 12.31 | 12.06 | 12.1 | 170783400 | |
2013-02-20 00:00:00+00:00 | 11.8 | 12.29 | 11.75 | 12.18 | 193466400 | |
2013-02-21 00:00:00+00:00 | 11.42 | 11.73 | 11.35 | 11.725 | 235532400 | |
2013-02-22 00:00:00+00:00 | 11.44 | 11.635 | 11.26 | 11.62 | 179268200 | |
2013-02-25 00:00:00+00:00 | 11.03 | 11.61 | 10.98 | 11.6 | 206893900 | |
2013-02-26 00:00:00+00:00 | 11.13 | 11.22 | 11.03 | 11.12 | 173039600 | |
2013-02-27 00:00:00+00:00 | 11.3 | 11.36 | 11.1 | 11.15 | 147145400 | |
2013-02-28 00:00:00+00:00 | 11.23 | 11.37 | 11.195 | 11.325 | 143546700 | |
2013-03-01 00:00:00+00:00 | 11.34 | 11.55 | 11.02 | 11.13 | 189050700 | |
2013-03-04 00:00:00+00:00 | 11.41 | 11.45 | 11.22 | 11.27 | 116436000 | |
2013-03-05 00:00:00+00:00 | 11.55 | 11.709 | 11.53 | 11.56 | 135869700 | |
2013-03-06 00:00:00+00:00 | 11.92 | 12.02 | 11.76 | 11.77 | 182557500 | |
2013-03-07 00:00:00+00:00 | 12.26 | 12.28 | 11.98 | 12 | 212403800 | |
2013-03-08 00:00:00+00:00 | 12.07 | 12.44 | 12.02 | 12.42 | 209777100 | |
2013-03-11 00:00:00+00:00 | 12.15 | 12.22 | 12.02 | 12.08 | 106427800 | |
2013-03-12 00:00:00+00:00 | 12.01 | 12.18 | 11.91 | 12.11 | 128134700 | |
2013-03-13 00:00:00+00:00 | 12.06 | 12.11 | 11.98 | 12.04 | 86216800 | |
2013-03-14 00:00:00+00:00 | 12.11 | 12.19 | 12.1 | 12.12 | 115440000 | |
2013-03-15 00:00:00+00:00 | 12.57 | 12.66 | 12.35 | 12.52 | 319019100 | |
2013-03-18 00:00:00+00:00 | 12.56 | 12.68 | 12.26 | 12.29 | 189783300 | |
2013-03-19 00:00:00+00:00 | 12.71 | 12.94 | 12.59 | 12.79 | 242939600 | |
2013-03-20 00:00:00+00:00 | 12.78 | 12.89 | 12.71 | 12.785 | 219121700 | |
2013-03-21 00:00:00+00:00 | 12.57 | 12.84 | 12.55 | 12.71 | 154529400 | |
2013-03-22 00:00:00+00:00 | 12.56 | 12.67 | 12.48 | 12.62 | 101974400 | |
2013-03-25 00:00:00+00:00 | 12.4 | 12.72 | 12.32 | 12.68 | 154320200 | |
2013-03-26 00:00:00+00:00 | 12.28 | 12.5 | 12.15 | 12.45 | 135654700 | |
2013-03-27 00:00:00+00:00 | 12.23 | 12.28 | 12.12 | 12.14 | 107177200 | |
2013-03-28 00:00:00+00:00 | 12.18 | 12.28 | 12.11 | 12.24 | 92013900 | |
2013-04-01 00:00:00+00:00 | 12.15 | 12.28 | 12.1 | 12.15 | 86281600 | |
2013-04-02 00:00:00+00:00 | 12.15 | 12.245 | 12.14 | 12.24 | 102626000 | |
2013-04-03 00:00:00+00:00 | 11.81 | 12.14 | 11.72 | 12.115 | 199765800 | |
2013-04-04 00:00:00+00:00 | 11.94 | 11.99 | 11.72 | 11.81 | 117831400 | |
2013-04-05 00:00:00+00:00 | 11.97 | 12.01 | 11.64 | 11.67 | 141061900 | |
2013-04-08 00:00:00+00:00 | 12.21 | 12.21 | 11.91 | 12 | 101419200 | |
2013-04-09 00:00:00+00:00 | 12.25 | 12.35 | 12.21 | 12.25 | 132365800 | |
2013-04-10 00:00:00+00:00 | 12.32 | 12.4 | 12.26 | 12.31 | 105853900 | |
2013-04-11 00:00:00+00:00 | 12.27 | 12.33 | 12.16 | 12.31 | 100241900 | |
2013-04-12 00:00:00+00:00 | 12.17 | 12.25 | 12.07 | 12.145 | 88191700 | |
2013-04-15 00:00:00+00:00 | 11.98 | 12.32 | 11.97 | 12.19 | 176504300 | |
2013-04-16 00:00:00+00:00 | 12.28 | 12.36 | 12.08 | 12.21 | 147641900 | |
2013-04-17 00:00:00+00:00 | 11.7 | 12.02 | 11.45 | 11.91 | 335408600 | |
2013-04-18 00:00:00+00:00 | 11.44 | 11.65 | 11.23 | 11.61 | 220290500 | |
2013-04-19 00:00:00+00:00 | 11.66 | 11.69 | 11.43 | 11.56 | 119792000 | |
2013-04-22 00:00:00+00:00 | 11.72 | 11.75 | 11.57 | 11.68 | 88532300 | |
2013-04-23 00:00:00+00:00 | 12.07 | 12.16 | 11.9 | 11.92 | 176816000 | |
2013-04-24 00:00:00+00:00 | 12.31 | 12.37 | 12.12 | 12.135 | 120624900 | |
2013-04-25 00:00:00+00:00 | 12.44 | 12.54 | 12.36 | 12.39 | 118694600 | |
2013-04-26 00:00:00+00:00 | 12.42 | 12.46 | 12.29 | 12.34 | 83093200 | |
2013-04-29 00:00:00+00:00 | 12.38 | 12.48 | 12.37 | 12.45 | 65432400 | |
2013-04-30 00:00:00+00:00 | 12.31 | 12.4 | 12.2 | 12.39 | 89362100 | |
2013-05-01 00:00:00+00:00 | 12.14 | 12.27 | 12.08 | 12.2 | 88417800 | |
2013-05-02 00:00:00+00:00 | 12.19 | 12.21 | 12.05 | 12.17 | 76530500 | |
2013-05-03 00:00:00+00:00 | 12.24 | 12.41 | 12.205 | 12.36 | 94322600 | |
2013-05-06 00:00:00+00:00 | 12.88 | 12.89 | 12.36 | 12.39 | 265062800 | |
2013-05-07 00:00:00+00:00 | 12.9 | 13.11 | 12.76 | 12.92 | 218248100 | |
2013-05-08 00:00:00+00:00 | 13.02 | 13.18 | 12.84 | 12.85 | 149755400 | |
2013-05-09 00:00:00+00:00 | 12.91 | 13.06 | 12.87 | 13.05 | 112952300 | |
2013-05-10 00:00:00+00:00 | 13.02 | 13.04 | 12.89 | 12.94 | 87625100 | |
2013-05-13 00:00:00+00:00 | 12.98 | 13.1 | 12.95 | 12.98 | 94295600 | |
2013-05-14 00:00:00+00:00 | 13.34 | 13.36 | 13.02 | 13.04 | 154128100 | |
2013-05-15 00:00:00+00:00 | 13.44 | 13.55 | 13.29 | 13.29 | 139728300 | |
2013-05-16 00:00:00+00:00 | 13.36 | 13.55 | 13.32 | 13.41 | 117977000 | |
2013-05-17 00:00:00+00:00 | 13.43 | 13.52 | 13.39 | 13.5 | 107335600 | |
2013-05-20 00:00:00+00:00 | 13.51 | 13.6 | 13.39 | 13.39 | 88941700 | |
2013-05-21 00:00:00+00:00 | 13.44 | 13.56 | 13.36 | 13.525 | 111810300 | |
2013-05-22 00:00:00+00:00 | 13.31 | 13.73 | 13.17 | 13.485 | 174531500 | |
2013-05-23 00:00:00+00:00 | 13.21 | 13.42 | 12.82 | 12.93 | 190522000 | |
2013-05-24 00:00:00+00:00 | 13.24 | 13.26 | 13.12 | 13.17 | 83452600 | |
2013-05-28 00:00:00+00:00 | 13.35 | 13.51 | 13.31 | 13.49 | 133553000 | |
2013-05-29 00:00:00+00:00 | 13.48 | 13.555 | 13.27 | 13.31 | 126397300 | |
2013-05-30 00:00:00+00:00 | 13.83 | 13.93 | 13.43 | 13.5 | 153383400 | |
2013-05-31 00:00:00+00:00 | 13.66 | 13.99 | 13.652 | 13.91 | 160176100 | |
2013-06-03 00:00:00+00:00 | 13.55 | 13.73 | 13.21 | 13.69 | 197168200 | |
2013-06-04 00:00:00+00:00 | 13.36 | 13.67 | 13.31 | 13.51 | 130055400 | |
2013-06-05 00:00:00+00:00 | 13.09 | 13.45 | 12.97 | 13.285 | 185778900 | |
2013-06-06 00:00:00+00:00 | 13.2 | 13.24 | 12.85 | 13.075 | 142361900 | |
2013-06-07 00:00:00+00:00 | 13.38 | 13.39 | 13.15 | 13.33 | 121057300 | |
2013-06-10 00:00:00+00:00 | 13.3 | 13.55 | 13.26 | 13.49 | 104648500 | |
2013-06-11 00:00:00+00:00 | 13.12 | 13.28 | 13.07 | 13.11 | 106369100 | |
2013-06-12 00:00:00+00:00 | 13.06 | 13.25 | 12.97 | 13.24 | 117289400 | |
2013-06-13 00:00:00+00:00 | 13.21 | 13.26 | 12.97 | 13 | 103636400 | |
2013-06-14 00:00:00+00:00 | 13.07 | 13.24 | 13.03 | 13.23 | 101380700 | |
2013-06-17 00:00:00+00:00 | 13.21 | 13.262 | 13.13 | 13.17 | 115441500 | |
2013-06-18 00:00:00+00:00 | 13.27 | 13.34 | 13.2 | 13.215 | 67257600 | |
2013-06-19 00:00:00+00:00 | 13.19 | 13.4 | 13.17 | 13.285 | 103850000 | |
2013-06-20 00:00:00+00:00 | 12.89 | 13.14 | 12.8 | 13.08 | 185741100 | |
2013-06-21 00:00:00+00:00 | 12.69 | 13 | 12.39 | 12.99 | 196004700 | |
2013-06-24 00:00:00+00:00 | 12.3 | 12.45 | 12.13 | 12.4 | 159762500 | |
2013-06-25 00:00:00+00:00 | 12.67 | 12.77 | 12.43 | 12.61 | 132844500 | |
2013-06-26 00:00:00+00:00 | 12.76 | 12.853 | 12.68 | 12.84 | 105282000 | |
2013-06-27 00:00:00+00:00 | 13.01 | 13.03 | 12.78 | 12.84 | 124616300 | |
2013-06-28 00:00:00+00:00 | 12.86 | 13 | 12.84 | 12.97 | 90297100 | |
2013-07-01 00:00:00+00:00 | 12.93 | 13.1 | 12.92 | 12.95 | 83146300 | |
2013-07-02 00:00:00+00:00 | 12.9 | 13.1 | 12.8 | 12.95 | 83708600 | |
2013-07-03 00:00:00+00:00 | 12.83 | 12.84 | 12.73 | 12.82 | 37971700 | |
2013-07-05 00:00:00+00:00 | 13.06 | 13.08 | 12.91 | 12.99 | 80758500 | |
2013-07-08 00:00:00+00:00 | 13.28 | 13.37 | 13.08 | 13.11 | 107441200 | |
2013-07-09 00:00:00+00:00 | 13.53 | 13.53 | 13.25 | 13.37 | 106067300 | |
2013-07-10 00:00:00+00:00 | 13.37 | 13.53 | 13.31 | 13.51 | 104353400 | |
2013-07-11 00:00:00+00:00 | 13.51 | 13.58 | 13.33 | 13.54 | 93756500 | |
2013-07-12 00:00:00+00:00 | 13.78 | 13.8 | 13.47 | 13.54 | 123958400 | |
2013-07-15 00:00:00+00:00 | 13.88 | 13.94 | 13.76 | 13.93 | 90765200 | |
2013-07-16 00:00:00+00:00 | 13.92 | 14.02 | 13.77 | 13.93 | 146041900 | |
2013-07-17 00:00:00+00:00 | 14.31 | 14.44 | 14.04 | 14.055 | 249674500 | |
2013-07-18 00:00:00+00:00 | 14.76 | 14.85 | 14.4 | 14.4 | 221796400 | |
2013-07-19 00:00:00+00:00 | 14.75 | 14.76 | 14.6 | 14.76 | 136196600 | |
2013-07-22 00:00:00+00:00 | 14.92 | 14.99 | 14.65 | 14.74 | 112416800 | |
2013-07-23 00:00:00+00:00 | 14.94 | 15.03 | 14.86 | 14.98 | 113736000 | |
2013-07-24 00:00:00+00:00 | 14.71 | 15 | 14.68 | 15 | 117771100 | |
2013-07-25 00:00:00+00:00 | 14.83 | 14.85 | 14.54 | 14.62 | 89243300 | |
2013-07-26 00:00:00+00:00 | 14.73 | 14.76 | 14.62 | 14.7 | 73777800 | |
2013-07-29 00:00:00+00:00 | 14.52 | 14.7 | 14.45 | 14.65 | 88616700 | |
2013-07-30 00:00:00+00:00 | 14.52 | 14.63 | 14.46 | 14.58 | 71675200 | |
2013-07-31 00:00:00+00:00 | 14.6 | 14.85 | 14.57 | 14.58 | 128182500 | |
2013-08-01 00:00:00+00:00 | 14.95 | 14.97 | 14.82 | 14.845 | 108554200 | |
2013-08-02 00:00:00+00:00 | 14.84 | 14.91 | 14.78 | 14.88 | 83367400 | |
2013-08-05 00:00:00+00:00 | 14.8 | 14.84 | 14.7 | 14.77 | 61711900 | |
2013-08-06 00:00:00+00:00 | 14.64 | 14.76 | 14.63 | 14.74 | 84249800 | |
2013-08-07 00:00:00+00:00 | 14.53 | 14.57 | 14.25 | 14.43 | 128693600 | |
2013-08-08 00:00:00+00:00 | 14.61 | 14.7 | 14.44 | 14.675 | 88544100 | |
2013-08-09 00:00:00+00:00 | 14.45 | 14.61 | 14.4 | 14.56 | 72897900 | |
2013-08-12 00:00:00+00:00 | 14.41 | 14.47 | 14.32 | 14.355 | 60867500 | |
2013-08-13 00:00:00+00:00 | 14.51 | 14.62 | 14.33 | 14.47 | 75777900 | |
2013-08-14 00:00:00+00:00 | 14.6 | 14.75 | 14.54 | 14.55 | 78553700 | |
2013-08-15 00:00:00+00:00 | 14.32 | 14.47 | 14.31 | 14.46 | 99238900 | |
2013-08-16 00:00:00+00:00 | 14.42 | 14.47 | 14.21 | 14.29 | 104959000 | |
2013-08-19 00:00:00+00:00 | 14.15 | 14.38 | 14.13 | 14.37 | 98384600 | |
2013-08-20 00:00:00+00:00 | 14.29 | 14.38 | 13.98 | 14.18 | 114332500 | |
2013-08-21 00:00:00+00:00 | 14.34 | 14.48 | 14.2 | 14.26 | 91680600 | |
2013-08-22 00:00:00+00:00 | 14.57 | 14.6 | 14.4 | 14.47 | 76348300 | |
2013-08-23 00:00:00+00:00 | 14.57 | 14.69 | 14.51 | 14.59 | 67699200 | |
2013-08-26 00:00:00+00:00 | 14.49 | 14.7 | 14.49 | 14.53 | 68938900 | |
2013-08-27 00:00:00+00:00 | 14.11 | 14.41 | 14.1 | 14.26 | 122534200 | |
2013-08-28 00:00:00+00:00 | 14.12 | 14.25 | 13.99 | 14.07 | 96473600 | |
2013-08-29 00:00:00+00:00 | 14.17 | 14.3 | 14.12 | 14.14 | 76691500 | |
2013-08-30 00:00:00+00:00 | 14.12 | 14.25 | 14.07 | 14.23 | 73233400 | |
2013-09-03 00:00:00+00:00 | 14.25 | 14.38 | 14.18 | 14.3 | 74628700 | |
2013-09-04 00:00:00+00:00 | 14.32 | 14.4 | 14.205 | 14.215 | 71132500 | |
2013-09-05 00:00:00+00:00 | 14.37 | 14.55 | 14.36 | 14.41 | 71229200 | |
2013-09-06 00:00:00+00:00 | 14.36 | 14.5 | 14.27 | 14.47 | 75513100 | |
2013-09-09 00:00:00+00:00 | 14.48 | 14.49 | 14.37 | 14.41 | 53447300 | |
2013-09-10 00:00:00+00:00 | 14.61 | 14.69 | 14.52 | 14.52 | 93180300 | |
2013-09-11 00:00:00+00:00 | 14.65 | 14.68 | 14.52 | 14.56 | 81098900 | |
2013-09-12 00:00:00+00:00 | 14.48 | 14.68 | 14.46 | 14.65 | 63355900 | |
2013-09-13 00:00:00+00:00 | 14.49 | 14.55 | 14.45 | 14.46 | 56793400 | |
2013-09-16 00:00:00+00:00 | 14.53 | 14.68 | 14.53 | 14.61 | 65276500 | |
2013-09-17 00:00:00+00:00 | 14.55 | 14.62 | 14.4 | 14.53 | 73686100 | |
2013-09-18 00:00:00+00:00 | 14.715 | 14.82 | 14.49 | 14.53 | 127092600 | |
2013-09-19 00:00:00+00:00 | 14.61 | 14.83 | 14.58 | 14.8 | 79710700 | |
2013-09-20 00:00:00+00:00 | 14.44 | 14.66 | 14.44 | 14.655 | 145174800 | |
2013-09-23 00:00:00+00:00 | 14.14 | 14.325 | 14.09 | 14.31 | 127283100 | |
2013-09-24 00:00:00+00:00 | 14.09 | 14.25 | 14.01 | 14.18 | 96912800 | |
2013-09-25 00:00:00+00:00 | 14.14 | 14.24 | 14.03 | 14.12 | 86546100 | |
2013-09-26 00:00:00+00:00 | 14.08 | 14.22 | 13.98 | 14.19 | 89352900 | |
2013-09-27 00:00:00+00:00 | 13.9 | 14.03 | 13.88 | 13.99 | 79156000 | |
2013-09-30 00:00:00+00:00 | 13.8 | 13.93 | 13.6 | 13.685 | 104439900 | |
2013-10-01 00:00:00+00:00 | 13.9 | 13.92 | 13.81 | 13.85 | 57631000 | |
2013-10-02 00:00:00+00:00 | 14.06 | 14.1 | 13.79 | 13.81 | 89186300 | |
2013-10-03 00:00:00+00:00 | 14 | 14.1 | 13.83 | 14.07 | 95269000 | |
2013-10-04 00:00:00+00:00 | 14.05 | 14.08 | 13.96 | 14.02 | 63495400 | |
2013-10-07 00:00:00+00:00 | 13.81 | 13.95 | 13.8 | 13.91 | 64822700 | |
2013-10-08 00:00:00+00:00 | 13.69 | 13.9 | 13.68 | 13.83 | 92746200 | |
2013-10-09 00:00:00+00:00 | 13.84 | 13.93 | 13.69 | 13.72 | 96012800 | |
2013-10-10 00:00:00+00:00 | 14.23 | 14.25 | 14.01 | 14.03 | 102305800 | |
2013-10-11 00:00:00+00:00 | 14.19 | 14.28 | 14.11 | 14.24 | 83422000 | |
2013-10-14 00:00:00+00:00 | 14.35 | 14.39 | 14.05 | 14.06 | 78367500 | |
2013-10-15 00:00:00+00:00 | 14.24 | 14.43 | 14.21 | 14.39 | 102099300 | |
2013-10-16 00:00:00+00:00 | 14.56 | 14.61 | 14.315 | 14.32 | 153481000 | |
2013-10-17 00:00:00+00:00 | 14.66 | 14.66 | 14.41 | 14.445 | 94090800 | |
2013-10-18 00:00:00+00:00 | 14.63 | 14.72 | 14.53 | 14.68 | 96986400 | |
2013-10-21 00:00:00+00:00 | 14.52 | 14.59 | 14.47 | 14.53 | 91280600 | |
2013-10-22 00:00:00+00:00 | 14.52 | 14.63 | 14.45 | 14.55 | 102836000 | |
2013-10-23 00:00:00+00:00 | 14.21 | 14.48 | 14.17 | 14.45 | 108529700 | |
2013-10-24 00:00:00+00:00 | 14.17 | 14.2 | 14.07 | 14.12 | 92566100 | |
2013-10-25 00:00:00+00:00 | 14.26 | 14.27 | 14.14 | 14.15 | 58592400 | |
2013-10-28 00:00:00+00:00 | 14.23 | 14.32 | 14.21 | 14.29 | 62328000 | |
2013-10-29 00:00:00+00:00 | 14.15 | 14.29 | 14.06 | 14.26 | 79563100 | |
2013-10-30 00:00:00+00:00 | 14.17 | 14.29 | 14.15 | 14.19 | 76624200 | |
2013-10-31 00:00:00+00:00 | 13.97 | 14.16 | 13.96 | 14.135 | 105713400 | |
2013-11-01 00:00:00+00:00 | 14.02 | 14.1 | 13.96 | 13.995 | 75610400 | |
2013-11-04 00:00:00+00:00 | 14.04 | 14.12 | 14 | 14.09 | 53063200 | |
2013-11-05 00:00:00+00:00 | 13.93 | 14.02 | 13.9 | 13.99 | 71809700 | |
2013-11-06 00:00:00+00:00 | 13.96 | 14.02 | 13.91 | 13.99 | 66712900 | |
2013-11-07 00:00:00+00:00 | 13.8 | 14.03 | 13.8 | 14 | 80775000 | |
2013-11-08 00:00:00+00:00 | 14.32 | 14.32 | 13.835 | 13.86 | 158227300 | |
2013-11-11 00:00:00+00:00 | 14.4 | 14.42 | 14.24 | 14.27 | 66357400 | |
2013-11-12 00:00:00+00:00 | 14.32 | 14.46 | 14.26 | 14.33 | 69498300 | |
2013-11-13 00:00:00+00:00 | 14.64 | 14.64 | 14.2 | 14.255 | 128018500 | |
2013-11-14 00:00:00+00:00 | 14.795 | 14.82 | 14.61 | 14.675 | 128999400 | |
2013-11-15 00:00:00+00:00 | 14.92 | 14.95 | 14.77 | 14.79 | 105475300 | |
2013-11-18 00:00:00+00:00 | 14.92 | 15.17 | 14.86 | 14.975 | 145554400 | |
2013-11-19 00:00:00+00:00 | 15.2 | 15.3 | 14.97 | 14.975 | 161505600 | |
2013-11-20 00:00:00+00:00 | 15.14 | 15.27 | 15.08 | 15.25 | 107174900 | |
2013-11-21 00:00:00+00:00 | 15.59 | 15.6 | 15.2 | 15.22 | 142053600 | |
2013-11-22 00:00:00+00:00 | 15.64 | 15.787 | 15.6 | 15.695 | 110331100 | |
2013-11-25 00:00:00+00:00 | 15.81 | 15.98 | 15.66 | 15.67 | 153765500 | |
2013-11-26 00:00:00+00:00 | 15.88 | 15.97 | 15.81 | 15.835 | 114833700 | |
2013-11-27 00:00:00+00:00 | 15.83 | 15.94 | 15.76 | 15.87 | 75503600 | |
2013-11-29 00:00:00+00:00 | 15.82 | 15.92 | 15.79 | 15.835 | 44288400 | |
2013-12-02 00:00:00+00:00 | 15.73 | 15.97 | 15.7 | 15.84 | 92987400 | |
2013-12-03 00:00:00+00:00 | 15.54 | 15.79 | 15.39 | 15.64 | 106450300 | |
2013-12-04 00:00:00+00:00 | 15.63 | 15.73 | 15.36 | 15.39 | 96678100 | |
2013-12-05 00:00:00+00:00 | 15.43 | 15.64 | 15.36 | 15.61 | 95230500 | |
2013-12-06 00:00:00+00:00 | 15.56 | 15.72 | 15.46 | 15.61 | 92203100 | |
2013-12-09 00:00:00+00:00 | 15.58 | 15.67 | 15.56 | 15.62 | 50167200 | |
2013-12-10 00:00:00+00:00 | 15.56 | 15.675 | 15.51 | 15.52 | 67486300 | |
2013-12-11 00:00:00+00:00 | 15.25 | 15.55 | 15.18 | 15.55 | 117565000 | |
2013-12-12 00:00:00+00:00 | 15.25 | 15.35 | 15.12 | 15.26 | 76113900 | |
2013-12-13 00:00:00+00:00 | 15.18 | 15.33 | 15.13 | 15.275 | 61575600 | |
2013-12-16 00:00:00+00:00 | 15.24 | 15.34 | 15.2 | 15.22 | 71467000 | |
2013-12-17 00:00:00+00:00 | 15.18 | 15.295 | 15.14 | 15.24 | 74247300 | |
2013-12-18 00:00:00+00:00 | 15.69 | 15.71 | 15.06 | 15.28 | 152929300 | |
2013-12-19 00:00:00+00:00 | 15.75 | 15.79 | 15.6 | 15.66 | 97776100 | |
2013-12-20 00:00:00+00:00 | 15.6 | 15.87 | 15.6 | 15.75 | 131956000 | |
2013-12-23 00:00:00+00:00 | 15.69 | 15.79 | 15.69 | 15.72 | 52565800 | |
2013-12-24 00:00:00+00:00 | 15.7 | 15.75 | 15.67 | 15.72 | 21770400 | |
2013-12-26 00:00:00+00:00 | 15.65 | 15.75 | 15.64 | 15.735 | 48803600 | |
2013-12-27 00:00:00+00:00 | 15.67 | 15.7 | 15.58 | 15.67 | 40049300 | |
2013-12-30 00:00:00+00:00 | 15.54 | 15.69 | 15.52 | 15.64 | 56218800 | |
2013-12-31 00:00:00+00:00 | 15.57 | 15.615 | 15.51 | 15.6 | 57188900 | |
2014-01-02 00:00:00+00:00 | 16.1 | 16.16 | 15.68 | 15.69 | 148709900 | |
2014-01-03 00:00:00+00:00 | 16.41 | 16.5 | 16.23 | 16.27 | 129921800 | |
2014-01-06 00:00:00+00:00 | 16.66 | 16.73 | 16.56 | 16.625 | 114431300 | |
2014-01-07 00:00:00+00:00 | 16.5 | 16.79 | 16.45 | 16.77 | 110605100 | |
2014-01-08 00:00:00+00:00 | 16.58 | 16.69 | 16.519 | 16.67 | 101036400 | |
2014-01-09 00:00:00+00:00 | 16.83 | 16.926 | 16.615 | 16.67 | 100947200 | |
2014-01-10 00:00:00+00:00 | 16.77 | 16.79 | 16.61 | 16.75 | 87454100 | |
2014-01-13 00:00:00+00:00 | 16.43 | 16.8 | 16.4 | 16.79 | 90025400 | |
2014-01-14 00:00:00+00:00 | 16.77 | 16.77 | 16.53 | 16.54 | 97786600 | |
2014-01-15 00:00:00+00:00 | 17.15 | 17.42 | 17.11 | 17.23 | 329333100 | |
2014-01-16 00:00:00+00:00 | 17.08 | 17.14 | 16.99 | 17.09 | 163613100 | |
2014-01-17 00:00:00+00:00 | 17.01 | 17.22 | 16.99 | 17.2 | 96232200 | |
2014-01-21 00:00:00+00:00 | 17.01 | 17.155 | 16.87 | 17.08 | 118269200 | |
2014-01-22 00:00:00+00:00 | 17.15 | 17.15 | 17 | 17.08 | 68114400 | |
2014-01-23 00:00:00+00:00 | 16.86 | 17.09 | 16.74 | 17.07 | 123765900 | |
2014-01-24 00:00:00+00:00 | 16.45 | 16.72 | 16.45 | 16.67 | 112899300 | |
2014-01-27 00:00:00+00:00 | 16.309 | 16.54 | 16.059 | 16.37 | 134872600 | |
2014-01-28 00:00:00+00:00 | 16.73 | 16.76 | 16.37 | 16.45 | 92891300 | |
2014-01-29 00:00:00+00:00 | 16.68 | 16.85 | 16.49 | 16.55 | 130026900 | |
2014-01-30 00:00:00+00:00 | 16.93 | 16.99 | 16.765 | 16.81 | 91028200 | |
2014-01-31 00:00:00+00:00 | 16.75 | 16.98 | 16.61 | 16.72 | 139432100 | |
2014-02-03 00:00:00+00:00 | 16.35 | 16.88 | 16.3 | 16.77 | 159981200 | |
2014-02-04 00:00:00+00:00 | 16.35 | 16.56 | 16.26 | 16.49 | 125337000 | |
2014-02-05 00:00:00+00:00 | 16.4 | 16.45 | 16.149 | 16.309 | 110576200 | |
2014-02-06 00:00:00+00:00 | 16.69 | 16.73 | 16.44 | 16.45 | 110029100 | |
2014-02-07 00:00:00+00:00 | 16.82 | 16.88 | 16.63 | 16.82 | 122571100 | |
2014-02-10 00:00:00+00:00 | 16.72 | 16.73 | 16.6 | 16.71 | 95792500 | |
2014-02-11 00:00:00+00:00 | 16.88 | 16.92 | 16.635 | 16.725 | 92617500 | |
2014-02-12 00:00:00+00:00 | 16.75 | 16.91 | 16.65 | 16.87 | 96948900 | |
2014-02-13 00:00:00+00:00 | 16.75 | 16.82 | 16.63 | 16.645 | 95756900 | |
2014-02-14 00:00:00+00:00 | 16.7 | 16.76 | 16.65 | 16.74 | 100684300 | |
2014-02-18 00:00:00+00:00 | 16.47 | 16.74 | 16.47 | 16.7 | 121778600 | |
2014-02-19 00:00:00+00:00 | 16.2 | 16.45 | 16.18 | 16.379 | 137734500 | |
2014-02-20 00:00:00+00:00 | 16.3 | 16.36 | 16.18 | 16.219 | 107425900 | |
2014-02-21 00:00:00+00:00 | 16.29 | 16.485 | 16.25 | 16.29 | 106236900 | |
2014-02-24 00:00:00+00:00 | 16.53 | 16.63 | 16.3 | 16.3 | 102909600 | |
2014-02-25 00:00:00+00:00 | 16.34 | 16.56 | 16.32 | 16.5 | 81457000 | |
2014-02-26 00:00:00+00:00 | 16.329 | 16.4 | 16.129 | 16.36 | 89831600 | |
2014-02-27 00:00:00+00:00 | 16.49 | 16.49 | 16.21 | 16.27 | 71314500 | |
2014-02-28 00:00:00+00:00 | 16.53 | 16.65 | 16.344 | 16.49 | 126252100 | |
2014-03-03 00:00:00+00:00 | 16.3 | 16.41 | 16.19 | 16.3 | 87526800 | |
2014-03-04 00:00:00+00:00 | 16.73 | 16.74 | 16.43 | 16.48 | 100849900 | |
2014-03-05 00:00:00+00:00 | 17.25 | 17.31 | 16.78 | 16.79 | 207180000 | |
2014-03-06 00:00:00+00:00 | 17.35 | 17.631 | 17.31 | 17.42 | 138078900 | |
2014-03-07 00:00:00+00:00 | 17.33 | 17.59 | 17.246 | 17.54 | 109547100 | |
2014-03-10 00:00:00+00:00 | 17.47 | 17.47 | 17.25 | 17.27 | 81732700 | |
2014-03-11 00:00:00+00:00 | 17.27 | 17.52 | 17.25 | 17.51 | 90183400 | |
2014-03-12 00:00:00+00:00 | 17.28 | 17.33 | 17.07 | 17.18 | 83309200 | |
2014-03-13 00:00:00+00:00 | 17.16 | 17.47 | 17.07 | 17.34 | 100875700 | |
2014-03-14 00:00:00+00:00 | 16.8 | 17.22 | 16.76 | 17.08 | 131297200 | |
2014-03-17 00:00:00+00:00 | 17.11 | 17.17 | 16.97 | 16.98 | 79942200 | |
2014-03-18 00:00:00+00:00 | 17.19 | 17.22 | 17.07 | 17.14 | 67402400 | |
2014-03-19 00:00:00+00:00 | 17.44 | 17.49 | 17.12 | 17.16 | 105006500 | |
2014-03-20 00:00:00+00:00 | 17.92 | 18 | 17.43 | 17.44 | 167048700 | |
2014-03-21 00:00:00+00:00 | 17.56 | 18.03 | 17.56 | 18.03 | 155721000 | |
2014-03-24 00:00:00+00:00 | 17.37 | 17.65 | 17.3 | 17.62 | 118500000 | |
2014-03-25 00:00:00+00:00 | 17.21 | 17.58 | 17.2 | 17.49 | 98348600 | |
2014-03-26 00:00:00+00:00 | 17.18 | 17.4 | 17.18 | 17.38 | 97573700 | |
2014-03-27 00:00:00+00:00 | 17.01 | 17.49 | 16.83 | 17.28 | 175139500 | |
2014-03-28 00:00:00+00:00 | 16.98 | 17.15 | 16.85 | 17.085 | 79909500 | |
2014-03-31 00:00:00+00:00 | 17.2 | 17.27 | 17.12 | 17.15 | 62115800 | |
2014-04-01 00:00:00+00:00 | 17.34 | 17.4 | 17.26 | 17.28 | 57423800 | |
2014-04-02 00:00:00+00:00 | 17.23 | 17.38 | 17.13 | 17.35 | 65349000 | |
2014-04-03 00:00:00+00:00 | 17.15 | 17.24 | 17.03 | 17.21 | 56534500 | |
2014-04-04 00:00:00+00:00 | 16.72 | 17.22 | 16.7 | 17.21 | 110409600 | |
2014-04-07 00:00:00+00:00 | 16.379 | 16.7 | 16.19 | 16.69 | 129464900 | |
2014-04-08 00:00:00+00:00 | 16.44 | 16.5 | 16.25 | 16.37 | 75769700 | |
2014-04-09 00:00:00+00:00 | 16.62 | 16.63 | 16.344 | 16.55 | 83096400 | |
2014-04-10 00:00:00+00:00 | 16.12 | 16.62 | 16.1 | 16.62 | 98390900 | |
2014-04-11 00:00:00+00:00 | 15.77 | 16.129 | 15.62 | 15.86 | 133757100 | |
2014-04-14 00:00:00+00:00 | 16 | 16.219 | 15.78 | 16.05 | 99249400 | |
2014-04-15 00:00:00+00:00 | 16.39 | 16.41 | 15.96 | 16.09 | 134956600 | |
2014-04-16 00:00:00+00:00 | 16.129 | 16.219 | 15.78 | 16.2 | 172948900 | |
2014-04-17 00:00:00+00:00 | 16.149 | 16.25 | 15.93 | 16.14 | 104765500 | |
2014-04-21 00:00:00+00:00 | 16.09 | 16.17 | 16.03 | 16.149 | 51280500 | |
2014-04-22 00:00:00+00:00 | 16.29 | 16.34 | 16.04 | 16.09 | 77888900 | |
2014-04-23 00:00:00+00:00 | 16.37 | 16.4 | 16.23 | 16.3 | 52425600 | |
2014-04-24 00:00:00+00:00 | 16.34 | 16.5 | 16.21 | 16.43 | 73910900 | |
2014-04-25 00:00:00+00:00 | 15.95 | 16.23 | 15.93 | 16.1 | 84617700 | |
2014-04-28 00:00:00+00:00 | 14.95 | 15.41 | 14.86 | 15.33 | 344935200 | |
2014-04-29 00:00:00+00:00 | 15.24 | 15.3 | 14.91 | 15.04 | 155495000 | |
2014-04-30 00:00:00+00:00 | 15.14 | 15.27 | 15.13 | 15.26 | 82359700 | |
2014-05-01 00:00:00+00:00 | 15.09 | 15.217 | 15.03 | 15.14 | 68368300 | |
2014-05-02 00:00:00+00:00 | 15.25 | 15.29 | 15.1 | 15.18 | 73562900 | |
2014-05-05 00:00:00+00:00 | 15.08 | 15.13 | 15.02 | 15.075 | 51669100 | |
2014-05-06 00:00:00+00:00 | 14.73 | 15.03 | 14.72 | 15.03 | 96268300 | |
2014-05-07 00:00:00+00:00 | 14.8 | 14.92 | 14.75 | 14.86 | 91733000 | |
2014-05-08 00:00:00+00:00 | 14.93 | 15.04 | 14.8 | 14.85 | 65232700 | |
2014-05-09 00:00:00+00:00 | 14.74 | 14.985 | 14.67 | 14.95 | 86751600 | |
2014-05-12 00:00:00+00:00 | 15.07 | 15.1 | 14.79 | 14.79 | 70669900 | |
2014-05-13 00:00:00+00:00 | 15.03 | 15.1 | 14.94 | 15.04 | 58469100 | |
2014-05-14 00:00:00+00:00 | 14.84 | 15.02 | 14.81 | 14.99 | 52431900 | |
2014-05-15 00:00:00+00:00 | 14.55 | 14.83 | 14.39 | 14.82 | 104193000 | |
2014-05-16 00:00:00+00:00 | 14.51 | 14.54 | 14.367 | 14.51 | 80476100 | |
2014-05-19 00:00:00+00:00 | 14.67 | 14.7 | 14.38 | 14.425 | 51034700 | |
2014-05-20 00:00:00+00:00 | 14.53 | 14.69 | 14.48 | 14.65 | 60108200 | |
2014-05-21 00:00:00+00:00 | 14.61 | 14.71 | 14.5 | 14.59 | 64808600 | |
2014-05-22 00:00:00+00:00 | 14.71 | 14.72 | 14.55 | 14.58 | 51929400 | |
2014-05-23 00:00:00+00:00 | 14.72 | 14.815 | 14.65 | 14.71 | 51648900 | |
2014-05-27 00:00:00+00:00 | 15.22 | 15.31 | 14.98 | 15 | 125398900 | |
2014-05-28 00:00:00+00:00 | 15.14 | 15.28 | 15.03 | 15.27 | 72481700 | |
2014-05-29 00:00:00+00:00 | 15.15 | 15.21 | 15.1 | 15.12 | 39423000 | |
2014-05-30 00:00:00+00:00 | 15.14 | 15.23 | 15.07 | 15.12 | 45787100 | |
2014-06-02 00:00:00+00:00 | 15.26 | 15.28 | 15.05 | 15.16 | 46374000 | |
2014-06-03 00:00:00+00:00 | 15.21 | 15.28 | 15.11 | 15.2 | 49025400 | |
2014-06-04 00:00:00+00:00 | 15.21 | 15.26 | 15.15 | 15.19 | 39757300 | |
2014-06-05 00:00:00+00:00 | 15.43 | 15.48 | 15.22 | 15.28 | 60300500 | |
2014-06-06 00:00:00+00:00 | 15.59 | 15.65 | 15.43 | 15.45 | 73930500 | |
2014-06-09 00:00:00+00:00 | 15.84 | 15.88 | 15.6 | 15.6 | 78647900 | |
2014-06-10 00:00:00+00:00 | 15.92 | 15.94 | 15.73 | 15.78 | 49852900 | |
2014-06-11 00:00:00+00:00 | 15.59 | 15.82 | 15.55 | 15.69 | 73729300 | |
2014-06-12 00:00:00+00:00 | 15.42 | 15.66 | 15.38 | 15.57 | 66173200 | |
2014-06-13 00:00:00+00:00 | 15.44 | 15.55 | 15.33 | 15.46 | 61588100 | |
2014-06-16 00:00:00+00:00 | 15.28 | 15.31 | 15.18 | 15.3 | 55425400 | |
2014-06-17 00:00:00+00:00 | 15.59 | 15.62 | 15.255 | 15.265 | 58011600 | |
2014-06-18 00:00:00+00:00 | 15.65 | 15.69 | 15.44 | 15.63 | 70220300 | |
2014-06-19 00:00:00+00:00 | 15.55 | 15.67 | 15.5 | 15.66 | 40953900 | |
2014-06-20 00:00:00+00:00 | 15.45 | 15.63 | 15.44 | 15.63 | 54765300 | |
2014-06-23 00:00:00+00:00 | 15.64 | 15.65 | 15.38 | 15.47 | 58492200 | |
2014-06-24 00:00:00+00:00 | 15.49 | 15.7 | 15.44 | 15.555 | 73262500 | |
2014-06-25 00:00:00+00:00 | 15.47 | 15.49 | 15.28 | 15.45 | 71766400 | |
2014-06-26 00:00:00+00:00 | 15.41 | 15.53 | 15.27 | 15.44 | 68519600 | |
2014-06-27 00:00:00+00:00 | 15.33 | 15.42 | 15.31 | 15.38 | 58149200 | |
2014-06-30 00:00:00+00:00 | 15.37 | 15.45 | 15.29 | 15.31 | 47776800 | |
2014-07-01 00:00:00+00:00 | 15.6 | 15.65 | 15.38 | 15.38 | 74193500 | |
2014-07-02 00:00:00+00:00 | 15.85 | 16.03 | 15.76 | 15.78 | 87685800 | |
2014-07-03 00:00:00+00:00 | 16.03 | 16.23 | 16 | 16.07 | 70582300 | |
2014-07-07 00:00:00+00:00 | 15.94 | 16 | 15.83 | 15.99 | 62437100 | |
2014-07-08 00:00:00+00:00 | 15.58 | 15.83 | 15.52 | 15.82 | 73343400 | |
2014-07-09 00:00:00+00:00 | 15.6 | 15.69 | 15.54 | 15.62 | 46415800 | |
2014-07-10 00:00:00+00:00 | 15.44 | 15.53 | 15.25 | 15.33 | 61458500 | |
2014-07-11 00:00:00+00:00 | 15.38 | 15.43 | 15.3 | 15.39 | 56849400 | |
2014-07-14 00:00:00+00:00 | 15.57 | 15.67 | 15.52 | 15.62 | 59181700 | |
2014-07-15 00:00:00+00:00 | 15.81 | 15.85 | 15.66 | 15.75 | 99946700 | |
2014-07-16 00:00:00+00:00 | 15.51 | 15.66 | 15.43 | 15.66 | 123897400 | |
2014-07-17 00:00:00+00:00 | 15.2 | 15.48 | 15.13 | 15.45 | 114839900 | |
2014-07-18 00:00:00+00:00 | 15.49 | 15.5 | 15.25 | 15.27 | 74859200 | |
2014-07-21 00:00:00+00:00 | 15.52 | 15.55 | 15.36 | 15.42 | 61802200 | |
2014-07-22 00:00:00+00:00 | 15.52 | 15.62 | 15.47 | 15.59 | 58331100 | |
2014-07-23 00:00:00+00:00 | 15.52 | 15.63 | 15.51 | 15.52 | 47253300 | |
2014-07-24 00:00:00+00:00 | 15.62 | 15.64 | 15.55 | 15.56 | 46549800 | |
2014-07-25 00:00:00+00:00 | 15.59 | 15.63 | 15.55 | 15.585 | 35627200 | |
2014-07-28 00:00:00+00:00 | 15.5 | 15.61 | 15.46 | 15.585 | 39313000 | |
2014-07-29 00:00:00+00:00 | 15.34 | 15.53 | 15.34 | 15.52 | 51480500 | |
2014-07-30 00:00:00+00:00 | 15.58 | 15.665 | 15.31 | 15.43 | 83243100 | |
2014-07-31 00:00:00+00:00 | 15.25 | 15.55 | 15.25 | 15.44 | 70677300 | |
2014-08-01 00:00:00+00:00 | 14.98 | 15.39 | 14.84 | 15.18 | 115978900 | |
2014-08-04 00:00:00+00:00 | 15.05 | 15.12 | 14.98 | 15.065 | 51954700 | |
2014-08-05 00:00:00+00:00 | 15 | 15.2 | 14.9 | 15.01 | 65071500 | |
2014-08-06 00:00:00+00:00 | 15.2 | 15.36 | 15.14 | 15.14 | 95977900 | |
2014-08-07 00:00:00+00:00 | 15.12 | 15.44 | 15.09 | 15.44 | 80058300 | |
2014-08-08 00:00:00+00:00 | 15.2 | 15.2 | 14.99 | 15.08 | 54495000 | |
2014-08-11 00:00:00+00:00 | 15.22 | 15.27 | 15.15 | 15.26 | 41545100 | |
2014-08-12 00:00:00+00:00 | 15.21 | 15.3 | 15.15 | 15.18 | 33674400 | |
2014-08-13 00:00:00+00:00 | 15.25 | 15.29 | 15.2 | 15.25 | 34394600 | |
2014-08-14 00:00:00+00:00 | 15.32 | 15.32 | 15.26 | 15.26 | 29934600 | |
2014-08-15 00:00:00+00:00 | 15.22 | 15.41 | 15.14 | 15.34 | 61535500 | |
2014-08-18 00:00:00+00:00 | 15.45 | 15.45 | 15.27 | 15.28 | 54968600 | |
2014-08-19 00:00:00+00:00 | 15.45 | 15.65 | 15.44 | 15.52 | 44825500 | |
2014-08-20 00:00:00+00:00 | 15.52 | 15.633 | 15.37 | 15.41 | 57825000 | |
2014-08-21 00:00:00+00:00 | 16.16 | 16.219 | 15.62 | 15.69 | 177294400 | |
2014-08-22 00:00:00+00:00 | 16.129 | 16.29 | 16.05 | 16.16 | 107641800 | |
2014-08-25 00:00:00+00:00 | 16.29 | 16.4 | 16.2 | 16.27 | 89396500 | |
2014-08-26 00:00:00+00:00 | 16.329 | 16.46 | 16.32 | 16.34 | 73323400 | |
2014-08-27 00:00:00+00:00 | 16.2 | 16.39 | 16.14 | 16.37 | 63061800 | |
2014-08-28 00:00:00+00:00 | 16.01 | 16.1 | 15.99 | 16.094 | 62170400 | |
2014-08-29 00:00:00+00:00 | 16.09 | 16.14 | 16.02 | 16.045 | 50106600 | |
2014-09-02 00:00:00+00:00 | 16.27 | 16.28 | 16.059 | 16.14 | 59400400 | |
2014-09-03 00:00:00+00:00 | 16.1 | 16.28 | 16.04 | 16.27 | 67418900 | |
2014-09-04 00:00:00+00:00 | 16.11 | 16.27 | 16.04 | 16.094 | 56378600 | |
2014-09-05 00:00:00+00:00 | 16.02 | 16.07 | 15.9 | 16.05 | 80974900 | |
2014-09-08 00:00:00+00:00 | 16.35 | 16.37 | 16.149 | 16.19 | 99411200 | |
2014-09-09 00:00:00+00:00 | 16.14 | 16.26 | 16.1 | 16.25 | 82428900 | |
2014-09-10 00:00:00+00:00 | 16.36 | 16.4 | 16.129 | 16.155 | 75656100 | |
2014-09-11 00:00:00+00:00 | 16.57 | 16.63 | 16.3 | 16.32 | 106598100 | |
2014-09-12 00:00:00+00:00 | 16.79 | 16.83 | 16.61 | 16.62 | 117118300 | |
2014-09-15 00:00:00+00:00 | 16.74 | 16.93 | 16.62 | 16.795 | 87306900 | |
2014-09-16 00:00:00+00:00 | 16.71 | 16.84 | 16.67 | 16.67 | 64845600 | |
2014-09-17 00:00:00+00:00 | 16.77 | 16.93 | 16.68 | 16.71 | 82813200 | |
2014-09-18 00:00:00+00:00 | 17.04 | 17.15 | 16.87 | 16.88 | 110396000 | |
2014-09-19 00:00:00+00:00 | 16.95 | 17.17 | 16.88 | 17.15 | 88215900 | |
2014-09-22 00:00:00+00:00 | 17.03 | 17.17 | 16.99 | 17.09 | 108394117 | |
2014-09-23 00:00:00+00:00 | 17.05 | 17.2 | 17.03 | 17.05 | 91178038 | |
2014-09-24 00:00:00+00:00 | 17.18 | 17.19 | 16.97 | 17.12 | 85925116 | |
2014-09-25 00:00:00+00:00 | 16.85 | 17.18 | 16.85 | 17.155 | 102101191 | |
2014-09-26 00:00:00+00:00 | 17.03 | 17.05 | 16.89 | 16.91 | 66785876 | |
2014-09-29 00:00:00+00:00 | 17.01 | 17.05 | 16.88 | 16.91 | 68015923 | |
2014-09-30 00:00:00+00:00 | 17.05 | 17.11 | 16.97 | 17.08 | 82143402 | |
2014-10-01 00:00:00+00:00 | 16.82 | 17.09 | 16.8 | 17.07 | 91190551 | |
2014-10-02 00:00:00+00:00 | 16.88 | 16.99 | 16.63 | 16.86 | 118018774 | |
2014-10-03 00:00:00+00:00 | 17.29 | 17.3 | 17.06 | 17.11 | 110782588 | |
2014-10-06 00:00:00+00:00 | 17.29 | 17.41 | 17.22 | 17.37 | 66212066 | |
2014-10-07 00:00:00+00:00 | 16.88 | 17.2 | 16.88 | 17.18 | 91396327 | |
2014-10-08 00:00:00+00:00 | 17.12 | 17.12 | 16.72 | 16.88 | 101178019 | |
2014-10-09 00:00:00+00:00 | 16.59 | 17.11 | 16.55 | 17.04 | 121286578 | |
2014-10-10 00:00:00+00:00 | 16.48 | 16.77 | 16.36 | 16.52 | 129458091 | |
2014-10-13 00:00:00+00:00 | 16.4 | 16.67 | 16.4 | 16.48 | 92674130 | |
2014-10-14 00:00:00+00:00 | 16.52 | 16.63 | 16.36 | 16.51 | 97451881 | |
2014-10-15 00:00:00+00:00 | 15.76 | 16.239 | 15.43 | 16.23 | 216591804 | |
2014-10-16 00:00:00+00:00 | 16.079 | 16.25 | 15.52 | 15.61 | 148136470 | |
2014-10-17 00:00:00+00:00 | 16.21 | 16.41 | 16.16 | 16.25 | 94339180 | |
2014-10-20 00:00:00+00:00 | 16.26 | 16.329 | 16.16 | 16.2 | 76446700 | |
2014-10-21 00:00:00+00:00 | 16.6 | 16.61 | 16.309 | 16.43 | 78226090 | |
2014-10-22 00:00:00+00:00 | 16.4 | 16.7 | 16.37 | 16.59 | 85160035 | |
2014-10-23 00:00:00+00:00 | 16.6 | 16.73 | 16.515 | 16.58 | 68367854 | |
2014-10-24 00:00:00+00:00 | 16.72 | 16.72 | 16.56 | 16.63 | 41904957 | |
2014-10-27 00:00:00+00:00 | 16.59 | 16.69 | 16.5 | 16.68 | 51484734 | |
2014-10-28 00:00:00+00:00 | 16.8 | 16.8 | 16.61 | 16.62 | 71850839 | |
2014-10-29 00:00:00+00:00 | 16.99 | 17.02 | 16.71 | 16.77 | 99875657 | |
2014-10-30 00:00:00+00:00 | 17.03 | 17.12 | 16.84 | 16.96 | 72542662 | |
2014-10-31 00:00:00+00:00 | 17.16 | 17.22 | 17.1 | 17.17 | 82788923 | |
2014-11-03 00:00:00+00:00 | 17.27 | 17.35 | 17.1 | 17.18 | 64048210 | |
2014-11-04 00:00:00+00:00 | 17.21 | 17.3 | 17.02 | 17.22 | 53463983 | |
2014-11-05 00:00:00+00:00 | 17.34 | 17.37 | 17.21 | 17.32 | 58562613 | |
2014-11-06 00:00:00+00:00 | 17.36 | 17.4 | 17.28 | 17.34 | 57175259 | |
2014-11-07 00:00:00+00:00 | 17.36 | 17.38 | 17.22 | 17.31 | 53907826 | |
2014-11-10 00:00:00+00:00 | 17.37 | 17.4 | 17.3 | 17.36 | 54224010 | |
2014-11-11 00:00:00+00:00 | 17.32 | 17.46 | 17.3 | 17.37 | 62236029 | |
2014-11-12 00:00:00+00:00 | 17.29 | 17.3 | 17.07 | 17.24 | 48641530 | |
2014-11-13 00:00:00+00:00 | 17.22 | 17.3 | 17.12 | 17.285 | 50586273 | |
2014-11-14 00:00:00+00:00 | 17.14 | 17.25 | 17.1 | 17.165 | 41116439 | |
2014-11-17 00:00:00+00:00 | 17.09 | 17.14 | 16.97 | 17.06 | 43359749 | |
2014-11-18 00:00:00+00:00 | 17.14 | 17.22 | 17.06 | 17.07 | 39490695 | |
2014-11-19 00:00:00+00:00 | 17.06 | 17.15 | 17.01 | 17.145 | 49409552 | |
2014-11-20 00:00:00+00:00 | 17 | 17.01 | 16.83 | 16.96 | 49641573 | |
2014-11-21 00:00:00+00:00 | 17.12 | 17.19 | 17.06 | 17.15 | 62980975 | |
2014-11-24 00:00:00+00:00 | 17.18 | 17.28 | 17.08 | 17.16 | 52762793 | |
2014-11-25 00:00:00+00:00 | 17.1 | 17.26 | 17.08 | 17.23 | 44786324 | |
2014-11-26 00:00:00+00:00 | 17.11 | 17.15 | 17.04 | 17.12 | 27358440 | |
2014-11-28 00:00:00+00:00 | 17.04 | 17.15 | 17.03 | 17.07 | 27834925 | |
2014-12-01 00:00:00+00:00 | 16.79 | 16.94 | 16.73 | 16.92 | 61173148 | |
2014-12-02 00:00:00+00:00 | 17.15 | 17.15 | 16.86 | 16.87 | 62945173 | |
2014-12-03 00:00:00+00:00 | 17.29 | 17.3 | 17.06 | 17.1 | 70104217 | |
2014-12-04 00:00:00+00:00 | 17.21 | 17.34 | 17.13 | 17.24 | 49761358 | |
2014-12-05 00:00:00+00:00 | 17.68 | 17.71 | 17.35 | 17.41 | 131823990 | |
2014-12-08 00:00:00+00:00 | 17.66 | 17.87 | 17.51 | 17.66 | 100987510 | |
2014-12-09 00:00:00+00:00 | 17.56 | 17.59 | 17.17 | 17.175 | 95939373 | |
2014-12-10 00:00:00+00:00 | 17.38 | 17.68 | 17.37 | 17.48 | 103574081 | |
2014-12-11 00:00:00+00:00 | 17.47 | 17.65 | 17.44 | 17.44 | 80139049 | |
2014-12-12 00:00:00+00:00 | 17.13 | 17.45 | 17.13 | 17.37 | 88836682 | |
2014-12-15 00:00:00+00:00 | 16.85 | 17.31 | 16.76 | 17.3 | 101305256 | |
2014-12-16 00:00:00+00:00 | 16.72 | 17.19 | 16.59 | 16.74 | 96000647 | |
2014-12-17 00:00:00+00:00 | 17.26 | 17.27 | 16.82 | 16.87 | 92164082 | |
2014-12-18 00:00:00+00:00 | 17.53 | 17.53 | 17.34 | 17.47 | 79264648 | |
2014-12-19 00:00:00+00:00 | 17.62 | 17.7 | 17.49 | 17.49 | 105588213 | |
2014-12-22 00:00:00+00:00 | 17.71 | 17.73 | 17.55 | 17.65 | 70912446 | |
2014-12-23 00:00:00+00:00 | 17.93 | 17.99 | 17.78 | 17.83 | 94340090 | |
2014-12-24 00:00:00+00:00 | 17.98 | 18.1 | 17.92 | 17.965 | 35091254 | |
2014-12-26 00:00:00+00:00 | 17.98 | 18.05 | 17.95 | 18.02 | 34324407 | |
2014-12-29 00:00:00+00:00 | 18.11 | 18.19 | 17.91 | 17.97 | 58370887 | |
2014-12-30 00:00:00+00:00 | 18.13 | 18.18 | 18.01 | 18.04 | 41433523 | |
2014-12-31 00:00:00+00:00 | 17.89 | 18.21 | 17.89 | 18.19 | 57819572 | |
2015-01-02 00:00:00+00:00 | 17.9 | 18.03 | 17.68 | 17.99 | 48915838 | |
2015-01-05 00:00:00+00:00 | 17.38 | 17.81 | 17.29 | 17.785 | 105605485 | |
2015-01-06 00:00:00+00:00 | 16.86 | 17.44 | 16.78 | 17.42 | 144847119 | |
2015-01-07 00:00:00+00:00 | 16.94 | 17.18 | 16.87 | 17.14 | 104563015 | |
2015-01-08 00:00:00+00:00 | 17.29 | 17.34 | 17.1 | 17.16 | 73310178 | |
2015-01-09 00:00:00+00:00 | 16.98 | 17.38 | 16.95 | 17.38 | 82933457 | |
2015-01-12 00:00:00+00:00 | 16.68 | 17.03 | 16.66 | 17.025 | 92704852 | |
2015-01-13 00:00:00+00:00 | 16.45 | 16.89 | 16.32 | 16.82 | 102036015 | |
2015-01-14 00:00:00+00:00 | 16.04 | 16.215 | 15.77 | 16 | 164685486 | |
2015-01-15 00:00:00+00:00 | 15.2 | 15.76 | 15.15 | 15.59 | 193140606 | |
2015-01-16 00:00:00+00:00 | 15.38 | 15.4 | 14.97 | 15.16 | 149802031 | |
2015-01-20 00:00:00+00:00 | 15.26 | 15.63 | 15.2 | 15.59 | 123048254 | |
2015-01-21 00:00:00+00:00 | 15.41 | 15.57 | 15.15 | 15.28 | 100658196 | |
2015-01-22 00:00:00+00:00 | 16.09 | 16.19 | 15.43 | 15.55 | 183015219 | |
2015-01-23 00:00:00+00:00 | 15.73 | 16.19 | 15.73 | 16.04 | 103268518 | |
2015-01-26 00:00:00+00:00 | 15.85 | 15.93 | 15.7 | 15.72 | 69948779 | |
2015-01-27 00:00:00+00:00 | 15.63 | 15.79 | 15.48 | 15.55 | 86184227 | |
2015-01-28 00:00:00+00:00 | 15.2 | 15.73 | 15.18 | 15.72 | 105298688 | |
2015-01-29 00:00:00+00:00 | 15.43 | 15.49 | 15.2 | 15.31 | 76055669 | |
2015-01-30 00:00:00+00:00 | 15.15 | 15.47 | 15.15 | 15.23 | 99755260 | |
2015-02-02 00:00:00+00:00 | 15.46 | 15.49 | 15.12 | 15.27 | 101557039 | |
2015-02-03 00:00:00+00:00 | 15.89 | 15.93 | 15.61 | 15.62 | 105159425 | |
2015-02-04 00:00:00+00:00 | 15.79 | 16 | 15.75 | 15.79 | 83814535 | |
2015-02-05 00:00:00+00:00 | 15.97 | 16.09 | 15.9 | 15.98 | 92166071 | |
2015-02-06 00:00:00+00:00 | 16.49 | 16.75 | 16.219 | 16.3 | 160764889 | |
2015-02-09 00:00:00+00:00 | 16.35 | 16.5 | 16.25 | 16.35 | 95520604 | |
2015-02-10 00:00:00+00:00 | 16.42 | 16.62 | 16.35 | 16.56 | 100322839 | |
2015-02-11 00:00:00+00:00 | 16.36 | 16.45 | 16.21 | 16.329 | 99884358 | |
2015-02-12 00:00:00+00:00 | 16.67 | 16.73 | 16.37 | 16.41 | 116394126 | |
2015-02-13 00:00:00+00:00 | 16.61 | 16.79 | 16.54 | 16.73 | 93632718 | |
2015-02-17 00:00:00+00:00 | 16.63 | 16.68 | 16.37 | 16.525 | 93339898 | |
2015-02-18 00:00:00+00:00 | 16.3 | 16.56 | 16.239 | 16.55 | 84024165 | |
2015-02-19 00:00:00+00:00 | 16.21 | 16.379 | 16.079 | 16.21 | 83591315 | |
2015-02-20 00:00:00+00:00 | 16.379 | 16.43 | 16.01 | 16.14 | 89731324 | |
2015-02-23 00:00:00+00:00 | 16.2 | 16.32 | 16.1 | 16.32 | 103841653 | |
2015-02-24 00:00:00+00:00 | 16.379 | 16.55 | 16.25 | 16.27 | 74109596 | |
2015-02-25 00:00:00+00:00 | 16.49 | 16.5 | 16.32 | 16.37 | 57200974 | |
2015-02-26 00:00:00+00:00 | 16.04 | 16.44 | 15.9 | 16.42 | 161030274 | |
2015-02-27 00:00:00+00:00 | 15.81 | 15.88 | 15.62 | 15.78 | 130419006 | |
2015-03-02 00:00:00+00:00 | 16.01 | 16.03 | 15.715 | 15.79 | 71379858 | |
2015-03-03 00:00:00+00:00 | 16.04 | 16.149 | 15.96 | 16.03 | 65627992 | |
2015-03-04 00:00:00+00:00 | 15.84 | 15.98 | 15.76 | 15.96 | 78084858 | |
2015-03-05 00:00:00+00:00 | 16 | 16.03 | 15.745 | 15.92 | 69509958 | |
2015-03-06 00:00:00+00:00 | 16.219 | 16.62 | 16.16 | 16.41 | 163207226 | |
2015-03-09 00:00:00+00:00 | 16.17 | 16.329 | 16.12 | 16.309 | 72912722 | |
2015-03-10 00:00:00+00:00 | 15.79 | 16.129 | 15.79 | 16.04 | 85633552 | |
2015-03-11 00:00:00+00:00 | 16.11 | 16.16 | 15.87 | 15.92 | 85531260 | |
2015-03-12 00:00:00+00:00 | 16.09 | 16.21 | 15.9 | 16.05 | 126376330 | |
2015-03-13 00:00:00+00:00 | 16.09 | 16.149 | 15.94 | 16.079 | 88561897 | |
2015-03-16 00:00:00+00:00 | 16.129 | 16.219 | 15.85 | 16.11 | 63374831 | |
2015-03-17 00:00:00+00:00 | 16.09 | 16.12 | 15.96 | 16.07 | 67297458 | |
2015-03-18 00:00:00+00:00 | 15.98 | 16.1 | 15.91 | 16.01 | 86363525 | |
2015-03-19 00:00:00+00:00 | 15.61 | 15.97 | 15.61 | 15.96 | 109092453 | |
2015-03-20 00:00:00+00:00 | 15.84 | 15.93 | 15.64 | 15.695 | 99867679 | |
2015-03-23 00:00:00+00:00 | 15.72 | 15.88 | 15.72 | 15.82 | 72662013 | |
2015-03-24 00:00:00+00:00 | 15.61 | 15.8 | 15.61 | 15.73 | 76599529 | |
2015-03-25 00:00:00+00:00 | 15.41 | 15.65 | 15.4 | 15.63 | 88466042 | |
2015-03-26 00:00:00+00:00 | 15.42 | 15.54 | 15.26 | 15.385 | 76699386 | |
2015-03-27 00:00:00+00:00 | 15.31 | 15.49 | 15.27 | 15.48 | 72913154 | |
2015-03-30 00:00:00+00:00 | 15.52 | 15.61 | 15.4 | 15.42 | 70819151 | |
2015-03-31 00:00:00+00:00 | 15.39 | 15.52 | 15.38 | 15.5 | 61333548 | |
2015-04-01 00:00:00+00:00 | 15.41 | 15.46 | 15.25 | 15.42 | 73153854 | |
2015-04-02 00:00:00+00:00 | 15.54 | 15.62 | 15.43 | 15.43 | 50443508 | |
2015-04-06 00:00:00+00:00 | 15.51 | 15.6 | 15.34 | 15.39 | 51161893 | |
2015-04-07 00:00:00+00:00 | 15.46 | 15.65 | 15.45 | 15.53 | 49566392 | |
2015-04-08 00:00:00+00:00 | 15.61 | 15.74 | 15.5 | 15.51 | 71732476 | |
2015-04-09 00:00:00+00:00 | 15.71 | 15.76 | 15.53 | 15.63 | 44719259 | |
2015-04-10 00:00:00+00:00 | 15.72 | 15.79 | 15.6 | 15.69 | 43801947 | |
2015-04-13 00:00:00+00:00 | 15.8 | 15.86 | 15.74 | 15.78 | 49191244 | |
2015-04-14 00:00:00+00:00 | 15.82 | 15.95 | 15.71 | 15.88 | 84385405 | |
2015-04-15 00:00:00+00:00 | 15.64 | 15.85 | 15.59 | 15.7 | 124479058 | |
2015-04-16 00:00:00+00:00 | 15.79 | 15.94 | 15.58 | 15.64 | 105591004 | |
2015-04-17 00:00:00+00:00 | 15.56 | 15.75 | 15.5 | 15.71 | 88852590 | |
2015-04-20 00:00:00+00:00 | 15.57 | 15.69 | 15.56 | 15.62 | 53649399 | |
2015-04-21 00:00:00+00:00 | 15.5 | 15.64 | 15.43 | 15.6 | 64033133 | |
2015-04-22 00:00:00+00:00 | 15.74 | 15.83 | 15.49 | 15.55 | 73075265 | |
2015-04-23 00:00:00+00:00 | 15.69 | 15.8 | 15.68 | 15.72 | 50234512 | |
2015-04-24 00:00:00+00:00 | 15.64 | 15.75 | 15.61 | 15.71 | 40766109 | |
2015-04-27 00:00:00+00:00 | 15.56 | 15.76 | 15.56 | 15.63 | 73436466 | |
2015-04-28 00:00:00+00:00 | 15.65 | 15.71 | 15.5 | 15.58 | 58076725 | |
2015-04-29 00:00:00+00:00 | 15.98 | 16.04 | 15.57 | 15.6 | 134438101 | |
2015-04-30 00:00:00+00:00 | 15.93 | 16.05 | 15.795 | 16 | 78201256 | |
2015-05-01 00:00:00+00:00 | 16.11 | 16.149 | 15.92 | 16 | 78171362 | |
2015-05-04 00:00:00+00:00 | 16.44 | 16.45 | 16.12 | 16.14 | 76504381 | |
2015-05-05 00:00:00+00:00 | 16.35 | 16.61 | 16.329 | 16.42 | 106053204 | |
2015-05-06 00:00:00+00:00 | 16.29 | 16.49 | 16.079 | 16.36 | 96396889 | |
2015-05-07 00:00:00+00:00 | 16.239 | 16.329 | 16.09 | 16.23 | 73740256 | |
2015-05-08 00:00:00+00:00 | 16.45 | 16.46 | 16.219 | 16.32 | 86321384 | |
2015-05-11 00:00:00+00:00 | 16.49 | 16.59 | 16.43 | 16.45 | 56185501 | |
2015-05-12 00:00:00+00:00 | 16.43 | 16.48 | 16.35 | 16.46 | 59589310 | |
2015-05-13 00:00:00+00:00 | 16.47 | 16.52 | 16.36 | 16.44 | 47330638 | |
2015-05-14 00:00:00+00:00 | 16.52 | 16.59 | 16.45 | 16.53 | 55380166 | |
2015-05-15 00:00:00+00:00 | 16.35 | 16.52 | 16.3 | 16.52 | 54868077 | |
2015-05-18 00:00:00+00:00 | 16.51 | 16.54 | 16.309 | 16.309 | 51047229 | |
2015-05-19 00:00:00+00:00 | 16.77 | 16.78 | 16.58 | 16.59 | 89115475 | |
2015-05-20 00:00:00+00:00 | 16.74 | 16.85 | 16.63 | 16.78 | 67640605 | |
2015-05-21 00:00:00+00:00 | 16.73 | 16.75 | 16.56 | 16.71 | 52045653 | |
2015-05-22 00:00:00+00:00 | 16.75 | 16.8 | 16.705 | 16.72 | 47032906 | |
2015-05-26 00:00:00+00:00 | 16.5 | 16.73 | 16.43 | 16.73 | 99306419 | |
2015-05-27 00:00:00+00:00 | 16.74 | 16.75 | 16.54 | 16.56 | 69999158 | |
2015-05-28 00:00:00+00:00 | 16.67 | 16.73 | 16.58 | 16.72 | 60969979 | |
2015-05-29 00:00:00+00:00 | 16.5 | 16.67 | 16.47 | 16.66 | 74246895 | |
2015-06-01 00:00:00+00:00 | 16.55 | 16.64 | 16.47 | 16.58 | 62912633 | |
2015-06-02 00:00:00+00:00 | 16.72 | 16.76 | 16.5 | 16.52 | 65498460 | |
2015-06-03 00:00:00+00:00 | 16.93 | 17.02 | 16.74 | 16.79 | 89614868 | |
2015-06-04 00:00:00+00:00 | 16.78 | 16.98 | 16.72 | 16.87 | 61025453 | |
2015-06-05 00:00:00+00:00 | 17.19 | 17.35 | 16.965 | 17.04 | 119087408 | |
2015-06-08 00:00:00+00:00 | 17.08 | 17.33 | 17.05 | 17.27 | 69749600 | |
2015-06-09 00:00:00+00:00 | 17.31 | 17.35 | 17 | 17.05 | 82104858 | |
2015-06-10 00:00:00+00:00 | 17.59 | 17.62 | 17.36 | 17.39 | 102643147 | |
2015-06-11 00:00:00+00:00 | 17.49 | 17.675 | 17.44 | 17.62 | 77191527 | |
2015-06-12 00:00:00+00:00 | 17.49 | 17.58 | 17.36 | 17.45 | 53149442 | |
2015-06-15 00:00:00+00:00 | 17.47 | 17.5 | 17.25 | 17.33 | 68053373 | |
2015-06-16 00:00:00+00:00 | 17.55 | 17.56 | 17.37 | 17.46 | 47291181 | |
2015-06-17 00:00:00+00:00 | 17.37 | 17.595 | 17.3 | 17.59 | 88712103 | |
2015-06-18 00:00:00+00:00 | 17.38 | 17.4 | 17.22 | 17.35 | 97574677 | |
2015-06-19 00:00:00+00:00 | 17.17 | 17.39 | 17.12 | 17.28 | 81165957 | |
2015-06-22 00:00:00+00:00 | 17.47 | 17.53 | 17.35 | 17.36 | 58447195 | |
2015-06-23 00:00:00+00:00 | 17.67 | 17.72 | 17.56 | 17.56 | 63865708 | |
2015-06-24 00:00:00+00:00 | 17.49 | 17.69 | 17.45 | 17.53 | 59052661 | |
2015-06-25 00:00:00+00:00 | 17.37 | 17.62 | 17.35 | 17.57 | 59704817 | |
2015-06-26 00:00:00+00:00 | 17.41 | 17.52 | 17.35 | 17.48 | 62897209 | |
2015-06-29 00:00:00+00:00 | 16.89 | 17.25 | 16.86 | 17.13 | 98515800 | |
2015-06-30 00:00:00+00:00 | 17.02 | 17.13 | 16.85 | 17.08 | 89039753 | |
2015-07-01 00:00:00+00:00 | 17.22 | 17.31 | 17.09 | 17.25 | 62317352 | |
2015-07-02 00:00:00+00:00 | 17.03 | 17.21 | 16.89 | 17.16 | 64736475 | |
2015-07-06 00:00:00+00:00 | 16.94 | 17.01 | 16.71 | 16.78 | 58666946 | |
2015-07-07 00:00:00+00:00 | 16.69 | 16.93 | 16.34 | 16.9 | 115399859 | |
2015-07-08 00:00:00+00:00 | 16.25 | 16.5 | 16.219 | 16.42 | 83032960 | |
2015-07-09 00:00:00+00:00 | 16.48 | 16.72 | 16.45 | 16.53 | 81534426 | |
2015-07-10 00:00:00+00:00 | 16.7 | 16.84 | 16.66 | 16.795 | 72941057 | |
2015-07-13 00:00:00+00:00 | 17.02 | 17.05 | 16.9 | 16.97 | 67934294 | |
2015-07-14 00:00:00+00:00 | 17.13 | 17.15 | 16.86 | 16.93 | 69933168 | |
2015-07-15 00:00:00+00:00 | 17.68 | 17.85 | 17.49 | 17.53 | 173099934 | |
2015-07-16 00:00:00+00:00 | 17.95 | 18.07 | 17.86 | 17.91 | 117054318 | |
2015-07-17 00:00:00+00:00 | 18.1 | 18.16 | 17.89 | 17.96 | 96860681 | |
2015-07-20 00:00:00+00:00 | 18.12 | 18.23 | 18.08 | 18.15 | 71909590 | |
2015-07-21 00:00:00+00:00 | 18.08 | 18.17 | 18.02 | 18.11 | 63684538 | |
2015-07-22 00:00:00+00:00 | 18.45 | 18.48 | 18.03 | 18.03 | 110097735 | |
2015-07-23 00:00:00+00:00 | 18.18 | 18.43 | 18.08 | 18.39 | 103650135 | |
2015-07-24 00:00:00+00:00 | 17.9 | 18.23 | 17.84 | 18.2 | 86296535 | |
2015-07-27 00:00:00+00:00 | 17.67 | 17.78 | 17.5 | 17.66 | 93231917 | |
2015-07-28 00:00:00+00:00 | 17.88 | 17.89 | 17.6 | 17.79 | 82154817 | |
2015-07-29 00:00:00+00:00 | 18.16 | 18.2 | 17.92 | 17.95 | 96388569 | |
2015-07-30 00:00:00+00:00 | 18.13 | 18.29 | 18.05 | 18.2 | 65660338 | |
2015-07-31 00:00:00+00:00 | 17.88 | 18.06 | 17.86 | 18.03 | 67176946 | |
2015-08-03 00:00:00+00:00 | 17.77 | 17.965 | 17.64 | 17.91 | 62341245 | |
2015-08-04 00:00:00+00:00 | 17.8 | 17.93 | 17.71 | 17.79 | 69741507 | |
2015-08-05 00:00:00+00:00 | 17.87 | 18.045 | 17.8 | 17.93 | 65044706 | |
2015-08-06 00:00:00+00:00 | 17.81 | 17.975 | 17.77 | 17.91 | 47582061 | |
2015-08-07 00:00:00+00:00 | 17.75 | 18.07 | 17.6 | 17.92 | 71020703 | |
2015-08-10 00:00:00+00:00 | 18.04 | 18.05 | 17.86 | 17.87 | 65765231 | |
2015-08-11 00:00:00+00:00 | 17.79 | 17.97 | 17.72 | 17.85 | 65868585 | |
2015-08-12 00:00:00+00:00 | 17.52 | 17.645 | 17.02 | 17.62 | 143433552 | |
2015-08-13 00:00:00+00:00 | 17.62 | 17.69 | 17.44 | 17.56 | 67406457 | |
2015-08-14 00:00:00+00:00 | 17.7 | 17.7 | 17.54 | 17.57 | 52710258 | |
2015-08-17 00:00:00+00:00 | 17.77 | 17.81 | 17.56 | 17.61 | 42309963 | |
2015-08-18 00:00:00+00:00 | 17.69 | 17.9 | 17.65 | 17.72 | 50346194 | |
2015-08-19 00:00:00+00:00 | 17.46 | 17.69 | 17.44 | 17.55 | 78522838 | |
2015-08-20 00:00:00+00:00 | 16.72 | 17.2 | 16.72 | 17.19 | 149185431 | |
2015-08-21 00:00:00+00:00 | 16.1 | 16.66 | 16.1 | 16.195 | 148684325 | |
2015-08-24 00:00:00+00:00 | 15.29 | 15.98 | 14.6 | 15.02 | 214559086 | |
2015-08-25 00:00:00+00:00 | 15.26 | 16.204 | 15.25 | 16.2 | 194227130 | |
2015-08-26 00:00:00+00:00 | 16.059 | 16.059 | 15.335 | 15.81 | 173838973 | |
2015-08-27 00:00:00+00:00 | 16.44 | 16.45 | 16.09 | 16.36 | 124129428 | |
2015-08-28 00:00:00+00:00 | 16.36 | 16.44 | 16.2 | 16.4 | 75850400 | |
2015-08-31 00:00:00+00:00 | 16.34 | 16.375 | 16.18 | 16.329 | 66685093 | |
2015-09-01 00:00:00+00:00 | 15.58 | 16.05 | 15.46 | 15.95 | 118974368 | |
2015-09-02 00:00:00+00:00 | 15.85 | 15.86 | 15.57 | 15.81 | 74737988 | |
2015-09-03 00:00:00+00:00 | 15.94 | 16.19 | 15.87 | 15.97 | 77175757 | |
2015-09-04 00:00:00+00:00 | 15.65 | 15.86 | 15.5 | 15.79 | 104539500 | |
2015-09-08 00:00:00+00:00 | 16.16 | 16.16 | 15.9 | 15.96 | 73939623 | |
2015-09-09 00:00:00+00:00 | 15.9 | 16.46 | 15.83 | 16.37 | 70466139 | |
2015-09-10 00:00:00+00:00 | 16.04 | 16.11 | 15.81 | 15.87 | 71085643 | |
2015-09-11 00:00:00+00:00 | 16.04 | 16.059 | 15.9 | 15.99 | 54275460 | |
2015-09-14 00:00:00+00:00 | 15.96 | 16.07 | 15.9 | 15.97 | 50640946 | |
2015-09-15 00:00:00+00:00 | 16.309 | 16.39 | 16.01 | 16.04 | 62327105 | |
2015-09-16 00:00:00+00:00 | 16.329 | 16.39 | 16.09 | 16.35 | 83721453 | |
2015-09-17 00:00:00+00:00 | 15.86 | 16.48 | 15.78 | 16.29 | 119088186 | |
2015-09-18 00:00:00+00:00 | 15.56 | 15.71 | 15.5 | 15.68 | 136531198 | |
2015-09-21 00:00:00+00:00 | 15.7 | 15.75 | 15.57 | 15.67 | 79081690 | |
2015-09-22 00:00:00+00:00 | 15.57 | 15.58 | 15.45 | 15.49 | 92227216 | |
2015-09-23 00:00:00+00:00 | 15.72 | 15.85 | 15.58 | 15.6 | 78117507 | |
2015-09-24 00:00:00+00:00 | 15.55 | 15.62 | 15.4 | 15.535 | 99432185 | |
2015-09-25 00:00:00+00:00 | 15.89 | 16.02 | 15.81 | 15.88 | 88567972 | |
2015-09-28 00:00:00+00:00 | 15.47 | 15.82 | 15.38 | 15.79 | 86826540 | |
2015-09-29 00:00:00+00:00 | 15.35 | 15.53 | 15.25 | 15.5 | 79804455 | |
2015-09-30 00:00:00+00:00 | 15.58 | 15.6 | 15.32 | 15.55 | 71074169 | |
2015-10-01 00:00:00+00:00 | 15.55 | 15.64 | 15.36 | 15.52 | 71471505 | |
2015-10-02 00:00:00+00:00 | 15.38 | 15.38 | 14.63 | 15.08 | 181788433 | |
2015-10-05 00:00:00+00:00 | 15.69 | 15.79 | 15.44 | 15.45 | 75293923 | |
2015-10-06 00:00:00+00:00 | 15.69 | 15.82 | 15.57 | 15.68 | 67445122 | |
2015-10-07 00:00:00+00:00 | 15.75 | 15.93 | 15.57 | 15.78 | 66997033 | |
2015-10-08 00:00:00+00:00 | 15.75 | 15.78 | 15.51 | 15.72 | 79056883 | |
2015-10-09 00:00:00+00:00 | 15.58 | 15.82 | 15.5 | 15.75 | 76667097 | |
2015-10-12 00:00:00+00:00 | 15.52 | 15.62 | 15.43 | 15.6 | 50725777 | |
2015-10-13 00:00:00+00:00 | 15.52 | 15.59 | 15.4 | 15.45 | 73689847 | |
2015-10-14 00:00:00+00:00 | 15.64 | 15.92 | 15.55 | 15.77 | 122207202 | |
2015-10-15 00:00:00+00:00 | 16.19 | 16.28 | 15.72 | 15.81 | 127028772 | |
2015-10-16 00:00:00+00:00 | 16.12 | 16.29 | 16.03 | 16.285 | 66966667 | |
2015-10-19 00:00:00+00:00 | 16.14 | 16.21 | 15.98 | 16 | 62953541 | |
2015-10-20 00:00:00+00:00 | 16.2 | 16.29 | 16.1 | 16.16 | 50797790 | |
2015-10-21 00:00:00+00:00 | 15.9 | 16.29 | 15.9 | 16.26 | 64825296 | |
2015-10-22 00:00:00+00:00 | 16.16 | 16.2 | 15.995 | 16 | 74099343 | |
2015-10-23 00:00:00+00:00 | 16.52 | 16.55 | 16.25 | 16.29 | 82285833 | |
2015-10-26 00:00:00+00:00 | 16.51 | 16.55 | 16.34 | 16.52 | 67702832 | |
2015-10-27 00:00:00+00:00 | 16.4 | 16.47 | 16.3 | 16.4 | 56147451 | |
2015-10-28 00:00:00+00:00 | 17.28 | 17.31 | 16.445 | 16.45 | 147672009 | |
2015-10-29 00:00:00+00:00 | 17.09 | 17.44 | 16.97 | 17.16 | 95510244 | |
2015-10-30 00:00:00+00:00 | 16.78 | 17.18 | 16.76 | 17.18 | 87645788 | |
2015-11-02 00:00:00+00:00 | 17.06 | 17.14 | 16.87 | 16.9 | 56894296 | |
2015-11-03 00:00:00+00:00 | 17.18 | 17.26 | 16.99 | 17.01 | 66096236 | |
2015-11-04 00:00:00+00:00 | 17.01 | 17.31 | 16.96 | 17.3 | 78688573 | |
2015-11-05 00:00:00+00:00 | 17.31 | 17.37 | 17.03 | 17.03 | 85477288 | |
2015-11-06 00:00:00+00:00 | 17.95 | 18.09 | 17.76 | 17.84 | 157987106 | |
2015-11-09 00:00:00+00:00 | 17.68 | 18.08 | 17.56 | 18.03 | 129842355 | |
2015-11-10 00:00:00+00:00 | 17.85 | 17.94 | 17.56 | 17.63 | 59845097 | |
2015-11-11 00:00:00+00:00 | 17.75 | 17.99 | 17.68 | 17.99 | 59235017 | |
2015-11-12 00:00:00+00:00 | 17.37 | 17.63 | 17.35 | 17.58 | 77712229 | |
2015-11-13 00:00:00+00:00 | 17.2 | 17.31 | 17.09 | 17.13 | 103497332 | |
2015-11-16 00:00:00+00:00 | 17.43 | 17.46 | 17.01 | 17.13 | 70377293 | |
2015-11-17 00:00:00+00:00 | 17.42 | 17.6 | 17.34 | 17.5 | 70480628 | |
2015-11-18 00:00:00+00:00 | 17.84 | 17.87 | 17.43 | 17.43 | 85633743 | |
2015-11-19 00:00:00+00:00 | 17.69 | 17.85 | 17.62 | 17.78 | 51812145 | |
2015-11-20 00:00:00+00:00 | 17.65 | 17.833 | 17.59 | 17.81 | 56264227 | |
2015-11-23 00:00:00+00:00 | 17.47 | 17.73 | 17.46 | 17.62 | 50350218 | |
2015-11-24 00:00:00+00:00 | 17.47 | 17.57 | 17.25 | 17.26 | 58724161 | |
2015-11-25 00:00:00+00:00 | 17.44 | 17.57 | 17.41 | 17.51 | 34926498 | |
2015-11-27 00:00:00+00:00 | 17.48 | 17.5 | 17.33 | 17.46 | 22937528 | |
2015-11-30 00:00:00+00:00 | 17.43 | 17.58 | 17.42 | 17.48 | 61852667 | |
2015-12-01 00:00:00+00:00 | 17.81 | 17.81 | 17.48 | 17.52 | 74298721 | |
2015-12-02 00:00:00+00:00 | 17.62 | 17.89 | 17.55 | 17.88 | 76857387 | |
2015-12-03 00:00:00+00:00 | 17.3 | 17.765 | 17.25 | 17.68 | 95781361 | |
2015-12-04 00:00:00+00:00 | 17.8 | 17.83 | 17.38 | 17.44 | 102999510 | |
2015-12-07 00:00:00+00:00 | 17.54 | 17.8 | 17.44 | 17.79 | 81623517 | |
2015-12-08 00:00:00+00:00 | 17.19 | 17.459 | 17.13 | 17.39 | 84772872 | |
2015-12-09 00:00:00+00:00 | 17.1 | 17.38 | 16.87 | 17.11 | 82418050 | |
2015-12-10 00:00:00+00:00 | 17.2 | 17.41 | 16.96 | 17.15 | 68375559 | |
2015-12-11 00:00:00+00:00 | 16.73 | 17.06 | 16.64 | 16.97 | 91451006 | |
2015-12-14 00:00:00+00:00 | 16.8 | 16.89 | 16.495 | 16.76 | 121067256 | |
2015-12-15 00:00:00+00:00 | 17.42 | 17.49 | 16.99 | 17.02 | 99737154 | |
2015-12-16 00:00:00+00:00 | 17.75 | 17.78 | 17.23 | 17.65 | 171512739 | |
2015-12-17 00:00:00+00:00 | 17.3 | 17.83 | 17.3 | 17.8 | 97265726 | |
2015-12-18 00:00:00+00:00 | 16.76 | 17.265 | 16.76 | 17.19 | 136520314 | |
2015-12-21 00:00:00+00:00 | 16.97 | 17.03 | 16.77 | 16.98 | 65133240 | |
2015-12-22 00:00:00+00:00 | 17.08 | 17.11 | 16.85 | 17.05 | 56144957 | |
2015-12-23 00:00:00+00:00 | 17.34 | 17.34 | 17.095 | 17.16 | 65700709 | |
2015-12-24 00:00:00+00:00 | 17.27 | 17.38 | 17.22 | 17.32 | 29373415 | |
2015-12-28 00:00:00+00:00 | 17.13 | 17.23 | 16.98 | 17.22 | 41759993 | |
2015-12-29 00:00:00+00:00 | 17.28 | 17.35 | 17.16 | 17.25 | 45628449 | |
2015-12-30 00:00:00+00:00 | 17.05 | 17.24 | 17.04 | 17.2 | 35035518 | |
2015-12-31 00:00:00+00:00 | 16.83 | 17.07 | 16.83 | 17.01 | 47106760 | |
2016-01-04 00:00:00+00:00 | 16.43 | 16.49 | 16.25 | 16.45 | 114855342 | |
2016-01-05 00:00:00+00:00 | 16.43 | 16.59 | 16.23 | 16.52 | 66591885 | |
2016-01-06 00:00:00+00:00 | 16.079 | 16.29 | 16.02 | 16.19 | 102669915 | |
2016-01-07 00:00:00+00:00 | 15.5 | 15.9 | 15.44 | 15.73 | 116188238 | |
2016-01-08 00:00:00+00:00 | 15.2 | 15.94 | 15.16 | 15.94 | 124670826 | |
2016-01-11 00:00:00+00:00 | 15.31 | 15.37 | 14.94 | 15.26 | 104581569 | |
2016-01-12 00:00:00+00:00 | 15.31 | 15.58 | 15.06 | 15.54 | 99949556 | |
2016-01-13 00:00:00+00:00 | 14.9 | 15.52 | 14.85 | 15.47 | 119412972 | |
2016-01-14 00:00:00+00:00 | 14.99 | 15.2 | 14.65 | 15.01 | 125809900 | |
2016-01-15 00:00:00+00:00 | 14.46 | 14.66 | 14.13 | 14.41 | 172228199 | |
2016-01-19 00:00:00+00:00 | 14.24 | 14.79 | 14.01 | 14.69 | 185641283 | |
2016-01-20 00:00:00+00:00 | 13.69 | 14 | 13.27 | 13.79 | 249990820 | |
2016-01-21 00:00:00+00:00 | 13.36 | 13.84 | 13.25 | 13.67 | 188802260 | |
2016-01-22 00:00:00+00:00 | 13.56 | 13.72 | 13.47 | 13.65 | 169961438 | |
2016-01-25 00:00:00+00:00 | 12.96 | 13.55 | 12.94 | 13.54 | 186248993 | |
2016-01-26 00:00:00+00:00 | 13.31 | 13.35 | 13.04 | 13.07 | 124183841 | |
2016-01-27 00:00:00+00:00 | 13.36 | 13.73 | 13.19 | 13.2 | 123984025 | |
2016-01-28 00:00:00+00:00 | 13.53 | 13.705 | 13.26 | 13.59 | 110951618 | |
2016-01-29 00:00:00+00:00 | 14.14 | 14.15 | 13.59 | 13.66 | 159925361 | |
2016-02-01 00:00:00+00:00 | 13.96 | 14.09 | 13.8 | 14.05 | 105699706 | |
2016-02-02 00:00:00+00:00 | 13.23 | 13.745 | 13.13 | 13.74 | 146990214 | |
2016-02-03 00:00:00+00:00 | 13.03 | 13.29 | 12.52 | 13.28 | 257591135 | |
2016-02-04 00:00:00+00:00 | 13.25 | 13.59 | 12.885 | 12.89 | 179319106 | |
2016-02-05 00:00:00+00:00 | 12.95 | 13.39 | 12.89 | 13.32 | 125785587 | |
2016-02-08 00:00:00+00:00 | 12.27 | 12.7 | 12.13 | 12.67 | 221271714 | |
2016-02-09 00:00:00+00:00 | 12.2 | 12.43 | 11.96 | 11.99 | 247227500 | |
2016-02-10 00:00:00+00:00 | 11.98 | 12.54 | 11.91 | 12.42 | 227371472 | |
2016-02-11 00:00:00+00:00 | 11.16 | 11.55 | 10.99 | 11.46 | 374686686 | |
2016-02-12 00:00:00+00:00 | 11.95 | 12.03 | 11.4 | 11.48 | 245320697 | |
2016-02-16 00:00:00+00:00 | 12.25 | 12.39 | 12.11 | 12.38 | 194138587 | |
2016-02-17 00:00:00+00:00 | 12.56 | 12.69 | 12.47 | 12.57 | 221671695 | |
2016-02-18 00:00:00+00:00 | 12.24 | 12.74 | 12.1 | 12.71 | 161492893 | |
2016-02-19 00:00:00+00:00 | 12.13 | 12.24 | 11.98 | 12.22 | 120974304 | |
2016-02-22 00:00:00+00:00 | 12.54 | 12.54 | 12.36 | 12.38 | 88785977 | |
2016-02-23 00:00:00+00:00 | 12.16 | 12.475 | 12.1 | 12.47 | 104401671 | |
2016-02-24 00:00:00+00:00 | 12.13 | 12.15 | 11.65 | 11.96 | 159423970 | |
2016-02-25 00:00:00+00:00 | 12.32 | 12.33 | 12.09 | 12.14 | 103003028 | |
2016-02-26 00:00:00+00:00 | 12.7 | 12.95 | 12.4 | 12.49 | 160262798 | |
2016-02-29 00:00:00+00:00 | 12.52 | 12.86 | 12.51 | 12.7 | 123480562 | |
2016-03-01 00:00:00+00:00 | 13.19 | 13.21 | 12.62 | 12.64 | 153473420 | |
2016-03-02 00:00:00+00:00 | 13.41 | 13.51 | 13.13 | 13.2 | 139562706 | |
2016-03-03 00:00:00+00:00 | 13.5 | 13.55 | 13.25 | 13.38 | 105031428 | |
2016-03-04 00:00:00+00:00 | 13.54 | 13.89 | 13.49 | 13.76 | 171338150 | |
2016-03-07 00:00:00+00:00 | 13.53 | 13.62 | 13.37 | 13.45 | 97565818 | |
2016-03-08 00:00:00+00:00 | 13.06 | 13.465 | 13.05 | 13.4 | 124883295 | |
2016-03-09 00:00:00+00:00 | 13.14 | 13.27 | 13 | 13.17 | 92198484 | |
2016-03-10 00:00:00+00:00 | 13.27 | 13.4 | 13 | 13.23 | 123467835 | |
2016-03-11 00:00:00+00:00 | 13.79 | 13.84 | 13.37 | 13.44 | 120366396 | |
2016-03-14 00:00:00+00:00 | 13.64 | 13.74 | 13.49 | 13.72 | 87695689 | |
2016-03-15 00:00:00+00:00 | 13.57 | 13.6 | 13.39 | 13.51 | 80403985 | |
2016-03-16 00:00:00+00:00 | 13.31 | 13.81 | 13.09 | 13.51 | 149645421 | |
2016-03-17 00:00:00+00:00 | 13.4 | 13.475 | 13.05 | 13.22 | 122129018 | |
2016-03-18 00:00:00+00:00 | 13.79 | 13.88 | 13.55 | 13.68 | 146530926 | |
2016-03-21 00:00:00+00:00 | 13.84 | 14.03 | 13.72 | 13.8 | 104633493 | |
2016-03-22 00:00:00+00:00 | 13.76 | 13.84 | 13.63 | 13.67 | 83196383 | |
2016-03-23 00:00:00+00:00 | 13.62 | 13.83 | 13.6 | 13.77 | 95747961 | |
2016-03-24 00:00:00+00:00 | 13.68 | 13.69 | 13.27 | 13.41 | 94772909 | |
2016-03-28 00:00:00+00:00 | 13.62 | 13.74 | 13.54 | 13.73 | 54455597 | |
2016-03-29 00:00:00+00:00 | 13.42 | 13.54 | 13.26 | 13.54 | 101606836 | |
2016-03-30 00:00:00+00:00 | 13.48 | 13.66 | 13.46 | 13.49 | 85876825 | |
2016-03-31 00:00:00+00:00 | 13.52 | 13.71 | 13.45 | 13.49 | 79560389 | |
2016-04-01 00:00:00+00:00 | 13.56 | 13.65 | 13.32 | 13.47 | 73092379 | |
2016-04-04 00:00:00+00:00 | 13.51 | 13.65 | 13.41 | 13.54 | 59638541 | |
2016-04-05 00:00:00+00:00 | 13.19 | 13.34 | 13.16 | 13.3 | 78398301 | |
2016-04-06 00:00:00+00:00 | 13.27 | 13.3 | 13.11 | 13.2 | 62509221 | |
2016-04-07 00:00:00+00:00 | 12.85 | 13.205 | 12.75 | 13.15 | 105619597 | |
2016-04-08 00:00:00+00:00 | 12.88 | 13.14 | 12.86 | 13.03 | 76527808 | |
2016-04-11 00:00:00+00:00 | 12.97 | 13.09 | 12.88 | 12.92 | 79266678 | |
2016-04-12 00:00:00+00:00 | 13.27 | 13.33 | 12.93 | 13 | 100558608 | |
2016-04-13 00:00:00+00:00 | 13.79 | 13.85 | 13.53 | 13.55 | 138454434 | |
2016-04-14 00:00:00+00:00 | 14.14 | 14.28 | 13.7 | 13.71 | 179342548 | |
2016-04-15 00:00:00+00:00 | 14 | 14.29 | 13.95 | 14.27 | 87934720 | |
2016-04-18 00:00:00+00:00 | 14.17 | 14.24 | 13.82 | 13.85 | 81952829 | |
2016-04-19 00:00:00+00:00 | 14.45 | 14.54 | 14.24 | 14.26 | 107054754 | |
2016-04-20 00:00:00+00:00 | 14.93 | 14.94 | 14.53 | 14.56 | 115452870 | |
2016-04-21 00:00:00+00:00 | 14.9 | 15.135 | 14.75 | 14.98 | 121502366 | |
2016-04-22 00:00:00+00:00 | 15.11 | 15.14 | 14.865 | 14.865 | 83399543 | |
2016-04-25 00:00:00+00:00 | 14.96 | 15.14 | 14.81 | 15.02 | 70489478 | |
2016-04-26 00:00:00+00:00 | 15.09 | 15.14 | 14.8 | 15.02 | 84745857 | |
2016-04-27 00:00:00+00:00 | 15.02 | 15.3 | 14.98 | 15.02 | 115753717 | |
2016-04-28 00:00:00+00:00 | 14.79 | 15.08 | 14.76 | 14.92 | 78536135 | |
2016-04-29 00:00:00+00:00 | 14.56 | 14.85 | 14.43 | 14.73 | 122108474 | |
2016-05-02 00:00:00+00:00 | 14.77 | 14.78 | 14.4 | 14.58 | 68882814 | |
2016-05-03 00:00:00+00:00 | 14.36 | 14.51 | 14.15 | 14.51 | 110487308 | |
2016-05-04 00:00:00+00:00 | 14.13 | 14.27 | 14 | 14.09 | 99238480 | |
2016-05-05 00:00:00+00:00 | 14.05 | 14.32 | 14 | 14.15 | 73052136 | |
2016-05-06 00:00:00+00:00 | 14.11 | 14.14 | 13.8 | 13.83 | 76555048 | |
2016-05-09 00:00:00+00:00 | 13.99 | 14.19 | 13.91 | 14.08 | 55342432 | |
2016-05-10 00:00:00+00:00 | 14.3 | 14.33 | 14.05 | 14.08 | 59857712 | |
2016-05-11 00:00:00+00:00 | 14.2 | 14.5 | 14.19 | 14.25 | 68041516 | |
2016-05-12 00:00:00+00:00 | 14.14 | 14.47 | 14.05 | 14.3 | 72550966 | |
2016-05-13 00:00:00+00:00 | 13.88 | 14.37 | 13.87 | 14.15 | 86141548 | |
2016-05-16 00:00:00+00:00 | 13.93 | 14.02 | 13.8 | 13.82 | 57848601 | |
2016-05-17 00:00:00+00:00 | 14.01 | 14.13 | 13.82 | 13.89 | 76237641 | |
2016-05-18 00:00:00+00:00 | 14.69 | 14.75 | 14.01 | 14.02 | 151871694 | |
2016-05-19 00:00:00+00:00 | 14.53 | 14.865 | 14.43 | 14.595 | 106156946 | |
2016-05-20 00:00:00+00:00 | 14.52 | 14.7 | 14.43 | 14.64 | 81195479 | |
2016-05-23 00:00:00+00:00 | 14.47 | 14.6 | 14.4 | 14.54 | 66278981 | |
2016-05-24 00:00:00+00:00 | 14.68 | 14.75 | 14.53 | 14.6 | 96140736 | |
2016-05-25 00:00:00+00:00 | 14.92 | 15.15 | 14.83 | 14.83 | 123568712 | |
2016-05-26 00:00:00+00:00 | 14.7 | 14.99 | 14.69 | 14.98 | 65307024 | |
2016-05-27 00:00:00+00:00 | 14.88 | 14.88 | 14.69 | 14.76 | 62383643 | |
2016-05-31 00:00:00+00:00 | 14.79 | 15.05 | 14.73 | 15.03 | 85580697 | |
2016-06-01 00:00:00+00:00 | 14.86 | 14.9 | 14.45 | 14.6 | 62596061 | |
2016-06-02 00:00:00+00:00 | 14.94 | 14.98 | 14.77 | 14.95 | 64327307 | |
2016-06-03 00:00:00+00:00 | 14.42 | 14.5 | 14.19 | 14.46 | 148554288 | |
2016-06-06 00:00:00+00:00 | 14.52 | 14.7 | 14.37 | 14.44 | 80939209 | |
2016-06-07 00:00:00+00:00 | 14.35 | 14.6 | 14.34 | 14.54 | 67447137 | |
2016-06-08 00:00:00+00:00 | 14.43 | 14.46 | 14.3 | 14.35 | 58292864 | |
2016-06-09 00:00:00+00:00 | 14.19 | 14.32 | 14.08 | 14.32 | 74782442 | |
2016-06-10 00:00:00+00:00 | 13.83 | 14 | 13.75 | 13.98 | 90725677 | |
2016-06-13 00:00:00+00:00 | 13.6 | 13.905 | 13.58 | 13.64 | 74831253 | |
2016-06-14 00:00:00+00:00 | 13.26 | 13.726 | 13.17 | 13.56 | 85308733 | |
2016-06-15 00:00:00+00:00 | 13.34 | 13.66 | 13.28 | 13.38 | 100619784 | |
2016-06-16 00:00:00+00:00 | 13.31 | 13.33 | 13.02 | 13.23 | 90082453 | |
2016-06-17 00:00:00+00:00 | 13.4 | 13.53 | 13.28 | 13.38 | 89022300 | |
2016-06-20 00:00:00+00:00 | 13.54 | 13.85 | 13.51 | 13.74 | 89873995 | |
2016-06-21 00:00:00+00:00 | 13.62 | 13.65 | 13.445 | 13.615 | 73730632 | |
2016-06-22 00:00:00+00:00 | 13.61 | 13.78 | 13.59 | 13.6 | 77910297 | |
2016-06-23 00:00:00+00:00 | 14.04 | 14.05 | 13.82 | 13.84 | 81994923 | |
2016-06-24 00:00:00+00:00 | 13 | 13.44 | 12.97 | 13.05 | 231376163 | |
2016-06-27 00:00:00+00:00 | 12.18 | 12.77 | 12.05 | 12.77 | 259697521 | |
2016-06-28 00:00:00+00:00 | 12.7 | 12.72 | 12.35 | 12.57 | 150037397 | |
2016-06-29 00:00:00+00:00 | 13.19 | 13.22 | 12.92 | 13.07 | 118484022 | |
2016-06-30 00:00:00+00:00 | 13.27 | 13.38 | 13.07 | 13.37 | 125773865 | |
2016-07-01 00:00:00+00:00 | 13.1 | 13.27 | 13.02 | 13.19 | 88806340 | |
2016-07-05 00:00:00+00:00 | 12.74 | 12.94 | 12.63 | 12.93 | 98165899 | |
2016-07-06 00:00:00+00:00 | 12.86 | 12.92 | 12.45 | 12.52 | 94953823 | |
2016-07-07 00:00:00+00:00 | 13.01 | 13.11 | 12.84 | 12.865 | 101100345 | |
2016-07-08 00:00:00+00:00 | 13.17 | 13.32 | 13.11 | 13.28 | 92698914 | |
2016-07-11 00:00:00+00:00 | 13.21 | 13.4 | 13.2 | 13.29 | 67222146 | |
2016-07-12 00:00:00+00:00 | 13.54 | 13.6 | 13.4 | 13.41 | 89042101 | |
2016-07-13 00:00:00+00:00 | 13.44 | 13.58 | 13.32 | 13.5 | 74431683 | |
2016-07-14 00:00:00+00:00 | 13.65 | 13.78 | 13.64 | 13.73 | 97784904 | |
2016-07-15 00:00:00+00:00 | 13.66 | 13.79 | 13.52 | 13.78 | 78528450 | |
2016-07-18 00:00:00+00:00 | 14.11 | 14.23 | 13.82 | 13.84 | 187014216 | |
2016-07-19 00:00:00+00:00 | 14.26 | 14.37 | 14.03 | 14.06 | 96838139 | |
2016-07-20 00:00:00+00:00 | 14.4 | 14.43 | 14.26 | 14.35 | 79212132 | |
2016-07-21 00:00:00+00:00 | 14.27 | 14.47 | 14.26 | 14.43 | 70904379 | |
2016-07-22 00:00:00+00:00 | 14.38 | 14.4 | 14.18 | 14.26 | 46604794 | |
2016-07-25 00:00:00+00:00 | 14.37 | 14.39 | 14.28 | 14.31 | 46404316 | |
2016-07-26 00:00:00+00:00 | 14.53 | 14.55 | 14.31 | 14.32 | 60153804 | |
2016-07-27 00:00:00+00:00 | 14.63 | 14.7 | 14.45 | 14.57 | 100965373 | |
2016-07-28 00:00:00+00:00 | 14.68 | 14.69 | 14.46 | 14.57 | 72049503 | |
2016-07-29 00:00:00+00:00 | 14.49 | 14.7 | 14.48 | 14.56 | 63257654 | |
2016-08-01 00:00:00+00:00 | 14.33 | 14.6 | 14.26 | 14.52 | 61279863 | |
2016-08-02 00:00:00+00:00 | 14.13 | 14.45 | 14.09 | 14.28 | 83776290 | |
2016-08-03 00:00:00+00:00 | 14.48 | 14.48 | 14.095 | 14.11 | 65263266 | |
2016-08-04 00:00:00+00:00 | 14.48 | 14.54 | 14.36 | 14.48 | 46171776 | |
2016-08-05 00:00:00+00:00 | 15.05 | 15.06 | 14.75 | 14.75 | 120947818 | |
2016-08-08 00:00:00+00:00 | 15.13 | 15.17 | 14.92 | 15.08 | 71936030 | |
2016-08-09 00:00:00+00:00 | 15.19 | 15.19 | 15.07 | 15.11 | 49973298 | |
2016-08-10 00:00:00+00:00 | 14.81 | 15.18 | 14.78 | 15.16 | 79080688 | |
2016-08-11 00:00:00+00:00 | 14.88 | 14.97 | 14.745 | 14.83 | 68945407 | |
2016-08-12 00:00:00+00:00 | 14.91 | 14.91 | 14.7 | 14.77 | 63022911 | |
2016-08-15 00:00:00+00:00 | 15.02 | 15.03 | 14.945 | 14.97 | 48592071 | |
2016-08-16 00:00:00+00:00 | 15.17 | 15.19 | 14.93 | 14.97 | 80307508 | |
2016-08-17 00:00:00+00:00 | 15.15 | 15.24 | 15.08 | 15.16 | 93372229 | |
2016-08-18 00:00:00+00:00 | 15.16 | 15.24 | 15.06 | 15.11 | 62804324 | |
2016-08-19 00:00:00+00:00 | 15.22 | 15.25 | 15.05 | 15.13 | 59800158 | |
2016-08-22 00:00:00+00:00 | 15.18 | 15.25 | 15.12 | 15.2 | 60877097 | |
2016-08-23 00:00:00+00:00 | 15.35 | 15.41 | 15.26 | 15.26 | 67798132 | |
2016-08-24 00:00:00+00:00 | 15.4 | 15.5 | 15.36 | 15.37 | 65415804 | |
2016-08-25 00:00:00+00:00 | 15.53 | 15.54 | 15.4 | 15.42 | 67624942 | |
2016-08-26 00:00:00+00:00 | 15.79 | 15.9 | 15.58 | 15.61 | 126886261 | |
2016-08-29 00:00:00+00:00 | 15.84 | 16.01 | 15.775 | 15.81 | 120111303 | |
2016-08-30 00:00:00+00:00 | 16.19 | 16.23 | 15.8 | 15.87 | 121173617 | |
2016-08-31 00:00:00+00:00 | 16.14 | 16.239 | 15.88 | 16.21 | 127664267 | |
2016-09-01 00:00:00+00:00 | 15.98 | 16.149 | 15.75 | 16.149 | 128812599 | |
2016-09-02 00:00:00+00:00 | 16 | 16.059 | 15.82 | 15.94 | 100972145 | |
2016-09-06 00:00:00+00:00 | 15.78 | 16.059 | 15.7 | 16.05 | 96503738 | |
2016-09-07 00:00:00+00:00 | 15.7 | 15.85 | 15.65 | 15.72 | 63284147 | |
2016-09-08 00:00:00+00:00 | 15.86 | 15.94 | 15.68 | 15.77 | 69292664 | |
2016-09-09 00:00:00+00:00 | 15.74 | 16.149 | 15.74 | 15.91 | 135182534 | |
2016-09-12 00:00:00+00:00 | 15.9 | 15.94 | 15.48 | 15.65 | 109723978 | |
2016-09-13 00:00:00+00:00 | 15.72 | 15.86 | 15.55 | 15.67 | 89317564 | |
2016-09-14 00:00:00+00:00 | 15.63 | 15.84 | 15.56 | 15.67 | 75517820 | |
2016-09-15 00:00:00+00:00 | 15.67 | 15.745 | 15.58 | 15.64 | 67883486 | |
2016-09-16 00:00:00+00:00 | 15.49 | 15.68 | 15.48 | 15.66 | 90873348 | |
2016-09-19 00:00:00+00:00 | 15.59 | 15.78 | 15.55 | 15.55 | 66168885 | |
2016-09-20 00:00:00+00:00 | 15.6 | 15.76 | 15.51 | 15.74 | 68201707 | |
2016-09-21 00:00:00+00:00 | 15.65 | 15.83 | 15.39 | 15.71 | 95464559 | |
2016-09-22 00:00:00+00:00 | 15.6 | 15.71 | 15.5 | 15.69 | 75586903 | |
2016-09-23 00:00:00+00:00 | 15.52 | 15.67 | 15.5 | 15.53 | 51578372 | |
2016-09-26 00:00:00+00:00 | 15.09 | 15.44 | 15.02 | 15.4 | 94947767 | |
2016-09-27 00:00:00+00:00 | 15.29 | 15.34 | 14.81 | 15.01 | 81992627 | |
2016-09-28 00:00:00+00:00 | 15.38 | 15.39 | 15.15 | 15.36 | 66021211 | |
2016-09-29 00:00:00+00:00 | 15.16 | 15.5 | 15.06 | 15.38 | 78815768 | |
2016-09-30 00:00:00+00:00 | 15.65 | 15.73 | 15.17 | 15.26 | 119309179 | |
2016-10-03 00:00:00+00:00 | 15.63 | 15.73 | 15.5 | 15.59 | 69864051 | |
2016-10-04 00:00:00+00:00 | 15.8 | 16.05 | 15.67 | 15.7 | 98501476 | |
2016-10-05 00:00:00+00:00 | 16.11 | 16.23 | 15.96 | 15.96 | 86056859 | |
2016-10-06 00:00:00+00:00 | 16.219 | 16.23 | 16.035 | 16.17 | 75838739 | |
2016-10-07 00:00:00+00:00 | 16.129 | 16.25 | 15.98 | 16.19 | 99597964 | |
2016-10-10 00:00:00+00:00 | 16.3 | 16.4 | 16.184 | 16.215 | 64158491 | |
2016-10-11 00:00:00+00:00 | 16.11 | 16.329 | 16.024 | 16.26 | 77348564 | |
2016-10-12 00:00:00+00:00 | 16.03 | 16.239 | 16.01 | 16.09 | 67144049 | |
2016-10-13 00:00:00+00:00 | 15.83 | 16.09 | 15.6 | 16.09 | 78636990 | |
2016-10-14 00:00:00+00:00 | 16 | 16.23 | 15.94 | 16.149 | 108815580 | |
2016-10-17 00:00:00+00:00 | 16.05 | 16.195 | 15.9 | 16.17 | 101795541 | |
2016-10-18 00:00:00+00:00 | 16.26 | 16.27 | 16.11 | 16.19 | 71433078 | |
2016-10-19 00:00:00+00:00 | 16.47 | 16.53 | 16.28 | 16.3 | 97968987 | |
2016-10-20 00:00:00+00:00 | 16.56 | 16.63 | 16.41 | 16.45 | 88077661 | |
2016-10-21 00:00:00+00:00 | 16.67 | 16.67 | 16.4 | 16.46 | 77921403 | |
2016-10-24 00:00:00+00:00 | 16.77 | 16.8 | 16.61 | 16.75 | 68237544 | |
2016-10-25 00:00:00+00:00 | 16.72 | 16.82 | 16.68 | 16.71 | 49342342 | |
2016-10-26 00:00:00+00:00 | 16.87 | 16.87 | 16.62 | 16.64 | 58690167 | |
2016-10-27 00:00:00+00:00 | 16.91 | 17.1 | 16.864 | 16.95 | 97937253 | |
2016-10-28 00:00:00+00:00 | 16.68 | 16.98 | 16.5 | 16.95 | 115090796 | |
2016-10-31 00:00:00+00:00 | 16.5 | 16.73 | 16.5 | 16.68 | 70352567 | |
2016-11-01 00:00:00+00:00 | 16.61 | 16.75 | 16.329 | 16.56 | 88557065 | |
2016-11-02 00:00:00+00:00 | 16.48 | 16.56 | 16.28 | 16.46 | 119136779 | |
2016-11-03 00:00:00+00:00 | 16.48 | 16.67 | 16.45 | 16.475 | 69655425 | |
2016-11-04 00:00:00+00:00 | 16.55 | 16.71 | 16.354 | 16.53 | 82431002 | |
2016-11-07 00:00:00+00:00 | 17.01 | 17.04 | 16.85 | 16.86 | 91581902 | |
2016-11-08 00:00:00+00:00 | 17 | 17.105 | 16.71 | 16.82 | 95702452 | |
2016-11-09 00:00:00+00:00 | 17.97 | 18.05 | 17.4 | 17.66 | 319516881 | |
2016-11-10 00:00:00+00:00 | 18.76 | 18.99 | 18.25 | 18.26 | 304889621 | |
2016-11-11 00:00:00+00:00 | 19.02 | 19.03 | 18.63 | 18.64 | 212952487 | |
2016-11-14 00:00:00+00:00 | 20.08 | 20.2 | 19.4 | 19.41 | 320959885 | |
2016-11-15 00:00:00+00:00 | 20.16 | 20.18 | 19.6 | 19.79 | 190293076 | |
2016-11-16 00:00:00+00:00 | 19.75 | 19.96 | 19.68 | 19.78 | 126662472 | |
2016-11-17 00:00:00+00:00 | 20.08 | 20.22 | 19.705 | 19.76 | 167895828 | |
2016-11-18 00:00:00+00:00 | 20 | 20.14 | 19.87 | 20.06 | 120620238 | |
2016-11-21 00:00:00+00:00 | 20.33 | 20.35 | 20.01 | 20.1 | 117479106 | |
2016-11-22 00:00:00+00:00 | 20.3 | 20.47 | 20.12 | 20.45 | 104720899 | |
2016-11-23 00:00:00+00:00 | 20.56 | 20.66 | 20.2 | 20.576 | 110936499 | |
2016-11-25 00:00:00+00:00 | 20.86 | 20.875 | 20.5 | 20.62 | 62610385 | |
2016-11-28 00:00:00+00:00 | 20.3 | 20.85 | 20.25 | 20.67 | 108651231 | |
2016-11-29 00:00:00+00:00 | 20.29 | 20.54 | 20.25 | 20.38 | 114159274 | |
2016-11-30 00:00:00+00:00 | 21.12 | 21.19 | 20.72 | 20.77 | 191597643 | |
2016-12-01 00:00:00+00:00 | 21.5 | 21.94 | 21.34 | 21.42 | 199092169 | |
2016-12-02 00:00:00+00:00 | 21.23 | 21.5 | 21.02 | 21.49 | 128447563 | |
2016-12-05 00:00:00+00:00 | 21.84 | 21.869 | 21.46 | 21.47 | 130377307 | |
2016-12-06 00:00:00+00:00 | 22.16 | 22.16 | 21.72 | 22.09 | 120789194 | |
2016-12-07 00:00:00+00:00 | 22.57 | 22.57 | 21.95 | 22.19 | 168005152 | |
2016-12-08 00:00:00+00:00 | 22.95 | 23.24 | 22.69 | 22.79 | 182492861 | |
2016-12-09 00:00:00+00:00 | 23.09 | 23.17 | 22.78 | 22.95 | 130590406 | |
2016-12-12 00:00:00+00:00 | 22.61 | 23.25 | 22.535 | 23 | 136869792 | |
2016-12-13 00:00:00+00:00 | 22.61 | 22.88 | 22.29 | 22.8 | 118204085 | |
2016-12-14 00:00:00+00:00 | 22.67 | 23.3 | 22.21 | 22.31 | 225253082 | |
2016-12-15 00:00:00+00:00 | 23.16 | 23.39 | 22.8 | 22.9 | 162025814 | |
2016-12-16 00:00:00+00:00 | 22.66 | 23.32 | 22.65 | 23.32 | 147447864 | |
2016-12-19 00:00:00+00:00 | 22.48 | 22.72 | 22.33 | 22.6 | 97439884 | |
2016-12-20 00:00:00+00:00 | 22.71 | 22.83 | 22.61 | 22.63 | 83879520 | |
2016-12-21 00:00:00+00:00 | 22.63 | 22.72 | 22.47 | 22.72 | 61046690 | |
2016-12-22 00:00:00+00:00 | 22.54 | 22.73 | 22.47 | 22.6 | 67089135 | |
2016-12-23 00:00:00+00:00 | 22.6 | 22.65 | 22.43 | 22.51 | 38172533 | |
2016-12-27 00:00:00+00:00 | 22.61 | 22.735 | 22.54 | 22.71 | 39968380 | |
2016-12-28 00:00:00+00:00 | 22.33 | 22.67 | 22.26 | 22.62 | 52619447 | |
2016-12-29 00:00:00+00:00 | 22 | 22.39 | 21.77 | 22.33 | 79188397 | |
2016-12-30 00:00:00+00:00 | 22.1 | 22.26 | 21.95 | 22.02 | 72592692 | |
2017-01-03 00:00:00+00:00 | 22.53 | 22.68 | 22.2 | 22.6 | 99298080 | |
2017-01-04 00:00:00+00:00 | 22.95 | 22.96 | 22.6 | 22.72 | 76846195 | |
2017-01-05 00:00:00+00:00 | 22.68 | 22.93 | 22.345 | 22.82 | 86800397 | |
2017-01-06 00:00:00+00:00 | 22.68 | 22.85 | 22.56 | 22.78 | 66281476 | |
2017-01-09 00:00:00+00:00 | 22.55 | 22.71 | 22.4 | 22.51 | 75886389 | |
2017-01-10 00:00:00+00:00 | 22.94 | 23.14 | 22.54 | 22.59 | 100977665 | |
2017-01-11 00:00:00+00:00 | 23.07 | 23.07 | 22.72 | 22.94 | 92385551 | |
2017-01-12 00:00:00+00:00 | 22.92 | 23.12 | 22.61 | 23.01 | 120274108 | |
2017-01-13 00:00:00+00:00 | 23.01 | 23.409 | 22.8 | 23.21 | 161874990 | |
2017-01-17 00:00:00+00:00 | 22.05 | 22.79 | 22.01 | 22.68 | 152495923 | |
2017-01-18 00:00:00+00:00 | 22.63 | 22.65 | 22.1 | 22.3 | 124323059 | |
2017-01-19 00:00:00+00:00 | 22.53 | 22.81 | 22.41 | 22.73 | 75990836 | |
2017-01-20 00:00:00+00:00 | 22.64 | 22.93 | 22.52 | 22.66 | 102546886 | |
2017-01-23 00:00:00+00:00 | 22.56 | 22.76 | 22.415 | 22.62 | 61333028 | |
2017-01-24 00:00:00+00:00 | 22.95 | 23.095 | 22.48 | 22.61 | 98508678 | |
2017-01-25 00:00:00+00:00 | 23.37 | 23.42 | 23.1 | 23.32 | 99753317 | |
2017-01-26 00:00:00+00:00 | 23.44 | 23.55 | 23.28 | 23.41 | 84103638 | |
2017-01-27 00:00:00+00:00 | 23.36 | 23.45 | 23.28 | 23.43 | 54558089 | |
2017-01-30 00:00:00+00:00 | 22.95 | 23.2 | 22.71 | 23.2 | 91521352 | |
2017-01-31 00:00:00+00:00 | 22.64 | 23.025 | 22.5 | 22.77 | 90978767 | |
2017-02-01 00:00:00+00:00 | 22.89 | 23.22 | 22.82 | 22.97 | 103570051 | |
2017-02-02 00:00:00+00:00 | 22.72 | 22.79 | 22.51 | 22.74 | 88679063 | |
2017-02-03 00:00:00+00:00 | 23.29 | 23.35 | 22.95 | 23.15 | 115977015 | |
2017-02-06 00:00:00+00:00 | 23.12 | 23.38 | 23.07 | 23.15 | 92207765 | |
2017-02-07 00:00:00+00:00 | 22.9 | 23.29 | 22.86 | 23.28 | 87930117 | |
2017-02-08 00:00:00+00:00 | 22.67 | 22.73 | 22.45 | 22.73 | 102250692 | |
2017-02-09 00:00:00+00:00 | 23.12 | 23.15 | 22.64 | 22.76 | 102634174 | |
2017-02-10 00:00:00+00:00 | 23.08 | 23.24 | 22.96 | 23.19 | 90548254 | |
2017-02-13 00:00:00+00:00 | 23.4 | 23.54 | 23.17 | 23.17 | 105342461 | |
2017-02-14 00:00:00+00:00 | 24.06 | 24.17 | 23.33 | 23.4 | 139611821 | |
2017-02-15 00:00:00+00:00 | 24.58 | 24.77 | 24.11 | 24.34 | 151233334 | |
2017-02-16 00:00:00+00:00 | 24.58 | 24.62 | 24.3 | 24.54 | 98144469 | |
2017-02-17 00:00:00+00:00 | 24.52 | 24.58 | 24.2 | 24.28 | 85766765 | |
2017-02-21 00:00:00+00:00 | 24.78 | 24.8 | 24.58 | 24.59 | 78522523 | |
2017-02-22 00:00:00+00:00 | 24.79 | 24.95 | 24.54 | 24.61 | 81531895 | |
2017-02-23 00:00:00+00:00 | 24.58 | 24.89 | 24.51 | 24.79 | 85845982 | |
2017-02-24 00:00:00+00:00 | 24.23 | 24.35 | 24.02 | 24.12 | 97055343 | |
2017-02-27 00:00:00+00:00 | 24.57 | 24.66 | 24.19 | 24.2 | 69057612 | |
2017-02-28 00:00:00+00:00 | 24.68 | 24.7 | 24.42 | 24.48 | 90084039 | |
2017-03-01 00:00:00+00:00 | 25.5 | 25.61 | 25.22 | 25.37 | 143947510 | |
2017-03-02 00:00:00+00:00 | 25.23 | 25.8 | 25.2 | 25.68 | 99744502 | |
2017-03-03 00:00:00+00:00 | 25.44 | 25.65 | 25.3 | 25.3 | 92681401 | |
2017-03-06 00:00:00+00:00 | 25.25 | 25.35 | 25.08 | 25.33 | 75660430 | |
2017-03-07 00:00:00+00:00 | 25.21 | 25.36 | 25.1 | 25.22 | 64019227 | |
2017-03-08 00:00:00+00:00 | 25.26 | 25.77 | 25.22 | 25.6 | 105314569 | |
2017-03-09 00:00:00+00:00 | 25.35 | 25.53 | 25.23 | 25.35 | 78984607 | |
2017-03-10 00:00:00+00:00 | 25.31 | 25.62 | 25.09 | 25.62 | 86937037 | |
2017-03-13 00:00:00+00:00 | 25.3 | 25.41 | 25.13 | 25.3 | 56886181 | |
2017-03-14 00:00:00+00:00 | 25.32 | 25.34 | 25.05 | 25.19 | 63182745 | |
2017-03-15 00:00:00+00:00 | 25.18 | 25.55 | 24.96 | 25.39 | 114662671 | |
2017-03-16 00:00:00+00:00 | 25.22 | 25.49 | 25.19 | 25.24 | 70306233 | |
2017-03-17 00:00:00+00:00 | 24.86 | 25.27 | 24.83 | 25.22 | 105222387 | |
2017-03-20 00:00:00+00:00 | 24.44 | 24.76 | 24.42 | 24.58 | 87287477 | |
2017-03-21 00:00:00+00:00 | 23.02 | 24.56 | 22.95 | 24.5 | 259545810 | |
2017-03-22 00:00:00+00:00 | 22.94 | 23.18 | 22.45 | 22.65 | 167984438 | |
2017-03-23 00:00:00+00:00 | 23.07 | 23.49 | 22.91 | 23.04 | 111666799 | |
2017-03-24 00:00:00+00:00 | 23.12 | 23.35 | 22.76 | 23.2 | 113390157 | |
2017-03-27 00:00:00+00:00 | 23.03 | 23.05 | 22.16 | 22.28 | 119513475 | |
2017-03-28 00:00:00+00:00 | 23.48 | 23.67 | 23 | 23.01 | 95632231 | |
2017-03-29 00:00:00+00:00 | 23.35 | 23.65 | 23.31 | 23.51 | 67204492 | |
2017-03-30 00:00:00+00:00 | 23.87 | 23.98 | 23.33 | 23.38 | 79189235 | |
2017-03-31 00:00:00+00:00 | 23.59 | 23.97 | 23.59 | 23.83 | 77158276 | |
2017-04-03 00:00:00+00:00 | 23.59 | 23.75 | 23.08 | 23.65 | 87981757 | |
2017-04-04 00:00:00+00:00 | 23.44 | 23.51 | 23.2 | 23.2 | 75219535 | |
2017-04-05 00:00:00+00:00 | 23.17 | 23.88 | 23.15 | 23.77 | 97453336 | |
2017-04-06 00:00:00+00:00 | 23.26 | 23.46 | 22.96 | 23.17 | 82975196 | |
2017-04-07 00:00:00+00:00 | 23.16 | 23.34 | 22.93 | 23.03 | 79497011 | |
2017-04-10 00:00:00+00:00 | 23.02 | 23.28 | 22.91 | 23.13 | 63254657 | |
2017-04-11 00:00:00+00:00 | 22.92 | 22.95 | 22.58 | 22.83 | 94192834 | |
2017-04-12 00:00:00+00:00 | 22.65 | 22.94 | 22.61 | 22.88 | 76556334 | |
2017-04-13 00:00:00+00:00 | 22.34 | 22.96 | 22.34 | 22.56 | 88061760 | |
2017-04-17 00:00:00+00:00 | 22.81 | 22.83 | 22.26 | 22.36 | 85053244 | |
2017-04-18 00:00:00+00:00 | 22.71 | 23.15 | 22.38 | 23.11 | 146488440 | |
2017-04-19 00:00:00+00:00 | 22.74 | 23.15 | 22.68 | 22.92 | 101771793 | |
2017-04-20 00:00:00+00:00 | 23.07 | 23.17 | 22.82 | 22.96 | 103706737 | |
2017-04-21 00:00:00+00:00 | 22.71 | 23.08 | 22.59 | 23.03 | 127235359 | |
2017-04-24 00:00:00+00:00 | 23.63 | 23.85 | 23.24 | 23.24 | 138638933 | |
2017-04-25 00:00:00+00:00 | 23.98 | 24.35 | 23.91 | 23.99 | 131531125 | |
2017-04-26 00:00:00+00:00 | 23.89 | 24.19 | 23.86 | 24 | 95172221 | |
2017-04-27 00:00:00+00:00 | 23.65 | 23.925 | 23.46 | 23.9 | 80024211 | |
2017-04-28 00:00:00+00:00 | 23.34 | 23.72 | 23.32 | 23.57 | 69384908 | |
2017-05-01 00:00:00+00:00 | 23.61 | 23.77 | 23.35 | 23.52 | 72382852 | |
2017-05-02 00:00:00+00:00 | 23.53 | 23.67 | 23.33 | 23.61 | 60262632 | |
2017-05-03 00:00:00+00:00 | 23.77 | 23.78 | 23.34 | 23.37 | 64179991 | |
2017-05-04 00:00:00+00:00 | 23.85 | 24.05 | 23.73 | 23.98 | 68113987 | |
2017-05-05 00:00:00+00:00 | 23.74 | 23.99 | 23.6 | 23.98 | 56678794 | |
2017-05-08 00:00:00+00:00 | 23.96 | 24.06 | 23.75 | 23.75 | 56016460 | |
2017-05-09 00:00:00+00:00 | 23.98 | 24.31 | 23.87 | 24.03 | 61621953 | |
2017-05-10 00:00:00+00:00 | 24.15 | 24.17 | 23.87 | 23.91 | 48495255 | |
2017-05-11 00:00:00+00:00 | 24.07 | 24.23 | 23.84 | 24.095 | 68350823 | |
2017-05-12 00:00:00+00:00 | 24 | 24 | 23.75 | 23.92 | 62589008 | |
2017-05-15 00:00:00+00:00 | 24.06 | 24.14 | 23.98 | 24.06 | 50517102 | |
2017-05-16 00:00:00+00:00 | 23.99 | 24.17 | 23.94 | 24.07 | 55774774 | |
2017-05-17 00:00:00+00:00 | 22.57 | 23.45 | 22.46 | 23.37 | 190353263 | |
2017-05-18 00:00:00+00:00 | 22.74 | 23 | 22.5 | 22.61 | 108444096 | |
2017-05-19 00:00:00+00:00 | 23.05 | 23.3 | 22.87 | 22.91 | 89566747 | |
2017-05-22 00:00:00+00:00 | 23.04 | 23.24 | 22.82 | 23.19 | 74735545 | |
2017-05-23 00:00:00+00:00 | 23.39 | 23.5 | 22.84 | 23.03 | 79020023 | |
2017-05-24 00:00:00+00:00 | 23.36 | 23.47 | 23.22 | 23.44 | 59198571 | |
2017-05-25 00:00:00+00:00 | 23.25 | 23.58 | 23.22 | 23.41 | 60435403 | |
2017-05-26 00:00:00+00:00 | 23.24 | 23.42 | 23.17 | 23.23 | 50399458 | |
2017-05-30 00:00:00+00:00 | 22.91 | 23.21 | 22.88 | 23.13 | 61307936 | |
2017-05-31 00:00:00+00:00 | 22.41 | 22.8 | 22.09 | 22.78 | 138636343 | |
2017-06-01 00:00:00+00:00 | 22.63 | 22.64 | 22.3 | 22.48 | 69629209 | |
2017-06-02 00:00:00+00:00 | 22.45 | 22.59 | 22.16 | 22.27 | 79689080 | |
2017-06-05 00:00:00+00:00 | 22.41 | 22.7 | 22.39 | 22.44 | 51590269 | |
2017-06-06 00:00:00+00:00 | 22.23 | 22.41 | 22.07 | 22.2 | 71735574 | |
2017-06-07 00:00:00+00:00 | 22.6 | 22.62 | 22.265 | 22.3 | 66366724 | |
2017-06-08 00:00:00+00:00 | 22.97 | 23.21 | 22.6 | 22.62 | 93685867 | |
2017-06-09 00:00:00+00:00 | 23.67 | 23.67 | 23.21 | 23.22 | 108254319 | |
2017-06-12 00:00:00+00:00 | 23.78 | 24.035 | 23.55 | 23.82 | 85810394 | |
2017-06-13 00:00:00+00:00 | 23.77 | 24.11 | 23.664 | 23.95 | 68047282 | |
2017-06-14 00:00:00+00:00 | 23.76 | 23.81 | 23.2 | 23.55 | 102105009 | |
2017-06-15 00:00:00+00:00 | 23.54 | 23.945 | 23.47 | 23.55 | 69707531 | |
2017-06-16 00:00:00+00:00 | 23.43 | 23.62 | 23.37 | 23.56 | 57745812 | |
2017-06-19 00:00:00+00:00 | 23.91 | 24 | 23.6 | 23.61 | 58544184 | |
2017-06-20 00:00:00+00:00 | 23.49 | 23.99 | 23.48 | 23.91 | 59693544 | |
2017-06-21 00:00:00+00:00 | 23.13 | 23.57 | 23.07 | 23.49 | 72432788 | |
2017-06-22 00:00:00+00:00 | 22.93 | 23.195 | 22.9 | 23.12 | 64763274 | |
2017-06-23 00:00:00+00:00 | 22.82 | 23.11 | 22.74 | 23.11 | 85399612 | |
2017-06-26 00:00:00+00:00 | 22.89 | 23.14 | 22.73 | 22.91 | 61307843 | |
2017-06-27 00:00:00+00:00 | 23.27 | 23.6 | 23 | 23.1 | 88257752 | |
2017-06-28 00:00:00+00:00 | 23.88 | 23.96 | 23.529 | 23.53 | 118743321 | |
2017-06-29 00:00:00+00:00 | 24.32 | 24.67 | 24.035 | 24.58 | 150501245 | |
2017-06-30 00:00:00+00:00 | 24.26 | 24.62 | 24.17 | 24.62 | 83988074 | |
2017-07-03 00:00:00+00:00 | 24.68 | 24.91 | 24.44 | 24.46 | 58119029 | |
2017-07-05 00:00:00+00:00 | 24.92 | 24.95 | 24.62 | 24.8 | 76172541 | |
2017-07-06 00:00:00+00:00 | 24.71 | 25.11 | 24.69 | 24.9 | 84465444 | |
2017-07-07 00:00:00+00:00 | 24.83 | 25 | 24.65 | 24.92 | 54478781 | |
2017-07-10 00:00:00+00:00 | 24.89 | 24.99 | 24.64 | 24.72 | 54632844 | |
2017-07-11 00:00:00+00:00 | 24.6 | 24.91 | 24.46 | 24.88 | 69349320 | |
2017-07-12 00:00:00+00:00 | 24.35 | 24.6 | 24.3 | 24.42 | 68708652 | |
2017-07-13 00:00:00+00:00 | 24.62 | 24.63 | 24.37 | 24.43 | 55533384 | |
2017-07-14 00:00:00+00:00 | 24.21 | 24.32 | 23.82 | 24.2 | 92554236 | |
2017-07-17 00:00:00+00:00 | 24.02 | 24.22 | 23.93 | 24.22 | 81367339 | |
2017-07-18 00:00:00+00:00 | 23.9 | 24.06 | 23.61 | 23.91 | 107981434 | |
2017-07-19 00:00:00+00:00 | 24.06 | 24.2 | 23.81 | 24.05 | 74140055 | |
2017-07-20 00:00:00+00:00 | 23.94 | 24.116 | 23.83 | 24.02 | 62775151 | |
2017-07-21 00:00:00+00:00 | 23.8 | 23.96 | 23.66 | 23.84 | 65922474 | |
2017-07-24 00:00:00+00:00 | 23.91 | 24.03 | 23.725 | 23.74 | 51366867 | |
2017-07-25 00:00:00+00:00 | 24.48 | 24.67 | 24.24 | 24.25 | 86154719 | |
2017-07-26 00:00:00+00:00 | 24.21 | 24.67 | 24.135 | 24.67 | 65704665 | |
2017-07-27 00:00:00+00:00 | 24.11 | 24.45 | 24.034 | 24.26 | 62902110 | |
2017-07-28 00:00:00+00:00 | 24.03 | 24.21 | 23.96 | 24.05 | 50143443 | |
2017-07-31 00:00:00+00:00 | 24.12 | 24.3 | 24.09 | 24.12 | 61618916 | |
2017-08-01 00:00:00+00:00 | 24.45 | 24.49 | 24.27 | 24.29 | 52782495 | |
2017-08-02 00:00:00+00:00 | 24.59 | 24.59 | 24.26 | 24.44 | 55317130 | |
2017-08-03 00:00:00+00:00 | 24.37 | 24.6 | 24.32 | 24.55 | 53168561 | |
2017-08-04 00:00:00+00:00 | 24.97 | 25.08 | 24.65 | 24.68 | 105206610 | |
2017-08-07 00:00:00+00:00 | 24.96 | 25.05 | 24.9 | 25.01 | 41860914 | |
2017-08-08 00:00:00+00:00 | 24.9 | 25.35 | 24.82 | 24.95 | 73399327 | |
2017-08-09 00:00:00+00:00 | 24.74 | 24.77 | 24.46 | 24.6 | 56910375 | |
2017-08-10 00:00:00+00:00 | 24.12 | 24.58 | 24.09 | 24.58 | 77973831 | |
2017-08-11 00:00:00+00:00 | 23.86 | 24.24 | 23.8 | 24.07 | 68157429 | |
2017-08-14 00:00:00+00:00 | 24.42 | 24.485 | 24.17 | 24.23 | 54989495 | |
2017-08-15 00:00:00+00:00 | 24.47 | 24.85 | 24.45 | 24.75 | 45143228 | |
2017-08-16 00:00:00+00:00 | 24.19 | 24.62 | 24.08 | 24.54 | 59163774 | |
2017-08-17 00:00:00+00:00 | 23.64 | 24.166 | 23.62 | 24.07 | 78969326 | |
2017-08-18 00:00:00+00:00 | 23.62 | 23.9 | 23.45 | 23.56 | 60461197 | |
2017-08-21 00:00:00+00:00 | 23.38 | 23.64 | 23.14 | 23.58 | 70106629 | |
2017-08-22 00:00:00+00:00 | 23.83 | 23.89 | 23.57 | 23.6 | 59197032 | |
2017-08-23 00:00:00+00:00 | 23.76 | 23.97 | 23.59 | 23.61 | 39825518 | |
2017-08-24 00:00:00+00:00 | 23.84 | 23.94 | 23.68 | 23.93 | 39131435 | |
2017-08-25 00:00:00+00:00 | 23.77 | 24.07 | 23.75 | 23.89 | 43511878 | |
2017-08-28 00:00:00+00:00 | 23.72 | 23.9 | 23.57 | 23.84 | 37062661 | |
2017-08-29 00:00:00+00:00 | 23.58 | 23.7 | 23.12 | 23.22 | 60846381 | |
2017-08-30 00:00:00+00:00 | 23.87 | 24.04 | 23.56 | 23.73 | 58412886 | |
2017-08-31 00:00:00+00:00 | 23.89 | 24.04 | 23.78 | 24.03 | 62362421 | |
2017-09-01 00:00:00+00:00 | 24.09 | 24.18 | 23.8 | 23.9 | 56233871 | |
2017-09-05 00:00:00+00:00 | 23.31 | 23.83 | 23.23 | 23.83 | 108981485 | |
2017-09-06 00:00:00+00:00 | 23.41 | 23.575 | 23.21 | 23.49 | 64188863 | |
2017-09-07 00:00:00+00:00 | 22.97 | 23.41 | 22.75 | 23.38 | 103471888 | |
2017-09-08 00:00:00+00:00 | 22.89 | 23.23 | 22.85 | 22.93 | 68365012 | |
2017-09-11 00:00:00+00:00 | 23.36 | 23.41 | 23.08 | 23.22 | 73741048 | |
2017-09-12 00:00:00+00:00 | 23.95 | 24.03 | 23.45 | 23.46 | 99629843 | |
2017-09-13 00:00:00+00:00 | 24.33 | 24.35 | 23.871 | 23.92 | 86339775 | |
2017-09-14 00:00:00+00:00 | 24.24 | 24.54 | 24.2 | 24.38 | 69854207 | |
2017-09-15 00:00:00+00:00 | 24.38 | 24.41 | 24.15 | 24.21 | 85937551 | |
2017-09-18 00:00:00+00:00 | 24.7 | 24.77 | 24.41 | 24.42 | 68076369 | |
2017-09-19 00:00:00+00:00 | 24.86 | 24.99 | 24.62 | 24.7 | 56190408 | |
2017-09-20 00:00:00+00:00 | 25.06 | 25.24 | 24.67 | 24.88 | 82245970 | |
2017-09-21 00:00:00+00:00 | 25.16 | 25.28 | 24.92 | 25.05 | 57838755 | |
2017-09-22 00:00:00+00:00 | 25.02 | 25.07 | 24.85 | 25.01 | 61424966 | |
2017-09-25 00:00:00+00:00 | 24.76 | 25.08 | 24.58 | 24.94 | 62309242 | |
2017-09-26 00:00:00+00:00 | 24.81 | 24.955 | 24.685 | 24.83 | 52983481 | |
2017-09-27 00:00:00+00:00 | 25.41 | 25.64 | 25.18 | 25.31 | 92809403 | |
2017-09-28 00:00:00+00:00 | 25.45 | 25.64 | 25.34 | 25.59 | 64957669 | |
2017-09-29 00:00:00+00:00 | 25.34 | 25.48 | 25.31 | 25.41 | 66211902 | |
2017-10-02 00:00:00+00:00 | 25.62 | 25.65 | 25.39 | 25.46 | 54001378 | |
2017-10-03 00:00:00+00:00 | 25.86 | 25.93 | 25.62 | 25.75 | 55913268 | |
2017-10-04 00:00:00+00:00 | 25.71 | 25.95 | 25.7 | 25.86 | 53358291 | |
2017-10-05 00:00:00+00:00 | 26.13 | 26.23 | 25.66 | 25.77 | 62636891 | |
2017-10-06 00:00:00+00:00 | 26.21 | 26.3 | 26.04 | 26.25 | 53884276 | |
2017-10-09 00:00:00+00:00 | 25.85 | 26.27 | 25.76 | 26.26 | 55153704 | |
2017-10-10 00:00:00+00:00 | 25.93 | 25.95 | 25.71 | 25.83 | 46109501 | |
2017-10-11 00:00:00+00:00 | 25.83 | 25.93 | 25.65 | 25.93 | 51639142 | |
2017-10-12 00:00:00+00:00 | 25.45 | 25.93 | 25.34 | 25.87 | 71770031 | |
2017-10-13 00:00:00+00:00 | 25.83 | 26 | 25.12 | 25.38 | 104143172 | |
2017-10-16 00:00:00+00:00 | 26.24 | 26.33 | 25.87 | 25.87 | 71927178 | |
2017-10-17 00:00:00+00:00 | 26.2 | 26.43 | 26.11 | 26.37 | 48589546 | |
2017-10-18 00:00:00+00:00 | 26.48 | 26.55 | 26.33 | 26.34 | 55382456 | |
2017-10-19 00:00:00+00:00 | 26.58 | 26.59 | 26.15 | 26.2 | 54493609 | |
2017-10-20 00:00:00+00:00 | 27.17 | 27.18 | 26.9 | 27.04 | 83764655 | |
2017-10-23 00:00:00+00:00 | 27.16 | 27.43 | 27.09 | 27.22 | 69578809 | |
2017-10-24 00:00:00+00:00 | 27.68 | 27.84 | 27.34 | 27.35 | 90312306 | |
2017-10-25 00:00:00+00:00 | 27.635 | 27.92 | 27.34 | 27.89 | 74516438 | |
2017-10-26 00:00:00+00:00 | 27.74 | 27.98 | 27.68 | 27.69 | 55291119 | |
2017-10-27 00:00:00+00:00 | 27.8 | 27.96 | 27.618 | 27.68 | 58856675 | |
2017-10-30 00:00:00+00:00 | 27.6 | 27.76 | 27.46 | 27.65 | 51319946 | |
2017-10-31 00:00:00+00:00 | 27.39 | 27.72 | 27.34 | 27.64 | 50311194 | |
2017-11-01 00:00:00+00:00 | 27.53 | 27.73 | 27.36 | 27.64 | 46527703 | |
2017-11-02 00:00:00+00:00 | 27.87 | 27.935 | 27.275 | 27.51 | 59447338 | |
2017-11-03 00:00:00+00:00 | 27.82 | 27.82 | 27.62 | 27.73 | 37507075 | |
2017-11-06 00:00:00+00:00 | 27.75 | 27.82 | 27.62 | 27.74 | 37009301 | |
2017-11-07 00:00:00+00:00 | 27.18 | 27.77 | 27.02 | 27.74 | 67802318 | |
2017-11-08 00:00:00+00:00 | 26.79 | 26.96 | 26.49 | 26.96 | 82724245 | |
2017-11-09 00:00:00+00:00 | 26.49 | 26.66 | 26.12 | 26.49 | 94715530 | |
2017-11-10 00:00:00+00:00 | 26.51 | 26.74 | 26.46 | 26.59 | 60715402 | |
2017-11-13 00:00:00+00:00 | 26.4 | 26.48 | 26.14 | 26.26 | 55758728 | |
2017-11-14 00:00:00+00:00 | 26.24 | 26.35 | 26.08 | 26.26 | 61283437 | |
2017-11-15 00:00:00+00:00 | 26.79 | 26.93 | 25.81 | 25.99 | 98124987 | |
2017-11-16 00:00:00+00:00 | 26.76 | 27.05 | 26.74 | 26.91 | 54329575 | |
2017-11-17 00:00:00+00:00 | 26.62 | 26.73 | 26.445 | 26.51 | 48300548 | |
2017-11-20 00:00:00+00:00 | 26.74 | 26.84 | 26.54 | 26.74 | 45279556 | |
2017-11-21 00:00:00+00:00 | 26.73 | 26.86 | 26.69 | 26.8 | 57667414 | |
2017-11-22 00:00:00+00:00 | 26.66 | 26.9 | 26.59 | 26.77 | 37984951 | |
2017-11-24 00:00:00+00:00 | 26.59 | 26.74 | 26.57 | 26.72 | 14269207 | |
2017-11-27 00:00:00+00:00 | 26.59 | 26.92 | 26.51 | 26.52 | 44606588 | |
2017-11-28 00:00:00+00:00 | 27.64 | 27.7 | 26.565 | 26.62 | 95409925 | |
2017-11-29 00:00:00+00:00 | 28.28 | 28.49 | 27.84 | 28.02 | 118070904 | |
2017-11-30 00:00:00+00:00 | 28.17 | 28.72 | 28.08 | 28.62 | 120546324 | |
2017-12-01 00:00:00+00:00 | 28.1 | 28.51 | 27.5 | 28.25 | 133921854 | |
2017-12-04 00:00:00+00:00 | 29.06 | 29.31 | 28.82 | 28.85 | 143880151 | |
2017-12-05 00:00:00+00:00 | 28.93 | 29.3 | 28.85 | 29.27 | 91034474 | |
2017-12-06 00:00:00+00:00 | 28.64 | 28.97 | 28.57 | 28.73 | 73902305 | |
2017-12-07 00:00:00+00:00 | 28.78 | 28.97 | 28.37 | 28.45 | 80517375 | |
2017-12-08 00:00:00+00:00 | 29.05 | 29.09 | 28.76 | 29.08 | 60830895 | |
2017-12-11 00:00:00+00:00 | 28.94 | 29.17 | 28.9 | 29.05 | 53947667 | |
2017-12-12 00:00:00+00:00 | 29.32 | 29.5 | 28.91 | 29.05 | 75809939 | |
2017-12-13 00:00:00+00:00 | 28.84 | 29.44 | 28.83 | 29.31 | 78318859 | |
2017-12-14 00:00:00+00:00 | 28.73 | 29.18 | 28.71 | 29.065 | 59891544 | |
2017-12-15 00:00:00+00:00 | 29.04 | 29.18 | 28.74 | 28.905 | 119265023 | |
2017-12-18 00:00:00+00:00 | 29.48 | 29.5 | 29.27 | 29.32 | 69139708 | |
2017-12-19 00:00:00+00:00 | 29.45 | 29.64 | 29.37 | 29.625 | 65397545 | |
2017-12-20 00:00:00+00:00 | 29.48 | 29.84 | 29.45 | 29.765 | 81212183 | |
2017-12-21 00:00:00+00:00 | 29.82 | 29.98 | 29.58 | 29.595 | 72441696 | |
2017-12-22 00:00:00+00:00 | 29.88 | 30.03 | 29.62 | 29.965 | 54073301 | |
2017-12-26 00:00:00+00:00 | 29.78 | 29.94 | 29.58 | 29.74 | 42254735 | |
2017-12-27 00:00:00+00:00 | 29.73 | 29.73 | 29.601 | 29.66 | 31827793 | |
2017-12-28 00:00:00+00:00 | 29.8 | 29.82 | 29.66 | 29.73 | 37885624 | |
2017-12-29 00:00:00+00:00 | 29.52 | 29.88 | 29.52 | 29.85 | 40133412 | |
2018-01-02 00:00:00+00:00 | 29.9 | 29.9 | 29.61 | 29.75 | 56984205 | |
2018-01-03 00:00:00+00:00 | 29.8 | 29.94 | 29.69 | 29.9 | 57770689 | |
2018-01-04 00:00:00+00:00 | 30.19 | 30.44 | 29.88 | 29.97 | 75653541 | |
2018-01-05 00:00:00+00:00 | 30.33 | 30.42 | 30.05 | 30.37 | 56124567 | |
2018-01-08 00:00:00+00:00 | 30.12 | 30.27 | 30.05 | 30.23 | 42109249 | |
2018-01-09 00:00:00+00:00 | 30.27 | 30.54 | 30.13 | 30.2 | 69281639 | |
2018-01-10 00:00:00+00:00 | 30.55 | 30.73 | 30.31 | 30.37 | 62272175 | |
2018-01-11 00:00:00+00:00 | 30.66 | 30.69 | 30.45 | 30.66 | 59015496 | |
2018-01-12 00:00:00+00:00 | 31.19 | 31.2 | 30.77 | 30.88 | 66034385 | |
2018-01-16 00:00:00+00:00 | 31.24 | 31.79 | 31.03 | 31.74 | 104217423 | |
2018-01-17 00:00:00+00:00 | 31.18 | 31.29 | 30.34 | 31 | 122797447 | |
2018-01-18 00:00:00+00:00 | 31.48 | 31.71 | 31.21 | 31.33 | 76276795 | |
2018-01-19 00:00:00+00:00 | 31.72 | 31.74 | 31.46 | 31.58 | 66236420 | |
2018-01-22 00:00:00+00:00 | 31.94 | 31.94 | 31.58 | 31.67 | 52627095 | |
2018-01-23 00:00:00+00:00 | 31.92 | 32.13 | 31.785 | 31.86 | 56124675 | |
2018-01-24 00:00:00+00:00 | 32.09 | 32.21 | 31.82 | 32.009 | 80706611 | |
2018-01-25 00:00:00+00:00 | 32.09 | 32.25 | 31.93 | 32.24 | 60230741 | |
2018-01-26 00:00:00+00:00 | 32.2 | 32.2 | 31.95 | 32.11 | 50334774 | |
2018-01-29 00:00:00+00:00 | 32.28 | 32.45 | 32.18 | 32.25 | 58205136 | |
2018-01-30 00:00:00+00:00 | 31.88 | 32.2 | 31.85 | 31.95 | 58917317 | |
2018-01-31 00:00:00+00:00 | 32 | 32.29 | 31.95 | 32.049 | 65150630 | |
2018-02-01 00:00:00+00:00 | 32.5 | 32.5 | 31.96 | 32 | 62054274 | |
2018-02-02 00:00:00+00:00 | 31.95 | 32.67 | 31.86 | 32.439 | 96387301 | |
2018-02-05 00:00:00+00:00 | 30.26 | 31.98 | 29.15 | 31.12 | 153479504 | |
2018-02-06 00:00:00+00:00 | 31.2 | 31.29 | 29.3 | 29.41 | 161141551 | |
2018-02-07 00:00:00+00:00 | 31.25 | 31.74 | 30.86 | 31.12 | 99831300 | |
2018-02-08 00:00:00+00:00 | 29.74 | 31.38 | 29.73 | 31.31 | 128282047 | |
2018-02-09 00:00:00+00:00 | 30.33 | 30.61 | 29.13 | 30.17 | 142693575 | |
2018-02-12 00:00:00+00:00 | 31.12 | 31.44 | 30.54 | 30.62 | 94696662 | |
2018-02-13 00:00:00+00:00 | 31.18 | 31.4 | 30.76 | 31.04 | 76559902 | |
2018-02-14 00:00:00+00:00 | 32 | 32.03 | 31.055 | 31.17 | 92773022 | |
2018-02-15 00:00:00+00:00 | 32.21 | 32.47 | 31.87 | 32.39 | 67806215 | |
2018-02-16 00:00:00+00:00 | 31.97 | 32.259 | 31.92 | 32 | 61805677 | |
2018-02-20 00:00:00+00:00 | 31.93 | 32.159 | 31.75 | 31.96 | 58356720 | |
2018-02-21 00:00:00+00:00 | 31.87 | 32.365 | 31.77 | 31.8 | 71980440 | |
2018-02-22 00:00:00+00:00 | 31.69 | 32.14 | 31.59 | 31.99 | 64394876 | |
2018-02-23 00:00:00+00:00 | 32.03 | 32.06 | 31.76 | 31.8 | 50959564 | |
2018-02-26 00:00:00+00:00 | 32.42 | 32.439 | 32.049 | 32.17 | 60441481 | |
2018-02-27 00:00:00+00:00 | 32.33 | 32.85 | 32.32 | 32.32 | 68943159 | |
2018-02-28 00:00:00+00:00 | 32.1 | 32.77 | 32.09 | 32.49 | 70351619 | |
2018-03-01 00:00:00+00:00 | 31.48 | 32.345 | 31.36 | 32.07 | 90292066 | |
2018-03-02 00:00:00+00:00 | 31.63 | 31.69 | 30.63 | 31.12 | 82426632 | |
2018-03-05 00:00:00+00:00 | 32.13 | 32.354 | 31.165 | 31.35 | 70074106 | |
2018-03-06 00:00:00+00:00 | 32.11 | 32.32 | 31.95 | 32.299 | 50302159 | |
2018-03-07 00:00:00+00:00 | 32.18 | 32.229 | 31.58 | 31.7 | 52710046 | |
2018-03-08 00:00:00+00:00 | 32.2 | 32.27 | 31.81 | 32.189 | 51206287 | |
2018-03-09 00:00:00+00:00 | 32.72 | 32.729 | 32.4 | 32.47 | 73075006 | |
2018-03-12 00:00:00+00:00 | 32.84 | 33.05 | 32.64 | 32.689 | 59301639 | |
2018-03-13 00:00:00+00:00 | 32.36 | 32.99 | 32.299 | 32.97 | 62823477 | |
2018-03-14 00:00:00+00:00 | 32.14 | 32.549 | 31.93 | 32.549 | 57889408 | |
2018-03-15 00:00:00+00:00 | 32.1 | 32.34 | 31.93 | 32.29 | 41825422 | |
2018-03-16 00:00:00+00:00 | 32.17 | 32.57 | 32.08 | 32.11 | 82176070 | |
2018-03-19 00:00:00+00:00 | 31.98 | 32.22 | 31.68 | 32.13 | 57068585 | |
2018-03-20 00:00:00+00:00 | 31.98 | 32.2 | 31.88 | 32.06 | 44455976 | |
2018-03-21 00:00:00+00:00 | 31.87 | 32.45 | 31.68 | 32 | 64135559 | |
2018-03-22 00:00:00+00:00 | 30.55 | 31.49 | 30.42 | 31.44 | 109386636 | |
2018-03-23 00:00:00+00:00 | 29.17 | 30.82 | 29.05 | 30.69 | 114544472 | |
2018-03-26 00:00:00+00:00 | 30.44 | 30.555 | 29.65 | 29.89 | 81989080 | |
2018-03-27 00:00:00+00:00 | 29.52 | 30.67 | 29.21 | 30.65 | 85188716 | |
2018-03-28 00:00:00+00:00 | 29.39 | 29.96 | 29.01 | 29.79 | 76177788 | |
2018-03-29 00:00:00+00:00 | 29.99 | 30.14 | 29.31 | 29.57 | 65246291 | |
2018-04-02 00:00:00+00:00 | 29.31 | 30.055 | 28.75 | 29.8 | 85943825 | |
2018-04-03 00:00:00+00:00 | 29.59 | 29.65 | 29.1 | 29.54 | 69497275 | |
2018-04-04 00:00:00+00:00 | 29.88 | 29.97 | 28.95 | 29 | 65831606 | |
2018-04-05 00:00:00+00:00 | 30.32 | 30.55 | 30.115 | 30.18 | 55411276 | |
2018-04-06 00:00:00+00:00 | 29.63 | 30.29 | 29.37 | 30.01 | 79474397 | |
2018-04-09 00:00:00+00:00 | 29.87 | 30.625 | 29.76 | 29.82 | 65530400 | |
2018-04-10 00:00:00+00:00 | 30.48 | 30.58 | 30.23 | 30.48 | 60540481 | |
2018-04-11 00:00:00+00:00 | 29.9 | 30.24 | 29.89 | 30.15 | 61963905 | |
2018-04-12 00:00:00+00:00 | 30.65 | 30.8 | 30.165 | 30.22 | 57562976 | |
2018-04-13 00:00:00+00:00 | 29.8 | 31.17 | 29.56 | 31.13 | 98811401 | |
2018-04-16 00:00:00+00:00 | 29.93 | 30.37 | 29.47 | 30.08 | 109100369 | |
2018-04-17 00:00:00+00:00 | 30.04 | 30.37 | 29.88 | 30.17 | 82744702 | |
2018-04-18 00:00:00+00:00 | 29.53 | 30.22 | 29.5 | 30.03 | 79135270 | |
2018-04-19 00:00:00+00:00 | 30.18 | 30.24 | 29.54 | 29.55 | 80740174 | |
2018-04-20 00:00:00+00:00 | 30.26 | 30.53 | 30.13 | 30.26 | 63123024 | |
2018-04-23 00:00:00+00:00 | 30.32 | 30.4 | 30.12 | 30.27 | 50308425 | |
2018-04-24 00:00:00+00:00 | 30.19 | 30.86 | 30 | 30.46 | 81153889 | |
2018-04-25 00:00:00+00:00 | 30.14 | 30.34 | 29.8 | 30.09 | 64240382 | |
2018-04-26 00:00:00+00:00 | 30.07 | 30.28 | 29.99 | 30.04 | 44862773 | |
2018-04-27 00:00:00+00:00 | 30.15 | 30.21 | 29.97 | 29.99 | 43905512 | |
2018-04-30 00:00:00+00:00 | 29.92 | 30.43 | 29.92 | 30.27 | 54818739 | |
2018-05-01 00:00:00+00:00 | 29.95 | 29.96 | 29.62 | 29.92 | 52630875 | |
2018-05-02 00:00:00+00:00 | 29.58 | 30.07 | 29.56 | 29.95 | 67879858 | |
2018-05-03 00:00:00+00:00 | 29.2 | 29.52 | 28.43 | 29.51 | 110445605 | |
2018-05-04 00:00:00+00:00 | 29.3 | 29.42 | 28.82 | 28.99 | 58166030 | |
2018-05-07 00:00:00+00:00 | 29.64 | 29.788 | 29.33 | 29.44 | 46537019 | |
2018-05-08 00:00:00+00:00 | 29.93 | 30.125 | 29.665 | 29.77 | 64647835 | |
2018-05-09 00:00:00+00:00 | 30.72 | 30.79 | 30.08 | 30.1 | 71822529 | |
2018-05-10 00:00:00+00:00 | 30.89 | 31.07 | 30.42 | 30.61 | 54475803 | |
2018-05-11 00:00:00+00:00 | 30.92 | 31.03 | 30.81 | 30.91 | 41930557 | |
2018-05-14 00:00:00+00:00 | 31.12 | 31.23 | 31.03 | 31.04 | 41634010 | |
2018-05-15 00:00:00+00:00 | 31.22 | 31.36 | 30.85 | 30.92 | 62290774 | |
2018-05-16 00:00:00+00:00 | 31.06 | 31.25 | 31.02 | 31.18 | 43337537 | |
2018-05-17 00:00:00+00:00 | 30.81 | 31.08 | 30.67 | 31.04 | 45566657 | |
2018-05-18 00:00:00+00:00 | 30.26 | 30.79 | 30.23 | 30.77 | 55842588 | |
2018-05-21 00:00:00+00:00 | 30.55 | 30.68 | 30.48 | 30.52 | 34592438 | |
2018-05-22 00:00:00+00:00 | 30.89 | 31.14 | 30.54 | 30.6 | 60720402 | |
2018-05-23 00:00:00+00:00 | 30.44 | 30.76 | 30.081 | 30.71 | 73081366 | |
2018-05-24 00:00:00+00:00 | 30.21 | 30.41 | 29.83 | 30.4 | 61592751 | |
2018-05-25 00:00:00+00:00 | 30.16 | 30.19 | 29.88 | 30.01 | 43955277 | |
2018-05-29 00:00:00+00:00 | 28.96 | 29.8 | 28.7 | 29.76 | 135380296 | |
2018-05-30 00:00:00+00:00 | 29.49 | 29.6 | 29.16 | 29.29 | 78861008 | |
2018-05-31 00:00:00+00:00 | 29.04 | 29.26 | 28.825 | 29.2 | 94612359 | |
2018-06-01 00:00:00+00:00 | 29.4 | 29.66 | 29.31 | 29.49 | 64580957 | |
2018-06-04 00:00:00+00:00 | 29.4 | 29.59 | 29.33 | 29.55 | 42386702 | |
2018-06-05 00:00:00+00:00 | 29.12 | 29.34 | 29.06 | 29.31 | 46709649 | |
2018-06-06 00:00:00+00:00 | 30.04 | 30.05 | 29.28 | 29.29 | 74299170 | |
2018-06-07 00:00:00+00:00 | 30.09 | 30.31 | 29.89 | 30.2 | 66638273 | |
2018-06-08 00:00:00+00:00 | 30.01 | 30.05 | 29.81 | 30.01 | 49186782 | |
2018-06-11 00:00:00+00:00 | 30.06 | 30.41 | 30.06 | 30.13 | 46578051 | |
2018-06-12 00:00:00+00:00 | 29.9 | 30.27 | 29.75 | 30.17 | 49210024 | |
2018-06-13 00:00:00+00:00 | 29.84 | 30.355 | 29.695 | 29.98 | 68282762 | |
2018-06-14 00:00:00+00:00 | 29.5 | 30.05 | 29.38 | 30.04 | 77025742 | |
2018-06-15 00:00:00+00:00 | 29.28 | 29.41 | 28.865 | 29.25 | 102217511 | |
2018-06-18 00:00:00+00:00 | 29.4 | 29.5 | 28.81 | 28.94 | 52299539 | |
2018-06-19 00:00:00+00:00 | 29.26 | 29.37 | 28.95 | 29.04 | 62582970 | |
2018-06-20 00:00:00+00:00 | 29.24 | 29.5 | 29.18 | 29.42 | 51983832 | |
2018-06-21 00:00:00+00:00 | 29.29 | 29.45 | 29 | 29.21 | 67998508 | |
2018-06-22 00:00:00+00:00 | 28.99 | 29.52 | 28.99 | 29.46 | 76079178 | |
2018-06-25 00:00:00+00:00 | 28.48 | 28.91 | 28.26 | 28.86 | 72741314 | |
2018-06-26 00:00:00+00:00 | 28.54 | 28.67 | 28.01 | 28.52 | 55889852 | |
2018-06-27 00:00:00+00:00 | 28.24 | 28.845 | 28.24 | 28.51 | 61066628 | |
2018-06-28 00:00:00+00:00 | 28.67 | 28.84 | 28.3 | 28.4 | 69893231 | |
2018-06-29 00:00:00+00:00 | 28.19 | 29.17 | 28.18 | 29.09 | 101895880 | |
2018-07-02 00:00:00+00:00 | 28.28 | 28.28 | 27.83 | 28.08 | 52090546 | |
2018-07-03 00:00:00+00:00 | 27.78 | 28.43 | 27.74 | 28.33 | 40597682 | |
2018-07-05 00:00:00+00:00 | 27.92 | 28.079 | 27.81 | 27.95 | 44123567 | |
2018-07-06 00:00:00+00:00 | 28.03 | 28.15 | 27.63 | 27.81 | 39424933 | |
2018-07-09 00:00:00+00:00 | 29.05 | 29.09 | 28.22 | 28.23 | 60593658 | |
2018-07-10 00:00:00+00:00 | 28.83 | 29.33 | 28.75 | 29.22 | 58271064 | |
2018-07-11 00:00:00+00:00 | 28.68 | 28.91 | 28.62 | 28.67 | 48936591 | |
2018-07-12 00:00:00+00:00 | 28.77 | 28.97 | 28.66 | 28.9 | 50729700 | |
2018-07-13 00:00:00+00:00 | 28.55 | 28.69 | 28.221 | 28.62 | 74861603 | |
2018-07-16 00:00:00+00:00 | 29.78 | 29.85 | 28.73 | 28.78 | 128205773 | |
2018-07-17 00:00:00+00:00 | 30.01 | 30.2 | 29.58 | 29.89 | 86967476 | |
2018-07-18 00:00:00+00:00 | 30.13 | 30.29 | 29.84 | 29.92 | 58908069 | |
2018-07-19 00:00:00+00:00 | 29.67 | 30.015 | 29.64 | 29.93 | 59123021 | |
2018-07-20 00:00:00+00:00 | 30.13 | 30.16 | 29.46 | 29.62 | 80292243 | |
2018-07-23 00:00:00+00:00 | 30.75 | 30.85 | 30.13 | 30.14 | 73723538 | |
2018-07-24 00:00:00+00:00 | 30.83 | 31.11 | 30.65 | 30.85 | 64652784 | |
2018-07-25 00:00:00+00:00 | 31.07 | 31.11 | 30.67 | 30.7 | 57391699 | |
2018-07-26 00:00:00+00:00 | 30.94 | 31.21 | 30.89 | 31.19 | 45903175 | |
2018-07-27 00:00:00+00:00 | 31.06 | 31.14 | 30.8 | 30.99 | 56979036 | |
2018-07-30 00:00:00+00:00 | 31.31 | 31.45 | 31.13 | 31.14 | 58393904 | |
2018-07-31 00:00:00+00:00 | 30.88 | 31.415 | 30.82 | 31.36 | 63625038 | |
2018-08-01 00:00:00+00:00 | 31.25 | 31.55 | 31.17 | 31.22 | 68471546 | |
2018-08-02 00:00:00+00:00 | 31.28 | 31.31 | 30.87 | 30.98 | 46003576 | |
2018-08-03 00:00:00+00:00 | 31.51 | 31.51 | 31.16 | 31.18 | 49931310 | |
2018-08-06 00:00:00+00:00 | 31.52 | 31.59 | 31.325 | 31.45 | 41079640 | |
2018-08-07 00:00:00+00:00 | 31.51 | 31.8 | 31.49 | 31.55 | 43604958 | |
2018-08-08 00:00:00+00:00 | 31.8 | 31.91 | 31.48 | 31.51 | 43288591 | |
2018-08-09 00:00:00+00:00 | 31.6 | 31.79 | 31.55 | 31.73 | 37740761 | |
2018-08-10 00:00:00+00:00 | 31.19 | 31.325 | 30.9 | 31.23 | 54712011 | |
2018-08-13 00:00:00+00:00 | 30.48 | 31.12 | 30.45 | 31.08 | 62834822 | |
2018-08-14 00:00:00+00:00 | 30.79 | 30.9 | 30.42 | 30.61 | 42172878 | |
2018-08-15 00:00:00+00:00 | 30.36 | 30.62 | 30.155 | 30.48 | 59326967 | |
2018-08-16 00:00:00+00:00 | 30.72 | 30.84 | 30.52 | 30.56 | 43583825 | |
2018-08-17 00:00:00+00:00 | 30.74 | 30.855 | 30.545 | 30.66 | 38275432 | |
2018-08-20 00:00:00+00:00 | 30.87 | 30.925 | 30.65 | 30.69 | 39889778 | |
2018-08-21 00:00:00+00:00 | 31.02 | 31.25 | 30.83 | 30.91 | 53466881 | |
2018-08-22 00:00:00+00:00 | 30.98 | 31.085 | 30.88 | 30.92 | 33000899 | |
2018-08-23 00:00:00+00:00 | 30.84 | 30.98 | 30.74 | 30.94 | 37777195 | |
2018-08-24 00:00:00+00:00 | 30.89 | 31.08 | 30.85 | 30.94 | 42964476 | |
2018-08-27 00:00:00+00:00 | 31.31 | 31.49 | 30.99 | 31.03 | 47364148 | |
2018-08-28 00:00:00+00:00 | 31.27 | 31.41 | 31.19 | 31.36 | 38447811 | |
2018-08-29 00:00:00+00:00 | 31.14 | 31.31 | 31.07 | 31.24 | 45844152 | |
2018-08-30 00:00:00+00:00 | 31.01 | 31.15 | 30.95 | 31.02 | 46559486 | |
2018-08-31 00:00:00+00:00 | 30.93 | 30.95 | 30.62 | 30.88 | 48595082 | |
2018-09-04 00:00:00+00:00 | 31.14 | 31.16 | 30.82 | 30.91 | 44527968 | |
2018-09-05 00:00:00+00:00 | 31.18 | 31.24 | 30.97 | 31.07 | 46427457 | |
2018-09-06 00:00:00+00:00 | 30.85 | 31.11 | 30.73 | 31.05 | 41692185 | |
2018-09-07 00:00:00+00:00 | 30.86 | 31.12 | 30.63 | 31 | 48514924 | |
2018-09-10 00:00:00+00:00 | 30.82 | 31 | 30.75 | 30.88 | 33065157 | |
2018-09-11 00:00:00+00:00 | 30.85 | 30.9 | 30.7 | 30.72 | 48391250 | |
2018-09-12 00:00:00+00:00 | 30.43 | 30.96 | 30.38 | 30.88 | 58933386 | |
2018-09-13 00:00:00+00:00 | 30.14 | 30.67 | 30.08 | 30.53 | 57950233 | |
2018-09-14 00:00:00+00:00 | 30.37 | 30.39 | 30.11 | 30.13 | 33103669 | |
2018-09-17 00:00:00+00:00 | 30.28 | 30.49 | 30.15 | 30.34 | 34050278 | |
2018-09-18 00:00:00+00:00 | 30.21 | 30.4 | 30.13 | 30.33 | 49837464 | |
2018-09-19 00:00:00+00:00 | 31 | 31.2 | 30.13 | 30.13 | 91304426 | |
2018-09-20 00:00:00+00:00 | 31.19 | 31.37 | 31.14 | 31.3 | 82751190 | |
2018-09-21 00:00:00+00:00 | 31.03 | 31.37 | 30.97 | 31.34 | 85445286 | |
2018-09-24 00:00:00+00:00 | 30.74 | 31.2 | 30.66 | 30.98 | 45353607 | |
2018-09-25 00:00:00+00:00 | 30.67 | 31.04 | 30.6 | 30.85 | 40715873 | |
2018-09-26 00:00:00+00:00 | 30.13 | 30.74 | 30.06 | 30.71 | 57222952 | |
2018-09-27 00:00:00+00:00 | 29.94 | 30.31 | 29.93 | 30.24 | 45462288 | |
2018-09-28 00:00:00+00:00 | 29.46 | 29.85 | 29.42 | 29.65 | 73275235 | |
2018-10-01 00:00:00+00:00 | 29.65 | 29.94 | 29.54 | 29.68 | 48539096 | |
2018-10-02 00:00:00+00:00 | 29.58 | 29.72 | 29.27 | 29.58 | 42786255 | |
2018-10-03 00:00:00+00:00 | 30 | 30.175 | 29.72 | 29.81 | 60663616 | |
2018-10-04 00:00:00+00:00 | 30.43 | 30.79 | 30.14 | 30.17 | 71995080 | |
2018-10-05 00:00:00+00:00 | 30.23 | 30.65 | 30.05 | 30.6 | 51722674 | |
2018-10-08 00:00:00+00:00 | 30.27 | 30.42 | 29.91 | 30.05 | 47054908 | |
2018-10-09 00:00:00+00:00 | 29.98 | 30.19 | 29.87 | 30.03 | 57490810 | |
2018-10-10 00:00:00+00:00 | 29.24 | 30.13 | 29.21 | 29.98 | 88338165 | |
2018-10-11 00:00:00+00:00 | 28.36 | 29.25 | 28.26 | 28.89 | 111417275 | |
2018-10-12 00:00:00+00:00 | 28.46 | 29.04 | 27.73 | 28.99 | 101947113 | |
2018-10-15 00:00:00+00:00 | 27.92 | 28.62 | 27.64 | 28.49 | 112963592 | |
2018-10-16 00:00:00+00:00 | 28.53 | 28.58 | 27.9 | 28.24 | 87633865 | |
2018-10-17 00:00:00+00:00 | 28.9 | 29.19 | 28.16 | 28.37 | 78596745 | |
2018-10-18 00:00:00+00:00 | 28.25 | 28.9 | 28.14 | 28.67 | 77080398 | |
2018-10-19 00:00:00+00:00 | 28.32 | 28.52 | 27.97 | 28.14 | 64231096 | |
2018-10-22 00:00:00+00:00 | 27.38 | 28.35 | 27.31 | 28.32 | 89660977 | |
2018-10-23 00:00:00+00:00 | 27.02 | 27.2 | 26.105 | 26.39 | 102244478 | |
2018-10-24 00:00:00+00:00 | 26.19 | 26.9 | 26.08 | 26.86 | 85041168 | |
2018-10-25 00:00:00+00:00 | 26.59 | 26.93 | 26.18 | 26.38 | 73309321 | |
2018-10-26 00:00:00+00:00 | 26.39 | 26.58 | 25.88 | 26.33 | 90904719 | |
2018-10-29 00:00:00+00:00 | 26.61 | 27.18 | 26.29 | 26.69 | 87404322 | |
2018-10-30 00:00:00+00:00 | 26.78 | 26.86 | 26.32 | 26.76 | 78716947 | |
2018-10-31 00:00:00+00:00 | 27.5 | 27.88 | 27.06 | 27.1 | 91899324 | |
2018-11-01 00:00:00+00:00 | 27.81 | 28 | 27.61 | 27.77 | 49121761 | |
2018-11-02 00:00:00+00:00 | 27.89 | 28.38 | 27.52 | 28.01 | 68878579 | |
2018-11-05 00:00:00+00:00 | 28.06 | 28.24 | 27.89 | 27.95 | 40329161 | |
2018-11-06 00:00:00+00:00 | 28.21 | 28.245 | 27.74 | 28.02 | 43143905 | |
2018-11-07 00:00:00+00:00 | 28.54 | 28.67 | 28.14 | 28.415 | 55645361 | |
2018-11-08 00:00:00+00:00 | 28.87 | 29.12 | 28.43 | 28.5 | 48930851 | |
2018-11-09 00:00:00+00:00 | 28.52 | 28.89 | 28.384 | 28.75 | 43102533 | |
2018-11-12 00:00:00+00:00 | 27.75 | 28.52 | 27.64 | 28.38 | 50160927 | |
2018-11-13 00:00:00+00:00 | 27.76 | 28.14 | 27.69 | 27.75 | 57304289 | |
2018-11-14 00:00:00+00:00 | 27.21 | 28.1 | 26.78 | 27.95 | 68623979 | |
2018-11-15 00:00:00+00:00 | 27.9 | 27.97 | 26.88 | 27.14 | 65582163 | |
2018-11-16 00:00:00+00:00 | 27.75 | 27.89 | 27.42 | 27.69 | 52223527 |
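The rows above are daily price bars. A minimal sketch for parsing such pipe-delimited rows with pandas; the column order (date, open, high, low, close, volume) and the file name 'bars.txt' are assumptions, since the table's header is not visible here:

import pandas as pd

# 'bars.txt' is a hypothetical file holding the pipe-delimited rows above.
df = pd.read_csv('bars.txt', sep='|', header=None, engine='python',
                 skipinitialspace=True)
df = df.iloc[:, :6]  # drop the empty columns created by the trailing pipes
df.columns = ['date', 'open', 'high', 'low', 'close', 'volume']  # assumed order
df['date'] = pd.to_datetime(df['date'], utc=True)
df = df.set_index('date').astype(float)
print(df['close'].pct_change().describe())  # quick sanity check on daily returns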
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout

def create_new_model(neurons=20, act_1='sigmoid', dropout_ratio=0.15):
    """Binary classifier over 5 input features: five widening Dense blocks,
    each followed by the chosen activation and dropout, then a sigmoid output."""
    model = Sequential()
    # Hidden blocks widen as neurons, 2*neurons, ..., 5*neurons.
    for i in range(1, 6):
        kwargs = {'kernel_initializer': 'he_normal', 'bias_initializer': 'zeros'}
        if i == 1:
            kwargs['input_shape'] = (5,)  # five features per sample
        model.add(Dense(neurons * i, **kwargs))
        model.add(Activation(act_1))
        model.add(Dropout(dropout_ratio))
    model.add(Dense(1, activation='sigmoid'))  # probability of the positive class
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
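A quick way to exercise create_new_model is to fit it on random data of the right shape. The shapes below (1000 samples, 5 features, binary labels) are assumptions consistent only with input_shape=(5,) and the binary cross-entropy loss, not with any real feature set from this repository:

import numpy as np

# Hypothetical smoke-test data: 1000 samples x 5 features, binary labels.
X = np.random.rand(1000, 5)
y = np.random.randint(0, 2, size=1000)

model = create_new_model(neurons=20, act_1='sigmoid', dropout_ratio=0.15)
model.fit(X, y, epochs=5, batch_size=32, validation_split=0.2, verbose=1)
print(model.predict(X[:3]))  # per-sample probabilities in [0, 1]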
"{\"class_name\": \"Sequential\", \"config\": {\"name\": \"sequential_46\", \"layers\": [{\"class_name\": \"Dense\", \"config\": {\"name\": \"dense_271\", \"trainable\": true, \"batch_input_shape\": [null, 5], \"dtype\": \"float32\", \"units\": 8, \"activation\": \"linear\", \"use_bias\": true, \"kernel_initializer\": {\"class_name\": \"VarianceScaling\", \"config\": {\"scale\": 2.0, \"mode\": \"fan_in\", \"distribution\": \"normal\", \"seed\": null}}, \"bias_initializer\": {\"class_name\": \"Zeros\", \"config\": {}}, \"kernel_regularizer\": null, \"bias_regularizer\": null, \"activity_regularizer\": null, \"kernel_constraint\": null, \"bias_constraint\": null}}, {\"class_name\": \"Activation\", \"config\": {\"name\": \"activation_226\", \"trainable\": true, \"activation\": \"relu\"}}, {\"class_name\": \"Dropout\", \"config\": {\"name\": \"dropout_226\", \"trainable\": true, \"rate\": 0.02, \"noise_shape\": null, \"seed\": null}}, {\"class_name\": \"Dense\", \"config\": {\"name\": \"dense_272\", \"trainable\": true, \"units\": 16, \"activation\": \"linear\", \"use_bias\": true, \"kernel_initializer\": {\"class_name\": \"VarianceScaling\", \"config\": {\"scale\": 2.0, \"mode\": \"fan_in\", \"distribution\": \"normal\", \"seed\": null}}, \"bias_initializer\": {\"class_name\": \"Zeros\", \"config\": {}}, \"kernel_regularizer\": null, \"bias_regularizer\": null, \"activity_regularizer\": null, \"kernel_constraint\": null, \"bias_constraint\": null}}, {\"class_name\": \"Activation\", \"config\": {\"name\": \"activation_227\", \"trainable\": true, \"activation\": \"relu\"}}, {\"class_name\": \"Dropout\", \"config\": {\"name\": \"dropout_227\", \"trainable\": true, \"rate\": 0.02, \"noise_shape\": null, \"seed\": null}}, {\"class_name\": \"Dense\", \"config\": {\"name\": \"dense_273\", \"trainable\": true, \"units\": 24, \"activation\": \"linear\", \"use_bias\": true, \"kernel_initializer\": {\"class_name\": \"VarianceScaling\", \"config\": {\"scale\": 2.0, \"mode\": \"fan_in\", \"distribution\": \"normal\", \"seed\": null}}, \"bias_initializer\": {\"class_name\": \"Zeros\", \"config\": {}}, \"kernel_regularizer\": null, \"bias_regularizer\": null, \"activity_regularizer\": null, \"kernel_constraint\": null, \"bias_constraint\": null}}, {\"class_name\": \"Activation\", \"config\": {\"name\": \"activation_228\", \"trainable\": true, \"activation\": \"relu\"}}, {\"class_name\": \"Dropout\", \"config\": {\"name\": \"dropout_228\", \"trainable\": true, \"rate\": 0.02, \"noise_shape\": null, \"seed\": null}}, {\"class_name\": \"Dense\", \"config\": {\"name\": \"dense_274\", \"trainable\": true, \"units\": 32, \"activation\": \"linear\", \"use_bias\": true, \"kernel_initializer\": {\"class_name\": \"VarianceScaling\", \"config\": {\"scale\": 2.0, \"mode\": \"fan_in\", \"distribution\": \"normal\", \"seed\": null}}, \"bias_initializer\": {\"class_name\": \"Zeros\", \"config\": {}}, \"kernel_regularizer\": null, \"bias_regularizer\": null, \"activity_regularizer\": null, \"kernel_constraint\": null, \"bias_constraint\": null}}, {\"class_name\": \"Activation\", \"config\": {\"name\": \"activation_229\", \"trainable\": true, \"activation\": \"relu\"}}, {\"class_name\": \"Dropout\", \"config\": {\"name\": \"dropout_229\", \"trainable\": true, \"rate\": 0.02, \"noise_shape\": null, \"seed\": null}}, {\"class_name\": \"Dense\", \"config\": {\"name\": \"dense_275\", \"trainable\": true, \"units\": 40, \"activation\": \"linear\", \"use_bias\": true, \"kernel_initializer\": {\"class_name\": \"VarianceScaling\", 
\"config\": {\"scale\": 2.0, \"mode\": \"fan_in\", \"distribution\": \"normal\", \"seed\": null}}, \"bias_initializer\": {\"class_name\": \"Zeros\", \"config\": {}}, \"kernel_regularizer\": null, \"bias_regularizer\": null, \"activity_regularizer\": null, \"kernel_constraint\": null, \"bias_constraint\": null}}, {\"class_name\": \"Activation\", \"config\": {\"name\": \"activation_230\", \"trainable\": true, \"activation\": \"relu\"}}, {\"class_name\": \"Dropout\", \"config\": {\"name\": \"dropout_230\", \"trainable\": true, \"rate\": 0.02, \"noise_shape\": null, \"seed\": null}}, {\"class_name\": \"Dense\", \"config\": {\"name\": \"dense_276\", \"trainable\": true, \"units\": 1, \"activation\": \"sigmoid\", \"use_bias\": true, \"kernel_initializer\": {\"class_name\": \"VarianceScaling\", \"config\": {\"scale\": 1.0, \"mode\": \"fan_avg\", \"distribution\": \"uniform\", \"seed\": null}}, \"bias_initializer\": {\"class_name\": \"Zeros\", \"config\": {}}, \"kernel_regularizer\": null, \"bias_regularizer\": null, \"activity_regularizer\": null, \"kernel_constraint\": null, \"bias_constraint\": null}}]}, \"keras_version\": \"2.2.4\", \"backend\": \"tensorflow\"}" |