|
# -*- coding: utf-8 -*- |
|
"""ir_ai.ipynb |
|
|
|
Automatically generated by Colaboratory. |
|
|
|
ライセンスはパブリックフリー(CC0)で良いですが、使用しているライブラリのライセンスは |
|
それぞれのライブラリに依存します。 |
|
https://creativecommons.org/publicdomain/zero/1.0/deed |
|
|
|
""" |
|
|
|
# Download the DHT12 log file and parse each JSON line into a dict.
import urllib.request
import json
from collections import OrderedDict
import pprint

html_str = ""
hashs = []  # parsed log records, one dict per valid JSON line

with urllib.request.urlopen("https://raw.githubusercontent.com/hakua-doublemoon/IRSendRev_M5Stack/with_test_data/dht12_log.txt") as res:
    html_str = res.read().decode("utf-8").split("\n")

for index, line in enumerate(html_str):
    #print("{}: {}".format(index, line))
    obj = None
    try:
        obj = json.loads(line)
    except json.JSONDecodeError:
        # Skip blank lines and any non-JSON noise in the log; the old
        # bare `except:` also hid unrelated errors — narrowed here.
        pass
    if obj is not None:
        hashs.append(obj)

#for obj in hashs:
#    pprint.pprint(obj)
pprint.pprint(hashs[1])
|
|
|
# Convert the data into a form Keras can process.

import numpy as np

import numpy as math  # NOTE(review): aliases numpy as "math" — `math.ceil` below is really np.ceil (returns a float, not an int)

from datetime import datetime

import keras.utils as kutl



base_data = []  # flat buffer of feature values; reshaped to (N, 3) later

base_labl = []  # 0/1 labels (air-conditioner switch state per record)

btim = datetime.strptime("00:00:00", "%H:%M:%S")  # midnight reference for time-of-day deltas

time_unt = 48  # quantize one day into 48 buckets (30-minute steps)

time_div = 86400/time_unt  # seconds per time bucket

tmp_unt = 20  # quantize the 0–30 degree temperature range into 20 buckets

tmp_div = 30/tmp_unt  # degrees per temperature bucket
|
# Convert each log record into a 3-feature vector plus a 0/1 label.
# NOTE: in this file `math` is an alias for numpy, so np.ceil is used
# directly here — it is the same function the original called.
rows = []
for obj in hashs:
    # -- time feature: seconds since midnight, quantized into time_unt
    #    buckets, then divided by time_unt so the value stays <= 1
    tstamp = datetime.strptime(obj["time"], "%H:%M:%S")
    delta = tstamp - btim
    time_bucket = np.ceil(delta.total_seconds() / time_div)
    tts = time_bucket / time_unt
    # -- temperature feature: quantized into tmp_unt buckets, scaled
    #    (the "tempareture" key spelling comes from the logger itself)
    tmp_bucket = np.ceil(obj["tempareture"] / tmp_div)
    tmps = tmp_bucket / tmp_unt
    # feature vector: time, temperature, and their interaction term
    rows.append((tts, tmps, tts * tmps))
    # label: 1 while the air conditioner switch was on
    base_labl.append(1 if obj["AC_SW"] else 0)

# Build the flat float array once at the end instead of np.append per
# record — same values, O(n) instead of the original O(n^2) reallocation.
base_data = np.asarray(rows, dtype=float).ravel()
|
|
|
# Reshape the flat feature buffer into an (N, 3) matrix for training.
train_data = np.reshape(base_data, (-1, 3))

# One-hot encode the 0/1 labels (used only by the commented-out
# categorical visualization call below; fitting uses base_labl directly).
train_labl = kutl.to_categorical(base_labl)



print("data0: ", train_data[0])

print("label0: ", base_labl[0])

print("label0(category): ", train_labl[0])



print('train_data.shape:', train_data.shape)
|
|
|
# 可視化 |
|
import matplotlib.pyplot as plt |
|
|
|
def data_label_split_show(labels, isCat, threshold=0.5):
    """Scatter-plot the global ``train_data`` rows, colour-split by label.

    labels:    one entry per train_data row — either one-hot vectors
               (isCat=True) or scalar scores/0-1 labels (isCat=False,
               thresholded at `threshold`).
    isCat:     True when labels are categorical (one-hot) rows.
    threshold: cut-off for scalar labels (default 0.5).

    NOTE(review): reads the module-level ``train_data`` array (N x 3)
    and draws on the current pyplot figure — positives red, negatives
    yellow; x = time feature, y = temperature feature.
    """
    t_rows = []  # rows labelled 1 ("on"), plotted red
    f_rows = []  # rows labelled 0 ("off"), plotted yellow
    for index, label in enumerate(labels):
        if isCat:
            lb = np.where(label > 0)[0][0]
        else:
            lb = (1 if label > threshold else 0)
        # Collect into plain lists; the original np.append per sample
        # reallocated the array each iteration (O(n^2)).
        (f_rows if lb == 0 else t_rows).append(train_data[index])

    f_data = np.asarray(f_rows).reshape((-1, 3))
    t_data = np.asarray(t_rows).reshape((-1, 3))
    plt.plot(f_data[:, 0], f_data[:, 1], 'y.')
    plt.plot(t_data[:, 0], t_data[:, 1], 'r.')
|
|
|
|
|
|
|
#data_label_split_show(train_labl, True)

# Visualize the ground-truth labels (scalar 0/1 form) on the
# time/temperature plane.
data_label_split_show(base_labl, False)
|
|
|
def plot_history_loss(fit, axL):
    """Draw the training and validation loss curves onto the given axes.

    fit: the History object returned by model.fit (its .history dict
         must contain 'loss' and 'val_loss').
    axL: a matplotlib Axes to draw on.
    """
    curves = (
        ('loss', "loss for training"),
        ('val_loss', "loss for validation"),
    )
    for key, text in curves:
        axL.plot(fit.history[key], label=text)
    axL.set_title('model loss')
    axL.set_xlabel('epoch')
    axL.set_ylabel('loss')
    axL.legend(loc='upper right')
|
|
|
# acc |
|
def plot_history_acc(fit, axR):
    """Draw the training and validation accuracy curves onto the given axes.

    fit: the History object returned by model.fit (its .history dict
         must contain the accuracy metrics).
    axR: a matplotlib Axes to draw on.
    """
    hist = fit.history
    # Keras records the metric as 'acc' in older versions and
    # 'accuracy' in newer ones — accept either so the plot survives
    # a library upgrade.
    acc = hist['acc'] if 'acc' in hist else hist['accuracy']
    val_acc = hist['val_acc'] if 'val_acc' in hist else hist['val_accuracy']
    # Bug fix: the legend labels used to read "loss for training" /
    # "loss for validation" (copy-pasted from the loss plot).
    axR.plot(acc, label="accuracy for training")
    axR.plot(val_acc, label="accuracy for validation")
    axR.set_title('model accuracy')
    axR.set_xlabel('epoch')
    axR.set_ylabel('accuracy')
    axR.legend(loc='upper right')
|
|
|
from tensorflow.python.keras.models import Sequential |
|
from tensorflow.python.keras.layers import Dense |
|
from tensorflow.python.keras.callbacks import TensorBoard |
|
from tensorflow.python.keras import optimizers |
|
|
|
# Feed-forward binary classifier: 3 input features -> 32 -> 16 -> 1.
model = Sequential()

model.add(

    # hidden layer 1: 32 ReLU units over the 3 input features
    Dense(32, input_dim=3, activation='relu')

)

model.add(

    # hidden layer 2: 16 ReLU units
    Dense(16, activation='relu')

)

model.add(

    # output: single sigmoid unit — score for the positive (AC on) label
    Dense(units=1, activation='sigmoid')

)
|
|
|
|
|
# Plain SGD optimizer with learning rate 0.1.
# NOTE(review): the original comment here claimed "all parameter
# gradients will be clipped to a maximum norm of 1", but no
# clipnorm/clipvalue argument is passed — nothing is clipped.
# (`lr` is the legacy argument name; newer Keras spells it
# `learning_rate` — confirm against the installed version.)
sgd = optimizers.SGD(lr=0.1)

model.compile(

    optimizer=sgd,

    metrics=['accuracy'],

    loss='binary_crossentropy'

)

# Log training curves for TensorBoard under ./logs.
tsb = TensorBoard(log_dir='./logs')

fit = model.fit(

    train_data,

    base_labl,  # plain Python list of 0/1 labels — presumably converted by Keras internally; verify

    batch_size=2,

    epochs=100,

    validation_split=0.2,  # hold out a fraction of the data for validation

    verbose=0,

    callbacks=[tsb]

)

# Evaluate on the full training set — NOT a held-out test set, so this
# score overstates generalization.
score = model.evaluate(train_data, base_labl, verbose=0)
|
|
|
# Plot the training curves side by side: loss (left), accuracy (right).
fig, (axL, axR) = plt.subplots(ncols=2, figsize=(10,4))

plot_history_loss(fit, axL)

plot_history_acc(fit, axR)

fig.show()

#plt.close()



# Predict on the training data itself and inspect a couple of raw
# sigmoid outputs.
predicts = model.predict(train_data)

print(predicts[0])

print(predicts[2])



# Visualize the model's predictions on the time/temperature plane
# (scalar scores, thresholded at the function's default of 0.5).
data_label_split_show(predicts, False)