# Gist by @chizuchizu, created September 21, 2020 03:56
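"""Thermal camera overlay demo.

Grabs frames from a USB webcam, reads an AMG88xx thermal sensor, draws motion
contours, shows the peak temperature as text plus a color bar under the video,
and switches to a "sleeping" screen when no motion has been seen for
`interval` seconds.
"""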
import cv2
import time
import busio
import board
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import adafruit_amg88xx
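# Set up the AMG88xx thermal sensor (an 8x8 temperature grid, read via sensor.pixels) over I2C.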
i2c_bus = busio.I2C(board.SCL, board.SDA)
sensor = adafruit_amg88xx.AMG88XX(i2c_bus)
time.sleep(.1)
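# Load the logo PNG with its alpha channel; logo2 turns black pixels white so the
# logo stays visible on the dark "sleeping" screen.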
logo = cv2.imread("../logo.png", -1)
logo2 = logo.copy()
logo2[:, :, :3] = np.where(logo[:, :, :3] == 0, 255, logo[:, :, :3])
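# Output canvas size (WIDTH x HEIGHT) and requested webcam capture resolution.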
HEIGHT = 1024
CAMERA_HEIGHT = 720
CAMERA_WIDTH = 960
WIDTH = 600
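# Open the default webcam: 1-frame buffer, 960x720 at 30 fps.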
cap = cv2.VideoCapture(0) # cv2.CAP_V4L2
# cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('H', '2', '6', '4'));
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
# cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('Y', 'U', 'Y', 'V'))
cap.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)
cap.set(cv2.CAP_PROP_FPS, 30)
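# avg: running-average frame used for motion detection.
# gain / offset_x / offset_green: shape of the sigmoid color map.
# interval: seconds without motion before switching to the sleeping screen.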
avg = None
gain = 10
offset_x = 0.2
offset_green = 0.6
start_time = time.time()
start_deg = 0
interval = 5
sleep = False
def vconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):
    """Resize all images to the smallest width in the list, then stack them vertically."""
    w_min = min(im.shape[1] for im in im_list)
    im_list_resize = [cv2.resize(im, (w_min, int(im.shape[0] * w_min / im.shape[1])), interpolation=interpolation)
                      for im in im_list]
    return cv2.vconcat(im_list_resize)
def sigmoid(x, gain=1, offset_x=0):
    """Smooth step in [0, 1] built from tanh; gain controls steepness, offset_x shifts the center."""
    return (np.tanh(((x + offset_x) * gain) / 2) + 1) / 2
def colorBarRGB(x):
    """Map a normalized temperature x in [0, 1] to a blue-green-red (BGR) color."""
    x = (x * 2) - 1
    red = sigmoid(x, gain, -1 * offset_x)
    blue = 1 - sigmoid(x, gain, offset_x)
    green = sigmoid(x, gain, offset_green) + (1 - sigmoid(x, gain, -1 * offset_green))
    green = green - 1.0
    return blue * 256, green * 256, red * 256
def object_detect(frame):
    """Detect motion by comparing the current frame against a running average."""
    global avg
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Grab the first frame as the reference for the running average
    if avg is None:
        avg = gray.copy().astype("float")
    # Compute the difference between the current frame and the running average
    cv2.accumulateWeighted(gray, avg, 0.95)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    # Threshold the delta image
    thresh = cv2.threshold(frameDelta, 3, 255, cv2.THRESH_BINARY)[1]
    # Find contours in the thresholded image
    contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return contours
def paste(fg, bg, x_=0, y_=0, mask_flg=False, random_flg=False):
    """
    Overlay the foreground image onto the background.
    [in] fg: foreground image to overlay (BGRA, with alpha channel)
    [in] bg: background image
    [in] mask_flg: flag to make the mask border slightly larger
    [in] random_flg: flag to rotate and place the foreground at random
    [out] composited image (BGRA)
    """
    # Load two images
    img1 = bg.copy()
    # add alpha channel
    img1 = np.insert(img1, 3, 100, axis=2)
    if random_flg:  # random rotation (rotateR is an external helper, not included in this gist)
        img2, _ = rotateR(fg, [-90, 90], 1.0)
    else:
        img2 = fg.copy()
    # I want to put logo on top-left corner, So I create a ROI
    w1, h1 = img1.shape[:2]
    w2, h2 = img2.shape[:2]
    if random_flg:  # random placement
        x = np.random.randint(0, w1 - w2 + 1)
        y = np.random.randint(0, h1 - h2 + 1)
    else:
        x = x_
        y = y_
    roi = img1[x:x + w2, y:y + h2]
    # Now create a mask of logo and create its inverse mask also
    mask = img2[:, :, 3]
    ret, mask_inv = cv2.threshold(
        cv2.bitwise_not(mask),
        200, 255, cv2.THRESH_BINARY
    )
    if mask_flg:  # build a border by dilating then eroding (dilation slightly larger)
        kernel1 = np.ones((5, 5), np.uint8)
        kernel2 = np.ones((3, 3), np.uint8)
        mask_inv = cv2.dilate(mask_inv, kernel1, iterations=1)
        mask_inv = cv2.erode(mask_inv, kernel2, iterations=1)
    # Now black-out the area of logo in ROI
    img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    # Take only region of logo from logo image.
    img2_fg = cv2.bitwise_and(img2, img2, mask=mask)
    # Put logo in ROI and modify the main image
    dst = cv2.add(img1_bg, img2_fg)
    img1[x:x + w2, y:y + h2, :] = dst
    return img1
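# Main-loop state: memo keeps the last two sensor readings; min_deg/max_deg fix
# the color-bar range (20-37 deg C); key holds the last pressed key code.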
min_deg = max_deg = diff = count = key = deg = 0
memo = [26, 26]
min_deg = 20 # min(deg, min_deg)
max_deg = 37 # max(deg, max_deg)
pixels = None
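# Main loop: runs until Esc (key code 27) is pressed.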
while key != 27:
    ret, frame = cap.read()
    frame = frame[:, 180:-180, :]
    # print(frame.shape)
    # Motion detection (https://qiita.com/K_M95/items/4eed79a7da6b3dafa96d)
    contours = object_detect(frame)
    if not sleep:
        if count % 5 == 0:
            # Read the thermal sensor every 5 frames
            pixels = np.array(sensor.pixels).T
            deg = pixels.max()
            diff = memo[1] - deg
            memo = [memo[1], deg]
        frame = cv2.drawContours(frame, contours, -1, (0, 255, 0), 3)
        # Output the result
        if count == 0:
            count += 1
            start_deg = deg
            continue
        # Interpolate the temperature between sensor readings
        re_deg = memo[0] - diff * (1 + (count % 5)) / 5
        nor_deg = (re_deg - min_deg) / (max_deg - min_deg)
        colors = colorBarRGB(nor_deg)
        new_img = np.zeros(shape=(HEIGHT - 720, WIDTH, 3), dtype="uint8")
        new_img[:, :] = colors
        frame = vconcat_resize_min([frame, new_img])
        # print(frame.shape, new_img.shape)
        w, h = np.unravel_index(np.argmax(pixels), pixels.shape)
        frame = cv2.putText(frame, f"{deg:.2f} deg C", (100, 900), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 0),
                            thickness=5)
        frame = paste(logo, frame, x_=0, y_=20)
    else:
        frame = np.zeros(shape=(HEIGHT, WIDTH, 3), dtype="uint8")
        frame = cv2.putText(frame, "sleeping", (100, 500), cv2.FONT_HERSHEY_SIMPLEX, 3.0, (255, 255, 255), thickness=5)
        # print(logo.shape)
        frame = paste(logo2, frame, x_=40, y_=110)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(30)
    # print(w, h)
    count += 1
    if contours:
        # sleep -> normal
        start_time = time.time()
        sleep = False
    elif (not contours) and (time.time() - start_time) > interval:
        sleep = True
    # if count % 10 == 0:
    #     print(sleep)
cap.release()
cv2.destroyAllWindows()