Skip to content

Instantly share code, notes, and snippets.

@cyamax
Last active November 7, 2018 12:36
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save cyamax/8999660db10b12900cdcbb42de3a3bdb to your computer and use it in GitHub Desktop.
Save cyamax/8999660db10b12900cdcbb42de3a3bdb to your computer and use it in GitHub Desktop.
import argparse
import logging
import time
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
# Console logger used by the main webcam loop below.
logger = logging.getLogger('TfPoseEstimator-WebCam')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
fps_time = 0  # timestamp of the previous frame, for the FPS overlay
# NOTE(review): `time` is already imported above and `random` appears unused.
import requests,time,random
sendPose_bef = 0 # previous command (last pose id actually sent to the robot)
sendPose_now = 0 # current command (pose id detected in this frame)
# Console labels, indexed by pose id (1=forward, 2=right, 3=left, 4=back, 5=stop).
requestUrl_message=["該当なし","↑","→","←","↓","ストップ"]
# GoPiGo HTTP command URLs, indexed by the same pose ids (index 0 unused).
requestUrl=[
'',
'http://mygopigo.com/execute/fast?to_run_code=gpg.set_speed(40)%0Agpg.forward()',
'http://mygopigo.com/execute/fast?to_run_code=gpg.set_speed(40)%0Agpg.right()',
'http://mygopigo.com/execute/fast?to_run_code=gpg.set_speed(40)%0Agpg.left()',
'http://mygopigo.com/execute/fast?to_run_code=gpg.set_speed(40)%0Agpg.backward()',
'http://mygopigo.com/execute/fast?to_run_code=gpg.stop()']
def send_url():
    """Send the HTTP command for the current pose to the robot, if it changed.

    Reads and writes the module-level pose state (``sendPose_bef`` /
    ``sendPose_now``).  Sentinel values are skipped: 0 = initial state,
    99 = no matching pose, 100 = pose could not be obtained.  A request is
    only issued when the detected pose differs from the previously sent one,
    so the robot is not spammed with identical commands every frame.
    """
    global sendPose_bef, sendPose_now  # shared with arrow_estimate()
    print(sendPose_bef, sendPose_now)
    if sendPose_now == 0:
        print("初期値:0")
    elif sendPose_now == 99:
        print("ポーズ該当なし")
    elif sendPose_now == 100:
        print("ポーズ取得不可")
    elif sendPose_bef != sendPose_now:
        print(requestUrl_message[sendPose_now])
        # Timeout keeps the webcam loop from hanging forever when the robot
        # is unreachable; a failed request raises and is handled by the caller.
        requests.get(requestUrl[sendPose_now], timeout=5)
        # Only remember the pose once the command was actually delivered.
        sendPose_bef = sendPose_now
    else:
        print("前回と同じだから実行しない:")
def arrow_estimate(estimation_list):
    """Classify the detected pose into a robot command and dispatch it.

    ``estimation_list`` is the ``body_parts`` mapping produced by tf-pose
    (part index -> part object with ``.x`` / ``.y`` in normalized image
    coordinates; y grows downward, so a smaller y is higher in the image —
    TODO confirm against the estimator used).  Sets the module-level
    ``sendPose_now`` (1=forward, 2=right, 3=left, 4=back, 5=stop,
    99=no matching pose, 100=required parts missing) and calls send_url().
    """
    global sendPose_now
    try:
        # Missing parts raise on lookup and are handled below.
        pos_0 = estimation_list[0]  # face; unused, but the lookup requires a face to be detected
        pos_1 = estimation_list[1]  # neck
        pos_2 = estimation_list[2]  # right shoulder
        pos_3 = estimation_list[3]  # right elbow
        pos_5 = estimation_list[5]  # left shoulder
        pos_6 = estimation_list[6]  # left elbow
        goForward_MinY = pos_1.y  # both elbows above the neck -> "forward"
        if (pos_3.y < goForward_MinY) and (pos_6.y < goForward_MinY):
            sendPose_now = 1  # forward (both arms raised)
        elif (pos_6.x > pos_5.x) and (pos_2.x < pos_3.x):
            sendPose_now = 2  # right
        elif (pos_6.x < pos_5.x) and (pos_2.x > pos_3.x):
            sendPose_now = 3  # left
        elif (pos_3.y < 0):
            # NOTE(review): normalized y should never be negative, so this
            # branch looks unreachable; preserved from the original logic.
            sendPose_now = 4  # backward
        elif (pos_6.x > pos_5.x) and (pos_2.x > pos_3.x):
            sendPose_now = 5  # stop
        else:
            sendPose_now = 99  # parts detected, but no pose matched
    except (KeyError, IndexError, AttributeError):
        # A required body part is missing from this frame's estimation.
        # Narrowed from a bare `except:` so real bugs are no longer swallowed.
        print(estimation_list)
        sendPose_now = 100
    send_url()
    return
#def arrow_estimate(estimation_list):
## --------------------------------------
# try:
# pos_0 = estimation_list[0] # 顔
# pos_1 = estimation_list[1] # 喉
# pos_3 = estimation_list[3] # 右肘
# pos_6 = estimation_list[6] # 左肘
#   pos_8 = estimation_list[8] # 右腰
#   pos_11 = estimation_list[11] # 左腰
#  
#   goRight_MinY = toStop_RightMaxY = pos_1.y - ((pos_1.y - pos_8.y) / 3)
# goLeft_MinY = toStop_LeftMaxY = pos_1.y - ((pos_1.y - pos_11.y) / 3)
# goRight_MaxY = goLeft_MaxY = goForward_MinY = pos_0.y
#  
# if (pos_3.y > goForward_MinY) and (pos_6.y > goForward_MinY):
# print(“↑”)
#  elif (pos_3.y > goRight_MinY) and (pos_3.y < goRight_MaxY):
# print(“→”)
# elif (pos_6.y > goLeft_MinY) and (pos_6.y < goLeft_MaxY):
#print(“←”)
#  elif (pos_3.y < toStop_RightMaxY) and (pos_6.y < toStop_LeftMaxY):
# print(“stop”)
#
# # print (pos_1.x)
# # print (pos_1.y)
# # print (pos_7.x)
# # print (pos_7.y)
# # print(estimation_list[0])
# if (pos_4.y < pos_1.y) and (pos_7.y < pos_1.y):
# print("↑")
# elif pos_7.y < pos_1.y:
# print("←")
# elif pos_4.y < pos_1.y:
# print("→")
#
# except:
# print("認識不可")
## ----------------------------------------------
# return
#
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
    parser.add_argument('--camera', type=int, default=0)
    parser.add_argument('--resize', type=str, default='0x0',
                        help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
    parser.add_argument('--resize-out-ratio', type=float, default=4.0,
                        help='if provided, resize heatmaps before they are post-processed. default=1.0')
    parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin')
    # NOTE(review): type=bool is an argparse pitfall — any non-empty string
    # (even "False") parses as True.  Kept as-is for CLI compatibility.
    parser.add_argument('--show-process', type=bool, default=False,
                        help='for debug purpose, if enabled, speed for inference is dropped.')
    args = parser.parse_args()

    logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
    w, h = model_wh(args.resize)
    if w > 0 and h > 0:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    else:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))

    logger.debug('cam read+')
    cam = cv2.VideoCapture(args.camera)
    ret_val, image = cam.read()
    if not ret_val or image is None:
        # Fail fast with a clear message instead of crashing on image.shape below.
        raise RuntimeError('could not read a frame from camera %d' % args.camera)
    logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))

    while True:
        ret_val, image = cam.read()
        if not ret_val or image is None:
            logger.warning('camera frame grab failed; stopping')
            break
        humans = e.inference(image, resize_to_default=(w > 0 and h > 0),
                             upsample_size=args.resize_out_ratio)
        # Map the first detected person's pose to a robot command.  Narrowed
        # from a bare `except:`; also shields the loop from HTTP failures
        # raised by send_url() when the robot is offline.
        try:
            arrow_estimate(humans[0].body_parts)
        except Exception:
            print("no human")

        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        cv2.putText(image,
                    "FPS: %f" % (1.0 / (time.time() - fps_time)),
                    (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        cv2.imshow('tf-pose-estimation result', image)
        fps_time = time.time()
        if cv2.waitKey(1) == 27:  # ESC quits
            break

    cv2.destroyAllWindows()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment