Commit 6a0bf825 authored by Eric.Lee2021 🚴🏻

add gesture app

Parent 668d6c74
#-*-coding:utf-8-*-
'''
DpCas-Light
|||| ||||| |||| || |||||||
|| || || || || || |||| || ||
|| || || || || || || || ||
|| || || || || ||====|| ||||||
|| || ||||| || || ||======|| ||
|| || || || || || || || ||
|||| || |||| || || |||||||
/--------------------- HandPose_X ---------------------/
'''
# date:2021-03-12
# Author: Eric.Lee
# function: handpose demo
import os
import cv2
import time
import random
import numpy as np
from multiprocessing import Process, Manager

# Load model components
from hand_detect.yolo_v3_hand import yolo_v3_hand_model
from hand_keypoints.handpose_x import handpose_x_model
# from classify_imagenet.imagenet_c import classify_imagenet_model

# Load utility libraries
import sys
sys.path.append("./lib/gesture_lib/")
from cores.handpose_fuction import handpose_track_keypoints21_pipeline
from cores.handpose_fuction import hand_tracking, audio_recognize, judge_click_stabel, draw_click_lines
from utils.utils import parse_data_cfg
from playsound import playsound
def audio_process_dw_edge_cnt(info_dict):
    while info_dict["handpose_procss_ready"] == False: # wait for the models to finish loading
        time.sleep(2)
    gesture_names = ["click"]
    gesture_dict = {}
    for k_ in gesture_names:
        gesture_dict[k_] = None
    # time.sleep(1)
    # playsound("./materials/audio/sentences/WelcomeAR.mp3")
    # time.sleep(0.01)
    # playsound("./materials/audio/sentences/MorningEric.mp3")
    # time.sleep(1)
    reg_cnt = 0
    while True:
        time.sleep(0.01)
        try:
            reg_cnt = info_dict["click_dw_cnt"]
            for i in range(reg_cnt):
                # playsound("./materials/audio/cue/winwin-1.mp3")
                playsound("./materials/audio/sentences/welldone.mp3")
            info_dict["click_dw_cnt"] = info_dict["click_dw_cnt"] - reg_cnt # consume the counted clicks
        except Exception as inst:
            print(type(inst), inst) # the exception instance
        if info_dict["break"] == True:
            break
def audio_process_up_edge_cnt(info_dict):
    while info_dict["handpose_procss_ready"] == False: # wait for the models to finish loading
        time.sleep(2)
    gesture_names = ["click"]
    gesture_dict = {}
    for k_ in gesture_names:
        gesture_dict[k_] = None
    reg_cnt = 0
    while True:
        time.sleep(0.01)
        # print(" --->>> audio_process")
        try:
            reg_cnt = info_dict["click_up_cnt"]
            for i in range(reg_cnt):
                # playsound("./materials/audio/cue/m2-0.mp3")
                playsound("./materials/audio/sentences/Click.mp3")
            info_dict["click_up_cnt"] = info_dict["click_up_cnt"] - reg_cnt # consume the counted clicks
        except Exception as inst:
            print(type(inst), inst) # the exception instance
        if info_dict["break"] == True:
            break
def audio_process_dw_edge(info_dict):
    while info_dict["handpose_procss_ready"] == False: # wait for the models to finish loading
        time.sleep(2)
    gesture_names = ["click"]
    gesture_dict = {}
    for k_ in gesture_names:
        gesture_dict[k_] = None
    while True:
        time.sleep(0.01)
        # print(" --->>> audio_process")
        try:
            for g_ in gesture_names:
                if gesture_dict[g_] is None:
                    gesture_dict[g_] = info_dict[g_]
                else:
                    if "click" == g_:
                        if (info_dict[g_] ^ gesture_dict[g_]) and info_dict[g_] == False: # falling edge of the click signal: the click action has ended
                            playsound("./materials/audio/cue/winwin.mp3")
                            # playsound("./materials/audio/sentences/welldone.mp3")
                    gesture_dict[g_] = info_dict[g_]
        except Exception as inst:
            print(type(inst), inst) # the exception instance
        if info_dict["break"] == True:
            break
def audio_process_up_edge(info_dict):
    while info_dict["handpose_procss_ready"] == False: # wait for the models to finish loading
        time.sleep(2)
    gesture_names = ["click"]
    gesture_dict = {}
    for k_ in gesture_names:
        gesture_dict[k_] = None
    while True:
        time.sleep(0.01)
        # print(" --->>> audio_process")
        try:
            for g_ in gesture_names:
                if gesture_dict[g_] is None:
                    gesture_dict[g_] = info_dict[g_]
                else:
                    if "click" == g_:
                        if (info_dict[g_] ^ gesture_dict[g_]) and info_dict[g_] == True: # rising edge of the click signal: the click action has started
                            playsound("./materials/audio/cue/m2.mp3")
                            # playsound("./materials/audio/sentences/clik_quick.mp3")
                    gesture_dict[g_] = info_dict[g_]
        except Exception as inst:
            print(type(inst), inst) # the exception instance
        if info_dict["break"] == True:
            break
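
# The four audio processes above all use the same XOR edge-detection pattern on a
# boolean flag shared through the Manager dict. A minimal standalone sketch of
# that pattern (prev/cur are hypothetical values):
#
#   prev, cur = False, True
#   assert (prev ^ cur) and cur          # flag changed and is now True  -> rising edge
#   prev, cur = True, False
#   assert (prev ^ cur) and not cur      # flag changed and is now False -> falling edge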
'''
Start the recognition audio process
'''
def audio_process_recognize_up_edge(info_dict):
    while info_dict["handpose_procss_ready"] == False: # wait for the models to finish loading
        time.sleep(2)
    gesture_names = ["double_en_pts"]
    gesture_dict = {}
    for k_ in gesture_names:
        gesture_dict[k_] = None
    while True:
        time.sleep(0.01)
        # print(" --->>> audio_process")
        try:
            for g_ in gesture_names:
                if gesture_dict[g_] is None:
                    gesture_dict[g_] = info_dict[g_]
                else:
                    if "double_en_pts" == g_:
                        if (info_dict[g_] ^ gesture_dict[g_]) and info_dict[g_] == True: # rising edge of the two-hand selection signal: recognition starts
                            playsound("./materials/audio/sentences/IdentifyingObjectsWait.mp3")
                            playsound("./materials/audio/sentences/ObjectMayBeIdentified.mp3")
                            if info_dict["reco_msg"] is not None:
                                print("process - (audio_process_recognize_up_edge) reco_msg : {} ".format(info_dict["reco_msg"]))
                                doc_name = info_dict["reco_msg"]["label_msg"]["doc_name"]
                                reco_audio_file = "./materials/audio/imagenet_2012/{}.mp3".format(doc_name)
                                if os.access(reco_audio_file, os.F_OK): # check that the audio file exists
                                    playsound(reco_audio_file)
                                info_dict["reco_msg"] = None
                    gesture_dict[g_] = info_dict[g_]
        except Exception as inst:
            print(type(inst), inst) # the exception instance
        if info_dict["break"] == True:
            break
'''
/*****************************************/
            Algorithm pipeline
/*****************************************/
'''
def handpose_x_process(info_dict, config):
    # Model initialization
    print("load model component ...")
    # yolo v3 hand-detection model
    hand_detect_model = yolo_v3_hand_model(conf_thres=float(config["detect_conf_thres"]), nms_thres=float(config["detect_nms_thres"]),
        model_arch=config["detect_model_arch"], model_path=config["detect_model_path"], yolo_anchor_scale=float(config["yolo_anchor_scale"]),
        img_size=float(config["detect_input_size"]), model_half=config["detect_model_half"],
        )
    # handpose_x 21-keypoint regression model
    handpose_model = handpose_x_model(model_arch=config["handpose_x_model_arch"], model_path=config["handpose_x_model_path"])
    #
    gesture_model = None # not used yet
    #
    # object_recognize_model = classify_imagenet_model(model_arch = config["classify_model_arch"],model_path = config["classify_model_path"],
    #     num_classes = int(config["classify_model_classify_num"])) # recognition/classification model
    #
    img_reco_crop = None

    cap = cv2.VideoCapture(int(config["camera_id"])) # open the camera
    # cap.set(cv2.CAP_PROP_EXPOSURE, -5) # set camera exposure (note: not supported by every camera)
    # url="http://admin:admin@192.168.43.1:8081"
    # cap=cv2.VideoCapture(url)

    print("start handpose process ~")

    info_dict["handpose_procss_ready"] = True # start-up sync signal between processes

    gesture_lines_dict = {}  # trajectory points recorded while click is enabled
    hands_dict = {}          # per-hand information
    hands_click_dict = {}    # per-hand click counters
    track_index = 0          # global tracking index

    while True:
        ret, img = cap.read() # read a camera frame
        if ret: # frame read successfully
            # img = cv2.flip(img,-1)
            algo_img = img.copy()
            st_ = time.time()
            #------
            hand_bbox = hand_detect_model.predict(img, vis=True) # detect hands, get their bounding boxes
            hands_dict, track_index = hand_tracking(data=hand_bbox, hands_dict=hands_dict, track_index=track_index) # hand tracking, currently IOU-based

            # detect each hand's keypoints and related information
            handpose_list, gesture_list = handpose_track_keypoints21_pipeline(img, hands_dict=hands_dict, hands_click_dict=hands_click_dict, track_index=track_index, algo_img=algo_img,
                handpose_model=handpose_model, gesture_model=gesture_model,
                icon=None, vis=True)
            et_ = time.time()
            fps_ = 1./(et_-st_+1e-8) # (computed but not currently displayed)
            #------------------------------------------ maintain the tracked-hand state
            #------------------ collect the IDs of the tracked hands
            id_list = []
            for i in range(len(handpose_list)):
                _,_,_,dict_ = handpose_list[i]
                id_list.append(dict_["id"])
            # print(id_list)
            #----------------- collect the IDs of hands that must be dropped
            id_del_list = []
            for k_ in gesture_lines_dict.keys():
                if k_ not in id_list: # drop the trajectories of hands whose tracking has been lost
                    id_del_list.append(k_)
            #----------------- delete the state of hands that can no longer be tracked
            for k_ in id_del_list:
                del gesture_lines_dict[k_]
                del hands_click_dict[k_]

            #----------------- update each detected hand's trajectory, plus the rising/falling edges of the click-enable signal
            double_en_pts = []
            for i in range(len(handpose_list)):
                _,_,_,dict_ = handpose_list[i]
                id_ = dict_["id"]
                if dict_["click"]:
                    if id_ not in gesture_lines_dict.keys():
                        gesture_lines_dict[id_] = {}
                        gesture_lines_dict[id_]["pts"] = []
                        gesture_lines_dict[id_]["line_color"] = (random.randint(100,255), random.randint(100,255), random.randint(100,255))
                        gesture_lines_dict[id_]["click"] = None
                    # rising-edge check
                    if gesture_lines_dict[id_]["click"] is not None:
                        if gesture_lines_dict[id_]["click"] == False: # rising-edge counter
                            info_dict["click_up_cnt"] += 1
                    # record the click state
                    gesture_lines_dict[id_]["click"] = True
                    #--- record the coordinates
                    gesture_lines_dict[id_]["pts"].append(dict_["choose_pt"])
                    double_en_pts.append(dict_["choose_pt"])
                else:
                    if id_ not in gesture_lines_dict.keys():
                        gesture_lines_dict[id_] = {}
                        gesture_lines_dict[id_]["pts"] = []
                        gesture_lines_dict[id_]["line_color"] = (random.randint(100,255), random.randint(100,255), random.randint(100,255))
                        gesture_lines_dict[id_]["click"] = None
                    elif id_ in gesture_lines_dict.keys():
                        gesture_lines_dict[id_]["pts"] = [] # clear the trajectory
                        # falling-edge check
                        if gesture_lines_dict[id_]["click"] == True: # falling-edge counter
                            info_dict["click_dw_cnt"] += 1
                        # update the click state
                        gesture_lines_dict[id_]["click"] = False

            # draw the thumb/index midpoint trajectory while the hand is in the click state
            draw_click_lines(img, gesture_lines_dict, vis=bool(config["vis_gesture_lines"]))
            # # check whether each hand's click state is stable and exceeds the configured threshold
            # flag_click_stable = judge_click_stabel(img,handpose_list,int(config["charge_cycle_step"]))
            # # decide whether to trigger the recognition audio and identify the selected target
            # img_reco_crop,reco_msg = audio_recognize(img,algo_img,img_reco_crop,object_recognize_model,info_dict,double_en_pts,flag_click_stable)
            # # print(reco_msg)
            cv2.putText(img, 'HandNum:[{}]'.format(len(hand_bbox)), (5,25), cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 0, 0), 5)
            cv2.putText(img, 'HandNum:[{}]'.format(len(hand_bbox)), (5,25), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255))

            cv2.namedWindow("image", 0)
            cv2.imshow("image", img)
            if cv2.waitKey(1) == 27: # Esc quits
                info_dict["break"] = True
                break
        else:
            break

    cap.release()
    cv2.destroyAllWindows()
def main_gesture_x(cfg_file):
    config = parse_data_cfg(cfg_file)

    print("\n/---------------------- main_handpose_x config ------------------------/\n")
    for k_ in config.keys():
        print("{} : {}".format(k_, config[k_]))
    print("\n/------------------------------------------------------------------------/\n")

    print(" loading handpose_x local demo ...")
    g_info_dict = Manager().dict() # shared dict for key:value exchange between processes
    g_info_dict["handpose_procss_ready"] = False # start-up sync signal between processes
    g_info_dict["break"] = False # shutdown sync signal between processes
    g_info_dict["double_en_pts"] = False # two-hand selection enable signal
    g_info_dict["click_up_cnt"] = 0
    g_info_dict["click_dw_cnt"] = 0
    g_info_dict["reco_msg"] = None

    print(" multiprocessing dict key:\n")
    for key_ in g_info_dict.keys():
        print(" -> ", key_)
    print()

    #-------------------------------------------------- initialize the processes
    process_list = []
    t = Process(target=handpose_x_process, args=(g_info_dict, config,))
    process_list.append(t)
    # t = Process(target=audio_process_recognize_up_edge,args=(g_info_dict,)) # playback on rising edge
    # process_list.append(t)
    # t = Process(target=audio_process_dw_edge_cnt,args=(g_info_dict,)) # playback on falling edge
    # process_list.append(t)
    # t = Process(target=audio_process_up_edge_cnt,args=(g_info_dict,)) # playback on rising edge
    # process_list.append(t)

    for i in range(len(process_list)):
        process_list[i].start()
    for i in range(len(process_list)):
        process_list[i].join() # the main process waits for the children to finish
    del process_list
# --- configuration file for the gesture app (loaded as ./lib/gesture_lib/cfg/handpose.cfg in main.py below) ---
detect_model_path=./hand_416-2021-01-29.pt
detect_model_arch=yolo
detect_input_size = 416
yolo_anchor_scale=1.
detect_conf_thres=0.2
detect_nms_thres=0.45
detect_model_half = False
handpose_x_model_path=./ReXNetV1-size-256-wingloss102-0.1041.pth
handpose_x_model_arch=rexnetv1
classify_model_path=./imagenet_size-256_20210409.pth
classify_model_arch=resnet_50
classify_model_classify_num=1000
camera_id = 0
vis_gesture_lines = True
charge_cycle_step = 18
#-*-coding:utf-8-*-
'''
DpCas-Light
|||| ||||| |||| || |||||||
|| || || || || || |||| || ||
|| || || || || || || || ||
|| || || || || ||====|| ||||||
|| || ||||| || || ||======|| ||
|| || || || || || || || ||
|||| || |||| || || |||||||
/--------------------- HandPose_X ---------------------/
'''
# date:2019-12-10
# Author: Eric.Lee
# function: handpose: rotation & translation
import cv2
import numpy as np
# Face outer contour (helper carried over from the face-pose code)
def get_face_outline(img_crop, face_crop_region, obj_crop_points, face_w, face_h):
    face_mask = np.zeros((1,27,2), dtype=np.int32)
    for m in range(obj_crop_points.shape[0]):
        if m <= 16:
            x = int(face_crop_region[0]+obj_crop_points[m][0]*face_w)
            y = int(face_crop_region[1]+obj_crop_points[m][1]*face_h)
            # face_mask.append((x,y))
            face_mask[0,m,0] = x
            face_mask[0,m,1] = y
    for k in range(16,26):
        m = 42-k
        x = int(face_crop_region[0]+obj_crop_points[m][0]*face_w)
        y = int(face_crop_region[1]+obj_crop_points[m][1]*face_h)
        # face_mask.append((x,y))
        face_mask[0,k+1,0] = x
        face_mask[0,k+1,1] = y
        # print(x,y)
    return face_mask
# 3D coordinates of the canonical hand model (used as the PnP object points)
object_pts = np.float32([
    [0., 0.4, 0.],      # palm center
    [0., 5., 0.],       # hand root (wrist)
    # [-2, 2.5,0.],     # thumb, first phalanx
    # [-4, 0.5,0.],     # thumb, second phalanx
    [-2.7, -4.5, 0.],   # index finger base
    [0., -5., 0.],      # middle finger base
    [2.6, -4., 0.],     # ring finger base
    [5.2, -3., 0.],     # pinky base
    ]
)
# object_pts = np.float32([[-2.5, -7.45, 0.5],  # pinky base
#     [-1.2, -7.45, 0.5],   # ring finger base
#     [1.2, -7.5, 0.5],     # middle finger base
#     [2.5, -7.45, 0.5],    # index finger base
#     [4.2, -3.45, 0.5],    # thumb, second phalanx
#     [2.5, -2.0, 0.5],     # thumb base
#     [0.00, -0.0, 0.5],    # hand root (wrist)
#     ]
# )
# 3D box (xyz) used for pose reprojection
# reprojectsrc = np.float32([[3.0, 11.0, 2.0],
# [3.0, 11.0, -4.0],
# [3.0, -7.0, -4.0],
# [3.0, -7.0, 2.0],
# [-3.0, 11.0, 2.0],
# [-3.0, 11.0, -4.0],
# [-3.0, -7.0, -4.0],
# [-3.0, -7.0, 2.0]])
reprojectsrc = np.float32([[5.0, 8.0, 2.0],
[5.0, 8.0, -2.0],
[5.0, -8.0, -2.0],
[5.0, -8.0, 2.0],
[-5.0, 8.0, 2.0],
[-5.0, 8.0, -2.0],
[-5.0, -8.0, -2.0],
[-5.0, -8.0, 2.0]])
# reprojectsrc = np.float32([[6.0, 4.0, 2.0],
# [6.0, 4.0, -4.0],
# [6.0, -3.0, -4.0],
# [6.0, -3.0, 2.0],
# [-6.0, 4.0, 2.0],
# [-6.0, 4.0, -4.0],
# [-6.0, -3.0, -4.0],
# [-6.0, -3.0, 2.0]])
# reprojectsrc = np.float32([[6.0, 6.0, 6.0],
# [6.0, 6.0, -6.0],
# [6.0, -6.0, -6.0],
# [6.0, -6.0, 6.0],
# [-6.0, 6.0, 6.0],
# [-6.0, 6.0, -6.0],
# [-6.0, -6.0, -6.0],
# [-6.0, -6.0, 6.0]])
# Vertex index pairs connecting the edges of the 3D box
line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],
[4, 5], [5, 6], [6, 7], [7, 4],
[0, 4], [1, 5], [2, 6], [3, 7]]
def get_hand_pose(shape, img, vis=True):
    h, w, _ = img.shape
    K = [w, 0.0, w//2,
         0.0, w, h//2,
         0.0, 0.0, 1.0]
    # Assuming no lens distortion
    D = [0, 0, 0.0, 0.0, 0]

    cam_matrix = np.array(K).reshape(3, 3).astype(np.float32) # camera intrinsics
    # dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32) # camera distortion coefficients, default: no distortion
    dist_coeffs = np.float32([0.0, 0.0, 0.0, 0.0, 0.0])

    # 2D image coordinates of the selected keypoints
    # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
    #     shape[39], shape[42], shape[45],
    #     shape[27],shape[31], shape[35],shape[30],shape[33]])
    image_pts = np.float32([shape[0], shape[1], shape[2], shape[3], shape[4], shape[5]
        ]
    )
    # PnP: solve the 2D-3D correspondence for the rotation and translation vectors
    _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)
    # _, rotation_vec, translation_vec = cv2.solvePnPRansac(object_pts, image_pts, cam_matrix, dist_coeffs)
    # print("translation_vec:",translation_vec)

    # reproject the 3D box into the image
    reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix, dist_coeffs)
    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))

    # calc euler angle
    rotation_mat, _ = cv2.Rodrigues(rotation_vec) # rotation vector -> rotation matrix
    pose_mat = cv2.hconcat((rotation_mat, translation_vec)) # concatenate rotation + translation
    _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat) # estimate the Euler angles

    if vis:
        for i, line_pair in enumerate(line_pairs): # draw the 3D box
            x1 = int(reprojectdst[line_pair[0]][0])
            y1 = int(reprojectdst[line_pair[0]][1])
            x2 = int(reprojectdst[line_pair[1]][0])
            y2 = int(reprojectdst[line_pair[1]][1])
            if line_pair[0] in [0,3,4,7] and line_pair[1] in [0,3,4,7]:
                cv2.line(img, (x1,y1), (x2,y2), (255,0,0), 2)
            elif line_pair[0] in [1,2,5,6] and line_pair[1] in [1,2,5,6]:
                cv2.line(img, (x1,y1), (x2,y2), (250,150,0), 2)
            else:
                cv2.line(img, (x1,y1), (x2,y2), (0,90,255), 2)
    return reprojectdst, euler_angle, translation_vec
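
# A minimal usage sketch (hypothetical keypoint values): get_hand_pose expects six
# 2D points ordered like object_pts above (palm center, wrist, then the four
# finger bases) together with the frame they were measured on.
#
#   frame = np.zeros((480, 640, 3), dtype=np.uint8)           # dummy 640x480 frame
#   shape = np.float32([[320, 250], [320, 320], [280, 180],   # palm, wrist, index
#                       [320, 170], [355, 180], [390, 195]])  # middle, ring, pinky
#   box_pts, euler, tvec = get_hand_pose(shape, frame, vis=False)
#   # euler[0], euler[1], euler[2] -> pitch/yaw/roll estimates in degrees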
#-*-coding:utf-8-*-
'''
DpCas-Light
|||| |||| |||| || ||||
|| || || || || || |||| || ||
|| || || || || || || || ||
|| || || || || ||====|| ||||
|| || |||| || || ||======|| ||
|| || || || || || || || ||
|||| || |||| || || ||||
/------------------ HandPose_X ------------------/
'''
# date:2021-03-09
# Author: Eric.Lee
# function: pipeline
import cv2
import numpy as np
import math
from hand_keypoints.handpose_x import handpose_x_model, draw_bd_handpose_c
from cores.tracking_utils import tracking_bbox
from cores.hand_pnp import get_hand_pose
'''
Compute the angle between two 2D vectors (in degrees)
'''
def vector_2d_angle(v1, v2):
    v1_x = v1[0]
    v1_y = v1[1]
    v2_x = v2[0]
    v2_y = v2[1]
    try:
        angle_ = math.degrees(math.acos((v1_x*v2_x+v1_y*v2_y)/(((v1_x**2+v1_y**2)**0.5)*((v2_x**2+v2_y**2)**0.5))))
    except: # zero-length vector or acos domain error -> return the sentinel value
        angle_ = 65535.
    if angle_ > 180.:
        angle_ = 65535.
    return angle_
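
# Quick sanity check (sketch): perpendicular unit vectors give 90 degrees, and a
# zero-length vector falls through to the 65535. sentinel.
#
#   assert abs(vector_2d_angle((1, 0), (0, 1)) - 90.0) < 1e-6
#   assert vector_2d_angle((0, 0), (1, 0)) == 65535.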
'''
Compute the 2D bending angle of each finger from the hand keypoints
'''
def hand_angle(hand_, x=0, y=0):
    angle_list = []
    #---------------------------- thumb angle
    angle_ = vector_2d_angle(
        ((int(hand_['0']['x']+x) - int(hand_['2']['x']+x)), (int(hand_['0']['y']+y) - int(hand_['2']['y']+y))),
        ((int(hand_['3']['x']+x) - int(hand_['4']['x']+x)), (int(hand_['3']['y']+y) - int(hand_['4']['y']+y)))
        )
    angle_list.append(angle_)
    #---------------------------- index finger angle
    angle_ = vector_2d_angle(
        ((int(hand_['0']['x']+x) - int(hand_['6']['x']+x)), (int(hand_['0']['y']+y) - int(hand_['6']['y']+y))),
        ((int(hand_['7']['x']+x) - int(hand_['8']['x']+x)), (int(hand_['7']['y']+y) - int(hand_['8']['y']+y)))
        )
    angle_list.append(angle_)
    #---------------------------- middle finger angle
    angle_ = vector_2d_angle(
        ((int(hand_['0']['x']+x) - int(hand_['10']['x']+x)), (int(hand_['0']['y']+y) - int(hand_['10']['y']+y))),
        ((int(hand_['11']['x']+x) - int(hand_['12']['x']+x)), (int(hand_['11']['y']+y) - int(hand_['12']['y']+y)))
        )
    angle_list.append(angle_)
    #---------------------------- ring finger angle
    angle_ = vector_2d_angle(
        ((int(hand_['0']['x']+x) - int(hand_['14']['x']+x)), (int(hand_['0']['y']+y) - int(hand_['14']['y']+y))),
        ((int(hand_['15']['x']+x) - int(hand_['16']['x']+x)), (int(hand_['15']['y']+y) - int(hand_['16']['y']+y)))
        )
    angle_list.append(angle_)
    #---------------------------- pinky angle
    angle_ = vector_2d_angle(
        ((int(hand_['0']['x']+x) - int(hand_['18']['x']+x)), (int(hand_['0']['y']+y) - int(hand_['18']['y']+y))),
        ((int(hand_['19']['x']+x) - int(hand_['20']['x']+x)), (int(hand_['19']['y']+y) - int(hand_['20']['y']+y)))
        )
    angle_list.append(angle_)
    return angle_list
'''
# Gestures are defined by 2D angle constraints, a workaround for the lack of a
# large static-gesture dataset.
# gestures:  fist five gun love one six three thumbup yeah
# finger id: thumb index middle ring pink
'''
# NOTE: this first h_gesture is shadowed by the second definition further below,
# which is the one actually used at runtime.
def h_gesture(img, angle_list):
    thr_angle = 65.
    thr_angle_thumb = 53.
    thr_angle_s = 49.
    gesture_str = None
    if 65535. not in angle_list:
        if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
            gesture_str = "fist"
        elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]<thr_angle_s):
            gesture_str = "five"
        elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
            gesture_str = "gun"
        elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s):
            gesture_str = "love"
        elif (angle_list[0]>5) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
            gesture_str = "one"
        elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s):
            gesture_str = "six"
        elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]>thr_angle):
            gesture_str = "three"
        elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
            gesture_str = "thumbUp"
        elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
            gesture_str = "yeah"
    return gesture_str
# define gesture
# fist five gun love one six three thumbup yeah
# finger id: thumb index middle ring pink
def h_gesture(img, angle_list):
    thr_angle = 65.
    thr_angle_thumb = 53.
    thr_angle_s = 45.
    gesture_str = None
    if 65535. not in angle_list:
        if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
            gesture_str = "fist"
        elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]<thr_angle_s):
            gesture_str = "five"
        elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
            gesture_str = "gun"
        elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s):
            gesture_str = "love"
        elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
            gesture_str = "one"
        elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s):
            gesture_str = "six"
        elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]>thr_angle):
            gesture_str = "three"
        elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
            gesture_str = "thumbup"
        elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):
            gesture_str = "yeah"
    #------------------------------------------------
    if gesture_str is not None:
        img_gesture = cv2.imread("./materials/vison/gesture_show/{}.jpg".format(gesture_str))
        cv2.rectangle(img, (img.shape[1]-1-152, img.shape[0]-1-152), (img.shape[1]-3, img.shape[0]-3), (255,200,22), 5)
        cv2.rectangle(img, (img.shape[1]-1-152, img.shape[0]-1-152), (img.shape[1]-3, img.shape[0]-3), (25,10,222), 2)
        if True: # gesture_str == "fist" or gesture_str == "one":
            img_pk = cv2.resize(img_gesture, (150,150))
            img[(img.shape[0]-1-150):(img.shape[0]-1), (img.shape[1]-1-150):(img.shape[1]-1), :] = img_pk
        cv2.putText(img, ' [{}]'.format(gesture_str), (img.shape[1]-1-152, img.shape[0]-1-162),
            cv2.FONT_HERSHEY_COMPLEX, 1.21, (255, 155, 25), 5)
        cv2.putText(img, ' [{}]'.format(gesture_str), (img.shape[1]-1-152, img.shape[0]-1-162),
            cv2.FONT_HERSHEY_COMPLEX, 1.21, (0, 0, 255))
    return gesture_str
#-------------------------------------
'''
Hand tracking: IOU matching of bounding boxes across frames
'''
def hand_tracking(data, hands_dict, track_index):
    if data is None:
        hands_dict = {}
        track_index = 0
    hands_dict, track_index = tracking_bbox(data, hands_dict, track_index) # track the targets
    return hands_dict, track_index
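
# Usage sketch (hypothetical boxes): feed per-frame detections as
# (x_min, y_min, x_max, y_max, score) tuples; an ID persists while the IOU of a
# hand's box between frames stays above the tracker's threshold.
#
#   hands, idx = {}, 0
#   hands, idx = hand_tracking([(100, 100, 220, 240, 0.9)], hands, idx)  # new ID 0
#   hands, idx = hand_tracking([(105, 104, 226, 248, 0.9)], hands, idx)  # still ID 0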
#-------------------------------------
'''
DpCas-Light
/------------------ HandPose_X ------------------/
1) regress the 21 hand keypoints
2) detect the pinch/release of index finger and thumb, i.e. the click state
3) constrain gestures via 2D keypoint angles
'''
def handpose_track_keypoints21_pipeline(img, hands_dict, hands_click_dict, track_index, algo_img=None, handpose_model=None, gesture_model=None, icon=None, vis=False, dst_thr=35, angle_thr=16.):
    hands_list = []
    gesture_list = []
    if algo_img is not None:
        for idx, id_ in enumerate(sorted(hands_dict.keys(), key=lambda x:x, reverse=False)):
            x_min, y_min, x_max, y_max, score, iou_, cnt_, ui_cnt = hands_dict[id_]
            cv2.putText(img, 'ID {}'.format(id_), (int(x_min+2), int(y_min+15)), cv2.FONT_HERSHEY_COMPLEX, 0.45, (255, 0, 0), 5)
            cv2.putText(img, 'ID {}'.format(id_), (int(x_min+2), int(y_min+15)), cv2.FONT_HERSHEY_COMPLEX, 0.45, (173,255,73))
            # x_min,y_min,x_max,y_max,score = bbox
            w_ = max(abs(x_max-x_min), abs(y_max-y_min))
            if w_ < 60: # skip hands that are too small
                continue
            w_ = w_*1.26

            x_mid = (x_max+x_min)/2
            y_mid = (y_max+y_min)/2
            x1, y1, x2, y2 = int(x_mid-w_/2), int(y_mid-w_/2), int(x_mid+w_/2), int(y_mid+w_/2)
            x1 = np.clip(x1, 0, img.shape[1]-1)
            x2 = np.clip(x2, 0, img.shape[1]-1)
            y1 = np.clip(y1, 0, img.shape[0]-1)
            y2 = np.clip(y2, 0, img.shape[0]-1)
            bbox_ = x1, y1, x2, y2

            gesture_name = None
            pts_ = handpose_model.predict(algo_img[y1:y2, x1:x2, :])
            plam_list = []
            pts_hand = {}
            for ptk in range(int(pts_.shape[0]/2)):
                xh = (pts_[ptk*2+0]*float(x2-x1))
                yh = (pts_[ptk*2+1]*float(y2-y1))
                pts_hand[str(ptk)] = {
                    "x":xh,
                    "y":yh,
                    }
                if ptk in [0,1,5,9,13,17]: # palm keypoints
                    plam_list.append((xh+x1, yh+y1))
                if ptk == 0: # palm root (wrist)
                    hand_root_ = int(xh+x1), int(yh+y1)
                if ptk == 4: # thumb tip
                    thumb_ = int(xh+x1), int(yh+y1)
                if ptk == 8: # index fingertip
                    index_ = int(xh+x1), int(yh+y1)
                if vis:
                    if ptk == 0: # draw the wrist keypoint
                        cv2.circle(img, (int(xh+x1), int(yh+y1)), 9, (250,60,255), -1)
                        cv2.circle(img, (int(xh+x1), int(yh+y1)), 5, (20,180,255), -1)
                        cv2.circle(img, (int(xh+x1), int(yh+y1)), 4, (255,50,60), -1)
                        cv2.circle(img, (int(xh+x1), int(yh+y1)), 3, (25,160,255), -1)

            # 2D finger angles
            angle_list = hand_angle(pts_hand)
            gesture_ = h_gesture(img, angle_list)
            cv2.putText(img, 'Gesture: {}'.format(gesture_), (int(x_min+2), y2+21), cv2.FONT_HERSHEY_COMPLEX, 0.45, (255, 195, 0), 5)
            cv2.putText(img, 'Gesture: {}'.format(gesture_), (int(x_min+2), y2+21), cv2.FONT_HERSHEY_COMPLEX, 0.45, (0, 150, 255))
            gesture_list.append(gesture_)
            # print(angle_list)

            # midpoint between the index fingertip and the thumb tip
            choose_pt = (int((index_[0]+thumb_[0])/2), int((index_[1]+thumb_[1])/2))
            # palm center
            plam_list = np.array(plam_list)
            plam_center = (np.mean(plam_list[:,0]), np.mean(plam_list[:,1]))

            # draw the palm-center circle
            if vis:
                cv2.circle(img, (int(plam_center[0]), int(plam_center[1])), 12, (25,160,255), 9)
                cv2.circle(img, (int(plam_center[0]), int(plam_center[1])), 12, (255,190,30), 2)

            # Euclidean distance between the index fingertip and the thumb tip
            dst = np.sqrt(np.square(thumb_[0]-index_[0]) + np.square(thumb_[1]-index_[1]))
            # angle between thumb and index finger relative to the palm root:
            angle_ = vector_2d_angle((thumb_[0]-hand_root_[0], thumb_[1]-hand_root_[1]), (index_[0]-hand_root_[0], index_[1]-hand_root_[1]))
            # click state: are thumb and index finger pinched together?
            click_state = False
            if dst < dst_thr and angle_ < angle_thr: # two constraints decide a click: fingertip distance and the angle relative to the palm root
                click_state = True
                cv2.circle(img, choose_pt, 6, (0,0,255), -1) # draw the click point, used for the trajectory
                cv2.circle(img, choose_pt, 2, (255,220,30), -1)
                cv2.putText(img, 'Click {:.1f} {:.1f}'.format(dst, angle_), (int(x_min+2), y2-1), cv2.FONT_HERSHEY_COMPLEX, 0.45, (255, 0, 0), 5)
                cv2.putText(img, 'Click {:.1f} {:.1f}'.format(dst, angle_), (int(x_min+2), y2-1), cv2.FONT_HERSHEY_COMPLEX, 0.45, (0, 0, 255))
            else:
                click_state = False
                cv2.putText(img, 'NONE {:.1f} {:.1f}'.format(dst, angle_), (int(x_min+2), y2-1), cv2.FONT_HERSHEY_COMPLEX, 0.45, (255, 0, 0), 5)
                cv2.putText(img, 'NONE {:.1f} {:.1f}'.format(dst, angle_), (int(x_min+2), y2-1), cv2.FONT_HERSHEY_COMPLEX, 0.45, (0, 0, 255))

            #----------------------------------------------------
            # per-hand click counter, used to decide when the click state is stable
            if id_ not in hands_click_dict.keys():
                hands_click_dict[id_] = 0
            if click_state == False:
                hands_click_dict[id_] = 0
            elif click_state == True:
                hands_click_dict[id_] += 1
            #----------------------------------------------------
            hands_list.append((pts_hand, (x1,y1), plam_center, {"id":id_, "click":click_state, "click_cnt":hands_click_dict[id_], "choose_pt":choose_pt})) # local 21-keypoint coords, global bbox top-left, global palm center

            #--------------------- draw the hand keypoint skeleton
            draw_bd_handpose_c(img, pts_hand, x1, y1, 2)
            '''
            shape_ = []
            shape_.append(plam_center)
            for i in range(18):
                if i in [0,5,9,13,17]:
                    shape_.append((pts_hand[str(i)]["x"]+x1, pts_hand[str(i)]["y"]+y1))
            reprojectdst, euler_angle, translation_vec = get_hand_pose(np.array(shape_).reshape((len(shape_),2)), img, vis=False)
            x_, y_, z_ = translation_vec[0][0], translation_vec[1][0], translation_vec[2][0]
            cv2.putText(img, 'x,y,z:({:.1f},{:.1f},{:.1f})'.format(x_,y_,z_), (int(x_min+2),y2+19), cv2.FONT_HERSHEY_COMPLEX, 0.45, (255,10,10), 5)
            cv2.putText(img, 'x,y,z:({:.1f},{:.1f},{:.1f})'.format(x_,y_,z_), (int(x_min+2),y2+19), cv2.FONT_HERSHEY_COMPLEX, 0.45, (185, 255, 55))
            '''
    return hands_list, gesture_list
'''
Decide whether to trigger the recognition audio
'''
def audio_recognize(img, algo_img, img_reco_crop, object_recognize_model, info_dict, double_en_pts, flag_click_stable):
    # start recognition
    reco_msg = None
    if (len(double_en_pts) == 2) and (flag_click_stable == True):
        x1, y1 = int(double_en_pts[0][0]), int(double_en_pts[0][1])
        x2, y2 = int(double_en_pts[1][0]), int(double_en_pts[1][1])
        x1_, y1_, x2_, y2_ = min(x1,x2), min(y1,y2), max(x1,x2), max(y1,y2)
        x1_ = int(np.clip(x1_, 0, algo_img.shape[1]-1))
        x2_ = int(np.clip(x2_, 0, algo_img.shape[1]-1))
        y1_ = int(np.clip(y1_, 0, algo_img.shape[0]-1))
        y2_ = int(np.clip(y2_, 0, algo_img.shape[0]-1))
        #----------------------------------------- run recognition once, on the rising edge of the selection signal
        #----------------------------------------- object_recognize_model todo
        if info_dict["double_en_pts"] == False:
            if ((x2_-x1_) > 0) and ((y2_-y1_) > 0):
                img_reco_crop = cv2.resize(algo_img[y1_:y2_, x1_:x2_, :], (130,130)) # region to recognize
                print("------------------------>>> start object_recognize_model ")
                max_index, label_msg, score_ = object_recognize_model.predict(img_reco_crop)
                reco_msg = {"index":max_index, "label_msg":label_msg, "score":score_}
                # print(" audio_recognize function ->> reco_msg : ",reco_msg)
                info_dict["reco_msg"] = reco_msg
            if img_reco_crop is not None: # draw the recognized crop in the bottom-left corner
                h, w, _ = img.shape
                img[(h-131):(h-1), (w-131):(w-1), :] = img_reco_crop
                cv2.rectangle(img, (w-131,h-131), (w-1,h-1), (225,66,66), 5)
            #-----------------------------------------
            info_dict["double_en_pts"] = True
        cv2.rectangle(img, (x1_,y1_), (x2_,y2_), (225,255,62), 5)
        cv2.rectangle(img, (x1_,y1_), (x2_,y2_), (100,180,255), 2)
        cv2.putText(img, ' recognize{}'.format(""), (x1_,y1_), cv2.FONT_HERSHEY_COMPLEX, 0.65, (255, 0, 0), 5)
        cv2.putText(img, ' recognize{}'.format(""), (x1_,y1_), cv2.FONT_HERSHEY_COMPLEX, 0.65, (0,33,255), 1)
    else:
        info_dict["double_en_pts"] = False
    return img_reco_crop, reco_msg
'''
Check whether each hand's click state is stable (the click "charge ring"), i.e.
whether the click has persisted past the configured threshold.
Note: the larger charge_cycle_step is, the shorter the trigger time; with a step
of 32 the ring fills after ceil(360/32) = 12 consecutive click frames.
'''
def judge_click_stabel(img, handpose_list, charge_cycle_step=32):
    flag_click_stable = True
    for i in range(len(handpose_list)):
        _,_,_,dict_ = handpose_list[i]
        id_ = dict_["id"]
        click_cnt_ = dict_["click_cnt"]
        pt_ = dict_["choose_pt"]
        if click_cnt_ > 0:
            # print("double_en_pts --->>> id : {}, click_cnt : <{}> , pt : {}".format(id_,click_cnt_,pt_))
            # draw the charge ring; the fill angle grows by charge_cycle_step degrees per click frame
            fill_cnt = int(click_cnt_*charge_cycle_step)
            if fill_cnt < 360:
                cv2.ellipse(img, pt_, (16,16), 0, 0, fill_cnt, (255,255,0), 2)
            else:
                cv2.ellipse(img, pt_, (16,16), 0, 0, fill_cnt, (0,150,255), 4)
            # ring not yet full -> not stable
            if fill_cnt < 360:
                flag_click_stable = False
        else:
            flag_click_stable = False
    return flag_click_stable
'''
Draw the thumb/index midpoint trajectory while the hand is in the click state
'''
def draw_click_lines(img, gesture_lines_dict, vis=False):
    # draw the trajectories recorded while click was enabled
    if vis:
        for id_ in gesture_lines_dict.keys():
            if len(gesture_lines_dict[id_]["pts"]) >= 2:
                for i in range(len(gesture_lines_dict[id_]["pts"])-1):
                    pt1 = gesture_lines_dict[id_]["pts"][i]
                    pt2 = gesture_lines_dict[id_]["pts"][i+1]
                    cv2.line(img, pt1, pt2, gesture_lines_dict[id_]["line_color"], 2, cv2.LINE_AA)
#-*-coding:utf-8-*-
'''
DpCas-Light
|||| ||||| |||| || |||||||
|| || || || || || |||| || ||
|| || || || || || || || ||
|| || || || || ||====|| ||||||
|| || ||||| || || ||======|| ||
|| || || || || || || || ||
|||| || |||| || || |||||||
/--------------------- HandPose_X ---------------------/
'''
import copy
def compute_iou_tk(rec1, rec2):
    """
    computing IoU
    :param rec1: (y0, x0, y1, x1), which reflects
            (top, left, bottom, right)
    :param rec2: (y0, x0, y1, x1)
    :return: scalar value of IoU
    """
    # computing area of each rectangle
    S_rec1 = (rec1[2] - rec1[0]) * (rec1[3] - rec1[1])
    S_rec2 = (rec2[2] - rec2[0]) * (rec2[3] - rec2[1])

    # computing the sum area
    sum_area = S_rec1 + S_rec2

    # find each edge of the intersecting rectangle
    left_line = max(rec1[1], rec2[1])
    right_line = min(rec1[3], rec2[3])
    top_line = max(rec1[0], rec2[0])
    bottom_line = min(rec1[2], rec2[2])

    # judge whether there is an intersection
    if left_line >= right_line or top_line >= bottom_line:
        return 0.
    else:
        intersect = (right_line - left_line) * (bottom_line - top_line)
        return (intersect / (sum_area - intersect)) * 1.0
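
# Worked example (sketch): two 10x10 boxes offset by 5 pixels overlap in a 5x5
# patch, so IoU = 25 / (100 + 100 - 25) = 1/7 ≈ 0.143. Note the (y0, x0, y1, x1)
# argument order.
#
#   assert compute_iou_tk((0, 0, 10, 10), (0, 0, 10, 10)) == 1.0
#   assert abs(compute_iou_tk((0, 0, 10, 10), (5, 5, 15, 15)) - 25/175) < 1e-9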
def tracking_bbox(data, hand_dict, index, iou_thr=0.5):
    track_index = index
    reg_dict = {}
    Flag_ = True if hand_dict else False
    if Flag_ == False:
        # print("------------------->>. False")
        for bbox in data:
            x_min, y_min, x_max, y_max, score = bbox
            reg_dict[track_index] = (x_min, y_min, x_max, y_max, score, 0., 1, 1)
            track_index += 1
            if track_index >= 65535:
                track_index = 0
    else:
        # print("------------------->>. True ")
        for bbox in data:
            xa0, ya0, xa1, ya1, score = bbox
            is_track = False
            for k_ in hand_dict.keys():
                xb0, yb0, xb1, yb1, _, _, cnt_, bbox_stanbel_cnt = hand_dict[k_]
                iou_ = compute_iou_tk((ya0,xa0,ya1,xa1), (yb0,xb0,yb1,xb1))
                # print((ya0,xa0,ya1,xa1),(yb0,xb0,yb1,xb1))
                # print("iou : ",iou_)
                if iou_ > iou_thr: # target tracked successfully
                    UI_CNT = 1
                    if iou_ > 0.888:
                        UI_CNT = bbox_stanbel_cnt + 1
                    reg_dict[k_] = (xa0, ya0, xa1, ya1, score, iou_, cnt_ + 1, UI_CNT)
                    is_track = True
                    # print("is_track : " ,cnt_ + 1)
            if is_track == False: # new target
                reg_dict[track_index] = (xa0, ya0, xa1, ya1, score, 0., 1, 1)
                track_index += 1
                if track_index >= 65535: # reset the index on overflow
                    track_index = 0
                if track_index >= 100:
                    track_index = 0

    hand_dict = copy.deepcopy(reg_dict)
    # print("a:",hand_dict)
    return hand_dict, track_index
# Project 2: Gesture Recognition (2D keypoint angle-constraint method)
## Project Overview
## Project Setup
### 1. Software
* Python 3.7
* PyTorch >= 1.5.1
* opencv-python
* playsound
### 2. Hardware
* An ordinary USB color (RGB) webcam
## Related Projects
### 1. Hand detection (yolo_v3)
* Project: https://codechina.csdn.net/EricLee/yolo_v3
* [Pretrained model download (Baidu Netdisk, password: 7mk0)](https://pan.baidu.com/s/1hqzvz0MeFX0EdpWXUV6aFg)
* You can also substitute a detection model of your own.
### 2. Hand 21-keypoint regression (handpose_x)
* Project: https://codechina.csdn.net/EricLee/handpose_x
* [Pretrained model download (Baidu Netdisk, password: 99f3)](https://pan.baidu.com/s/1Ur6Ikp31XGEuA3hQjYzwIw)
## Usage
### Project 1: Gesture interaction (local version)
### 1. Download the hand-detection model and the 21-keypoint regression model.
### 2. Make sure the camera is connected.
### 3. Open the configuration file lib/hand_lib/cfg/[handpose.cfg](https://codechina.csdn.net/EricLee/dpcas/-/blob/master/lib/hand_lib/cfg/handpose.cfg) and set the parameters below; read them carefully (usually only the model paths and model architectures need changing):
```
detect_model_path=./latest_416.pt # path of the hand-detection model
detect_model_arch=yolo_v3 # detection model type: yolo or yolo-tiny
yolo_anchor_scale=1.0 # yolo anchor scale, default 1
detect_conf_thres=0.5 # detection confidence threshold
detect_nms_thres=0.45 # detection NMS threshold
handpose_x_model_path=./ReXNetV1-size-256-wingloss102-0.1063.pth # path of the 21-keypoint regression model
handpose_x_model_arch=rexnetv1 # regression model architecture
camera_id = 0 # camera ID, normally 0; verify it if not
vis_gesture_lines = True # True: visualize the click trajectory, False: do not visualize it
charge_cycle_step = 32 # click-stability counter step (the click "charge ring")
```
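
The cfg file is read by `parse_data_cfg` from `utils/utils.py` (included at the end of this commit). A minimal loading sketch, assuming the gesture cfg shipped in this commit at `./lib/gesture_lib/cfg/handpose.cfg`:
```
from utils.utils import parse_data_cfg

config = parse_data_cfg("./lib/gesture_lib/cfg/handpose.cfg")
print(config["detect_model_path"])         # "./hand_416-2021-01-29.pt"
print(float(config["detect_conf_thres"]))  # numeric values come back as strings
print(config["vis_gesture_lines"])         # the literals True/False are parsed to bool
```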
### 4. From the repository root, run: python main.py
## Contact
* E-mail: 305141918@qq.com
# --- cfg parser (imported above as utils.utils.parse_data_cfg) ---
import os

def parse_data_cfg(path):
    """Parses the data configuration file."""
    print('data_cfg : ', path)
    options = dict()
    with open(path, 'r', encoding='UTF-8') as fp:
        lines = fp.readlines()
    for line in lines:
        line = line.strip()
        if line == '' or line.startswith('#'):
            continue
        key, value = line.split('=', 1) # split on the first '=' only, so values may contain '='
        value = value.split('#')[0]     # tolerate inline comments, as in the README example above
        if value.strip() == "False":
            options[key.strip()] = False
        elif value.strip() == "True":
            options[key.strip()] = True
        else:
            options[key.strip()] = value.strip()
    return options
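
# Example (sketch): given a cfg containing
#   camera_id = 0
#   vis_gesture_lines = True
# parse_data_cfg returns {"camera_id": "0", "vis_gesture_lines": True} --
# everything except the literals True/False stays a string, which is why the
# callers above wrap values in int()/float().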
@@ -36,15 +36,16 @@ if __name__ == '__main__':
     demo_logo()
     parser = argparse.ArgumentParser(description= " DpCas : << Deep Learning Componentized Application System >> ")
     parser.add_argument('-app', type=int, default = 0,
-        help = "handpose_x:0, wyw2s:1, face_bioassay:2, video_ana:3, face_pay:4, drive:5") # select the App example
+        help = "handpose_x:0, gesture:1 ,wyw2s:2, face_bioassay:3, video_ana:4, face_pay:5, drive:6") # select the App example

     app_dict = {
         0:"handpose_x",
-        1:"wyw2s",
-        2:"face_bioassay",
-        3:"video_ana",
-        4:"face_pay",
-        5:"drive"}
+        1:"gesture",
+        2:"wyw2s",
+        3:"face_bioassay",
+        4:"video_ana",
+        5:"face_pay",
+        6:"drive"}

     args = parser.parse_args() # parse the arguments
@@ -54,25 +55,29 @@ if __name__ == '__main__':
         from applications.handpose_local_app import main_handpose_x # load the handpose app
         cfg_file = "./lib/hand_lib/cfg/handpose.cfg"
         main_handpose_x(cfg_file) # run the handpose app
+    elif APP_P == "gesture": # gesture recognition
+        from applications.gesture_local_app import main_gesture_x # load the gesture app
+        cfg_file = "./lib/gesture_lib/cfg/handpose.cfg"
+        main_gesture_x(cfg_file) # run the gesture app
     elif APP_P == "wyw2s": # face-recognition-based video clipping
         from applications.wyw2s_local_app import main_wyw2s # load the "who you want 2 see" app
         cfg_file = "./lib/wyw2s_lib/cfg/wyw2s.cfg"
-        main_wyw2s(video_path = "./video/f1.mp4",cfg_file = cfg_file) # run the "who you want 2 see" app
+        main_wyw2s(video_path = "./video/rw_11.mp4",cfg_file = cfg_file) # run the "who you want 2 see" app
     elif APP_P == "face_bioassay":
         from applications.face_bioassay_local_app import main_face_bioassay # face_bioassay app
         cfg_file = "./lib/face_bioassay_lib/cfg/face_bioassay.cfg"
-        main_face_bioassay(video_path = "./video/f1.mp4",cfg_file = cfg_file) # run the face_bioassay app
+        video_path = "./video/f1.mp4"
+        main_face_bioassay(video_path = 0,cfg_file = cfg_file) # run the face_bioassay app
     # elif APP_P == "video_ana":
     #     from applications.VideoAnalysis_app import main_VideoAnalysis # load the video_analysis app
     #     main_VideoAnalysis(video_path = "./video/f3.mp4") # run the video_analysis app
     #
-    # elif APP_P == "face_pay":
-    #     cfg_file = "./lib/facepay_lib/cfg/facepay.cfg"
-    #     from applications.FacePay_local_app import main_facePay # load the face pay app
-    #     main_facePay(video_path = 0,cfg_file = cfg_file) # run the face pay app
+    elif APP_P == "face_pay":
+        cfg_file = "./lib/facepay_lib/cfg/facepay.cfg"
+        from applications.FacePay_local_app import main_facePay # load the face pay app
+        main_facePay(video_path = 0,cfg_file = cfg_file) # run the face pay app
     #
     # elif APP_P == "drive":
     #     from applications.DangerousDriveWarning_local_app import main_DangerousDriveWarning # load the dangerous-driving warning app